id
stringlengths
33
40
content
stringlengths
662
61.5k
max_stars_repo_path
stringlengths
85
97
bugs-dot-jar_data_MATH-1045_c979a6f0
--- BugID: MATH-1045 Summary: EigenDecomposition.Solver should consider tiny values 0 for purposes of determining singularity Description: |- EigenDecomposition.Solver tests for singularity by comparing eigenvalues to 0 for exact equality. Elsewhere in the class and in the code, of course, very small values are considered 0. This causes the solver to consider some singular matrices as non-singular. The patch here includes a test as well showing the behavior -- the matrix is clearly singular but isn't considered as such since one eigenvalue are ~1e-14 rather than exactly 0. (What I am not sure of is whether we should really be evaluating the *norm* of the imaginary eigenvalues rather than real/imag components separately. But the javadoc says the solver only supports real eigenvalues anyhow, so it's kind of moot since imag=0 for all eigenvalues.) diff --git a/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java b/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java index 4d4d794..2bb8fd7 100644 --- a/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java +++ b/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java @@ -513,8 +513,12 @@ public class EigenDecomposition { * @return true if the decomposed matrix is non-singular. */ public boolean isNonSingular() { - // The eigenvalues are sorted by size, descending - double largestEigenvalueNorm = eigenvalueNorm(0); + double largestEigenvalueNorm = 0.0; + // Looping over all values (in case they are not sorted in decreasing + // order of their norm). + for (int i = 0; i < realEigenvalues.length; ++i) { + largestEigenvalueNorm = FastMath.max(largestEigenvalueNorm, eigenvalueNorm(i)); + } // Corner case: zero matrix, all exactly 0 eigenvalues if (largestEigenvalueNorm == 0.0) { return false;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1045_c979a6f0.diff
bugs-dot-jar_data_MATH-812_6eb46555
--- BugID: MATH-812 Summary: In RealVector, dotProduct and outerProduct return wrong results due to misuse of sparse iterators Description: |- In class {{RealVector}}, the default implementation of {{RealMatrix outerProduct(RealVector)}} uses sparse iterators on the entries of the two vectors. The rationale behind this is that {{0d * x == 0d}} is {{true}} for all {{double x}}. This assumption is in fact false, since {{0d * NaN == NaN}}. Proposed fix is to loop through *all* entries of both vectors. This can have a significant impact on the CPU cost, but robustness should probably be preferred over speed in default implementations. Same issue occurs with {{double dotProduct(RealVector)}}, which uses sparse iterators for {{this}} only. Another option would be to through an exception if {{isNaN()}} is {{true}}, in which case caching could be used for both {{isNaN()}} and {{isInfinite()}}. diff --git a/src/main/java/org/apache/commons/math3/linear/ArrayRealVector.java b/src/main/java/org/apache/commons/math3/linear/ArrayRealVector.java index ee67e03..ef02a75 100644 --- a/src/main/java/org/apache/commons/math3/linear/ArrayRealVector.java +++ b/src/main/java/org/apache/commons/math3/linear/ArrayRealVector.java @@ -455,16 +455,8 @@ public class ArrayRealVector extends RealVector implements Serializable { dot += data[i] * vData[i]; } return dot; - } else { - checkVectorDimensions(v); - double dot = 0; - Iterator<Entry> it = v.sparseIterator(); - while (it.hasNext()) { - final Entry e = it.next(); - dot += data[e.getIndex()] * e.getValue(); - } - return dot; } + return super.dotProduct(v); } /** {@inheritDoc} */ diff --git a/src/main/java/org/apache/commons/math3/linear/OpenMapRealVector.java b/src/main/java/org/apache/commons/math3/linear/OpenMapRealVector.java index 49e22c2..b3e04be 100644 --- a/src/main/java/org/apache/commons/math3/linear/OpenMapRealVector.java +++ b/src/main/java/org/apache/commons/math3/linear/OpenMapRealVector.java @@ -304,38 +304,6 @@ public class 
OpenMapRealVector extends SparseRealVector return new OpenMapRealVector(this); } - /** - * Optimized method to compute the dot product with an OpenMapRealVector. - * It iterates over the smallest of the two. - * - * @param v Cector to compute the dot product with. - * @return the dot product of {@code this} and {@code v}. - * @throws org.apache.commons.math3.exception.DimensionMismatchException - * if the dimensions do not match. - */ - public double dotProduct(OpenMapRealVector v) { - checkVectorDimensions(v.getDimension()); - boolean thisIsSmaller = entries.size() < v.entries.size(); - Iterator iter = thisIsSmaller ? entries.iterator() : v.entries.iterator(); - OpenIntToDoubleHashMap larger = thisIsSmaller ? v.entries : entries; - double d = 0; - while(iter.hasNext()) { - iter.advance(); - d += iter.value() * larger.get(iter.key()); - } - return d; - } - - /** {@inheritDoc} */ - @Override - public double dotProduct(RealVector v) { - if(v instanceof OpenMapRealVector) { - return dotProduct((OpenMapRealVector)v); - } else { - return super.dotProduct(v); - } - } - /** {@inheritDoc} */ @Override public OpenMapRealVector ebeDivide(RealVector v) { diff --git a/src/main/java/org/apache/commons/math3/linear/RealVector.java b/src/main/java/org/apache/commons/math3/linear/RealVector.java index 5dc8ddf..89611c3 100644 --- a/src/main/java/org/apache/commons/math3/linear/RealVector.java +++ b/src/main/java/org/apache/commons/math3/linear/RealVector.java @@ -303,10 +303,9 @@ public abstract class RealVector { public double dotProduct(RealVector v) { checkVectorDimensions(v); double d = 0; - Iterator<Entry> it = sparseIterator(); - while (it.hasNext()) { - final Entry e = it.next(); - d += e.getValue() * v.getEntry(e.getIndex()); + final int n = getDimension(); + for (int i = 0; i < n; i++) { + d += getEntry(i) * v.getEntry(i); } return d; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-812_6eb46555.diff
bugs-dot-jar_data_MATH-1148_4080feff
--- BugID: MATH-1148 Summary: MonotoneChain handling of collinear points drops low points in a near-column Description: "This code\n{code}\nval points = List(\n new Vector2D(\n 16.078200000000184,\n \ -36.52519999989808\n ),\n new Vector2D(\n 19.164300000000186,\n -36.52519999989808\n \ ),\n new Vector2D(\n 19.1643,\n -25.28136477910407\n ),\n new Vector2D(\n \ 19.1643,\n -17.678400000004157\n )\n)\nnew hull.MonotoneChain().generate(points.asJava)\n{code}\n\nresults in the exception:\n{code}\norg.apache.commons.math3.exception.ConvergenceException: illegal state: convergence failed\n\tat org.apache.commons.math3.geometry.euclidean.twod.hull.AbstractConvexHullGenerator2D.generate(AbstractConvexHullGenerator2D.java:106)\n\tat org.apache.commons.math3.geometry.euclidean.twod.hull.MonotoneChain.generate(MonotoneChain.java:50)\n\tat .<init>(<console>:13)\n\tat .<clinit>(<console>)\n\tat .<init>(<console>:11)\n\tat .<clinit>(<console>)\n\tat $print(<console>)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n\tat java.lang.reflect.Method.invoke(Method.java:597)\n\tat scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:704)\n\tat scala.tools.nsc.interpreter.IMain$Request$$anonfun$14.apply(IMain.scala:920)\n\tat scala.tools.nsc.interpreter.Line$$anonfun$1.apply$mcV$sp(Line.scala:43)\n\tat scala.tools.nsc.io.package$$anon$2.run(package.scala:25)\n\tat java.lang.Thread.run(Thread.java:662)\n{code}\n\nThis will be tricky to fix. 
Not only is the point (19.164300000000186, -36.52519999989808) is being dropped incorrectly, but any point dropped in one hull risks creating a kink when combined with the other hull.\n\n" diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/ConvexHull2D.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/ConvexHull2D.java index 1e0eec3..5d9734b 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/ConvexHull2D.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/ConvexHull2D.java @@ -28,8 +28,8 @@ import org.apache.commons.math3.geometry.euclidean.twod.Vector2D; import org.apache.commons.math3.geometry.hull.ConvexHull; import org.apache.commons.math3.geometry.partitioning.Region; import org.apache.commons.math3.geometry.partitioning.RegionFactory; -import org.apache.commons.math3.util.FastMath; import org.apache.commons.math3.util.MathArrays; +import org.apache.commons.math3.util.Precision; /** * This class represents a convex hull in an two-dimensional euclidean space. @@ -62,12 +62,14 @@ public class ConvexHull2D implements ConvexHull<Euclidean2D, Vector2D>, Serializ public ConvexHull2D(final Vector2D[] vertices, final double tolerance) throws MathIllegalArgumentException { + // assign tolerance as it will be used by the isConvex method + this.tolerance = tolerance; + if (!isConvex(vertices)) { throw new MathIllegalArgumentException(LocalizedFormats.NOT_CONVEX); } this.vertices = vertices.clone(); - this.tolerance = tolerance; } /** @@ -80,7 +82,7 @@ public class ConvexHull2D implements ConvexHull<Euclidean2D, Vector2D>, Serializ return true; } - double sign = 0.0; + int sign = 0; for (int i = 0; i < hullVertices.length; i++) { final Vector2D p1 = hullVertices[i == 0 ? 
hullVertices.length - 1 : i - 1]; final Vector2D p2 = hullVertices[i]; @@ -89,14 +91,14 @@ public class ConvexHull2D implements ConvexHull<Euclidean2D, Vector2D>, Serializ final Vector2D d1 = p2.subtract(p1); final Vector2D d2 = p3.subtract(p2); - final double cross = FastMath.signum(MathArrays.linearCombination( d1.getX(), d2.getY(), - -d1.getY(), d2.getX())); + final double crossProduct = MathArrays.linearCombination(d1.getX(), d2.getY(), -d1.getY(), d2.getX()); + final int cmp = Precision.compareTo(crossProduct, 0.0, tolerance); // in case of collinear points the cross product will be zero - if (cross != 0.0) { - if (sign != 0.0 && cross != sign) { + if (cmp != 0.0) { + if (sign != 0.0 && cmp != sign) { return false; } - sign = cross; + sign = cmp; } } diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/MonotoneChain.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/MonotoneChain.java index 6e56fc6..a811dda 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/MonotoneChain.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/hull/MonotoneChain.java @@ -25,6 +25,7 @@ import java.util.List; import org.apache.commons.math3.geometry.euclidean.twod.Line; import org.apache.commons.math3.geometry.euclidean.twod.Vector2D; import org.apache.commons.math3.util.FastMath; +import org.apache.commons.math3.util.Precision; /** * Implements Andrew's monotone chain method to generate the convex hull of a finite set of @@ -80,9 +81,12 @@ public class MonotoneChain extends AbstractConvexHullGenerator2D { // sort the points in increasing order on the x-axis Collections.sort(pointsSortedByXAxis, new Comparator<Vector2D>() { public int compare(final Vector2D o1, final Vector2D o2) { - final int diff = (int) FastMath.signum(o1.getX() - o2.getX()); + final double tolerance = getTolerance(); + // need to take the tolerance value into account, otherwise collinear points + // will not be 
handled correctly when building the upper/lower hull + final int diff = Precision.compareTo(o1.getX(), o2.getX(), tolerance); if (diff == 0) { - return (int) FastMath.signum(o1.getY() - o2.getY()); + return Precision.compareTo(o1.getY(), o2.getY(), tolerance); } else { return diff; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1148_4080feff.diff
bugs-dot-jar_data_MATH-699_b2e24119
--- BugID: MATH-699 Summary: inverseCumulativeDistribution fails with cumulative distribution having a plateau Description: This bug report follows MATH-692. The attached unit test fails. As required by the definition in MATH-692, the lower-bound of the interval on which the cdf is constant should be returned. This is not so at the moment. diff --git a/src/main/java/org/apache/commons/math/distribution/AbstractRealDistribution.java b/src/main/java/org/apache/commons/math/distribution/AbstractRealDistribution.java index a37d2d2..d83837d 100644 --- a/src/main/java/org/apache/commons/math/distribution/AbstractRealDistribution.java +++ b/src/main/java/org/apache/commons/math/distribution/AbstractRealDistribution.java @@ -20,7 +20,6 @@ import java.io.Serializable; import org.apache.commons.math.analysis.UnivariateFunction; import org.apache.commons.math.analysis.solvers.UnivariateRealSolverUtils; -import org.apache.commons.math.exception.MathInternalError; import org.apache.commons.math.exception.NotStrictlyPositiveException; import org.apache.commons.math.exception.NumberIsTooLargeException; import org.apache.commons.math.exception.OutOfRangeException; @@ -69,50 +68,80 @@ implements RealDistribution, Serializable { /** {@inheritDoc} */ public double inverseCumulativeProbability(final double p) throws OutOfRangeException { - if (p < 0.0 || p > 1.0) { throw new OutOfRangeException(p, 0, 1); } - // by default, do simple root finding using bracketing and default solver. - // subclasses can override if there is a better method. 
- UnivariateFunction rootFindingFunction = - new UnivariateFunction() { - public double value(double x) { + double lowerBound = getSupportLowerBound(); + if (p == 0.0) { + return lowerBound; + } + + double upperBound = getSupportUpperBound(); + if (p == 1.0) { + return upperBound; + } + + final double mu = getNumericalMean(); + final double sig = FastMath.sqrt(getNumericalVariance()); + final boolean chebyshevApplies; + chebyshevApplies = !(Double.isInfinite(mu) || Double.isNaN(mu) || + Double.isInfinite(sig) || Double.isNaN(sig)); + + if (lowerBound == Double.NEGATIVE_INFINITY) { + if (chebyshevApplies) { + lowerBound = mu - sig * FastMath.sqrt((1. - p) / p); + } else { + lowerBound = -1.0; + while (cumulativeProbability(lowerBound) >= p) { + lowerBound *= 2.0; + } + } + } + + if (upperBound == Double.POSITIVE_INFINITY) { + if (chebyshevApplies) { + upperBound = mu + sig * FastMath.sqrt(p / (1. - p)); + } else { + upperBound = 1.0; + while (cumulativeProbability(upperBound) < p) { + upperBound *= 2.0; + } + } + } + + final UnivariateFunction toSolve = new UnivariateFunction() { + + public double value(final double x) { return cumulativeProbability(x) - p; } }; - // Try to bracket root, test domain endpoints if this fails - double lowerBound = getDomainLowerBound(p); - double upperBound = getDomainUpperBound(p); - double[] bracket = null; - try { - bracket = UnivariateRealSolverUtils.bracket( - rootFindingFunction, getInitialDomain(p), - lowerBound, upperBound); - } catch (NumberIsTooLargeException ex) { - /* - * Check domain endpoints to see if one gives value that is within - * the default solver's defaultAbsoluteAccuracy of 0 (will be the - * case if density has bounded support and p is 0 or 1). 
- */ - if (FastMath.abs(rootFindingFunction.value(lowerBound)) < getSolverAbsoluteAccuracy()) { - return lowerBound; - } - if (FastMath.abs(rootFindingFunction.value(upperBound)) < getSolverAbsoluteAccuracy()) { - return upperBound; + double x = UnivariateRealSolverUtils.solve(toSolve, + lowerBound, + upperBound, + getSolverAbsoluteAccuracy()); + + if (!isSupportConnected()) { + /* Test for plateau. */ + final double dx = getSolverAbsoluteAccuracy(); + if (x - dx >= getSupportLowerBound()) { + double px = cumulativeProbability(x); + if (cumulativeProbability(x - dx) == px) { + upperBound = x; + while (upperBound - lowerBound > dx) { + final double midPoint = 0.5 * (lowerBound + upperBound); + if (cumulativeProbability(midPoint) < px) { + lowerBound = midPoint; + } else { + upperBound = midPoint; + } + } + return upperBound; + } } - // Failed bracket convergence was not because of corner solution - throw new MathInternalError(ex); } - - // find root - double root = UnivariateRealSolverUtils.solve(rootFindingFunction, - // override getSolverAbsoluteAccuracy() to use a Brent solver with - // absolute accuracy different from the default. - bracket[0],bracket[1], getSolverAbsoluteAccuracy()); - return root; + return x; } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-699_b2e24119.diff
bugs-dot-jar_data_MATH-718_3a08bfa6
--- BugID: MATH-718 Summary: inverseCumulativeProbability of BinomialDistribution returns wrong value for large trials. Description: |- The inverseCumulativeProbability method of the BinomialDistributionImpl class returns wrong value for large trials. Following code will be reproduce the problem. {{System.out.println(new BinomialDistributionImpl(1000000, 0.5).inverseCumulativeProbability(0.5));}} This returns 499525, though it should be 499999. I'm not sure how it should be fixed, but the cause is that the cumulativeProbability method returns Infinity, not NaN. As the result the checkedCumulativeProbability method doesn't work as expected. diff --git a/src/main/java/org/apache/commons/math3/util/ContinuedFraction.java b/src/main/java/org/apache/commons/math3/util/ContinuedFraction.java index 35fe916..253f3c5 100644 --- a/src/main/java/org/apache/commons/math3/util/ContinuedFraction.java +++ b/src/main/java/org/apache/commons/math3/util/ContinuedFraction.java @@ -101,19 +101,18 @@ public abstract class ContinuedFraction { * </p> * * <p> - * The implementation of this method is based on equations 14-17 of: + * The implementation of this method is based on the modified Lentz algorithm as described + * on page 18 ff. in: * <ul> * <li> - * Eric W. Weisstein. "Continued Fraction." From MathWorld--A Wolfram Web - * Resource. <a target="_blank" - * href="http://mathworld.wolfram.com/ContinuedFraction.html"> - * http://mathworld.wolfram.com/ContinuedFraction.html</a> + * I. J. Thompson, A. R. Barnett. "Coulomb and Bessel Functions of Complex Arguments and Order." + * <a target="_blank" href="http://www.fresco.org.uk/papers/Thompson-JCP64p490.pdf"> + * http://www.fresco.org.uk/papers/Thompson-JCP64p490.pdf</a> * </li> * </ul> - * The recurrence relationship defined in those equations can result in - * very large intermediate results which can result in numerical overflow. 
- * As a means to combat these overflow conditions, the intermediate results - * are scaled whenever they threaten to become numerically unstable.</p> + * Note: the implementation uses the terms a<sub>i</sub> and b<sub>i</sub> as defined in + * <a href="http://mathworld.wolfram.com/ContinuedFraction.html">Continued Fraction / MathWorld</a>. + * </p> * * @param x the evaluation point. * @param epsilon maximum error allowed. @@ -122,72 +121,53 @@ public abstract class ContinuedFraction { * @throws ConvergenceException if the algorithm fails to converge. */ public double evaluate(double x, double epsilon, int maxIterations) { - double p0 = 1.0; - double p1 = getA(0, x); - double q0 = 0.0; - double q1 = 1.0; - double c = p1 / q1; - int n = 0; - double relativeError = Double.MAX_VALUE; - while (n < maxIterations && relativeError > epsilon) { - ++n; - double a = getA(n, x); - double b = getB(n, x); - double p2 = a * p1 + b * p0; - double q2 = a * q1 + b * q0; - boolean infinite = false; - if (Double.isInfinite(p2) || Double.isInfinite(q2)) { - /* - * Need to scale. Try successive powers of the larger of a or b - * up to 5th power. Throw ConvergenceException if one or both - * of p2, q2 still overflow. 
- */ - double scaleFactor = 1d; - double lastScaleFactor = 1d; - final int maxPower = 5; - final double scale = FastMath.max(a,b); - if (scale <= 0) { // Can't scale - throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE, - x); - } - infinite = true; - for (int i = 0; i < maxPower; i++) { - lastScaleFactor = scaleFactor; - scaleFactor *= scale; - if (a != 0.0 && a > b) { - p2 = p1 / lastScaleFactor + (b / scaleFactor * p0); - q2 = q1 / lastScaleFactor + (b / scaleFactor * q0); - } else if (b != 0) { - p2 = (a / scaleFactor * p1) + p0 / lastScaleFactor; - q2 = (a / scaleFactor * q1) + q0 / lastScaleFactor; - } - infinite = Double.isInfinite(p2) || Double.isInfinite(q2); - if (!infinite) { - break; - } - } - } + final double small = 1e-50; + double hPrev = getA(0, x); + + // use the value of small as epsilon criteria for zero checks + if (Precision.equals(hPrev, 0.0, small)) { + hPrev = small; + } - if (infinite) { - // Scaling failed - throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE, - x); + int n = 1; + double dPrev = 0.0; + double cPrev = hPrev; + double hN = hPrev; + + while (n < maxIterations) { + final double a = getA(n, x); + final double b = getB(n, x); + + double dN = a + b * dPrev; + if (Precision.equals(dN, 0.0, small)) { + dN = small; + } + double cN = a + b / cPrev; + if (Precision.equals(cN, 0.0, small)) { + cN = small; } - double r = p2 / q2; + dN = 1 / dN; + final double deltaN = cN * dN; + hN = hPrev * deltaN; - if (Double.isNaN(r)) { + if (Double.isInfinite(hN)) { + throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE, + x); + } + if (Double.isNaN(hN)) { throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_NAN_DIVERGENCE, x); } - relativeError = FastMath.abs(r / c - 1.0); - - // prepare for next iteration - c = p2 / q2; - p0 = p1; - p1 = p2; - q0 = q1; - q1 = q2; + + if (FastMath.abs(deltaN - 1.0) < epsilon) { + break; + } + + 
dPrev = dN; + cPrev = cN; + hPrev = hN; + n++; } if (n >= maxIterations) { @@ -195,6 +175,7 @@ public abstract class ContinuedFraction { maxIterations, x); } - return c; + return hN; } + }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-718_3a08bfa6.diff
bugs-dot-jar_data_MATH-293_59a0da9c
--- BugID: MATH-293 Summary: Matrix's "OutOfBoundException" in SimplexSolver Description: "Hi all,\nThis bug is somehow related to incident MATH-286, but not necessarily...\n\nLet's say I have an LP and I solve it using SimplexSolver. Then I create a second LP similar to the first one, but with \"stronger\" constraints. The second LP has the following properties:\n* the only point in the feasible region for the second LP is the solution returned for the first LP\n* the solution returned for the first LP is also the (only possible) solution to the second LP\n\nThis shows the problem:\n\n{code:borderStyle=solid}\nLinearObjectiveFunction f = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );\nCollection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();\nconstraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));\nconstraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 30.0));\nconstraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, 10.0));\nconstraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, 10.0));\nconstraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, 10.0));\n\nRealPointValuePair solution = new SimplexSolver().optimize(f, constraints, GoalType.MAXIMIZE, true);\n\ndouble valA = 0.8 * solution.getPoint()[0] + 0.2 * solution.getPoint()[1];\ndouble valB = 0.7 * solution.getPoint()[2] + 0.3 * solution.getPoint()[3];\ndouble valC = 0.4 * solution.getPoint()[4] + 0.6 * solution.getPoint()[5];\n\nf = new LinearObjectiveFunction(new double[] { 0.8, 0.2, 0.7, 0.3, 0.4, 0.6}, 0 );\nconstraints = new ArrayList<LinearConstraint>();\nconstraints.add(new LinearConstraint(new double[] { 1, 0, 1, 0, 1, 0 }, Relationship.EQ, 30.0));\nconstraints.add(new LinearConstraint(new double[] { 0, 1, 0, 1, 0, 1 }, Relationship.EQ, 
30.0));\nconstraints.add(new LinearConstraint(new double[] { 0.8, 0.2, 0.0, 0.0, 0.0, 0.0 }, Relationship.GEQ, valA));\nconstraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.7, 0.3, 0.0, 0.0 }, Relationship.GEQ, valB));\nconstraints.add(new LinearConstraint(new double[] { 0.0, 0.0, 0.0, 0.0, 0.4, 0.6 }, Relationship.GEQ, valC));\n\nsolution = new SimplexSolver().optimize(f, constraints, GoalType.MAXIMIZE, true);\n{code} \n\nInstead of returning the solution, SimplexSolver throws an Exception:\n\n{noformat} Exception in thread \"main\" org.apache.commons.math.linear.MatrixIndexException: no entry at indices (0, 7) in a 6x7 matrix\n\tat org.apache.commons.math.linear.Array2DRowRealMatrix.getEntry(Array2DRowRealMatrix.java:356)\n\tat org.apache.commons.math.optimization.linear.SimplexTableau.getEntry(SimplexTableau.java:408)\n\tat org.apache.commons.math.optimization.linear.SimplexTableau.getBasicRow(SimplexTableau.java:258)\n\tat org.apache.commons.math.optimization.linear.SimplexTableau.getSolution(SimplexTableau.java:336)\n\tat org.apache.commons.math.optimization.linear.SimplexSolver.doOptimize(SimplexSolver.java:182)\n\tat org.apache.commons.math.optimization.linear.AbstractLinearOptimizer.optimize(AbstractLinearOptimizer.java:106){noformat} \n\nI was too optimistic with the bug MATH-286 ;-)" diff --git a/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java index 1cb45cc..23dc90c 100644 --- a/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java +++ b/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java @@ -74,6 +74,9 @@ class SimplexTableau implements Serializable { /** Whether to restrict the variables to non-negative values. 
*/ private final boolean restrictToNonNegative; + /** The variables each column represents */ + private final List<String> columnLabels = new ArrayList<String>(); + /** Simple tableau. */ private transient RealMatrix tableau; @@ -113,6 +116,27 @@ class SimplexTableau implements Serializable { this.numArtificialVariables = getConstraintTypeCounts(Relationship.EQ) + getConstraintTypeCounts(Relationship.GEQ); this.tableau = createTableau(goalType == GoalType.MAXIMIZE); + initializeColumnLabels(); + } + + protected void initializeColumnLabels() { + if (getNumObjectiveFunctions() == 2) { + columnLabels.add("W"); + } + columnLabels.add("Z"); + for (int i = 0; i < getOriginalNumDecisionVariables(); i++) { + columnLabels.add("x" + i); + } + if (!restrictToNonNegative) { + columnLabels.add("x-"); + } + for (int i = 0; i < getNumSlackVariables(); i++) { + columnLabels.add("s" + i); + } + for (int i = 0; i < getNumArtificialVariables(); i++) { + columnLabels.add("a" + i); + } + columnLabels.add("RHS"); } /** @@ -301,6 +325,10 @@ class SimplexTableau implements Serializable { } } + for (int i = columnsToDrop.size() - 1; i >= 0; i--) { + columnLabels.remove((int) columnsToDrop.get(i)); + } + this.tableau = new Array2DRowRealMatrix(matrix); this.numArtificialVariables = 0; } @@ -332,12 +360,19 @@ class SimplexTableau implements Serializable { * @return current solution */ protected RealPointValuePair getSolution() { - double[] coefficients = new double[getOriginalNumDecisionVariables()]; - Integer negativeVarBasicRow = getBasicRow(getNegativeDecisionVariableOffset()); + int negativeVarColumn = columnLabels.indexOf("x-"); + Integer negativeVarBasicRow = negativeVarColumn > 0 ? getBasicRow(negativeVarColumn) : null; double mostNegative = negativeVarBasicRow == null ? 
0 : getEntry(negativeVarBasicRow, getRhsOffset()); + Set<Integer> basicRows = new HashSet<Integer>(); + double[] coefficients = new double[getOriginalNumDecisionVariables()]; for (int i = 0; i < coefficients.length; i++) { - Integer basicRow = getBasicRow(getNumObjectiveFunctions() + i); + int colIndex = columnLabels.indexOf("x" + i); + if (colIndex < 0) { + coefficients[i] = 0; + continue; + } + Integer basicRow = getBasicRow(colIndex); if (basicRows.contains(basicRow)) { // if multiple variables can take a given value // then we choose the first and set the rest equal to 0 @@ -349,7 +384,7 @@ class SimplexTableau implements Serializable { (restrictToNonNegative ? 0 : mostNegative); } } - return new RealPointValuePair(coefficients, f.getValue(coefficients)); + return new RealPointValuePair(coefficients, f.getValue(coefficients)); } /** @@ -443,15 +478,6 @@ class SimplexTableau implements Serializable { } /** - * Returns the offset of the extra decision variable added when there is a - * negative decision variable in the original problem. - * @return the offset of x- - */ - protected final int getNegativeDecisionVariableOffset() { - return getNumObjectiveFunctions() + getOriginalNumDecisionVariables(); - } - - /** * Get the number of decision variables. * <p> * If variables are not restricted to positive values, this will include 1 @@ -471,7 +497,7 @@ class SimplexTableau implements Serializable { * @see #getNumDecisionVariables() */ protected final int getOriginalNumDecisionVariables() { - return restrictToNonNegative ? numDecisionVariables : numDecisionVariables - 1; + return f.getCoefficients().getDimension(); } /** @@ -562,4 +588,5 @@ class SimplexTableau implements Serializable { ois.defaultReadObject(); MatrixUtils.deserializeRealMatrix(this, "tableau", ois); } + }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-293_59a0da9c.diff
bugs-dot-jar_data_MATH-744_8a83581e
--- BugID: MATH-744 Summary: BigFraction.doubleValue() returns Double.NaN for large numerators or denominators Description: |- The current implementation of doubleValue() divides numerator.doubleValue() / denominator.doubleValue(). BigInteger.doubleValue() fails for any number greater than Double.MAX_VALUE. So if the user has 308-digit numerator or denominator, the resulting quotient fails, even in cases where the result would be well inside Double's range. I have a patch to fix it, if I can figure out how to attach it here I will. diff --git a/src/main/java/org/apache/commons/math/fraction/BigFraction.java b/src/main/java/org/apache/commons/math/fraction/BigFraction.java index e93d76d..a6672e4 100644 --- a/src/main/java/org/apache/commons/math/fraction/BigFraction.java +++ b/src/main/java/org/apache/commons/math/fraction/BigFraction.java @@ -682,7 +682,16 @@ public class BigFraction */ @Override public double doubleValue() { - return numerator.doubleValue() / denominator.doubleValue(); + double result = numerator.doubleValue() / denominator.doubleValue(); + if (Double.isNaN(result)) { + // Numerator and/or denominator must be out of range: + // Calculate how far to shift them to put them in range. + int shift = Math.max(numerator.bitLength(), + denominator.bitLength()) - Double.MAX_EXPONENT; + result = numerator.shiftRight(shift).doubleValue() / + denominator.shiftRight(shift).doubleValue(); + } + return result; } /** @@ -726,7 +735,16 @@ public class BigFraction */ @Override public float floatValue() { - return numerator.floatValue() / denominator.floatValue(); + float result = numerator.floatValue() / denominator.floatValue(); + if (Double.isNaN(result)) { + // Numerator and/or denominator must be out of range: + // Calculate how far to shift them to put them in range. 
+ int shift = Math.max(numerator.bitLength(), + denominator.bitLength()) - Float.MAX_EXPONENT; + result = numerator.shiftRight(shift).floatValue() / + denominator.shiftRight(shift).floatValue(); + } + return result; } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-744_8a83581e.diff
bugs-dot-jar_data_MATH-1103_a6f96306
--- BugID: MATH-1103 Summary: Convergence Checker Fixes Description: diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java index 9240856..c17c870 100644 --- a/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java +++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java @@ -197,9 +197,7 @@ public class GaussNewtonOptimizer implements LeastSquaresOptimizer { throw new NullArgumentException(); } - final int nC = lsp.getParameterSize(); - - final RealVector currentPoint = lsp.getStart(); + RealVector currentPoint = lsp.getStart(); // iterate until convergence is reached Evaluation current = null; @@ -227,9 +225,7 @@ public class GaussNewtonOptimizer implements LeastSquaresOptimizer { // solve the linearized least squares problem final RealVector dX = this.decomposition.solve(weightedJacobian, currentResiduals); // update the estimated parameters - for (int i = 0; i < nC; ++i) { - currentPoint.setEntry(i, currentPoint.getEntry(i) + dX.getEntry(i)); - } + currentPoint = currentPoint.add(dX); } } diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java index 0853d03..16ac659 100644 --- a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java +++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java @@ -332,7 +332,8 @@ public class LeastSquaresFactory { value.getFirst(), value.getSecond(), this.target, - point); + // copy so optimizer can change point without changing our instance + point.copy()); } /** diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java 
b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java index 1cd4e51..5f0527c 100644 --- a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java +++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java @@ -328,7 +328,7 @@ public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer { // Evaluate the function at the starting point and calculate its norm. evaluationCounter.incrementCount(); //value will be reassigned in the loop - Evaluation current = problem.evaluate(new ArrayRealVector(currentPoint, false)); + Evaluation current = problem.evaluate(new ArrayRealVector(currentPoint)); double[] currentResiduals = current.getResiduals().toArray(); double currentCost = current.getCost(); @@ -445,7 +445,7 @@ public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer { // Evaluate the function at x + p and calculate its norm. evaluationCounter.incrementCount(); - current = problem.evaluate(new ArrayRealVector(currentPoint,false)); + current = problem.evaluate(new ArrayRealVector(currentPoint)); currentResiduals = current.getResiduals().toArray(); currentCost = current.getCost();
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1103_a6f96306.diff
bugs-dot-jar_data_MATH-559_fc409e88
--- BugID: MATH-559 Summary: Remove "assert" from "MathUtils.equals" Description: 'The "assert" in methods "equals(double,double,int)" and "equals(float,float,int)" is not necessary. ' diff --git a/src/main/java/org/apache/commons/math/util/MathUtils.java b/src/main/java/org/apache/commons/math/util/MathUtils.java index 06c87c6..e061a35 100644 --- a/src/main/java/org/apache/commons/math/util/MathUtils.java +++ b/src/main/java/org/apache/commons/math/util/MathUtils.java @@ -515,10 +515,6 @@ public final class MathUtils { * @since 2.2 */ public static boolean equals(float x, float y, int maxUlps) { - // Check that "maxUlps" is non-negative and small enough so that - // NaN won't compare as equal to anything (except another NaN). - assert maxUlps > 0 && maxUlps < NAN_GAP; - int xInt = Float.floatToIntBits(x); int yInt = Float.floatToIntBits(y); @@ -675,10 +671,6 @@ public final class MathUtils { * point values between {@code x} and {@code y}. */ public static boolean equals(double x, double y, int maxUlps) { - // Check that "maxUlps" is non-negative and small enough so that - // NaN won't compare as equal to anything (except another NaN). - assert maxUlps > 0 && maxUlps < NAN_GAP; - long xInt = Double.doubleToLongBits(x); long yInt = Double.doubleToLongBits(y);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-559_fc409e88.diff
bugs-dot-jar_data_MATH-950_424cbd20
--- BugID: MATH-950 Summary: event state not updated if an unrelated event triggers a RESET_STATE during ODE integration Description: | When an ODE solver manages several different event types, there are some unwanted side effects. If one event handler asks for a RESET_STATE (for integration state) when its eventOccurred method is called, the other event handlers that did not trigger an event in the same step are not updated correctly, due to an early return. As a result, when the next step is processed with a reset integration state, the forgotten event still refer to the start date of the previous state. This implies that when these event handlers will be checked for In some cases, the function defining an event g(double t, double[] y) is called with state parameters y that are completely wrong. In one case when the y array should have contained values between -1 and +1, one function call got values up to 1.0e20. The attached file reproduces the problem. diff --git a/src/main/java/org/apache/commons/math3/ode/AbstractIntegrator.java b/src/main/java/org/apache/commons/math3/ode/AbstractIntegrator.java index 00827db..6e0237f 100644 --- a/src/main/java/org/apache/commons/math3/ode/AbstractIntegrator.java +++ b/src/main/java/org/apache/commons/math3/ode/AbstractIntegrator.java @@ -338,11 +338,15 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { interpolator.setSoftPreviousTime(previousT); interpolator.setSoftCurrentTime(eventT); - // trigger the event + // get state at event time interpolator.setInterpolatedTime(eventT); final double[] eventY = interpolator.getInterpolatedState().clone(); - currentEvent.stepAccepted(eventT, eventY); - isLastStep = currentEvent.stop(); + + // advance all event states to current time + for (final EventState state : eventsStates) { + state.stepAccepted(eventT, eventY); + isLastStep = isLastStep || state.stop(); + } // handle the first part of the step, up to the event for (final StepHandler handler : 
stepHandlers) { @@ -352,21 +356,19 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { if (isLastStep) { // the event asked to stop integration System.arraycopy(eventY, 0, y, 0, y.length); - for (final EventState remaining : occuringEvents) { - remaining.stepAccepted(eventT, eventY); - } return eventT; } - if (currentEvent.reset(eventT, eventY)) { + boolean needReset = false; + for (final EventState state : eventsStates) { + needReset = needReset || state.reset(eventT, eventY); + } + if (needReset) { // some event handler has triggered changes that // invalidate the derivatives, we need to recompute them System.arraycopy(eventY, 0, y, 0, y.length); computeDerivatives(eventT, y, yDot); resetOccurred = true; - for (final EventState remaining : occuringEvents) { - remaining.stepAccepted(eventT, eventY); - } return eventT; } @@ -383,6 +385,7 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { } + // last part of the step, after the last event interpolator.setInterpolatedTime(currentT); final double[] currentY = interpolator.getInterpolatedState(); for (final EventState state : eventsStates) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-950_424cbd20.diff
bugs-dot-jar_data_MATH-309_0596e314
--- BugID: MATH-309 Summary: nextExponential parameter check bug - patch supplied Description: "Index: src/main/java/org/apache/commons/math/random/RandomDataImpl.java\n===================================================================\n--- src/main/java/org/apache/commons/math/random/RandomDataImpl.java\t(revision 830102)\n+++ src/main/java/org/apache/commons/math/random/RandomDataImpl.java\t(working copy)\n@@ -462,7 +462,7 @@\n * @return the random Exponential value\n */\n public double nextExponential(double mean) {\n- if (mean < 0.0) {\n+ if (mean <= 0.0) {\n throw MathRuntimeException.createIllegalArgumentException(\n \ \"mean must be positive ({0})\", mean);\n }" diff --git a/src/main/java/org/apache/commons/math/random/RandomDataImpl.java b/src/main/java/org/apache/commons/math/random/RandomDataImpl.java index d5d2474..a44720f 100644 --- a/src/main/java/org/apache/commons/math/random/RandomDataImpl.java +++ b/src/main/java/org/apache/commons/math/random/RandomDataImpl.java @@ -457,12 +457,11 @@ public class RandomDataImpl implements RandomData, Serializable { * uniform deviates. * </p> * - * @param mean - * the mean of the distribution + * @param mean the mean of the distribution * @return the random Exponential value */ public double nextExponential(double mean) { - if (mean < 0.0) { + if (mean <= 0.0) { throw MathRuntimeException.createIllegalArgumentException( "mean must be positive ({0})", mean); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-309_0596e314.diff
bugs-dot-jar_data_MATH-369_f4a4464b
--- BugID: MATH-369 Summary: BisectionSolver.solve(final UnivariateRealFunction f, double min, double max, double initial) throws NullPointerException Description: "Method \n\n BisectionSolver.solve(final UnivariateRealFunction f, double min, double max, double initial) \n\ninvokes \n\n BisectionSolver.solve(double min, double max) \n\nwhich throws NullPointerException, as member variable\n\n UnivariateRealSolverImpl.f \n\nis null.\n\nInstead the method:\n\n BisectionSolver.solve(final UnivariateRealFunction f, double min, double max)\n\nshould be called.\n\nSteps to reproduce:\n\ninvoke:\n\n \ new BisectionSolver().solve(someUnivariateFunctionImpl, 0.0, 1.0, 0.5);\n\nNullPointerException will be thrown.\n\n\n" diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/BisectionSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/BisectionSolver.java index 3f66927..180caef 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/BisectionSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/BisectionSolver.java @@ -69,7 +69,7 @@ public class BisectionSolver extends UnivariateRealSolverImpl { /** {@inheritDoc} */ public double solve(final UnivariateRealFunction f, double min, double max, double initial) throws MaxIterationsExceededException, FunctionEvaluationException { - return solve(min, max); + return solve(f, min, max); } /** {@inheritDoc} */
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-369_f4a4464b.diff
bugs-dot-jar_data_MATH-779_ebadb558
--- BugID: MATH-779 Summary: ListPopulation Iterator allows you to remove chromosomes from the population. Description: Calling the iterator method of ListPopulation returns an iterator of the protected modifiable list. Before returning the iterator we should wrap it in an unmodifiable list. diff --git a/src/main/java/org/apache/commons/math3/genetics/ListPopulation.java b/src/main/java/org/apache/commons/math3/genetics/ListPopulation.java index 8fe3ffe..3f3919c 100644 --- a/src/main/java/org/apache/commons/math3/genetics/ListPopulation.java +++ b/src/main/java/org/apache/commons/math3/genetics/ListPopulation.java @@ -200,11 +200,12 @@ public abstract class ListPopulation implements Population { } /** - * Chromosome list iterator + * Returns an iterator over the unmodifiable list of chromosomes. + * <p>Any call to {@link Iterator#remove()} will result in a {@link UnsupportedOperationException}.</p> * * @return chromosome iterator */ public Iterator<Chromosome> iterator() { - return chromosomes.iterator(); + return getChromosomes().iterator(); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-779_ebadb558.diff
bugs-dot-jar_data_MATH-1252_09fe956a
--- BugID: MATH-1252 Summary: ResizableDoubleArray does not work with double array of size 1 Description: | When attempting to create a ResizableDoubleArray with an array of a single value (e.g. {4.0}), the constructor creates an internal array with 16 entries that are all 0.0 Bug looks like it might be on line 414 of ResizableDoubleArray.java: if (data != null && data.length > 1) { diff --git a/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java b/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java index 6377141..7d9547c 100644 --- a/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java +++ b/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java @@ -287,7 +287,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { numElements = 0; startIndex = 0; - if (data != null && data.length > 1) { + if (data != null && data.length > 0) { addElements(data); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1252_09fe956a.diff
bugs-dot-jar_data_MATH-1300_1d635088
--- BugID: MATH-1300 Summary: BitsStreamGenerator#nextBytes(byte[]) is wrong Description: |- Sequential calls to the BitsStreamGenerator#nextBytes(byte[]) must generate the same sequence of bytes, no matter by chunks of which size it was divided. This is also how java.util.Random#nextBytes(byte[]) works. When nextBytes(byte[]) is called with a bytes array of length multiple of 4 it makes one unneeded call to next(int) method. This is wrong and produces an inconsistent behavior of classes like MersenneTwister. I made a new implementation of the BitsStreamGenerator#nextBytes(byte[]) see attached code. diff --git a/src/main/java/org/apache/commons/math4/random/AbstractRandomGenerator.java b/src/main/java/org/apache/commons/math4/random/AbstractRandomGenerator.java index 1b9cead..173f4ee 100644 --- a/src/main/java/org/apache/commons/math4/random/AbstractRandomGenerator.java +++ b/src/main/java/org/apache/commons/math4/random/AbstractRandomGenerator.java @@ -109,16 +109,18 @@ public abstract class AbstractRandomGenerator implements RandomGenerator { public void nextBytes(byte[] bytes) { int bytesOut = 0; while (bytesOut < bytes.length) { - int randInt = nextInt(); - for (int i = 0; i < 3; i++) { - if ( i > 0) { - randInt >>= 8; - } - bytes[bytesOut++] = (byte) randInt; - if (bytesOut == bytes.length) { - return; - } - } + int randInt = nextInt(); + for (int i = 0; i < 3; i++) { + if (i > 0) { + randInt >>= 8; + } + } + if (bytesOut < bytes.length) { + bytes[bytesOut++] = (byte) randInt; + if (bytesOut == bytes.length) { + return; + } + } } } diff --git a/src/main/java/org/apache/commons/math4/random/BitsStreamGenerator.java b/src/main/java/org/apache/commons/math4/random/BitsStreamGenerator.java index 81968e2..7c89b60 100644 --- a/src/main/java/org/apache/commons/math4/random/BitsStreamGenerator.java +++ b/src/main/java/org/apache/commons/math4/random/BitsStreamGenerator.java @@ -82,10 +82,12 @@ public abstract class BitsStreamGenerator bytes[i + 3] = (byte) ((random >> 
24) & 0xff); i += 4; } - int random = next(32); - while (i < bytes.length) { - bytes[i++] = (byte) (random & 0xff); - random >>= 8; + if (i < bytes.length) { + int random = next(32); + while (i < bytes.length) { + bytes[i++] = (byte) (random & 0xff); + random >>= 8; + } } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1300_1d635088.diff
bugs-dot-jar_data_MATH-288_38983e82
--- BugID: MATH-288 Summary: SimplexSolver not working as expected 2 Description: |- SimplexSolver didn't find the optimal solution. Program for Lpsolve: ===================== /* Objective function */ max: 7 a 3 b; /* Constraints */ R1: +3 a -5 c <= 0; R2: +2 a -5 d <= 0; R3: +2 b -5 c <= 0; R4: +3 b -5 d <= 0; R5: +3 a +2 b <= 5; R6: +2 a +3 b <= 5; /* Variable bounds */ a <= 1; b <= 1; ===================== Results(correct): a = 1, b = 1, value = 10 Program for SimplexSolve: ===================== LinearObjectiveFunction kritFcia = new LinearObjectiveFunction(new double[]{7, 3, 0, 0}, 0); Collection<LinearConstraint> podmienky = new ArrayList<LinearConstraint>(); podmienky.add(new LinearConstraint(new double[]{1, 0, 0, 0}, Relationship.LEQ, 1)); podmienky.add(new LinearConstraint(new double[]{0, 1, 0, 0}, Relationship.LEQ, 1)); podmienky.add(new LinearConstraint(new double[]{3, 0, -5, 0}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{2, 0, 0, -5}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{0, 2, -5, 0}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{0, 3, 0, -5}, Relationship.LEQ, 0)); podmienky.add(new LinearConstraint(new double[]{3, 2, 0, 0}, Relationship.LEQ, 5)); podmienky.add(new LinearConstraint(new double[]{2, 3, 0, 0}, Relationship.LEQ, 5)); SimplexSolver solver = new SimplexSolver(); RealPointValuePair result = solver.optimize(kritFcia, podmienky, GoalType.MAXIMIZE, true); ===================== Results(incorrect): a = 1, b = 0.5, value = 8.5 P.S. I used the latest software from the repository (including MATH-286 fix). 
diff --git a/src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java b/src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java index 16d3bae..60a1b3a 100644 --- a/src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java +++ b/src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java @@ -77,9 +77,10 @@ public class SimplexSolver extends AbstractLinearOptimizer { double minRatio = Double.MAX_VALUE; Integer minRatioPos = null; for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) { - double rhs = tableau.getEntry(i, tableau.getWidth() - 1); - if (MathUtils.compareTo(tableau.getEntry(i, col), 0, epsilon) >= 0) { - double ratio = rhs / tableau.getEntry(i, col); + final double rhs = tableau.getEntry(i, tableau.getWidth() - 1); + final double entry = tableau.getEntry(i, col); + if (MathUtils.compareTo(entry, 0, epsilon) > 0) { + final double ratio = rhs / entry; if (ratio < minRatio) { minRatio = ratio; minRatioPos = i;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-288_38983e82.diff
bugs-dot-jar_data_MATH-848_ad252a8c
--- BugID: MATH-848 Summary: EigenDecomposition fails for certain matrices Description: | The Schurtransformation of the following matrix fails, which is a preliminary step for the Eigendecomposition: RealMatrix m = MatrixUtils.DEFAULT_FORMAT.parse("{{0.184944928,-0.0646971046,0.0774755812,-0.0969651755,-0.0692648806,0.3282344352,-0.0177423074,0.206313634},{-0.0742700134,-0.028906303,-0.001726946,-0.0375550146,-0.0487737922,-0.2616837868,-0.0821201295,-0.2530000167},{0.2549910127,0.0995733692,-0.0009718388,0.0149282808,0.1791878897,-0.0823182816,0.0582629256,0.3219545182},{-0.0694747557,-0.1880649148,-0.2740630911,0.0720096468,-0.1800836914,-0.3518996425,0.2486747833,0.6257938167},{0.0536360918,-0.1339297778,0.2241579764,-0.0195327484,-0.0054103808,0.0347564518,0.5120802482,-0.0329902864},{-0.5933332356,-0.2488721082,0.2357173629,0.0177285473,0.0856630593,-0.35671263,-0.1600668126,-0.1010899621},{-0.0514349819,-0.0854319435,0.1125050061,0.006345356,-0.2250000688,-0.220934309,0.1964623477,-0.1512329924},{0.0197395947,-0.1997170581,-0.1425959019,-0.274947791,-0.0969467073,0.060368852,-0.2826905192,0.1794315473}}"); diff --git a/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java b/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java index 584505d..a2ea88e 100644 --- a/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java +++ b/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java @@ -140,69 +140,66 @@ class SchurTransformer { // Outer loop over eigenvalue index int iteration = 0; - int idx = n - 1; - while (idx >= 0) { + int iu = n - 1; + while (iu >= 0) { // Look for single small sub-diagonal element - final int l = findSmallSubDiagonalElement(idx, norm); + final int il = findSmallSubDiagonalElement(iu, norm); // Check for convergence - if (l == idx) { + if (il == iu) { // One root found - matrixT[idx][idx] = matrixT[idx][idx] + shift.exShift; - idx--; + matrixT[iu][iu] = matrixT[iu][iu] + shift.exShift; + 
iu--; iteration = 0; - } else if (l == idx - 1) { + } else if (il == iu - 1) { // Two roots found - shift.w = matrixT[idx][idx - 1] * matrixT[idx - 1][idx]; - double p = (matrixT[idx - 1][idx - 1] - matrixT[idx][idx]) / 2.0; - double q = p * p + shift.w; - double z = FastMath.sqrt(FastMath.abs(q)); - matrixT[idx][idx] = matrixT[idx][idx] + shift.exShift; - matrixT[idx - 1][idx - 1] = matrixT[idx - 1][idx - 1] + shift.exShift; - shift.x = matrixT[idx][idx]; + double p = (matrixT[iu - 1][iu - 1] - matrixT[iu][iu]) / 2.0; + double q = p * p + matrixT[iu][iu - 1] * matrixT[iu - 1][iu]; + matrixT[iu][iu] += shift.exShift; + matrixT[iu - 1][iu - 1] += shift.exShift; if (q >= 0) { + double z = FastMath.sqrt(FastMath.abs(q)); if (p >= 0) { z = p + z; } else { z = p - z; } - shift.x = matrixT[idx][idx - 1]; - double s = FastMath.abs(shift.x) + FastMath.abs(z); - p = shift.x / s; + final double x = matrixT[iu][iu - 1]; + final double s = FastMath.abs(x) + FastMath.abs(z); + p = x / s; q = z / s; - double r = FastMath.sqrt(p * p + q * q); + final double r = FastMath.sqrt(p * p + q * q); p = p / r; q = q / r; // Row modification - for (int j = idx - 1; j < n; j++) { - z = matrixT[idx - 1][j]; - matrixT[idx - 1][j] = q * z + p * matrixT[idx][j]; - matrixT[idx][j] = q * matrixT[idx][j] - p * z; + for (int j = iu - 1; j < n; j++) { + z = matrixT[iu - 1][j]; + matrixT[iu - 1][j] = q * z + p * matrixT[iu][j]; + matrixT[iu][j] = q * matrixT[iu][j] - p * z; } // Column modification - for (int i = 0; i <= idx; i++) { - z = matrixT[i][idx - 1]; - matrixT[i][idx - 1] = q * z + p * matrixT[i][idx]; - matrixT[i][idx] = q * matrixT[i][idx] - p * z; + for (int i = 0; i <= iu; i++) { + z = matrixT[i][iu - 1]; + matrixT[i][iu - 1] = q * z + p * matrixT[i][iu]; + matrixT[i][iu] = q * matrixT[i][iu] - p * z; } // Accumulate transformations for (int i = 0; i <= n - 1; i++) { - z = matrixP[i][idx - 1]; - matrixP[i][idx - 1] = q * z + p * matrixP[i][idx]; - matrixP[i][idx] = q * matrixP[i][idx] - 
p * z; + z = matrixP[i][iu - 1]; + matrixP[i][iu - 1] = q * z + p * matrixP[i][iu]; + matrixP[i][iu] = q * matrixP[i][iu] - p * z; } } - idx -= 2; + iu -= 2; iteration = 0; } else { // No convergence yet - - computeShift(l, idx, iteration, shift); + computeShift(il, iu, iteration, shift); // stop transformation after too many iterations if (++iteration > MAX_ITERATIONS) { @@ -210,43 +207,11 @@ class SchurTransformer { MAX_ITERATIONS); } - // Look for two consecutive small sub-diagonal elements - int m = idx - 2; - // the initial houseHolder vector for the QR step final double[] hVec = new double[3]; - while (m >= l) { - double z = matrixT[m][m]; - hVec[2] = shift.x - z; - double s = shift.y - z; - hVec[0] = (hVec[2] * s - shift.w) / matrixT[m + 1][m] + matrixT[m][m + 1]; - hVec[1] = matrixT[m + 1][m + 1] - z - hVec[2] - s; - hVec[2] = matrixT[m + 2][m + 1]; - s = FastMath.abs(hVec[0]) + FastMath.abs(hVec[1]) + FastMath.abs(hVec[2]); - - if (m == l) { - break; - } - - for (int i = 0; i < hVec.length; i++) { - hVec[i] /= s; - } - - final double lhs = FastMath.abs(matrixT[m][m - 1]) * - (FastMath.abs(hVec[1]) + FastMath.abs(hVec[2])); - - final double rhs = FastMath.abs(hVec[0]) * - (FastMath.abs(matrixT[m - 1][m - 1]) + FastMath.abs(z) + - FastMath.abs(matrixT[m + 1][m + 1])); - - if (lhs < epsilon * rhs) { - break; - } - m--; - } - - performDoubleQRStep(l, m, idx, shift, hVec); + final int im = initQRStep(il, iu, shift, hVec); + performDoubleQRStep(il, im, iu, shift, hVec); } } } @@ -278,7 +243,7 @@ class SchurTransformer { int l = startIdx; while (l > 0) { double s = FastMath.abs(matrixT[l - 1][l - 1]) + FastMath.abs(matrixT[l][l]); - if (Precision.equals(s, 0.0, epsilon)) { + if (s == 0.0) { s = norm; } if (FastMath.abs(matrixT[l][l - 1]) < epsilon * s) { @@ -312,8 +277,9 @@ class SchurTransformer { for (int i = 0; i <= idx; i++) { matrixT[i][i] -= shift.x; } - double s = FastMath.abs(matrixT[idx][idx - 1]) + FastMath.abs(matrixT[idx - 1][idx - 2]); - shift.x = 
shift.y = 0.75 * s; + final double s = FastMath.abs(matrixT[idx][idx - 1]) + FastMath.abs(matrixT[idx - 1][idx - 2]); + shift.x = 0.75 * s; + shift.y = 0.75 * s; shift.w = -0.4375 * s * s; } @@ -321,7 +287,7 @@ class SchurTransformer { if (iteration == 30) { double s = (shift.y - shift.x) / 2.0; s = s * s + shift.w; - if (Precision.compareTo(s, 0.0d, epsilon) > 0) { + if (s > 0.0) { s = FastMath.sqrt(s); if (shift.y < shift.x) { s = -s; @@ -337,15 +303,53 @@ class SchurTransformer { } /** + * Initialize the householder vectors for the QR step. + * + * @param il the index of the small sub-diagonal element + * @param iu the current eigenvalue index + * @param shift shift information holder + * @param hVec the initial houseHolder vector + * @return the start index for the QR step + */ + private int initQRStep(int il, final int iu, final ShiftInfo shift, double[] hVec) { + // Look for two consecutive small sub-diagonal elements + int im = iu - 2; + while (im >= il) { + final double z = matrixT[im][im]; + final double r = shift.x - z; + double s = shift.y - z; + hVec[0] = (r * s - shift.w) / matrixT[im + 1][im] + matrixT[im][im + 1]; + hVec[1] = matrixT[im + 1][im + 1] - z - r - s; + hVec[2] = matrixT[im + 2][im + 1]; + + if (im == il) { + break; + } + + final double lhs = FastMath.abs(matrixT[im][im - 1]) * (FastMath.abs(hVec[1]) + FastMath.abs(hVec[2])); + final double rhs = FastMath.abs(hVec[0]) * (FastMath.abs(matrixT[im - 1][im - 1]) + + FastMath.abs(z) + + FastMath.abs(matrixT[im + 1][im + 1])); + + if (lhs < epsilon * rhs) { + break; + } + im--; + } + + return im; + } + + /** * Perform a double QR step involving rows l:idx and columns m:n * - * @param l the index of the small sub-diagonal element - * @param m the start index for the QR step - * @param idx the current eigenvalue index + * @param il the index of the small sub-diagonal element + * @param im the start index for the QR step + * @param iu the current eigenvalue index * @param shift shift information 
holder * @param hVec the initial houseHolder vector */ - private void performDoubleQRStep(final int l, final int m, final int idx, + private void performDoubleQRStep(final int il, final int im, final int iu, final ShiftInfo shift, final double[] hVec) { final int n = matrixT.length; @@ -353,9 +357,9 @@ class SchurTransformer { double q = hVec[1]; double r = hVec[2]; - for (int k = m; k <= idx - 1; k++) { - boolean notlast = k != idx - 1; - if (k != m) { + for (int k = im; k <= iu - 1; k++) { + boolean notlast = k != (iu - 1); + if (k != im) { p = matrixT[k][k - 1]; q = matrixT[k + 1][k - 1]; r = notlast ? matrixT[k + 2][k - 1] : 0.0; @@ -366,17 +370,17 @@ class SchurTransformer { r = r / shift.x; } } - if (Precision.equals(shift.x, 0.0, epsilon)) { + if (shift.x == 0.0) { break; } double s = FastMath.sqrt(p * p + q * q + r * r); if (p < 0.0) { s = -s; } - if (!Precision.equals(s, 0.0, epsilon)) { - if (k != m) { + if (s != 0.0) { + if (k != im) { matrixT[k][k - 1] = -s * shift.x; - } else if (l != m) { + } else if (il != im) { matrixT[k][k - 1] = -matrixT[k][k - 1]; } p = p + s; @@ -398,7 +402,7 @@ class SchurTransformer { } // Column modification - for (int i = 0; i <= FastMath.min(idx, k + 3); i++) { + for (int i = 0; i <= FastMath.min(iu, k + 3); i++) { p = shift.x * matrixT[i][k] + shift.y * matrixT[i][k + 1]; if (notlast) { p = p + z * matrixT[i][k + 2]; @@ -423,9 +427,9 @@ class SchurTransformer { } // k loop // clean up pollution due to round-off errors - for (int i = m+2; i <= idx; i++) { + for (int i = im + 2; i <= iu; i++) { matrixT[i][i-2] = 0.0; - if (i > m+2) { + if (i > im + 2) { matrixT[i][i-3] = 0.0; } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-848_ad252a8c.diff
bugs-dot-jar_data_MATH-1051_bda25b40
--- BugID: MATH-1051 Summary: EigenDecomposition may not converge for certain matrices Description: |- Jama-1.0.3 contains a bugfix for certain matrices where the original code goes into an infinite loop. The commons-math translations would throw a MaxCountExceededException, so fails to compute the eigen decomposition. Port the fix from jama to CM. diff --git a/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java b/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java index a2ea88e..b566de7 100644 --- a/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java +++ b/src/main/java/org/apache/commons/math3/linear/SchurTransformer.java @@ -364,14 +364,12 @@ class SchurTransformer { q = matrixT[k + 1][k - 1]; r = notlast ? matrixT[k + 2][k - 1] : 0.0; shift.x = FastMath.abs(p) + FastMath.abs(q) + FastMath.abs(r); - if (!Precision.equals(shift.x, 0.0, epsilon)) { - p = p / shift.x; - q = q / shift.x; - r = r / shift.x; + if (Precision.equals(shift.x, 0.0, epsilon)) { + continue; } - } - if (shift.x == 0.0) { - break; + p = p / shift.x; + q = q / shift.x; + r = r / shift.x; } double s = FastMath.sqrt(p * p + q * q + r * r); if (p < 0.0) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1051_bda25b40.diff
bugs-dot-jar_data_MATH-722_95d15eff
--- BugID: MATH-722 Summary: '[math] Complex Tanh for "big" numbers' Description: | Hi, In Complex.java the tanh is computed with the following formula: tanh(a + bi) = sinh(2a)/(cosh(2a)+cos(2b)) + [sin(2b)/(cosh(2a)+cos(2b))]i The problem that I'm finding is that as soon as "a" is a "big" number, both sinh(2a) and cosh(2a) are infinity and then the method tanh returns in the real part NaN (infinity/infinity) when it should return 1.0. Wouldn't it be appropiate to add something as in the FastMath library??: if (real>20.0){ return createComplex(1.0, 0.0); } if (real<-20.0){ return createComplex(-1.0, 0.0); } Best regards, JBB diff --git a/src/main/java/org/apache/commons/math/complex/Complex.java b/src/main/java/org/apache/commons/math/complex/Complex.java index 3822a89..ed76a29 100644 --- a/src/main/java/org/apache/commons/math/complex/Complex.java +++ b/src/main/java/org/apache/commons/math/complex/Complex.java @@ -993,8 +993,8 @@ public class Complex implements FieldElement<Complex>, Serializable { * </code> * </pre> * where the (real) functions on the right-hand side are - * {@link java.lang.Math#sin}, {@link java.lang.Math#cos}, - * {@link FastMath#cosh} and {@link FastMath#sinh}. + * {@link FastMath#sin}, {@link FastMath#cos}, {@link FastMath#cosh} and + * {@link FastMath#sinh}. * <br/> * Returns {@link Complex#NaN} if either real or imaginary part of the * input argument is {@code NaN}. 
@@ -1004,8 +1004,8 @@ public class Complex implements FieldElement<Complex>, Serializable { * <pre> * Examples: * <code> - * tan(1 &plusmn; INFINITY i) = 0 + NaN i - * tan(&plusmn;INFINITY + i) = NaN + NaN i + * tan(a &plusmn; INFINITY i) = 0 &plusmn; i + * tan(&plusmn;INFINITY + bi) = NaN + NaN i * tan(&plusmn;INFINITY &plusmn; INFINITY i) = NaN + NaN i * tan(&plusmn;&pi;/2 + 0 i) = &plusmn;INFINITY + NaN i * </code> @@ -1015,9 +1015,15 @@ public class Complex implements FieldElement<Complex>, Serializable { * @since 1.2 */ public Complex tan() { - if (isNaN) { + if (isNaN || Double.isInfinite(real)) { return NaN; } + if (imaginary > 20.0) { + return createComplex(0.0, 1.0); + } + if (imaginary < -20.0) { + return createComplex(0.0, -1.0); + } double real2 = 2.0 * real; double imaginary2 = 2.0 * imaginary; @@ -1038,8 +1044,8 @@ public class Complex implements FieldElement<Complex>, Serializable { * </code> * </pre> * where the (real) functions on the right-hand side are - * {@link java.lang.Math#sin}, {@link java.lang.Math#cos}, - * {@link FastMath#cosh} and {@link FastMath#sinh}. + * {@link FastMath#sin}, {@link FastMath#cos}, {@link FastMath#cosh} and + * {@link FastMath#sinh}. * <br/> * Returns {@link Complex#NaN} if either real or imaginary part of the * input argument is {@code NaN}. 
@@ -1049,8 +1055,8 @@ public class Complex implements FieldElement<Complex>, Serializable { * <pre> * Examples: * <code> - * tanh(1 &plusmn; INFINITY i) = NaN + NaN i - * tanh(&plusmn;INFINITY + i) = NaN + 0 i + * tanh(a &plusmn; INFINITY i) = NaN + NaN i + * tanh(&plusmn;INFINITY + bi) = &plusmn;1 + 0 i * tanh(&plusmn;INFINITY &plusmn; INFINITY i) = NaN + NaN i * tanh(0 + (&pi;/2)i) = NaN + INFINITY i * </code> @@ -1060,10 +1066,15 @@ public class Complex implements FieldElement<Complex>, Serializable { * @since 1.2 */ public Complex tanh() { - if (isNaN) { + if (isNaN || Double.isInfinite(imaginary)) { return NaN; } - + if (real > 20.0) { + return createComplex(1.0, 0.0); + } + if (real < -20.0) { + return createComplex(-1.0, 0.0); + } double real2 = 2.0 * real; double imaginary2 = 2.0 * imaginary; double d = FastMath.cosh(real2) + FastMath.cos(imaginary2);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-722_95d15eff.diff
bugs-dot-jar_data_MATH-1080_b285f170
--- BugID: MATH-1080 Summary: The LinearConstraintSet shall return its constraints in a deterministic way Description: |- As previously discussed on the mailinglist, the LinearConstraintSet should return its internally stored LinearConstraints in the same iteration order as they have been provided via its constructor. This ensures that the execution of the same linear problem results in the same results each time it is executed. This is especially important when linear problems are loaded from a file, e.g. mps format, and makes it simpler to debug problems and compare with other solvers which do the same thing. diff --git a/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java index cf5279a..b2a1209 100644 --- a/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java +++ b/src/main/java/org/apache/commons/math3/optim/linear/LinearConstraintSet.java @@ -16,10 +16,11 @@ */ package org.apache.commons.math3.optim.linear; +import java.util.LinkedHashSet; import java.util.Set; -import java.util.HashSet; import java.util.Collection; import java.util.Collections; + import org.apache.commons.math3.optim.OptimizationData; /** @@ -30,8 +31,7 @@ import org.apache.commons.math3.optim.OptimizationData; */ public class LinearConstraintSet implements OptimizationData { /** Set of constraints. */ - private final Set<LinearConstraint> linearConstraints - = new HashSet<LinearConstraint>(); + private final Set<LinearConstraint> linearConstraints = new LinkedHashSet<LinearConstraint>(); /** * Creates a set containing the given constraints.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1080_b285f170.diff
bugs-dot-jar_data_MATH-370_495f04bc
--- BugID: MATH-370 Summary: NaN in "equals" methods Description: | In "MathUtils", some "equals" methods will return true if both argument are NaN. Unless I'm mistaken, this contradicts the IEEE standard. If nobody objects, I'm going to make the changes. diff --git a/src/main/java/org/apache/commons/math/util/MathUtils.java b/src/main/java/org/apache/commons/math/util/MathUtils.java index 887dc66..ab0fe33 100644 --- a/src/main/java/org/apache/commons/math/util/MathUtils.java +++ b/src/main/java/org/apache/commons/math/util/MathUtils.java @@ -407,20 +407,14 @@ public final class MathUtils { /** * Returns true iff they are equal as defined by - * {@link #equals(double,double,int) this method}. + * {@link #equals(double,double,int) equals(x, y, 1)}. * * @param x first value * @param y second value * @return {@code true} if the values are equal. - * @deprecated This method considers that {@code NaN == NaN}. In release - * 3.0, the semantics will change in order to comply with IEEE754 where it - * is specified that {@code NaN != NaN}. - * New methods have been added for those cases wher the old semantics is - * useful (see e.g. {@link #equalsIncludingNaN(double,double) - * equalsIncludingNaN}. */ public static boolean equals(double x, double y) { - return (Double.isNaN(x) && Double.isNaN(y)) || x == y; + return equals(x, y, 1); } /** @@ -524,12 +518,6 @@ public final class MathUtils { * @param y second array * @return true if the values are both null or have same dimension * and equal elements. - * @deprecated This method considers that {@code NaN == NaN}. In release - * 3.0, the semantics will change in order to comply with IEEE754 where it - * is specified that {@code NaN != NaN}. - * New methods have been added for those cases wher the old semantics is - * useful (see e.g. {@link #equalsIncludingNaN(double[],double[]) - * equalsIncludingNaN}. 
*/ public static boolean equals(double[] x, double[] y) { if ((x == null) || (y == null)) { @@ -1102,29 +1090,6 @@ public final class MathUtils { } /** - * Get the next machine representable number after a number, moving - * in the direction of another number. - * <p> - * If <code>direction</code> is greater than or equal to<code>d</code>, - * the smallest machine representable number strictly greater than - * <code>d</code> is returned; otherwise the largest representable number - * strictly less than <code>d</code> is returned.</p> - * <p> - * If <code>d</code> is NaN or Infinite, it is returned unchanged.</p> - * - * @param d base number - * @param direction (the only important thing is whether - * direction is greater or smaller than d) - * @return the next machine representable number in the specified direction - * @since 1.2 - * @deprecated as of 2.2, replaced by {@link FastMath#nextAfter(double, double)} - */ - @Deprecated - public static double nextAfter(double d, double direction) { - return FastMath.nextAfter(d, direction); - } - - /** * Scale a number by 2<sup>scaleFactor</sup>. 
* <p>If <code>d</code> is 0 or NaN or Infinite, it is returned unchanged.</p> * @@ -1318,23 +1283,23 @@ public final class MathUtils { switch (roundingMethod) { case BigDecimal.ROUND_CEILING : if (sign == -1) { - unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY)); + unscaled = FastMath.floor(FastMath.nextAfter(unscaled, Double.NEGATIVE_INFINITY)); } else { - unscaled = FastMath.ceil(nextAfter(unscaled, Double.POSITIVE_INFINITY)); + unscaled = FastMath.ceil(FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY)); } break; case BigDecimal.ROUND_DOWN : - unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY)); + unscaled = FastMath.floor(FastMath.nextAfter(unscaled, Double.NEGATIVE_INFINITY)); break; case BigDecimal.ROUND_FLOOR : if (sign == -1) { - unscaled = FastMath.ceil(nextAfter(unscaled, Double.POSITIVE_INFINITY)); + unscaled = FastMath.ceil(FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY)); } else { - unscaled = FastMath.floor(nextAfter(unscaled, Double.NEGATIVE_INFINITY)); + unscaled = FastMath.floor(FastMath.nextAfter(unscaled, Double.NEGATIVE_INFINITY)); } break; case BigDecimal.ROUND_HALF_DOWN : { - unscaled = nextAfter(unscaled, Double.NEGATIVE_INFINITY); + unscaled = FastMath.nextAfter(unscaled, Double.NEGATIVE_INFINITY); double fraction = unscaled - FastMath.floor(unscaled); if (fraction > 0.5) { unscaled = FastMath.ceil(unscaled); @@ -1361,7 +1326,7 @@ public final class MathUtils { break; } case BigDecimal.ROUND_HALF_UP : { - unscaled = nextAfter(unscaled, Double.POSITIVE_INFINITY); + unscaled = FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY); double fraction = unscaled - FastMath.floor(unscaled); if (fraction >= 0.5) { unscaled = FastMath.ceil(unscaled); @@ -1376,7 +1341,7 @@ public final class MathUtils { } break; case BigDecimal.ROUND_UP : - unscaled = FastMath.ceil(nextAfter(unscaled, Double.POSITIVE_INFINITY)); + unscaled = FastMath.ceil(FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY)); 
break; default : throw MathRuntimeException.createIllegalArgumentException( @@ -1905,24 +1870,6 @@ public final class MathUtils { } /** - * Checks that the given array is sorted. - * - * @param val Values - * @param dir Order direction (-1 for decreasing, 1 for increasing) - * @param strict Whether the order should be strict - * @throws NonMonotonousSequenceException if the array is not sorted. - * @deprecated as of 2.2 (please use the new {@link #checkOrder(double[],OrderDirection,boolean) - * checkOrder} method). To be removed in 3.0. - */ - public static void checkOrder(double[] val, int dir, boolean strict) { - if (dir > 0) { - checkOrder(val, OrderDirection.INCREASING, strict); - } else { - checkOrder(val, OrderDirection.DECREASING, strict); - } - } - - /** * Returns the Cartesian norm (2-norm), handling both overflow and underflow. * Translation of the minpack enorm subroutine. *
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-370_495f04bc.diff
bugs-dot-jar_data_MATH-934_724795b5
--- BugID: MATH-934 Summary: Complex.ZERO.reciprocal() returns NaN but should return INF. Description: | Complex.ZERO.reciprocal() returns NaN but should return INF. Class: org.apache.commons.math3.complex.Complex; Method: reciprocal() @version $Id: Complex.java 1416643 2012-12-03 19:37:14Z tn $ diff --git a/src/main/java/org/apache/commons/math3/complex/Complex.java b/src/main/java/org/apache/commons/math3/complex/Complex.java index 22b23f2..ac8185b 100644 --- a/src/main/java/org/apache/commons/math3/complex/Complex.java +++ b/src/main/java/org/apache/commons/math3/complex/Complex.java @@ -302,7 +302,7 @@ public class Complex implements FieldElement<Complex>, Serializable { } if (real == 0.0 && imaginary == 0.0) { - return NaN; + return INF; } if (isInfinite) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-934_724795b5.diff
bugs-dot-jar_data_MATH-836_d7c0f27e
--- BugID: MATH-836 Summary: Fraction(double, int) constructor strange behaviour Description: "The Fraction constructor Fraction(double, int) takes a double value and a int maximal denominator, and approximates a fraction. When the double value is a large, negative number with many digits in the fractional part, and the maximal denominator is a big, positive integer (in the 100'000s), two distinct bugs can manifest:\n\n1: the constructor returns a positive Fraction. Calling Fraction(-33655.1677817278, 371880) returns the fraction 410517235/243036, which both has the wrong sign, and is far away from the absolute value of the given value\n\n2: the constructor does not manage to reduce the Fraction properly. Calling Fraction(-43979.60679604749, 366081) returns the fraction -1651878166/256677, which should have* been reduced to -24654898/3831.\n\nI have, as of yet, not found a solution. The constructor looks like this:\n\npublic Fraction(double value, int maxDenominator)\n throws FractionConversionException\n {\n this(value, 0, maxDenominator, 100);\n \ }\n\nIncreasing the 100 value (max iterations) does not fix the problem for all cases. Changing the 0-value (the epsilon, maximum allowed error) to something small does not work either, as this breaks the tests in FractionTest. \n\nThe problem is not neccissarily that the algorithm is unable to approximate a fraction correctly. A solution where a FractionConversionException had been thrown in each of these examples would probably be the best solution if an improvement on the approximation algorithm turns out to be hard to find.\n\nThis bug has been found when trying to explore the idea of axiom-based testing (http://bldl.ii.uib.no/testing.html). 
Attached is a java test class FractionTestByAxiom (junit, goes into org.apache.commons.math3.fraction) which shows these bugs through a simplified approach to this kind of testing, and a text file describing some of the value/maxDenominator combinations which causes one of these failures.\n\n* It is never specified in the documentation that the Fraction class guarantees that completely reduced rational numbers are constructed, but a comment inside the equals method claims that \"since fractions are always in lowest terms, numerators and can be compared directly for equality\", so it seems like this is the intention. " diff --git a/src/main/java/org/apache/commons/math3/fraction/Fraction.java b/src/main/java/org/apache/commons/math3/fraction/Fraction.java index f81ff0a..08d3b95 100644 --- a/src/main/java/org/apache/commons/math3/fraction/Fraction.java +++ b/src/main/java/org/apache/commons/math3/fraction/Fraction.java @@ -178,7 +178,7 @@ public class Fraction long overflow = Integer.MAX_VALUE; double r0 = value; long a0 = (long)FastMath.floor(r0); - if (a0 > overflow) { + if (FastMath.abs(a0) > overflow) { throw new FractionConversionException(value, a0, 1l); } @@ -206,7 +206,7 @@ public class Fraction long a1 = (long)FastMath.floor(r1); p2 = (a1 * p1) + p0; q2 = (a1 * q1) + q0; - if ((p2 > overflow) || (q2 > overflow)) { + if ((FastMath.abs(p2) > overflow) || (FastMath.abs(q2) > overflow)) { throw new FractionConversionException(value, p2, q2); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-836_d7c0f27e.diff
bugs-dot-jar_data_MATH-631_334c01e6
--- BugID: MATH-631 Summary: '"RegulaFalsiSolver" failure' Description: | The following unit test: {code} @Test public void testBug() { final UnivariateRealFunction f = new UnivariateRealFunction() { @Override public double value(double x) { return Math.exp(x) - Math.pow(Math.PI, 3.0); } }; UnivariateRealSolver solver = new RegulaFalsiSolver(); double root = solver.solve(100, f, 1, 10); } {code} fails with {noformat} illegal state: maximal count (100) exceeded: evaluations {noformat} Using "PegasusSolver", the answer is found after 17 evaluations. diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java index fde7172..b3a23a1 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java @@ -169,25 +169,37 @@ public abstract class BaseSecantSolver // Update the bounds with the new approximation. if (f1 * fx < 0) { - // We had [x0..x1]. We update it to [x1, x]. Note that the - // value of x1 has switched to the other bound, thus inverting + // The value of x1 has switched to the other bound, thus inverting // the interval. x0 = x1; f0 = f1; - x1 = x; - f1 = fx; inverted = !inverted; } else { - // We had [x0..x1]. We update it to [x0, x]. - if (method == Method.ILLINOIS) { + switch (method) { + case ILLINOIS: f0 *= 0.5; - } - if (method == Method.PEGASUS) { + break; + case PEGASUS: f0 *= f1 / (f1 + fx); + break; + case REGULA_FALSI: + if (x == x1) { + final double delta = FastMath.max(rtol * FastMath.abs(x1), + atol); + // Update formula cannot make any progress: Update the + // search interval. + x0 = 0.5 * (x0 + x1 - delta); + f0 = computeObjectiveValue(x0); + } + break; + default: + // Should never happen. + throw new MathInternalError(); } - x1 = x; - f1 = fx; } + // Update from [x0, x1] to [x0, x]. 
+ x1 = x; + f1 = fx; // If the function value of the last approximation is too small, // given the function value accuracy, then we can't get closer to
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-631_334c01e6.diff
bugs-dot-jar_data_MATH-1065_996c0c16
--- BugID: MATH-1065 Summary: EnumeratedRealDistribution.inverseCumulativeProbability returns values not in the samples set Description: | The method EnumeratedRealDistribution.inverseCumulativeProbability() sometimes returns values that are not in the initial samples domain... I will attach a test to exploit this bug. diff --git a/src/main/java/org/apache/commons/math3/distribution/EnumeratedRealDistribution.java b/src/main/java/org/apache/commons/math3/distribution/EnumeratedRealDistribution.java index d457434..270764f 100644 --- a/src/main/java/org/apache/commons/math3/distribution/EnumeratedRealDistribution.java +++ b/src/main/java/org/apache/commons/math3/distribution/EnumeratedRealDistribution.java @@ -18,11 +18,13 @@ package org.apache.commons.math3.distribution; import java.util.ArrayList; import java.util.List; + import org.apache.commons.math3.exception.DimensionMismatchException; import org.apache.commons.math3.exception.MathArithmeticException; import org.apache.commons.math3.exception.NotANumberException; import org.apache.commons.math3.exception.NotFiniteNumberException; import org.apache.commons.math3.exception.NotPositiveException; +import org.apache.commons.math3.exception.OutOfRangeException; import org.apache.commons.math3.random.RandomGenerator; import org.apache.commons.math3.random.Well19937c; import org.apache.commons.math3.util.Pair; @@ -138,6 +140,33 @@ public class EnumeratedRealDistribution extends AbstractRealDistribution { /** * {@inheritDoc} + */ + @Override + public double inverseCumulativeProbability(final double p) throws OutOfRangeException { + if (p < 0.0 || p > 1.0) { + throw new OutOfRangeException(p, 0, 1); + } + + double probability = 0; + double x = getSupportLowerBound(); + for (final Pair<Double, Double> sample : innerDistribution.getPmf()) { + if (sample.getValue() == 0.0) { + continue; + } + + probability += sample.getValue(); + x = sample.getKey(); + + if (probability >= p) { + break; + } + } + + return x; + } + + /** + 
* {@inheritDoc} * * @return {@code sum(singletons[i] * probabilities[i])} */
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1065_996c0c16.diff
bugs-dot-jar_data_MATH-938_73605560
--- BugID: MATH-938 Summary: Line.revert() is imprecise Description: |- Line.revert() only maintains ~10 digits for the direction. This becomes an issue when the line's position is evaluated far from the origin. A simple fix would be to use Vector3D.negate() for the direction. Also, is there a reason why Line is not immutable? It is just comprised of two vectors. diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Line.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Line.java index 0c39411..7c9a28f 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Line.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Line.java @@ -84,7 +84,9 @@ public class Line implements Embedding<Euclidean3D, Euclidean1D> { * @return a new instance, with reversed direction */ public Line revert() { - return new Line(zero, zero.subtract(direction)); + final Line reverted = new Line(this); + reverted.direction = reverted.direction.negate(); + return reverted; } /** Get the normalized direction vector.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-938_73605560.diff
bugs-dot-jar_data_MATH-631_c0b49542
--- BugID: MATH-631 Summary: '"RegulaFalsiSolver" failure' Description: | The following unit test: {code} @Test public void testBug() { final UnivariateRealFunction f = new UnivariateRealFunction() { @Override public double value(double x) { return Math.exp(x) - Math.pow(Math.PI, 3.0); } }; UnivariateRealSolver solver = new RegulaFalsiSolver(); double root = solver.solve(100, f, 1, 10); } {code} fails with {noformat} illegal state: maximal count (100) exceeded: evaluations {noformat} Using "PegasusSolver", the answer is found after 17 evaluations. diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java index c781a90..0347525 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java @@ -19,6 +19,7 @@ package org.apache.commons.math.analysis.solvers; import org.apache.commons.math.util.FastMath; import org.apache.commons.math.analysis.UnivariateRealFunction; +import org.apache.commons.math.exception.ConvergenceException; import org.apache.commons.math.exception.MathInternalError; /** @@ -61,8 +62,8 @@ public abstract class BaseSecantSolver /** * Construct a solver. * - * @param absoluteAccuracy absolute accuracy - * @param method <em>Secant</em>-based root-finding method to use + * @param absoluteAccuracy Absolute accuracy. + * @param method <em>Secant</em>-based root-finding method to use. */ protected BaseSecantSolver(final double absoluteAccuracy, final Method method) { super(absoluteAccuracy); @@ -73,9 +74,9 @@ public abstract class BaseSecantSolver /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy - * @param method <em>Secant</em>-based root-finding method to use + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. 
+ * @param method <em>Secant</em>-based root-finding method to use. */ protected BaseSecantSolver(final double relativeAccuracy, final double absoluteAccuracy, @@ -183,7 +184,11 @@ public abstract class BaseSecantSolver f0 *= f1 / (f1 + fx); break; case REGULA_FALSI: - // Nothing. + // Detect early that algorithm is stuck, instead of waiting + // for the maximum number of iterations to be exceeded. + if (x == x1) { + throw new ConvergenceException(); + } break; default: // Should never happen. diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/IllinoisSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/IllinoisSolver.java index ede0c27..47c340c 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/IllinoisSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/IllinoisSolver.java @@ -26,7 +26,9 @@ package org.apache.commons.math.analysis.solvers; * <p>Like the <em>Regula Falsi</em> method, convergence is guaranteed by * maintaining a bracketed solution. The <em>Illinois</em> method however, * should converge much faster than the original <em>Regula Falsi</em> - * method.</p> + * method. Furthermore, this implementation of the <em>Illinois</em> method + * should not suffer from the same implementation issues as the <em>Regula + * Falsi</em> method, which may fail to convergence in certain cases.</p> * * <p>The <em>Illinois</em> method assumes that the function is continuous, * but not necessarily smooth.</p> @@ -49,7 +51,7 @@ public class IllinoisSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param absoluteAccuracy absolute accuracy + * @param absoluteAccuracy Absolute accuracy. */ public IllinoisSolver(final double absoluteAccuracy) { super(absoluteAccuracy, Method.ILLINOIS); @@ -58,8 +60,8 @@ public class IllinoisSolver extends BaseSecantSolver { /** * Construct a solver. 
* - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. */ public IllinoisSolver(final double relativeAccuracy, final double absoluteAccuracy) { @@ -69,8 +71,8 @@ public class IllinoisSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. * @param functionValueAccuracy Maximum function value error. */ public IllinoisSolver(final double relativeAccuracy, @@ -78,5 +80,4 @@ public class IllinoisSolver extends BaseSecantSolver { final double functionValueAccuracy) { super(relativeAccuracy, absoluteAccuracy, functionValueAccuracy, Method.PEGASUS); } - } diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/PegasusSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/PegasusSolver.java index 08fb0c7..457c958 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/PegasusSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/PegasusSolver.java @@ -24,10 +24,13 @@ package org.apache.commons.math.analysis.solvers; * * <p>Like the <em>Regula Falsi</em> method, convergence is guaranteed by * maintaining a bracketed solution. The <em>Pegasus</em> method however, - * should converge much faster than the original <em>Regula Falsi</em> method. - * Furthermore, it should converge faster than the - * {@link IllinoisSolver <em>Illinois</em>} method, another - * <em>Regula Falsi</em>-based method.</p> + * should converge much faster than the original <em>Regula Falsi</em> + * method. Furthermore, this implementation of the <em>Pegasus</em> method + * should not suffer from the same implementation issues as the <em>Regula + * Falsi</em> method, which may fail to convergence in certain cases. 
Also, + * the <em>Pegasus</em> method should converge faster than the + * {@link IllinoisSolver <em>Illinois</em>} method, another <em>Regula + * Falsi</em>-based method.</p> * * <p>The <em>Pegasus</em> method assumes that the function is continuous, * but not necessarily smooth.</p> @@ -50,7 +53,7 @@ public class PegasusSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param absoluteAccuracy absolute accuracy + * @param absoluteAccuracy Absolute accuracy. */ public PegasusSolver(final double absoluteAccuracy) { super(absoluteAccuracy, Method.PEGASUS); @@ -59,8 +62,8 @@ public class PegasusSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. */ public PegasusSolver(final double relativeAccuracy, final double absoluteAccuracy) { @@ -70,8 +73,8 @@ public class PegasusSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. * @param functionValueAccuracy Maximum function value error. 
*/ public PegasusSolver(final double relativeAccuracy, @@ -79,5 +82,4 @@ public class PegasusSolver extends BaseSecantSolver { final double functionValueAccuracy) { super(relativeAccuracy, absoluteAccuracy, functionValueAccuracy, Method.PEGASUS); } - } diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/RegulaFalsiSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/RegulaFalsiSolver.java index 89c50df..71224a0 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/RegulaFalsiSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/RegulaFalsiSolver.java @@ -17,13 +17,30 @@ package org.apache.commons.math.analysis.solvers; - /** * Implements the <em>Regula Falsi</em> or <em>False position</em> method for * root-finding (approximating a zero of a univariate real function). It is a - * modified {@link SecantSolver <em>Secant</em>} method. Unlike the - * <em>Secant</em> method, convergence is guaranteed by maintaining a - * bracketed solution. + * modified {@link SecantSolver <em>Secant</em>} method. + * + * <p>The <em>Regula Falsi</em> method is included for completeness, for + * testing purposes, for educational purposes, for comparison to other + * algorithms, etc. It is however <strong>not</strong> intended to be used + * for actual problems, as one of the bounds often remains fixed, resulting + * in very slow convergence. Instead, one of the well-known modified + * <em>Regula Falsi</em> algorithms can be used ({@link IllinoisSolver + * <em>Illinois</em>} or {@link PegasusSolver <em>Pegasus</em>}). These two + * algorithms solve the fundamental issues of the original <em>Regula + * Falsi</em> algorithm, and greatly out-performs it for most, if not all, + * (practical) functions. + * + * <p>Unlike the <em>Secant</em> method, the <em>Regula Falsi</em> guarantees + * convergence, by maintaining a bracketed solution. 
Note however, that due to + * the finite/limited precision of Java's {@link Double double} type, which is + * used in this implementation, the algorithm may get stuck in a situation + * where it no longer makes any progress. Such cases are detected and result + * in a {@code ConvergenceException} exception being thrown. In other words, + * the algorithm theoretically guarantees convergence, but the implementation + * does not.</p> * * <p>The <em>Regula Falsi</em> method assumes that the function is continuous, * but not necessarily smooth.</p> @@ -46,7 +63,7 @@ public class RegulaFalsiSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param absoluteAccuracy absolute accuracy + * @param absoluteAccuracy Absolute accuracy. */ public RegulaFalsiSolver(final double absoluteAccuracy) { super(absoluteAccuracy, Method.REGULA_FALSI); @@ -55,8 +72,8 @@ public class RegulaFalsiSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. */ public RegulaFalsiSolver(final double relativeAccuracy, final double absoluteAccuracy) { @@ -66,8 +83,8 @@ public class RegulaFalsiSolver extends BaseSecantSolver { /** * Construct a solver. * - * @param relativeAccuracy relative accuracy - * @param absoluteAccuracy absolute accuracy + * @param relativeAccuracy Relative accuracy. + * @param absoluteAccuracy Absolute accuracy. * @param functionValueAccuracy Maximum function value error. */ public RegulaFalsiSolver(final double relativeAccuracy,
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-631_c0b49542.diff
bugs-dot-jar_data_MATH-1121_5a6ccd58
--- BugID: MATH-1121 Summary: Brent optimizer doesn't use the Base optimizer iteration counter Description: |- BrentOptimizer uses "iter" defined in "doOptimize" to count iterations. It should ideally use the iteration counter defined for the BaseOptimizer. diff --git a/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java b/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java index a5fca86..b788c2c 100644 --- a/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java @@ -150,7 +150,6 @@ public class BrentOptimizer extends UnivariateOptimizer { // Best point encountered so far (which is the initial guess). UnivariatePointValuePair best = current; - int iter = 0; while (true) { final double m = 0.5 * (a + b); final double tol1 = relativeThreshold * FastMath.abs(x) + absoluteThreshold; @@ -238,7 +237,7 @@ public class BrentOptimizer extends UnivariateOptimizer { isMinim), isMinim); - if (checker != null && checker.converged(iter, previous, current)) { + if (checker != null && checker.converged(getIterations(), previous, current)) { return best; } @@ -281,7 +280,8 @@ public class BrentOptimizer extends UnivariateOptimizer { isMinim), isMinim); } - ++iter; + + incrementIterationCount(); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1121_5a6ccd58.diff
bugs-dot-jar_data_MATH-1165_596ccd59
--- BugID: MATH-1165 Summary: Rare case for updateMembershipMatrix() in FuzzyKMeansClusterer Description: |- The function updateMembershipMatrix() in FuzzyKMeansClusterer assigns the points to the cluster with the highest membership. Consider the following case: If the distance between a point and the cluster center is zero, then we will have a cluster membership of one, and all other membership values will be zero. So the if condition: if (membershipMatrix[i][j] > maxMembership) { maxMembership = membershipMatrix[i][j]; newCluster = j; } will never be true during the for loop and newCluster will remain -1. This will throw an exception because of the line: clusters.get(newCluster) .addPoint(point); Adding the following condition can solve the problem: double d; if (sum == 0) d = 1; else d = 1.0/sum; diff --git a/src/main/java/org/apache/commons/math3/ml/clustering/FuzzyKMeansClusterer.java b/src/main/java/org/apache/commons/math3/ml/clustering/FuzzyKMeansClusterer.java index ed22047..5f89934 100644 --- a/src/main/java/org/apache/commons/math3/ml/clustering/FuzzyKMeansClusterer.java +++ b/src/main/java/org/apache/commons/math3/ml/clustering/FuzzyKMeansClusterer.java @@ -346,18 +346,32 @@ public class FuzzyKMeansClusterer<T extends Clusterable> extends Clusterer<T> { private void updateMembershipMatrix() { for (int i = 0; i < points.size(); i++) { final T point = points.get(i); - double maxMembership = 0.0; + double maxMembership = Double.MIN_VALUE; int newCluster = -1; for (int j = 0; j < clusters.size(); j++) { double sum = 0.0; final double distA = FastMath.abs(distance(point, clusters.get(j).getCenter())); - for (final CentroidCluster<T> c : clusters) { - final double distB = FastMath.abs(distance(point, c.getCenter())); - sum += FastMath.pow(distA / distB, 2.0 / (fuzziness - 1.0)); + if (distA != 0.0) { + for (final CentroidCluster<T> c : clusters) { + final double distB = FastMath.abs(distance(point, c.getCenter())); + if (distB == 0.0) { + sum = 
Double.POSITIVE_INFINITY; + break; + } + sum += FastMath.pow(distA / distB, 2.0 / (fuzziness - 1.0)); + } } - membershipMatrix[i][j] = 1.0 / sum; + double membership; + if (sum == 0.0) { + membership = 1.0; + } else if (sum == Double.POSITIVE_INFINITY) { + membership = 0.0; + } else { + membership = 1.0 / sum; + } + membershipMatrix[i][j] = membership; if (membershipMatrix[i][j] > maxMembership) { maxMembership = membershipMatrix[i][j];
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1165_596ccd59.diff
bugs-dot-jar_data_MATH-924_b07ecae3
--- BugID: MATH-924 Summary: new multivariate vector optimizers cannot be used with large number of weights Description: |- When using the Weight class to pass a large number of weights to multivariate vector optimizers, an nxn full matrix is created (and copied) when a n elements vector is used. This exhausts memory when n is large. This happens for example when using curve fitters (even simple curve fitters like polynomial ones for low degree) with large number of points. I encountered this with curve fitting on 41200 points, which created a matrix with 1.7 billion elements. diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java index 789bc25..aa3299a 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/Weight.java @@ -18,7 +18,7 @@ package org.apache.commons.math3.optim.nonlinear.vector; import org.apache.commons.math3.optim.OptimizationData; import org.apache.commons.math3.linear.RealMatrix; -import org.apache.commons.math3.linear.MatrixUtils; +import org.apache.commons.math3.linear.DiagonalMatrix; import org.apache.commons.math3.linear.NonSquareMatrixException; /** @@ -40,10 +40,7 @@ public class Weight implements OptimizationData { */ public Weight(double[] weight) { final int dim = weight.length; - weightMatrix = MatrixUtils.createRealMatrix(dim, dim); - for (int i = 0; i < dim; i++) { - weightMatrix.setEntry(i, i, weight[i]); - } + weightMatrix = new DiagonalMatrix(weight); } /** diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java index b7bb6f5..ef52943 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java +++ 
b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/AbstractLeastSquaresOptimizer.java @@ -20,6 +20,7 @@ import org.apache.commons.math3.exception.DimensionMismatchException; import org.apache.commons.math3.exception.TooManyEvaluationsException; import org.apache.commons.math3.linear.ArrayRealVector; import org.apache.commons.math3.linear.RealMatrix; +import org.apache.commons.math3.linear.DiagonalMatrix; import org.apache.commons.math3.linear.DecompositionSolver; import org.apache.commons.math3.linear.MatrixUtils; import org.apache.commons.math3.linear.QRDecomposition; @@ -263,7 +264,16 @@ public abstract class AbstractLeastSquaresOptimizer * @return the square-root of the weight matrix. */ private RealMatrix squareRoot(RealMatrix m) { - final EigenDecomposition dec = new EigenDecomposition(m); - return dec.getSquareRoot(); + if (m instanceof DiagonalMatrix) { + final int dim = m.getRowDimension(); + final RealMatrix sqrtM = new DiagonalMatrix(dim); + for (int i = 0; i < dim; i++) { + sqrtM.setEntry(i, i, FastMath.sqrt(m.getEntry(i, i))); + } + return sqrtM; + } else { + final EigenDecomposition dec = new EigenDecomposition(m); + return dec.getSquareRoot(); + } } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-924_b07ecae3.diff
bugs-dot-jar_data_MATH-1096_19c1c3bb
--- BugID: MATH-1096 Summary: implementation of smallest enclosing ball algorithm sometime fails Description: |- The algorithm for finding the smallest ball is designed in such a way the radius should be strictly increasing at each iteration. In some cases, it is not true and one iteration has a smaller ball. In most cases, there is no consequence, there is just one or two more iterations. However, in rare cases discovered while testing 3D, this generates an infinite loop. Some very short offending cases have already been identified and added to the test suite. These cases are currently deactivated in the main repository while I am already working on them. The test cases are * WelzlEncloser2DTest.testReducingBall * WelzlEncloser2DTest.testLargeSamples * WelzlEncloser3DTest.testInfiniteLoop * WelzlEncloser3DTest.testLargeSamples diff --git a/src/main/java/org/apache/commons/math3/geometry/enclosing/WelzlEncloser.java b/src/main/java/org/apache/commons/math3/geometry/enclosing/WelzlEncloser.java index 1bacb25..12a645f 100644 --- a/src/main/java/org/apache/commons/math3/geometry/enclosing/WelzlEncloser.java +++ b/src/main/java/org/apache/commons/math3/geometry/enclosing/WelzlEncloser.java @@ -89,6 +89,7 @@ public class WelzlEncloser<S extends Space, P extends Point<S>> implements Enclo // select the point farthest to current ball final P farthest = selectFarthest(points, ball); + if (ball.contains(farthest, tolerance)) { // we have found a ball containing all points return ball; @@ -100,7 +101,7 @@ public class WelzlEncloser<S extends Space, P extends Point<S>> implements Enclo EnclosingBall<S, P> savedBall = ball; ball = moveToFrontBall(extreme, extreme.size(), support); if (ball.getRadius() < savedBall.getRadius()) { - // TODO: fix this, it should never happen but it does! 
+ // this should never happen throw new MathInternalError(); } diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SphereGenerator.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SphereGenerator.java index 03e4450..f5a6b7c 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SphereGenerator.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SphereGenerator.java @@ -19,12 +19,13 @@ package org.apache.commons.math3.geometry.euclidean.threed; import java.util.Arrays; import java.util.List; +import org.apache.commons.math3.fraction.BigFraction; import org.apache.commons.math3.geometry.enclosing.EnclosingBall; import org.apache.commons.math3.geometry.enclosing.SupportBallGenerator; import org.apache.commons.math3.geometry.euclidean.twod.DiskGenerator; import org.apache.commons.math3.geometry.euclidean.twod.Euclidean2D; import org.apache.commons.math3.geometry.euclidean.twod.Vector2D; -import org.apache.commons.math3.util.MathArrays; +import org.apache.commons.math3.util.FastMath; /** Class generating an enclosing ball from its support points. 
* @version $Id$ @@ -88,24 +89,39 @@ public class SphereGenerator implements SupportBallGenerator<Euclidean3D, Vector // z_0 = +m_14 / (2 m_11) // Note that the minors m_11, m_12, m_13 and m_14 all have the last column // filled with 1.0, hence simplifying the computation - final double[] c1 = new double[] { - vA.getNormSq(), vB.getNormSq(), vC.getNormSq(), vD.getNormSq() + final BigFraction[] c2 = new BigFraction[] { + new BigFraction(vA.getX()), new BigFraction(vB.getX()), + new BigFraction(vC.getX()), new BigFraction(vD.getX()) }; - final double[] c2 = new double[] { - vA.getX(), vB.getX(), vC.getX(), vD.getX() + final BigFraction[] c3 = new BigFraction[] { + new BigFraction(vA.getY()), new BigFraction(vB.getY()), + new BigFraction(vC.getY()), new BigFraction(vD.getY()) }; - final double[] c3 = new double[] { - vA.getY(), vB.getY(), vC.getY(), vD.getY() + final BigFraction[] c4 = new BigFraction[] { + new BigFraction(vA.getZ()), new BigFraction(vB.getZ()), + new BigFraction(vC.getZ()), new BigFraction(vD.getZ()) }; - final double[] c4 = new double[] { - vA.getZ(), vB.getZ(), vC.getZ(), vD.getZ() + final BigFraction[] c1 = new BigFraction[] { + c2[0].multiply(c2[0]).add(c3[0].multiply(c3[0])).add(c4[0].multiply(c4[0])), + c2[1].multiply(c2[1]).add(c3[1].multiply(c3[1])).add(c4[1].multiply(c4[1])), + c2[2].multiply(c2[2]).add(c3[2].multiply(c3[2])).add(c4[2].multiply(c4[2])), + c2[3].multiply(c2[3]).add(c3[3].multiply(c3[3])).add(c4[3].multiply(c4[3])) }; - final double m11 = minor(c2, c3, c4); - final double m12 = minor(c1, c3, c4); - final double m13 = minor(c1, c2, c4); - final double m14 = minor(c1, c2, c3); - final Vector3D center = new Vector3D(0.5 * m12 / m11, -0.5 * m13 / m11, 0.5 * m14 / m11); - return new EnclosingBall<Euclidean3D, Vector3D>(center, center.distance(vA), + final BigFraction twoM11 = minor(c2, c3, c4).multiply(2); + final BigFraction m12 = minor(c1, c3, c4); + final BigFraction m13 = minor(c1, c2, c4); + final BigFraction m14 = minor(c1, 
c2, c3); + final BigFraction centerX = m12.divide(twoM11); + final BigFraction centerY = m13.divide(twoM11).negate(); + final BigFraction centerZ = m14.divide(twoM11); + final BigFraction dx = c2[0].subtract(centerX); + final BigFraction dy = c3[0].subtract(centerY); + final BigFraction dz = c4[0].subtract(centerZ); + final BigFraction r2 = dx.multiply(dx).add(dy.multiply(dy)).add(dz.multiply(dz)); + return new EnclosingBall<Euclidean3D, Vector3D>(new Vector3D(centerX.doubleValue(), + centerY.doubleValue(), + centerZ.doubleValue()), + FastMath.sqrt(r2.doubleValue()), vA, vB, vC, vD); } } @@ -114,41 +130,24 @@ public class SphereGenerator implements SupportBallGenerator<Euclidean3D, Vector } /** Compute a dimension 4 minor, when 4<sup>th</sup> column is known to be filled with 1.0. - * <p> - * The computation is performed using {@link MathArrays#linearCombination(double[], double[]) - * high accuracy sum of products}, trying to avoid cancellations effect. This should reduce - * risks in case of near co-planar points. 
- * </p> * @param c1 first column * @param c2 second column * @param c3 third column - * @return value of the minor computed to high accuracy + * @return value of the minor computed has an exact fraction */ - private double minor(final double[] c1, final double[] c2, final double[] c3) { - final double m01 = c2[0] * c3[1]; - final double m02 = c2[0] * c3[2]; - final double m03 = c2[0] * c3[3]; - final double m10 = c2[1] * c3[0]; - final double m12 = c2[1] * c3[2]; - final double m13 = c2[1] * c3[3]; - final double m20 = c2[2] * c3[0]; - final double m21 = c2[2] * c3[1]; - final double m23 = c2[2] * c3[3]; - final double m30 = c2[3] * c3[0]; - final double m31 = c2[3] * c3[1]; - final double m32 = c2[3] * c3[2]; - return MathArrays.linearCombination(new double[] { - c1[2], c1[1], c1[3], -c1[1], -c1[3], -c1[2], - c1[0], c1[3], c1[2], -c1[3], -c1[0], -c1[2], - c1[1], c1[0], c1[3], -c1[0], -c1[3], -c1[1], - c1[0], c1[2], c1[1], -c1[2], -c1[0], -c1[1] - }, - new double[] { - m13, m32, m21, m23, m12, m31, - m23, m02, m30, m20, m32, m03, - m03, m31, m10, m13, m01, m30, - m12, m01, m20, m10, m21, m02 - }); + private BigFraction minor(final BigFraction[] c1, final BigFraction[] c2, final BigFraction[] c3) { + return c2[0].multiply(c3[1]).multiply(c1[2].subtract(c1[3])). + add(c2[0].multiply(c3[2]).multiply(c1[3].subtract(c1[1]))). + add(c2[0].multiply(c3[3]).multiply(c1[1].subtract(c1[2]))). + add(c2[1].multiply(c3[0]).multiply(c1[3].subtract(c1[2]))). + add(c2[1].multiply(c3[2]).multiply(c1[0].subtract(c1[3]))). + add(c2[1].multiply(c3[3]).multiply(c1[2].subtract(c1[0]))). + add(c2[2].multiply(c3[0]).multiply(c1[1].subtract(c1[3]))). + add(c2[2].multiply(c3[1]).multiply(c1[3].subtract(c1[0]))). + add(c2[2].multiply(c3[3]).multiply(c1[0].subtract(c1[1]))). + add(c2[3].multiply(c3[0]).multiply(c1[2].subtract(c1[1]))). + add(c2[3].multiply(c3[1]).multiply(c1[0].subtract(c1[2]))). 
+ add(c2[3].multiply(c3[2]).multiply(c1[1].subtract(c1[0]))); } } diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/DiskGenerator.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/DiskGenerator.java index 514df14..d06c013 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/DiskGenerator.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/DiskGenerator.java @@ -18,9 +18,10 @@ package org.apache.commons.math3.geometry.euclidean.twod; import java.util.List; +import org.apache.commons.math3.fraction.BigFraction; import org.apache.commons.math3.geometry.enclosing.EnclosingBall; import org.apache.commons.math3.geometry.enclosing.SupportBallGenerator; -import org.apache.commons.math3.util.MathArrays; +import org.apache.commons.math3.util.FastMath; /** Class generating an enclosing ball from its support points. * @version $Id$ @@ -66,42 +67,43 @@ public class DiskGenerator implements SupportBallGenerator<Euclidean2D, Vector2D // y_0 = -m_13 / (2 m_11) // Note that the minors m_11, m_12 and m_13 all have the last column // filled with 1.0, hence simplifying the computation - final double[] c1 = new double[] { - vA.getNormSq(), vB.getNormSq(), vC.getNormSq() + final BigFraction[] c2 = new BigFraction[] { + new BigFraction(vA.getX()), new BigFraction(vB.getX()), new BigFraction(vC.getX()) }; - final double[] c2 = new double[] { - vA.getX(), vB.getX(), vC.getX() + final BigFraction[] c3 = new BigFraction[] { + new BigFraction(vA.getY()), new BigFraction(vB.getY()), new BigFraction(vC.getY()) }; - final double[] c3 = new double[] { - vA.getY(), vB.getY(), vC.getY() + final BigFraction[] c1 = new BigFraction[] { + c2[0].multiply(c2[0]).add(c3[0].multiply(c3[0])), + c2[1].multiply(c2[1]).add(c3[1].multiply(c3[1])), + c2[2].multiply(c2[2]).add(c3[2].multiply(c3[2])) }; - final double m11 = minor(c2, c3); - final double m12 = minor(c1, c3); - final double m13 = minor(c1, c2); - final 
Vector2D center = new Vector2D(0.5 * m12 / m11, -0.5 * m13 / m11); - return new EnclosingBall<Euclidean2D, Vector2D>(center, center.distance(vA), vA, vB, vC); + final BigFraction twoM11 = minor(c2, c3).multiply(2); + final BigFraction m12 = minor(c1, c3); + final BigFraction m13 = minor(c1, c2); + final BigFraction centerX = m12.divide(twoM11); + final BigFraction centerY = m13.divide(twoM11).negate(); + final BigFraction dx = c2[0].subtract(centerX); + final BigFraction dy = c3[0].subtract(centerY); + final BigFraction r2 = dx.multiply(dx).add(dy.multiply(dy)); + return new EnclosingBall<Euclidean2D, Vector2D>(new Vector2D(centerX.doubleValue(), + centerY.doubleValue()), + FastMath.sqrt(r2.doubleValue()), + vA, vB, vC); } } } } /** Compute a dimension 3 minor, when 3<sup>d</sup> column is known to be filled with 1.0. - * <p> - * The computation is performed using {@link MathArrays#linearCombination(double[], double[]) - * high accuracy sum of products}, trying to avoid cancellations effect. This should reduce - * risks in case of near co-planar points. - * </p> * @param c1 first column * @param c2 second column - * @return value of the minor computed to high accuracy + * @return value of the minor computed has an exact fraction */ - private double minor(final double[] c1, final double[] c2) { - return MathArrays.linearCombination(new double[] { - c1[0], c1[2], c1[1], -c1[2], -c1[0], -c1[1] - }, - new double[] { - c2[1], c2[0], c2[2], c2[1], c2[2], c2[0] - }); + private BigFraction minor(final BigFraction[] c1, final BigFraction[] c2) { + return c2[0].multiply(c1[2].subtract(c1[1])). + add(c2[1].multiply(c1[0].subtract(c1[2]))). + add(c2[2].multiply(c1[1].subtract(c1[0]))); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1096_19c1c3bb.diff
bugs-dot-jar_data_MATH-695_7980a242
--- BugID: MATH-695 Summary: Incomplete reinitialization with some events handling Description: |- I get a bug with event handling: I track 2 events that occur in the same step, when the first one is accepted, it resets the state but the reinitialization is not complete and the second one becomes unable to find its way. I can't give my context, which is rather large, but I tried a patch that works for me, unfortunately it breaks the unit tests. diff --git a/src/main/java/org/apache/commons/math/ode/AbstractIntegrator.java b/src/main/java/org/apache/commons/math/ode/AbstractIntegrator.java index 2d878b1..8f315e3 100644 --- a/src/main/java/org/apache/commons/math/ode/AbstractIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/AbstractIntegrator.java @@ -40,7 +40,6 @@ import org.apache.commons.math.ode.sampling.AbstractStepInterpolator; import org.apache.commons.math.ode.sampling.StepHandler; import org.apache.commons.math.util.FastMath; import org.apache.commons.math.util.Incrementor; -import org.apache.commons.math.util.MathUtils; import org.apache.commons.math.util.Precision; /** @@ -278,7 +277,6 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { double previousT = interpolator.getGlobalPreviousTime(); final double currentT = interpolator.getGlobalCurrentTime(); - resetOccurred = false; // initialize the events states if needed if (! 
statesInitialized) { @@ -332,6 +330,9 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { if (isLastStep) { // the event asked to stop integration System.arraycopy(eventY, 0, y, 0, y.length); + for (final EventState remaining : occuringEvents) { + remaining.stepAccepted(eventT, eventY); + } return eventT; } @@ -341,6 +342,9 @@ public abstract class AbstractIntegrator implements FirstOrderIntegrator { System.arraycopy(eventY, 0, y, 0, y.length); computeDerivatives(eventT, y, yDot); resetOccurred = true; + for (final EventState remaining : occuringEvents) { + remaining.stepAccepted(eventT, eventY); + } return eventT; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-695_7980a242.diff
bugs-dot-jar_data_MATH-393_d4b02f6a
--- BugID: MATH-393 Summary: Method "getResult()" in "MultiStartUnivariateRealOptimizer" Description: "In \"MultiStartUnivariateRealOptimizer\" (package \"optimization\"), the method \"getResult\" returns the result of the last run of the \"underlying\" optimizer; this last result might not be the best one, in which case it will not correspond to the value returned by the \"optimize\" method. This is confusing and does not seem very useful. I think that \"getResult\" should be defined as\n{code} \npublic double getResult() {\n return optima[0];\n}\n{code}\nand similarly\n{code}\npublic double getFunctionValue() {\n return optimaValues[0];\n}\n{code}\n" diff --git a/src/main/java/org/apache/commons/math/optimization/MultiStartUnivariateRealOptimizer.java b/src/main/java/org/apache/commons/math/optimization/MultiStartUnivariateRealOptimizer.java index 10bc9e6..26fd5e0 100644 --- a/src/main/java/org/apache/commons/math/optimization/MultiStartUnivariateRealOptimizer.java +++ b/src/main/java/org/apache/commons/math/optimization/MultiStartUnivariateRealOptimizer.java @@ -89,12 +89,12 @@ public class MultiStartUnivariateRealOptimizer implements UnivariateRealOptimize /** {@inheritDoc} */ public double getFunctionValue() { - return optimizer.getFunctionValue(); + return optimaValues[0]; } /** {@inheritDoc} */ public double getResult() { - return optimizer.getResult(); + return optima[0]; } /** {@inheritDoc} */ @@ -315,5 +315,4 @@ public class MultiStartUnivariateRealOptimizer implements UnivariateRealOptimize throws ConvergenceException, FunctionEvaluationException { return optimize(f, goalType, min, max); } - } diff --git a/src/main/java/org/apache/commons/math/optimization/UnivariateRealOptimizer.java b/src/main/java/org/apache/commons/math/optimization/UnivariateRealOptimizer.java index bf406ab..07d8c4f 100644 --- a/src/main/java/org/apache/commons/math/optimization/UnivariateRealOptimizer.java +++ 
b/src/main/java/org/apache/commons/math/optimization/UnivariateRealOptimizer.java @@ -36,7 +36,7 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { void setMaxEvaluations(int maxEvaluations); /** Get the maximal number of functions evaluations. - * @return maximal number of functions evaluations + * @return the maximal number of functions evaluations. */ int getMaxEvaluations(); @@ -46,7 +46,7 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { * {@link #optimize(UnivariateRealFunction, GoalType, double, double) optimize} * method. It is 0 if the method has not been called yet. * </p> - * @return number of evaluations of the objective function + * @return the number of evaluations of the objective function. */ int getEvaluations(); @@ -57,16 +57,16 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { * </p> * @param f the function to optimize. * @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} - * or {@link GoalType#MINIMIZE} + * or {@link GoalType#MINIMIZE}. * @param min the lower bound for the interval. * @param max the upper bound for the interval. - * @return a value where the function is optimum + * @return a value where the function is optimum. * @throws ConvergenceException if the maximum iteration count is exceeded * or the optimizer detects convergence problems otherwise. * @throws FunctionEvaluationException if an error occurs evaluating the - * function + * function. * @throws IllegalArgumentException if min > max or the endpoints do not - * satisfy the requirements specified by the optimizer + * satisfy the requirements specified by the optimizer. */ double optimize(UnivariateRealFunction f, GoalType goalType, double min, double max) @@ -79,17 +79,17 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { * </p> * @param f the function to optimize. 
* @param goalType type of optimization goal: either {@link GoalType#MAXIMIZE} - * or {@link GoalType#MINIMIZE} + * or {@link GoalType#MINIMIZE}. * @param min the lower bound for the interval. * @param max the upper bound for the interval. - * @param startValue the start value to use - * @return a value where the function is optimum + * @param startValue the start value to use. + * @return a value where the function is optimum. * @throws ConvergenceException if the maximum iteration count is exceeded * or the optimizer detects convergence problems otherwise. * @throws FunctionEvaluationException if an error occurs evaluating the - * function + * function. * @throws IllegalArgumentException if min > max or the arguments do not - * satisfy the requirements specified by the optimizer + * satisfy the requirements specified by the optimizer. */ double optimize(UnivariateRealFunction f, GoalType goalType, double min, double max, double startValue) @@ -98,7 +98,7 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { /** * Get the result of the last run of the optimizer. * - * @return the last result. + * @return the optimum. * @throws IllegalStateException if there is no result available, either * because no result was yet computed or the last attempt failed. */ @@ -107,10 +107,9 @@ public interface UnivariateRealOptimizer extends ConvergingAlgorithm { /** * Get the result of the last run of the optimizer. * - * @return the value of the function at the last result. + * @return the value of the function at the optimum. * @throws IllegalStateException if there is no result available, either * because no result was yet computed or the last attempt failed. */ double getFunctionValue(); - }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-393_d4b02f6a.diff
bugs-dot-jar_data_MATH-1203_b148046a
--- BugID: MATH-1203 Summary: getKernel fails for buckets with only multiple instances of the same value in random.EmpiricalDistribution Description: |- After loading a set of values into an EmpiricalDistribution, assume that there's a case where a single bin ONLY contains multiple instances of the same value. In this case the standard deviation will equal zero. This will fail when getKernel attempts to create a NormalDistribution. The other case where stddev=0 is when there is only a single value in the bin, and this is handled by returning a ConstantRealDistribution rather than a NormalDistribution. See: https://issues.apache.org/jira/browse/MATH-984 diff --git a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java index 3b3a864..61bb65b 100644 --- a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java +++ b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java @@ -593,7 +593,9 @@ public class EmpiricalDistribution extends AbstractRealDistribution { * <li>Compute K(B) = the probability mass of B with respect to the within-bin kernel * and K(B-) = the kernel distribution evaluated at the lower endpoint of B</li> * <li>Return P(B-) + P(B) * [K(x) - K(B-)] / K(B) where - * K(x) is the within-bin kernel distribution function evaluated at x.</li></ol></p> + * K(x) is the within-bin kernel distribution function evaluated at x.</li></ol> + * If K is a constant distribution, we return P(B-) + P(B) (counting the full + * mass of B).</p> * * @since 3.1 */ @@ -606,10 +608,13 @@ public class EmpiricalDistribution extends AbstractRealDistribution { final int binIndex = findBin(x); final double pBminus = pBminus(binIndex); final double pB = pB(binIndex); + final RealDistribution kernel = k(x); + if (kernel instanceof ConstantRealDistribution) { + return pBminus + pB; + } final double[] binBounds = getUpperBounds(); final double kB = kB(binIndex); 
final double lower = binIndex == 0 ? min : binBounds[binIndex - 1]; - final RealDistribution kernel = k(x); final double withinBinCum = (kernel.cumulativeProbability(x) - kernel.cumulativeProbability(lower)) / kB; return pBminus + pB * withinBinCum;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1203_b148046a.diff
bugs-dot-jar_data_MATH-1226_c44bfe00
--- BugID: MATH-1226 Summary: Exception thrown in ode for a pair of close events Description: |- When two discrete events occur closer to each other than the convergence threshold used for locating them, this sometimes triggers a NumberIsTooLargeException. The exception happens because the EventState class thinks the second event is simply a numerical artifact (a repetition of the already triggered first event) and tries to skip past it. If there are no other events in the same step later on, one interval boundary finally reaches step end and the interval bounds are reversed. diff --git a/src/main/java/org/apache/commons/math4/ode/events/EventState.java b/src/main/java/org/apache/commons/math4/ode/events/EventState.java index fe3039a..1908440 100644 --- a/src/main/java/org/apache/commons/math4/ode/events/EventState.java +++ b/src/main/java/org/apache/commons/math4/ode/events/EventState.java @@ -296,7 +296,18 @@ public class EventState { ta = forward ? ta + convergence : ta - convergence; ga = f.value(ta); } while ((g0Positive ^ (ga >= 0)) && (forward ^ (ta >= tb))); - --i; + + if (forward ^ (ta >= tb)) { + // we were able to skip this spurious root + --i; + } else { + // we can't avoid this root before the end of the step, + // we have to handle it despite it is close to the former one + // maybe we have two very close roots + pendingEventTime = root; + pendingEvent = true; + return true; + } } else if (Double.isNaN(previousEventTime) || (FastMath.abs(previousEventTime - root) > convergence)) { pendingEventTime = root;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1226_c44bfe00.diff
bugs-dot-jar_data_MATH-880_2a9cbbab
--- BugID: MATH-880 Summary: Polygon difference produces erronious results in some cases Description: "The 2D polygon difference method is returning incorrect\nresults. Below is a test case of subtracting two polygons (Sorry,\nthis is the simplest case that I could find that duplicates the\nproblem). \n\nThere are three problems with the result. The first is that the first\npoint of the first set of vertices is null (and the first point of the\nsecond set is also null). The second is that, even if the first null\npoints are ignored, the returned polygon is not the correct result.\nThe first and last points are way off, and the remaining points do not\nmatch the original polygon boundaries. Additionally, there are two\nholes that are returned in the results. This subtraction case should\nnot have holes.\n\n{code:title=\"Complex Polygon Difference Test\"}\npublic void testComplexDifference() {\n Vector2D[][] vertices1 = new Vector2D[][] {\n new Vector2D[] {\n new Vector2D( 90.08714908223715, 38.370299337260235),\n new Vector2D( 90.08709517675004, 38.3702895991413),\n new Vector2D( 90.08401538704919, \ 38.368849330127944),\n new Vector2D( 90.08258210430711, 38.367634558585564),\n \ new Vector2D( 90.08251455106665, 38.36763409247078),\n new Vector2D( 90.08106599752608, 38.36761621664249),\n new Vector2D( 90.08249585300035, 38.36753627557965),\n new Vector2D( 90.09075743352184, \ 38.35914647644972),\n new Vector2D( 90.09099945896571, 38.35896264724079),\n \ new Vector2D( 90.09269383800086, 38.34595756121246),\n new Vector2D( 90.09638631543191, 38.3457988093121),\n new Vector2D( 90.09666417351019, 38.34523360999418),\n new Vector2D( 90.1297082145872, \ 38.337670454923625),\n new Vector2D( 90.12971687748956, 38.337669827794684),\n \ new Vector2D( 90.1240820219179, 38.34328502001131),\n new Vector2D( 90.13084259656404, 38.34017811765017),\n new Vector2D( 90.13378567942857, 38.33860579180606),\n new Vector2D( 90.13519557833206, \ 38.33621054663689),\n new Vector2D( 
90.13545616732307, 38.33614965452864),\n \ new Vector2D( 90.13553111202748, 38.33613962818305),\n new Vector2D( 90.1356903436448, 38.33610227127048),\n new Vector2D( 90.13576283227428, 38.33609255422783),\n new Vector2D( 90.13595870833188, \ 38.33604606376991),\n new Vector2D( 90.1361556630693, 38.3360024198866),\n \ new Vector2D( 90.13622408795709, 38.335987048115726),\n new Vector2D( 90.13696189099994, 38.33581914328681),\n new Vector2D( 90.13746655304897, 38.33616706665265),\n new Vector2D( 90.13845973716064, \ 38.33650776167099),\n new Vector2D( 90.13950901827667, 38.3368469456463),\n \ new Vector2D( 90.14393814424852, 38.337591835857495),\n new Vector2D( 90.14483839716831, 38.337076122362475),\n new Vector2D( 90.14565474433601, 38.33769000964429),\n new Vector2D( 90.14569421179482, \ 38.3377117256905),\n new Vector2D( 90.14577067124333, 38.33770883625908),\n \ new Vector2D( 90.14600350631684, 38.337714326520995),\n new Vector2D( 90.14600355139731, 38.33771435193319),\n new Vector2D( 90.14600369112401, 38.33771443882085),\n new Vector2D( 90.14600382486884, \ 38.33771453466096),\n new Vector2D( 90.14600395205912, 38.33771463904344),\n \ new Vector2D( 90.14600407214999, 38.337714751520764),\n new Vector2D( 90.14600418462749, 38.337714871611695),\n new Vector2D( 90.14600422249327, 38.337714915811034),\n new Vector2D( 90.14867838361471, \ 38.34113888210675),\n new Vector2D( 90.14923750157374, 38.341582537502575),\n \ new Vector2D( 90.14877083250991, 38.34160685841391),\n new Vector2D( 90.14816667319519, 38.34244232585684),\n new Vector2D( 90.14797696744586, 38.34248455284745),\n new Vector2D( 90.14484318014337, \ 38.34385573215269),\n new Vector2D( 90.14477919958296, 38.3453797747614),\n \ new Vector2D( 90.14202393306448, 38.34464324839456),\n new Vector2D( 90.14198920640195, 38.344651155237216),\n new Vector2D( 90.14155207025175, 38.34486424263724),\n new Vector2D( 90.1415196143314, \ 38.344871730519),\n new Vector2D( 90.14128611910814, 38.34500196593859),\n \ new 
Vector2D( 90.14047850603913, 38.34600084496253),\n new Vector2D( 90.14045907000337, 38.34601860032171),\n new Vector2D( 90.14039496493928, 38.346223030432384),\n new Vector2D( 90.14037626063737, \ 38.346240203360026),\n new Vector2D( 90.14030005823724, 38.34646920000705),\n \ new Vector2D( 90.13799164754806, 38.34903093011013),\n new Vector2D( 90.11045289492762, 38.36801537312368),\n new Vector2D( 90.10871471476526, 38.36878044144294),\n new Vector2D( 90.10424901707671, \ 38.374300101757),\n new Vector2D( 90.10263482039932, 38.37310041316073),\n \ new Vector2D( 90.09834601753448, 38.373615053823414),\n new Vector2D( 90.0979455456843, 38.373578376172475),\n new Vector2D( 90.09086514328669, 38.37527884194668),\n new Vector2D( 90.09084931407364, \ 38.37590801712463),\n new Vector2D( 90.09081227075944, 38.37526295920463),\n \ new Vector2D( 90.09081378927135, 38.375193883266434)\n }\n \ };\n PolygonsSet set1 = buildSet(vertices1);\n\n Vector2D[][] vertices2 = new Vector2D[][] {\n new Vector2D[] {\n new Vector2D( 90.13067558880044, 38.36977255037573),\n new Vector2D( 90.12907570488, 38.36817308242706),\n new Vector2D( 90.1342774136516, \ 38.356886880294724),\n new Vector2D( 90.13090330629757, 38.34664392676211),\n \ new Vector2D( 90.13078571364593, 38.344904617518466),\n new Vector2D( 90.1315602208914, 38.3447185040846),\n new Vector2D( 90.1316336226821, 38.34470643148342),\n new Vector2D( 90.134020944832, \ 38.340936644972885),\n new Vector2D( 90.13912536387306, 38.335497255122334),\n \ new Vector2D( 90.1396178806582, 38.334878075552126),\n new Vector2D( 90.14083049696671, 38.33316530644106),\n new Vector2D( 90.14145252901329, 38.33152722916191),\n new Vector2D( 90.1404779335565, \ 38.32863516047786),\n new Vector2D( 90.14282712131586, 38.327504432532066),\n \ new Vector2D( 90.14616669875488, 38.3237354115015),\n new Vector2D( 90.14860976050608, 38.315714862457924),\n new Vector2D( 90.14999277782437, 38.3164932507504),\n new Vector2D( 90.15005207194997, \ 
38.316534677663356),\n new Vector2D( 90.15508513859612, 38.31878731691609),\n \ new Vector2D( 90.15919938519221, 38.31852743183782),\n new Vector2D( 90.16093758658837, 38.31880662005153),\n new Vector2D( 90.16099420184912, 38.318825953291594),\n new Vector2D( 90.1665411125756, \ 38.31859497874757),\n new Vector2D( 90.16999653861313, 38.32505772048029),\n \ new Vector2D( 90.17475243391698, 38.32594398441148),\n new Vector2D( 90.17940844844992, 38.327427213761325),\n new Vector2D( 90.20951909541378, 38.330616833491774),\n new Vector2D( 90.2155400467941, \ 38.331746223670336),\n new Vector2D( 90.21559881391778, 38.33175551425302),\n \ new Vector2D( 90.21916646426041, 38.332584299620805),\n new Vector2D( 90.23863749852285, 38.34778978875795),\n new Vector2D( 90.25459855175802, 38.357790570608984),\n new Vector2D( 90.25964298227257, \ 38.356918010203174),\n new Vector2D( 90.26024593994703, 38.361692743151366),\n \ new Vector2D( 90.26146187570015, 38.36311080550837),\n new Vector2D( 90.26614159359622, 38.36510808579902),\n new Vector2D( 90.26621342936448, 38.36507942500333),\n new Vector2D( 90.26652190211962, \ 38.36494042196722),\n new Vector2D( 90.26621240678867, 38.365113172030874),\n \ new Vector2D( 90.26614057102057, 38.365141832826794),\n new Vector2D( 90.26380080055299, 38.3660381760273),\n new Vector2D( 90.26315345241, 38.36670658276421),\n new Vector2D( 90.26251574942881, \ 38.367490323488084),\n new Vector2D( 90.26247873448426, 38.36755266444749),\n \ new Vector2D( 90.26234628016698, 38.36787989125406),\n new Vector2D( 90.26214559424784, 38.36945909356126),\n new Vector2D( 90.25861728442555, 38.37200753430875),\n new Vector2D( 90.23905557537864, \ 38.375405314295904),\n new Vector2D( 90.22517251874075, 38.38984691662256),\n \ new Vector2D( 90.22549955153215, 38.3911564273979),\n new Vector2D( 90.22434386063355, 38.391476432092134),\n new Vector2D( 90.22147729457276, 38.39134652252034),\n new Vector2D( 90.22142070120117, \ 38.391349167741964),\n new Vector2D( 
90.20665060751588, 38.39475580900313),\n \ new Vector2D( 90.20042268367109, 38.39842558622888),\n new Vector2D( 90.17423771242085, 38.402727751805344),\n new Vector2D( 90.16756796257476, 38.40913898597597),\n new Vector2D( 90.16728283954308, \ 38.411255399912875),\n new Vector2D( 90.16703538220418, 38.41136059866693),\n \ new Vector2D( 90.16725865657685, 38.41013618805954),\n new Vector2D( 90.16746107640665, 38.40902614307544),\n new Vector2D( 90.16122795307462, 38.39773101873203)\n }\n };\n PolygonsSet set2 = buildSet(vertices2);\n PolygonsSet set = (PolygonsSet) new\nRegionFactory<Euclidean2D>().difference(set1.copySelf(),\n\n \ set2.copySelf());\n\n Vector2D[][] verticies = set.getVertices();\n \ Assert.assertTrue(verticies[0][0] != null);\n Assert.assertEquals(1, verticies.length);\n }\n{code}" diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/PolygonsSet.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/PolygonsSet.java index 9bd9dae..9cb82de 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/PolygonsSet.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/PolygonsSet.java @@ -111,6 +111,20 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { * constructor} using {@link SubHyperplane subhyperplanes}.</p> * <p>If the list is empty, the region will represent the whole * space.</p> + * <p> + * Polygons with thin pikes or dents are inherently difficult to handle because + * they involve lines with almost opposite directions at some vertices. Polygons + * whose vertices come from some physical measurement with noise are also + * difficult because an edge that should be straight may be broken in lots of + * different pieces with almost equal directions. In both cases, computing the + * lines intersections is not numerically robust due to the almost 0 or almost + * &pi; angle. 
Such cases need to carefully adjust the {@code hyperplaneThickness} + * parameter. A too small value would often lead to completely wrong polygons + * with large area wrongly identified as inside or outside. Large values are + * often much safer. As a rule of thumb, a value slightly below the size of the + * most accurate detail needed is a good value for the {@code hyperplaneThickness} + * parameter. + * </p> * @param hyperplaneThickness tolerance below which points are considered to * belong to the hyperplane (which is therefore more a slab) * @param vertices vertices of the simple loop boundary @@ -157,20 +171,50 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { private static BSPTree<Euclidean2D> verticesToTree(final double hyperplaneThickness, final Vector2D ... vertices) { - if (vertices.length == 0) { + final int n = vertices.length; + if (n == 0) { // the tree represents the whole space return new BSPTree<Euclidean2D>(Boolean.TRUE); } - // at start, none of the edges have been processed - final BSPTree<Euclidean2D> tree = new BSPTree<Euclidean2D>(); - List<Vertex> list = new ArrayList<PolygonsSet.Vertex>(vertices.length); - for (final Vector2D vertex : vertices) { - list.add(new Vertex(vertex)); + // build the vertices + final Vertex[] vArray = new Vertex[n]; + for (int i = 0; i < n; ++i) { + vArray[i] = new Vertex(vertices[i]); + } + + // build the edges + List<Edge> edges = new ArrayList<Edge>(); + for (int i = 0; i < n; ++i) { + + // get the endpoints of the edge + final Vertex start = vArray[i]; + final Vertex end = vArray[(i + 1) % n]; + + // get the line supporting the edge, taking care not to recreate it + // if it was already created earlier due to another edge being aligned + // with the current one + Line line = start.sharedLineWith(end); + if (line == null) { + line = new Line(start.getLocation(), end.getLocation()); + } + + // create the edge and store it + edges.add(new Edge(start, end, line)); + + // check if 
another vertex also happens to be on this line + for (final Vertex vertex : vArray) { + if (vertex != start && vertex != end && + FastMath.abs(line.getOffset(vertex.getLocation())) <= hyperplaneThickness) { + vertex.bindWith(line); + } + } + } // build the tree top-down - insertVertices(hyperplaneThickness, tree, list); + final BSPTree<Euclidean2D> tree = new BSPTree<Euclidean2D>(); + insertEdges(hyperplaneThickness, tree, edges); return tree; @@ -181,45 +225,32 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { * belong to the hyperplane (which is therefore more a slab) * @param node current tree node (it is a leaf node at the beginning * of the call) - * @param vertices list of vertices belonging to the boundary of the - * cell defined by the node + * @param edges list of edges to insert in the cell defined by this node + * (excluding edges not belonging to the cell defined by this node) */ - private static void insertVertices(final double hyperplaneThickness, - final BSPTree<Euclidean2D> node, - final List<Vertex> vertices) { + private static void insertEdges(final double hyperplaneThickness, + final BSPTree<Euclidean2D> node, + final List<Edge> edges) { - Vertex current = vertices.get(vertices.size() - 1); + // find an edge with an hyperplane that can be inserted in the node int index = 0; - Line inserted = null; - while (inserted == null && index < vertices.size()) { - final Vertex previous = current; - current = vertices.get(index++); - if (previous.outgoingNeedsProcessing() && current.incomingNeedsProcessing()) { - - if (previous.shareNodeWith(current)) { - // both vertices are already handled by an existing node, - // closer to the tree root, they were probably created - // when split points were introduced - inserted = null; + Edge inserted =null; + while (inserted == null && index < edges.size()) { + inserted = edges.get(index++); + if (inserted.getNode() == null) { + if (node.insertCut(inserted.getLine())) { + 
inserted.setNode(node); } else { - - inserted = new Line(previous.getLocation(), current.getLocation()); - - if (node.insertCut(inserted)) { - previous.addNode(node); - previous.outgoingProcessed(); - current.addNode(node); - current.incomingProcessed(); - } else { - inserted = null; - } - + inserted = null; } - + } else { + inserted = null; } } - if (node.getCut() == null) { + if (inserted == null) { + // no suitable edge was found, the node remains a leaf node + // we need to set its inside/outside boolean indicator final BSPTree<Euclidean2D> parent = node.getParent(); if (parent == null || node == parent.getMinus()) { node.setAttribute(Boolean.TRUE); @@ -229,67 +260,58 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { return; } - // distribute the remaining vertices in the two sub-trees - Side currentSide = Side.HYPER; - final List<Vertex> plusList = new ArrayList<Vertex>(); - plusList.add(current); - int plusCount = 0; - final List<Vertex> minusList = new ArrayList<Vertex>(); - minusList.add(current); - int minusCount = 0; - while (index < vertices.size()) { - final Vertex previous = current; - final Side previousSide = currentSide; - current = vertices.get(index++); - final double currentOffset = inserted.getOffset(current.getLocation()); - currentSide = (FastMath.abs(currentOffset) <= hyperplaneThickness) ? - Side.HYPER : - ((currentOffset < 0) ? 
Side.MINUS : Side.PLUS); - switch (currentSide) { - case PLUS: - if (previousSide == Side.MINUS) { - // we need to insert a split point on the hyperplane - final Line line = new Line(previous.getLocation(), current.getLocation()); - final Vertex splitPoint = new Vertex(inserted.intersection(line)); - splitPoint.addNode(node); - minusList.add(splitPoint); - plusList.add(splitPoint); - } - plusList.add(current); - if (current.incomingNeedsProcessing() || current.outgoingNeedsProcessing()) { - ++plusCount; - } - break; - case MINUS: - if (previousSide == Side.PLUS) { - // we need to insert a split point on the hyperplane - final Line line = new Line(previous.getLocation(), current.getLocation()); - final Vertex splitPoint = new Vertex(inserted.intersection(line)); - splitPoint.addNode(node); - minusList.add(splitPoint); - plusList.add(splitPoint); - } - minusList.add(current); - if (current.incomingNeedsProcessing() || current.outgoingNeedsProcessing()) { - ++minusCount; + // we have split the node by inserted an edge as a cut sub-hyperplane + // distribute the remaining edges in the two sub-trees + final List<Edge> plusList = new ArrayList<Edge>(); + final List<Edge> minusList = new ArrayList<Edge>(); + for (final Edge edge : edges) { + if (edge != inserted) { + final double startOffset = inserted.getLine().getOffset(edge.getStart().getLocation()); + final double endOffset = inserted.getLine().getOffset(edge.getEnd().getLocation()); + Side startSide = (FastMath.abs(startOffset) <= hyperplaneThickness) ? + Side.HYPER : ((startOffset < 0) ? Side.MINUS : Side.PLUS); + Side endSide = (FastMath.abs(endOffset) <= hyperplaneThickness) ? + Side.HYPER : ((endOffset < 0) ? 
Side.MINUS : Side.PLUS); + switch (startSide) { + case PLUS: + if (endSide == Side.MINUS) { + // we need to insert a split point on the hyperplane + final Vertex splitPoint = edge.split(inserted.getLine()); + minusList.add(splitPoint.getOutgoing()); + plusList.add(splitPoint.getIncoming()); + } else { + plusList.add(edge); + } + break; + case MINUS: + if (endSide == Side.PLUS) { + // we need to insert a split point on the hyperplane + final Vertex splitPoint = edge.split(inserted.getLine()); + minusList.add(splitPoint.getIncoming()); + plusList.add(splitPoint.getOutgoing()); + } else { + minusList.add(edge); + } + break; + default: + if (endSide == Side.PLUS) { + plusList.add(edge); + } else if (endSide == Side.MINUS) { + minusList.add(edge); + } + break; } - break; - default: - current.addNode(node); - plusList.add(current); - minusList.add(current); - break; } } // recurse through lower levels - if (plusCount > 0) { - insertVertices(hyperplaneThickness, node.getPlus(), plusList); + if (!plusList.isEmpty()) { + insertEdges(hyperplaneThickness, node.getPlus(), plusList); } else { node.getPlus().setAttribute(Boolean.FALSE); } - if (minusCount > 0) { - insertVertices(hyperplaneThickness, node.getMinus(), minusList); + if (!minusList.isEmpty()) { + insertEdges(hyperplaneThickness, node.getMinus(), minusList); } else { node.getMinus().setAttribute(Boolean.TRUE); } @@ -302,23 +324,23 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { /** Vertex location. */ private final Vector2D location; - /** Nodes associated with the hyperplane containing this vertex. */ - private final List<BSPTree<Euclidean2D>> nodes; + /** Incoming edge. */ + private Edge incoming; - /** Indicator for incoming edges that still need processing. */ - private boolean incomingNeedsProcessing; + /** Outgoing edge. */ + private Edge outgoing; - /** Indicator for outgoing edges that still need processing. 
*/ - private boolean outgoingNeedsProcessing; + /** Lines bound with this vertex. */ + private final List<Line> lines; /** Build a non-processed vertex not owned by any node yet. * @param location vertex location */ public Vertex(final Vector2D location) { - this.location = location; - this.nodes = new ArrayList<BSPTree<Euclidean2D>>(); - this.incomingNeedsProcessing = true; - this.outgoingNeedsProcessing = true; + this.location = location; + this.incoming = null; + this.outgoing = null; + this.lines = new ArrayList<Line>(); } /** Get Vertex location. @@ -328,57 +350,160 @@ public class PolygonsSet extends AbstractRegion<Euclidean2D, Euclidean1D> { return location; } - /** Check if the instance and another vertex share a node. + /** Bind a line considered to contain this vertex. + * @param line line to bind with this vertex + */ + public void bindWith(final Line line) { + lines.add(line); + } + + /** Get the common line bound with both the instance and another vertex, if any. * <p> - * When two vertices share a node, this means they are already handled - * by the hyperplane of this node, so there is no need to create a cut - * hyperplane for them. + * When two vertices are both bound to the same line, this means they are + * already handled by node associated with this line, so there is no need + * to create a cut hyperplane for them. 
* </p> * @param vertex other vertex to check instance against - * @return true if the instance and another vertex share a node + * @return line bound with both the instance and another vertex, or null if the + * two vertices do not share a line yet */ - public boolean shareNodeWith(final Vertex vertex) { - for (final BSPTree<Euclidean2D> node1 : nodes) { - for (final BSPTree<Euclidean2D> node2 : vertex.nodes) { - if (node1 == node2) { - return true; + public Line sharedLineWith(final Vertex vertex) { + for (final Line line1 : lines) { + for (final Line line2 : vertex.lines) { + if (line1 == line2) { + return line1; } } } - return false; + return null; } - /** Add a node whose hyperplane contains this vertex. - * @param node node whose hyperplane contains this vertex + /** Set incoming edge. + * <p> + * The line supporting the incoming edge is automatically bound + * with the instance. + * </p> + * @param incoming incoming edge */ - public void addNode(final BSPTree<Euclidean2D> node) { - nodes.add(node); + public void setIncoming(final Edge incoming) { + this.incoming = incoming; + bindWith(incoming.getLine()); } - /** Check incoming edge processed indicator. - * @return true if incoming edge needs processing + /** Get incoming edge. + * @return incoming edge */ - public boolean incomingNeedsProcessing() { - return incomingNeedsProcessing; + public Edge getIncoming() { + return incoming; } - /** Check outgoing edge processed indicator. - * @return true if outgoing edge needs processing + /** Set outgoing edge. + * <p> + * The line supporting the outgoing edge is automatically bound + * with the instance. + * </p> + * @param incoming outgoing edge + */ + public void setOutgoing(final Edge outgoing) { + this.outgoing = outgoing; + bindWith(outgoing.getLine()); + } + + /** Get outgoing edge. + * @return outgoing edge + */ + public Edge getOutgoing() { + return outgoing; + } + + } + + /** Internal class for holding edges while they are processed to build a BSP tree. 
*/ + private static class Edge { + + /** Start vertex. */ + private final Vertex start; + + /** End vertex. */ + private final Vertex end; + + /** Line supporting the edge. */ + private final Line line; + + /** Node whose cut hyperplane contains this edge. */ + private BSPTree<Euclidean2D> node; + + /** Build an edge not contained in any node yet. + * @param start start vertex + * @param end end vertex + * @param line line supporting the edge + */ + public Edge(final Vertex start, final Vertex end, final Line line) { + + this.start = start; + this.end = end; + this.line = line; + this.node = null; + + // connect the vertices back to the edge + start.setOutgoing(this); + end.setIncoming(this); + + } + + /** Get start vertex. + * @return start vertex + */ + public Vertex getStart() { + return start; + } + + /** Get end vertex. + * @return end vertex */ - public boolean outgoingNeedsProcessing() { - return outgoingNeedsProcessing; + public Vertex getEnd() { + return end; } - /** Mark the incoming edge as processed. + /** Get the line supporting this edge. + * @return line supporting this edge */ - public void incomingProcessed() { - incomingNeedsProcessing = false; + public Line getLine() { + return line; } - /** Mark the outgoing edge as processed. + /** Set the node whose cut hyperplane contains this edge. + * @param node node whose cut hyperplane contains this edge + */ + public void setNode(final BSPTree<Euclidean2D> node) { + this.node = node; + } + + /** Get the node whose cut hyperplane contains this edge. + * @return node whose cut hyperplane contains this edge + * (null if edge has not yet been inserted into the BSP tree) + */ + public BSPTree<Euclidean2D> getNode() { + return node; + } + + /** Split the edge. + * <p> + * Once split, this edge is not referenced anymore by the vertices, + * it is replaced by the two half-edges and an intermediate splitting + * vertex is introduced to connect these two halves. 
+ * </p> + * @param splitLine line splitting the edge in two halves + * @return split vertex (its incoming and outgoing edges are the two halves) */ - public void outgoingProcessed() { - outgoingNeedsProcessing = false; + public Vertex split(final Line splitLine) { + final Vertex splitVertex = new Vertex(line.intersection(splitLine)); + splitVertex.bindWith(splitLine); + final Edge startHalf = new Edge(start, splitVertex, line); + final Edge endHalf = new Edge(splitVertex, end, line); + startHalf.node = node; + endHalf.node = node; + return splitVertex; } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-880_2a9cbbab.diff
bugs-dot-jar_data_MATH-988_d270055e
--- BugID: MATH-988 Summary: NPE when calling SubLine.intersection() with non-intersecting lines Description: |+ When calling SubLine.intersection() with two lines that not intersect, then a NullPointerException is thrown in Line.toSubSpace(). This bug is in the twod and threed implementations. The attached patch fixes both implementations and adds the required test cases. diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SubLine.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SubLine.java index a146cb6..aad7b65 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SubLine.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/SubLine.java @@ -111,6 +111,9 @@ public class SubLine { // compute the intersection on infinite line Vector3D v1D = line.intersection(subLine.line); + if (v1D == null) { + return null; + } // check location of point with respect to first sub-line Location loc1 = remainingRegion.checkPoint(line.toSubSpace(v1D)); diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/SubLine.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/SubLine.java index a9d621a..ea9e96a 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/SubLine.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/twod/SubLine.java @@ -115,6 +115,9 @@ public class SubLine extends AbstractSubHyperplane<Euclidean2D, Euclidean1D> { // compute the intersection on infinite line Vector2D v2D = line1.intersection(line2); + if (v2D == null) { + return null; + } // check location of point with respect to first sub-line Location loc1 = getRemainingRegion().checkPoint(line1.toSubSpace(v2D));
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-988_d270055e.diff
bugs-dot-jar_data_MATH-1208_ce2badf0
--- BugID: MATH-1208 Summary: EmpiricalDistribution cumulativeProbability can return NaN when evaluated within a constant bin Description: If x belongs to a bin with no variance or to which a ConstantRealDistribution kernel has been assigned, cumulativeProbability(x) can return NaN. diff --git a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java index 61bb65b..685c08b 100644 --- a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java +++ b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java @@ -610,7 +610,11 @@ public class EmpiricalDistribution extends AbstractRealDistribution { final double pB = pB(binIndex); final RealDistribution kernel = k(x); if (kernel instanceof ConstantRealDistribution) { - return pBminus + pB; + if (x < kernel.getNumericalMean()) { + return pBminus; + } else { + return pBminus + pB; + } } final double[] binBounds = getUpperBounds(); final double kB = kB(binIndex);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1208_ce2badf0.diff
bugs-dot-jar_data_MATH-373_bfe4623c
--- BugID: MATH-373 Summary: StatUtils.sum returns NaN for zero-length arrays Description: |- StatUtils.sum returns NaN for zero-length arrays, which is: 1. inconsistent with the mathematical notion of sum: in maths, sum_{i=0}^{N-1} a_i will be 0 for N=0. In particular, the identity sum_{i=0}^{k-1} a_i + sum_{i=k}^{N-1} = sum_{i=0}^{N-1} is broken for k = 0, since NaN + x = NaN, not x. 2. introduces hard to debug erros (returning a NaN is one of the worst forms of reporting an exceptional condition, as NaNs propagate silently and require manual tracing during the debugging) 3. enforces "special case" handling when the user expects that the summed array can have a zero length. The correct behaviour is, in my opinion, to return 0.0, not NaN in the above case. diff --git a/src/main/java/org/apache/commons/math/stat/descriptive/summary/Product.java b/src/main/java/org/apache/commons/math/stat/descriptive/summary/Product.java index da6e380..f9796b4 100644 --- a/src/main/java/org/apache/commons/math/stat/descriptive/summary/Product.java +++ b/src/main/java/org/apache/commons/math/stat/descriptive/summary/Product.java @@ -25,7 +25,8 @@ import org.apache.commons.math.util.FastMath; /** * Returns the product of the available values. * <p> - * If there are no values in the dataset, or any of the values are + * If there are no values in the dataset, then 1 is returned. 
+ * If any of the values are * <code>NaN</code>, then <code>NaN</code> is returned.</p> * <p> * <strong>Note that this implementation is not synchronized.</strong> If @@ -53,7 +54,7 @@ public class Product extends AbstractStorelessUnivariateStatistic implements Ser */ public Product() { n = 0; - value = Double.NaN; + value = 1; } /** @@ -71,11 +72,7 @@ public class Product extends AbstractStorelessUnivariateStatistic implements Ser */ @Override public void increment(final double d) { - if (n == 0) { - value = d; - } else { - value *= d; - } + value *= d; n++; } @@ -99,7 +96,7 @@ public class Product extends AbstractStorelessUnivariateStatistic implements Ser */ @Override public void clear() { - value = Double.NaN; + value = 1; n = 0; } @@ -113,14 +110,14 @@ public class Product extends AbstractStorelessUnivariateStatistic implements Ser * @param values the input array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the product of the values or Double.NaN if length = 0 + * @return the product of the values or 1 if length = 0 * @throws IllegalArgumentException if the array is null or the array index * parameters are not valid */ @Override public double evaluate(final double[] values, final int begin, final int length) { double product = Double.NaN; - if (test(values, begin, length)) { + if (test(values, begin, length, true)) { product = 1.0; for (int i = begin; i < begin + length; i++) { product *= values[i]; @@ -153,14 +150,14 @@ public class Product extends AbstractStorelessUnivariateStatistic implements Ser * @param weights the weights array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the product of the values or Double.NaN if length = 0 + * @return the product of the values or 1 if length = 0 * @throws IllegalArgumentException if the parameters are not valid * @since 2.1 */ public double evaluate(final double[] 
values, final double[] weights, final int begin, final int length) { double product = Double.NaN; - if (test(values, weights, begin, length)) { + if (test(values, weights, begin, length, true)) { product = 1.0; for (int i = begin; i < begin + length; i++) { product *= FastMath.pow(values[i], weights[i]); diff --git a/src/main/java/org/apache/commons/math/stat/descriptive/summary/Sum.java b/src/main/java/org/apache/commons/math/stat/descriptive/summary/Sum.java index 2b0fd9b..3543575 100644 --- a/src/main/java/org/apache/commons/math/stat/descriptive/summary/Sum.java +++ b/src/main/java/org/apache/commons/math/stat/descriptive/summary/Sum.java @@ -24,7 +24,8 @@ import org.apache.commons.math.stat.descriptive.AbstractStorelessUnivariateStati /** * Returns the sum of the available values. * <p> - * If there are no values in the dataset, or any of the values are + * If there are no values in the dataset, then 0 is returned. + * If any of the values are * <code>NaN</code>, then <code>NaN</code> is returned.</p> * <p> * <strong>Note that this implementation is not synchronized.</strong> If @@ -52,7 +53,7 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali */ public Sum() { n = 0; - value = Double.NaN; + value = 0; } /** @@ -70,11 +71,7 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali */ @Override public void increment(final double d) { - if (n == 0) { - value = d; - } else { - value += d; - } + value += d; n++; } @@ -98,13 +95,13 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali */ @Override public void clear() { - value = Double.NaN; + value = 0; n = 0; } /** * The sum of the entries in the specified portion of - * the input array, or <code>Double.NaN</code> if the designated subarray + * the input array, or 0 if the designated subarray * is empty. 
* <p> * Throws <code>IllegalArgumentException</code> if the array is null.</p> @@ -112,14 +109,14 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali * @param values the input array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the sum of the values or Double.NaN if length = 0 + * @return the sum of the values or 0 if length = 0 * @throws IllegalArgumentException if the array is null or the array index * parameters are not valid */ @Override public double evaluate(final double[] values, final int begin, final int length) { double sum = Double.NaN; - if (test(values, begin, length)) { + if (test(values, begin, length, true)) { sum = 0.0; for (int i = begin; i < begin + length; i++) { sum += values[i]; @@ -130,7 +127,7 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali /** * The weighted sum of the entries in the specified portion of - * the input array, or <code>Double.NaN</code> if the designated subarray + * the input array, or 0 if the designated subarray * is empty. 
* <p> * Throws <code>IllegalArgumentException</code> if any of the following are true: @@ -151,14 +148,14 @@ public class Sum extends AbstractStorelessUnivariateStatistic implements Seriali * @param weights the weights array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the sum of the values or Double.NaN if length = 0 + * @return the sum of the values or 0 if length = 0 * @throws IllegalArgumentException if the parameters are not valid * @since 2.1 */ public double evaluate(final double[] values, final double[] weights, final int begin, final int length) { double sum = Double.NaN; - if (test(values, weights, begin, length)) { + if (test(values, weights, begin, length, true)) { sum = 0.0; for (int i = begin; i < begin + length; i++) { sum += values[i] * weights[i]; diff --git a/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfLogs.java b/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfLogs.java index b4280cc..24960e3 100644 --- a/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfLogs.java +++ b/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfLogs.java @@ -24,7 +24,7 @@ import org.apache.commons.math.util.FastMath; /** * Returns the sum of the natural logs for this collection of values. * <p> - * Uses {@link java.lang.Math#log(double)} to compute the logs. Therefore, + * Uses {@link org.apache.commons.Math.util.FastMath#log(double)} to compute the logs. 
Therefore, * <ul> * <li>If any of values are < 0, the result is <code>NaN.</code></li> * <li>If all values are non-negative and less than @@ -87,11 +87,7 @@ public class SumOfLogs extends AbstractStorelessUnivariateStatistic implements S */ @Override public double getResult() { - if (n > 0) { - return value; - } else { - return Double.NaN; - } + return value; } /** @@ -122,7 +118,7 @@ public class SumOfLogs extends AbstractStorelessUnivariateStatistic implements S * @param values the input array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the sum of the natural logs of the values or Double.NaN if + * @return the sum of the natural logs of the values or 0 if * length = 0 * @throws IllegalArgumentException if the array is null or the array index * parameters are not valid @@ -130,7 +126,7 @@ public class SumOfLogs extends AbstractStorelessUnivariateStatistic implements S @Override public double evaluate(final double[] values, final int begin, final int length) { double sumLog = Double.NaN; - if (test(values, begin, length)) { + if (test(values, begin, length, true)) { sumLog = 0.0; for (int i = begin; i < begin + length; i++) { sumLog += FastMath.log(values[i]); diff --git a/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfSquares.java b/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfSquares.java index a1429f9..36a2168 100644 --- a/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfSquares.java +++ b/src/main/java/org/apache/commons/math/stat/descriptive/summary/SumOfSquares.java @@ -23,7 +23,8 @@ import org.apache.commons.math.stat.descriptive.AbstractStorelessUnivariateStati /** * Returns the sum of the squares of the available values. * <p> - * If there are no values in the dataset, or any of the values are + * If there are no values in the dataset, then 0 is returned. 
+ * If any of the values are * <code>NaN</code>, then <code>NaN</code> is returned.</p> * <p> * <strong>Note that this implementation is not synchronized.</strong> If @@ -51,7 +52,7 @@ public class SumOfSquares extends AbstractStorelessUnivariateStatistic implement */ public SumOfSquares() { n = 0; - value = Double.NaN; + value = 0; } /** @@ -69,11 +70,7 @@ public class SumOfSquares extends AbstractStorelessUnivariateStatistic implement */ @Override public void increment(final double d) { - if (n == 0) { - value = d * d; - } else { - value += d * d; - } + value += d * d; n++; } @@ -97,7 +94,7 @@ public class SumOfSquares extends AbstractStorelessUnivariateStatistic implement */ @Override public void clear() { - value = Double.NaN; + value = 0; n = 0; } @@ -111,14 +108,14 @@ public class SumOfSquares extends AbstractStorelessUnivariateStatistic implement * @param values the input array * @param begin index of the first array element to include * @param length the number of elements to include - * @return the sum of the squares of the values or Double.NaN if length = 0 + * @return the sum of the squares of the values or 0 if length = 0 * @throws IllegalArgumentException if the array is null or the array index * parameters are not valid */ @Override public double evaluate(final double[] values,final int begin, final int length) { double sumSq = Double.NaN; - if (test(values, begin, length)) { + if (test(values, begin, length, true)) { sumSq = 0.0; for (int i = begin; i < begin + length; i++) { sumSq += values[i] * values[i];
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-373_bfe4623c.diff
bugs-dot-jar_data_MATH-835_63a48705
--- BugID: MATH-835 Summary: Fraction percentageValue rare overflow Description: |- The percentageValue() method of the Fraction class works by first multiplying the Fraction by 100, then converting the Fraction to a double. This causes overflows when the numerator is greater than Integer.MAX_VALUE/100, even when the value of the fraction is far below this value. The patch changes the method to first convert to a double value, and then multiply this value by 100 - the result should be the same, but with less overflows. An addition to the test for the method that covers this bug is also included. diff --git a/src/main/java/org/apache/commons/math3/fraction/Fraction.java b/src/main/java/org/apache/commons/math3/fraction/Fraction.java index f84218e..f81ff0a 100644 --- a/src/main/java/org/apache/commons/math3/fraction/Fraction.java +++ b/src/main/java/org/apache/commons/math3/fraction/Fraction.java @@ -594,7 +594,7 @@ public class Fraction * @return the fraction percentage as a <tt>double</tt>. */ public double percentageValue() { - return multiply(100).doubleValue(); + return 100 * doubleValue(); } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-835_63a48705.diff
bugs-dot-jar_data_MATH-679_5e638976
--- BugID: MATH-679 Summary: Integer overflow in OpenMapRealMatrix Description: |- computeKey() has an integer overflow. Since it is a sparse matrix, this is quite easily encountered long before heap space is exhausted. The attached code demonstrates the problem, which could potentially be a security vulnerability (for example, if one was to use this matrix to store access control information). Workaround: never create an OpenMapRealMatrix with more cells than are addressable with an int. diff --git a/src/main/java/org/apache/commons/math/linear/OpenMapRealMatrix.java b/src/main/java/org/apache/commons/math/linear/OpenMapRealMatrix.java index 4369db7..f05c878 100644 --- a/src/main/java/org/apache/commons/math/linear/OpenMapRealMatrix.java +++ b/src/main/java/org/apache/commons/math/linear/OpenMapRealMatrix.java @@ -19,6 +19,7 @@ package org.apache.commons.math.linear; import java.io.Serializable; +import org.apache.commons.math.exception.NumberIsTooLargeException; import org.apache.commons.math.util.OpenIntToDoubleHashMap; /** @@ -46,6 +47,11 @@ public class OpenMapRealMatrix extends AbstractRealMatrix */ public OpenMapRealMatrix(int rowDimension, int columnDimension) { super(rowDimension, columnDimension); + long lRow = (long) rowDimension; + long lCol = (long) columnDimension; + if (lRow * lCol >= (long) Integer.MAX_VALUE) { + throw new NumberIsTooLargeException(lRow * lCol, Integer.MAX_VALUE, false); + } this.rows = rowDimension; this.columns = columnDimension; this.entries = new OpenIntToDoubleHashMap(0.0);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-679_5e638976.diff
bugs-dot-jar_data_MATH-327_262fe4c0
--- BugID: MATH-327 Summary: " Maximal number of iterations (540) exceeded" Description: "I have a matrix of size 49x19 and when I apply SVD on this matrix it raises the following exception. The problem which I am facing is that SVD works for some matrix and doesn't work for others. I have no clue what is the possible reason.\n\nException::\nCorrespondenceAnalysis: org.apache.commons.math.MaxIterationsExceededException: Maximal number of iterations (540) exceeded \n[org.apache.commons.math.linear.EigenDecompositionImpl.processGeneralBlock(EigenDecompositionImpl.java:881), org.apache.commons.math.linear.EigenDecompositionImpl.findEigenvalues(EigenDecompositionImpl.java:651), org.apache.commons.math.linear.EigenDecompositionImpl.decompose(EigenDecompositionImpl.java:243), org.apache.commons.math.linear.EigenDecompositionImpl.<init>(EigenDecompositionImpl.java:202), org.apache.commons.math.linear.SingularValueDecompositionImpl.<init>(SingularValueDecompositionImpl.java:114),\n\n\nRealMatrix m = [[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 
0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 
0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 
1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 
0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 
0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.99107143, 1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.94450431, 1.00000000, 1.00000000, 0.99107143, 0.95238096, 1.00000000, 1.00000000, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 
0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573],[1.00000000, 1.00000000, 0.94999999, 0.95833331, 1.00000000, 1.00000000, 0.99107143, 0.94583333, 1.00000000, 0.95000000, 0.98333333, 0.92106681, 0.97368419, 1.00000000, 0.95357142, 0.95238096, 1.00000000, 0.93333334, 0.96428573]]\n\nRealMatrix rcp = MatrixUtils.createRealMatrix(CP);\t\nSingularValueDecomposition svd = new SingularValueDecompositionImpl(rcp);\t\t\n\nRealMatrix U = svd.getU();\nRealMatrix S = svd.getS();\nRealMatrix Vt = svd.getVT();\ndouble[] singularValues = svd.getSingularValues();" diff --git a/src/main/java/org/apache/commons/math/linear/SingularValueDecompositionImpl.java b/src/main/java/org/apache/commons/math/linear/SingularValueDecompositionImpl.java index 5e27413..c2c655e 100644 --- a/src/main/java/org/apache/commons/math/linear/SingularValueDecompositionImpl.java +++ b/src/main/java/org/apache/commons/math/linear/SingularValueDecompositionImpl.java @@ -14,7 +14,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.commons.math.linear; import org.apache.commons.math.exception.NumberIsTooLargeException; @@ -35,23 +34,38 @@ import org.apache.commons.math.util.FastMath; * @since 2.0 */ public class SingularValueDecompositionImpl implements SingularValueDecomposition { - /** Number of rows of the initial matrix. */ + + /** Relative threshold for small singular values. */ + private static final double EPS = 0x1.0p-52; + + /** Absolute threshold for small singular values. */ + private static final double TINY = 0x1.0p-966; + + /** Computed singular values. */ + private double[] singularValues; + + /** Row dimension. */ private int m; - /** Number of columns of the initial matrix. */ + + /** Column dimension. */ private int n; - /** Eigen decomposition of the tridiagonal matrix. */ - private EigenDecomposition eigenDecomposition; - /** Singular values. 
*/ - private double[] singularValues; - /** Cached value of U. */ + + /** Indicator for transposed matrix. */ + private boolean transposed; + + /** Cached value of U matrix. */ private RealMatrix cachedU; - /** Cached value of U<sup>T</sup>. */ + + /** Cached value of transposed U matrix. */ private RealMatrix cachedUt; - /** Cached value of S. */ + + /** Cached value of S (diagonal) matrix. */ private RealMatrix cachedS; - /** Cached value of V. */ + + /** Cached value of V matrix. */ private RealMatrix cachedV; - /** Cached value of V<sup>T</sup>. */ + + /** Cached value of transposed V matrix. */ private RealMatrix cachedVt; /** @@ -60,81 +74,398 @@ public class SingularValueDecompositionImpl implements SingularValueDecompositio * @param matrix Matrix to decompose. */ public SingularValueDecompositionImpl(final RealMatrix matrix) { + + // Derived from LINPACK code. + // Initialize. + double[][] A; m = matrix.getRowDimension(); n = matrix.getColumnDimension(); - - cachedU = null; - cachedS = null; - cachedV = null; - cachedVt = null; - - double[][] localcopy = matrix.getData(); - double[][] matATA = new double[n][n]; - // - // create A^T*A - // - for (int i = 0; i < n; i++) { - for (int j = i; j < n; j++) { - matATA[i][j] = 0.0; - for (int k = 0; k < m; k++) { - matATA[i][j] += localcopy[k][i] * localcopy[k][j]; + if (matrix.getRowDimension() < matrix.getColumnDimension()) { + transposed = true; + A = matrix.transpose().getData(); + m = matrix.getColumnDimension(); + n = matrix.getRowDimension(); + } else { + transposed = false; + A = matrix.getData(); + m = matrix.getRowDimension(); + n = matrix.getColumnDimension(); + } + int nu = FastMath.min(m, n); + singularValues = new double[FastMath.min(m + 1, n)]; + double[][] U = new double[m][nu]; + double[][] V = new double[n][n]; + double[] e = new double[n]; + double[] work = new double[m]; + boolean wantu = true; + boolean wantv = true; + // Reduce A to bidiagonal form, storing the diagonal elements + // in s and 
the super-diagonal elements in e. + int nct = FastMath.min(m - 1, n); + int nrt = FastMath.max(0, FastMath.min(n - 2, m)); + for (int k = 0; k < FastMath.max(nct, nrt); k++) { + if (k < nct) { + // Compute the transformation for the k-th column and + // place the k-th diagonal in s[k]. + // Compute 2-norm of k-th column without under/overflow. + singularValues[k] = 0; + for (int i = k; i < m; i++) { + singularValues[k] = FastMath.hypot(singularValues[k], A[i][k]); } - matATA[j][i] = matATA[i][j]; + if (singularValues[k] != 0.0) { + if (A[k][k] < 0.0) { + singularValues[k] = -singularValues[k]; + } + for (int i = k; i < m; i++) { + A[i][k] /= singularValues[k]; + } + A[k][k] += 1.0; + } + singularValues[k] = -singularValues[k]; } - } - - double[][] matAAT = new double[m][m]; - // - // create A*A^T - // - for (int i = 0; i < m; i++) { - for (int j = i; j < m; j++) { - matAAT[i][j] = 0.0; - for (int k = 0; k < n; k++) { - matAAT[i][j] += localcopy[i][k] * localcopy[j][k]; + for (int j = k + 1; j < n; j++) { + if ((k < nct) & (singularValues[k] != 0.0)) { + // Apply the transformation. + double t = 0; + for (int i = k; i < m; i++) { + t += A[i][k] * A[i][j]; + } + t = -t / A[k][k]; + for (int i = k; i < m; i++) { + A[i][j] += t * A[i][k]; + } + } + // Place the k-th row of A into e for the + // subsequent calculation of the row transformation. + e[j] = A[k][j]; + } + if (wantu & (k < nct)) { + // Place the transformation in U for subsequent back + // multiplication. + for (int i = k; i < m; i++) { + U[i][k] = A[i][k]; + } + } + if (k < nrt) { + // Compute the k-th row transformation and place the + // k-th super-diagonal in e[k]. + // Compute 2-norm without under/overflow. 
+ e[k] = 0; + for (int i = k + 1; i < n; i++) { + e[k] = FastMath.hypot(e[k], e[i]); + } + if (e[k] != 0.0) { + if (e[k + 1] < 0.0) { + e[k] = -e[k]; + } + for (int i = k + 1; i < n; i++) { + e[i] /= e[k]; + } + e[k + 1] += 1.0; + } + e[k] = -e[k]; + if ((k + 1 < m) & (e[k] != 0.0)) { + // Apply the transformation. + for (int i = k + 1; i < m; i++) { + work[i] = 0.0; + } + for (int j = k + 1; j < n; j++) { + for (int i = k + 1; i < m; i++) { + work[i] += e[j] * A[i][j]; + } + } + for (int j = k + 1; j < n; j++) { + double t = -e[j] / e[k + 1]; + for (int i = k + 1; i < m; i++) { + A[i][j] += t * work[i]; + } + } + } + if (wantv) { + // Place the transformation in V for subsequent + // back multiplication. + for (int i = k + 1; i < n; i++) { + V[i][k] = e[i]; + } } - matAAT[j][i] = matAAT[i][j]; } } - int p; - if (m >= n) { - p = n; - // compute eigen decomposition of A^T*A - eigenDecomposition - = new EigenDecompositionImpl(new Array2DRowRealMatrix(matATA), 1); - singularValues = eigenDecomposition.getRealEigenvalues(); - cachedV = eigenDecomposition.getV(); - // compute eigen decomposition of A*A^T - eigenDecomposition - = new EigenDecompositionImpl(new Array2DRowRealMatrix(matAAT), 1); - cachedU = eigenDecomposition.getV().getSubMatrix(0, m - 1, 0, p - 1); - } else { - p = m; - // compute eigen decomposition of A*A^T - eigenDecomposition - = new EigenDecompositionImpl(new Array2DRowRealMatrix(matAAT), 1); - singularValues = eigenDecomposition.getRealEigenvalues(); - cachedU = eigenDecomposition.getV(); - - // compute eigen decomposition of A^T*A - eigenDecomposition - = new EigenDecompositionImpl(new Array2DRowRealMatrix(matATA), 1); - cachedV = eigenDecomposition.getV().getSubMatrix(0, n - 1 , 0, p - 1); + // Set up the final bidiagonal matrix or order p. 
+ int p = FastMath.min(n, m + 1); + if (nct < n) { + singularValues[nct] = A[nct][nct]; + } + if (m < p) { + singularValues[p - 1] = 0.0; + } + if (nrt + 1 < p) { + e[nrt] = A[nrt][p - 1]; + } + e[p - 1] = 0.0; + // If required, generate U. + if (wantu) { + for (int j = nct; j < nu; j++) { + for (int i = 0; i < m; i++) { + U[i][j] = 0.0; + } + U[j][j] = 1.0; + } + for (int k = nct - 1; k >= 0; k--) { + if (singularValues[k] != 0.0) { + for (int j = k + 1; j < nu; j++) { + double t = 0; + for (int i = k; i < m; i++) { + t += U[i][k] * U[i][j]; + } + t = -t / U[k][k]; + for (int i = k; i < m; i++) { + U[i][j] += t * U[i][k]; + } + } + for (int i = k; i < m; i++) { + U[i][k] = -U[i][k]; + } + U[k][k] = 1.0 + U[k][k]; + for (int i = 0; i < k - 1; i++) { + U[i][k] = 0.0; + } + } else { + for (int i = 0; i < m; i++) { + U[i][k] = 0.0; + } + U[k][k] = 1.0; + } + } } - for (int i = 0; i < p; i++) { - singularValues[i] = FastMath.sqrt(FastMath.abs(singularValues[i])); + // If required, generate V. + if (wantv) { + for (int k = n - 1; k >= 0; k--) { + if ((k < nrt) & (e[k] != 0.0)) { + for (int j = k + 1; j < nu; j++) { + double t = 0; + for (int i = k + 1; i < n; i++) { + t += V[i][k] * V[i][j]; + } + t = -t / V[k + 1][k]; + for (int i = k + 1; i < n; i++) { + V[i][j] += t * V[i][k]; + } + } + } + for (int i = 0; i < n; i++) { + V[i][k] = 0.0; + } + V[k][k] = 1.0; + } } - // Up to this point, U and V are computed independently of each other. - // There still a sign indetermination of each column of, say, U. - // The sign is set such that A.V_i=sigma_i.U_i (i<=p) - // The right sign corresponds to a positive dot product of A.V_i and U_i - for (int i = 0; i < p; i++) { - RealVector tmp = cachedU.getColumnVector(i); - double product=matrix.operate(cachedV.getColumnVector(i)).dotProduct(tmp); - if (product < 0) { - cachedU.setColumnVector(i, tmp.mapMultiply(-1)); + // Main iteration loop for the singular values. 
+ int pp = p - 1; + int iter = 0; + while (p > 0) { + int k, kase; + // Here is where a test for too many iterations would go. + // This section of the program inspects for + // negligible elements in the s and e arrays. On + // completion the variables kase and k are set as follows. + // kase = 1 if s(p) and e[k-1] are negligible and k<p + // kase = 2 if s(k) is negligible and k<p + // kase = 3 if e[k-1] is negligible, k<p, and + // s(k), ..., s(p) are not negligible (qr step). + // kase = 4 if e(p-1) is negligible (convergence). + for (k = p - 2; k >= -1; k--) { + if (k == -1) { + break; + } + final double threshold = + TINY + EPS * (FastMath.abs(singularValues[k]) + FastMath.abs(singularValues[k + 1])); + if (FastMath.abs(e[k]) <= threshold) { + e[k] = 0.0; + break; + } + } + if (k == p - 2) { + kase = 4; + } else { + int ks; + for (ks = p - 1; ks >= k; ks--) { + if (ks == k) { + break; + } + double t = (ks != p ? FastMath.abs(e[ks]) : 0.0) + + (ks != k + 1 ? FastMath.abs(e[ks - 1]) : 0.0); + if (FastMath.abs(singularValues[ks]) <= TINY + EPS * t) { + singularValues[ks] = 0.0; + break; + } + } + if (ks == k) { + kase = 3; + } else if (ks == p - 1) { + kase = 1; + } else { + kase = 2; + k = ks; + } + } + k++; + // Perform the task indicated by kase. + switch (kase) { + // Deflate negligible s(p). + case 1: { + double f = e[p - 2]; + e[p - 2] = 0.0; + for (int j = p - 2; j >= k; j--) { + double t = FastMath.hypot(singularValues[j], f); + double cs = singularValues[j] / t; + double sn = f / t; + singularValues[j] = t; + if (j != k) { + f = -sn * e[j - 1]; + e[j - 1] = cs * e[j - 1]; + } + if (wantv) { + for (int i = 0; i < n; i++) { + t = cs * V[i][j] + sn * V[i][p - 1]; + V[i][p - 1] = -sn * V[i][j] + cs * V[i][p - 1]; + V[i][j] = t; + } + } + } + } + break; + // Split at negligible s(k). 
+ case 2: { + double f = e[k - 1]; + e[k - 1] = 0.0; + for (int j = k; j < p; j++) { + double t = FastMath.hypot(singularValues[j], f); + double cs = singularValues[j] / t; + double sn = f / t; + singularValues[j] = t; + f = -sn * e[j]; + e[j] = cs * e[j]; + if (wantu) { + for (int i = 0; i < m; i++) { + t = cs * U[i][j] + sn * U[i][k - 1]; + U[i][k - 1] = -sn * U[i][j] + cs * U[i][k - 1]; + U[i][j] = t; + } + } + } + } + break; + // Perform one qr step. + case 3: { + // Calculate the shift. + double scale = FastMath.max(FastMath.max(FastMath.max(FastMath.max( + FastMath.abs(singularValues[p - 1]), FastMath.abs(singularValues[p - 2])), FastMath.abs(e[p - 2])), + FastMath.abs(singularValues[k])), FastMath.abs(e[k])); + double sp = singularValues[p - 1] / scale; + double spm1 = singularValues[p - 2] / scale; + double epm1 = e[p - 2] / scale; + double sk = singularValues[k] / scale; + double ek = e[k] / scale; + double b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2.0; + double c = (sp * epm1) * (sp * epm1); + double shift = 0.0; + if ((b != 0.0) | (c != 0.0)) { + shift = FastMath.sqrt(b * b + c); + if (b < 0.0) { + shift = -shift; + } + shift = c / (b + shift); + } + double f = (sk + sp) * (sk - sp) + shift; + double g = sk * ek; + // Chase zeros. 
+ for (int j = k; j < p - 1; j++) { + double t = FastMath.hypot(f, g); + double cs = f / t; + double sn = g / t; + if (j != k) { + e[j - 1] = t; + } + f = cs * singularValues[j] + sn * e[j]; + e[j] = cs * e[j] - sn * singularValues[j]; + g = sn * singularValues[j + 1]; + singularValues[j + 1] = cs * singularValues[j + 1]; + if (wantv) { + for (int i = 0; i < n; i++) { + t = cs * V[i][j] + sn * V[i][j + 1]; + V[i][j + 1] = -sn * V[i][j] + cs * V[i][j + 1]; + V[i][j] = t; + } + } + t = FastMath.hypot(f, g); + cs = f / t; + sn = g / t; + singularValues[j] = t; + f = cs * e[j] + sn * singularValues[j + 1]; + singularValues[j + 1] = -sn * e[j] + cs * singularValues[j + 1]; + g = sn * e[j + 1]; + e[j + 1] = cs * e[j + 1]; + if (wantu && (j < m - 1)) { + for (int i = 0; i < m; i++) { + t = cs * U[i][j] + sn * U[i][j + 1]; + U[i][j + 1] = -sn * U[i][j] + cs * U[i][j + 1]; + U[i][j] = t; + } + } + } + e[p - 2] = f; + iter = iter + 1; + } + break; + // Convergence. + default: { + // Make the singular values positive. + if (singularValues[k] <= 0.0) { + singularValues[k] = singularValues[k] < 0.0 ? -singularValues[k] : 0.0; + if (wantv) { + for (int i = 0; i <= pp; i++) { + V[i][k] = -V[i][k]; + } + } + } + // Order the singular values. 
+ while (k < pp) { + if (singularValues[k] >= singularValues[k + 1]) { + break; + } + double t = singularValues[k]; + singularValues[k] = singularValues[k + 1]; + singularValues[k + 1] = t; + if (wantv && (k < n - 1)) { + for (int i = 0; i < n; i++) { + t = V[i][k + 1]; + V[i][k + 1] = V[i][k]; + V[i][k] = t; + } + } + if (wantu && (k < m - 1)) { + for (int i = 0; i < m; i++) { + t = U[i][k + 1]; + U[i][k + 1] = U[i][k]; + U[i][k] = t; + } + } + k++; + } + iter = 0; + p--; + } + break; } } + + if (!transposed) { + cachedU = MatrixUtils.createRealMatrix(U); + cachedV = MatrixUtils.createRealMatrix(V); + } else { + cachedU = MatrixUtils.createRealMatrix(V); + cachedV = MatrixUtils.createRealMatrix(U); + + } } /** {@inheritDoc} */ @@ -193,7 +524,7 @@ public class SingularValueDecompositionImpl implements SingularValueDecompositio if (dimension == 0) { throw new NumberIsTooLargeException(LocalizedFormats.TOO_LARGE_CUTOFF_SINGULAR_VALUE, - minSingularValue, singularValues[0], true); + minSingularValue, singularValues[0], true); } final double[][] data = new double[dimension][p]; @@ -217,19 +548,19 @@ public class SingularValueDecompositionImpl implements SingularValueDecompositio /** {@inheritDoc} */ public double getConditionNumber() { - return singularValues[0] / singularValues[singularValues.length - 1]; + return singularValues[0] / singularValues[FastMath.min(m, n) - 1]; } /** {@inheritDoc} */ public int getRank() { - final double threshold = FastMath.max(m, n) * FastMath.ulp(singularValues[0]); - - for (int i = singularValues.length - 1; i >= 0; --i) { - if (singularValues[i] > threshold) { - return i + 1; + double tol = FastMath.max(m, n) * singularValues[0] * EPS; + int r = 0; + for (int i = 0; i < singularValues.length; i++) { + if (singularValues[i] > tol) { + r++; } } - return 0; + return r; } /** {@inheritDoc} */ @@ -253,14 +584,14 @@ public class SingularValueDecompositionImpl implements SingularValueDecompositio * @param nonSingular Singularity indicator. 
*/ private Solver(final double[] singularValues, final RealMatrix uT, - final RealMatrix v, final boolean nonSingular) { + final RealMatrix v, final boolean nonSingular) { double[][] suT = uT.getData(); for (int i = 0; i < singularValues.length; ++i) { final double a; if (singularValues[i] > 0) { - a = 1 / singularValues[i]; + a = 1 / singularValues[i]; } else { - a = 0; + a = 0; } final double[] suTi = suT[i]; for (int j = 0; j < suTi.length; ++j) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-327_262fe4c0.diff
bugs-dot-jar_data_MATH-713_f656676e
--- BugID: MATH-713 Summary: Negative value with restrictNonNegative Description: |- Problem: commons-math-2.2 SimplexSolver. A variable with 0 coefficient may be assigned a negative value nevertheless restrictToNonnegative flag in call: SimplexSolver.optimize(function, constraints, GoalType.MINIMIZE, true); Function 1 * x + 1 * y + 0 Constraints: 1 * x + 0 * y = 1 Result: x = 1; y = -1; Probably variables with 0 coefficients are omitted at some point of computation and because of that the restrictions do not affect their values. diff --git a/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java index fd89432..d96c916 100644 --- a/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java +++ b/src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java @@ -407,7 +407,12 @@ class SimplexTableau implements Serializable { continue; } Integer basicRow = getBasicRow(colIndex); - if (basicRows.contains(basicRow)) { + if (basicRow != null && basicRow == 0) { + // if the basic row is found to be the objective function row + // set the coefficient to 0 -> this case handles unconstrained + // variables that are still part of the objective function + coefficients[i] = 0; + } else if (basicRows.contains(basicRow)) { // if multiple variables can take a given value // then we choose the first and set the rest equal to 0 coefficients[i] = 0 - (restrictToNonNegative ? 0 : mostNegative);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-713_f656676e.diff
bugs-dot-jar_data_MATH-757_76b7413d
--- BugID: MATH-757 Summary: ResizableDoubleArray is not thread-safe yet has some synch. methods Description: | ResizableDoubleArray has several synchronised methods, but is not thread-safe, because class variables are not always accessed using the lock. Is the class supposed to be thread-safe? If so, all accesses (read and write) need to be synch. If not, the synch. qualifiers could be dropped. In any case, the protected fields need to be made private. diff --git a/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java b/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java index b0dd132..09fd748 100644 --- a/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java +++ b/src/main/java/org/apache/commons/math4/util/ResizableDoubleArray.java @@ -21,21 +21,15 @@ import java.util.Arrays; import org.apache.commons.math4.exception.MathIllegalArgumentException; import org.apache.commons.math4.exception.MathIllegalStateException; -import org.apache.commons.math4.exception.MathInternalError; import org.apache.commons.math4.exception.NotStrictlyPositiveException; import org.apache.commons.math4.exception.NullArgumentException; import org.apache.commons.math4.exception.NumberIsTooSmallException; import org.apache.commons.math4.exception.util.LocalizedFormats; /** - * <p> * A variable length {@link DoubleArray} implementation that automatically * handles expanding and contracting its internal storage array as elements * are added and removed. - * </p> - * <h3>Important note: Usage should not assume that this class is thread-safe - * even though some of the methods are {@code synchronized}. - * This qualifier will be dropped in the next major release (4.0).</h3> * <p> * The internal storage array starts with capacity determined by the * {@code initialCapacity} property, which can be set by the constructor. @@ -51,7 +45,6 @@ import org.apache.commons.math4.exception.util.LocalizedFormats; * locations added). 
* The default {@code expansionMode} is {@code MULTIPLICATIVE} and the default * {@code expansionFactor} is 2. - * </p> * <p> * The {@link #addElementRolling(double)} method adds a new element to the end * of the internal storage array and adjusts the "usable window" of the @@ -71,26 +64,16 @@ import org.apache.commons.math4.exception.util.LocalizedFormats; * {@code contractionFactor.} If the {@code expansionMode} * is {@code ADDITIVE}, the number of excess storage locations * is compared to {@code contractionFactor}. - * </p> * <p> * To avoid cycles of expansions and contractions, the * {@code expansionFactor} must not exceed the {@code contractionFactor}. * Constructors and mutators for both of these properties enforce this * requirement, throwing a {@code MathIllegalArgumentException} if it is * violated. - * </p> + * <p> + * <b>Note:</b> this class is <b>NOT</b> thread-safe. */ public class ResizableDoubleArray implements DoubleArray, Serializable { - /** Additive expansion mode. - * @deprecated As of 3.1. Please use {@link ExpansionMode#ADDITIVE} instead. - */ - @Deprecated - public static final int ADDITIVE_MODE = 1; - /** Multiplicative expansion mode. - * @deprecated As of 3.1. Please use {@link ExpansionMode#MULTIPLICATIVE} instead. - */ - @Deprecated - public static final int MULTIPLICATIVE_MODE = 0; /** Serializable version identifier. */ private static final long serialVersionUID = -3485529955529426875L; @@ -98,6 +81,8 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { private static final int DEFAULT_INITIAL_CAPACITY = 16; /** Default value for array size modifier. */ private static final double DEFAULT_EXPANSION_FACTOR = 2.0; + /** Default value for expansion mode. */ + private static final ExpansionMode DEFAULT_EXPANSION_MODE = ExpansionMode.MULTIPLICATIVE; /** * Default value for the difference between {@link #contractionCriterion} * and {@link #expansionFactor}. 
@@ -109,23 +94,22 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * contracted to fit the number of elements contained in the element * array + 1. */ - private double contractionCriterion = 2.5; + private final double contractionCriterion; /** * The expansion factor of the array. When the array needs to be expanded, - * the new array size will be - * {@code internalArray.length * expansionFactor} - * if {@code expansionMode} is set to MULTIPLICATIVE_MODE, or + * the new array size will be {@code internalArray.length * expansionFactor} + * if {@code expansionMode} is set to MULTIPLICATIVE, or * {@code internalArray.length + expansionFactor} if - * {@code expansionMode} is set to ADDITIVE_MODE. + * {@code expansionMode} is set to ADDITIVE. */ - private double expansionFactor = 2.0; + private final double expansionFactor; /** * Determines whether array expansion by {@code expansionFactor} * is additive or multiplicative. */ - private ExpansionMode expansionMode = ExpansionMode.MULTIPLICATIVE; + private final ExpansionMode expansionMode; /** * The internal storage array. @@ -171,6 +155,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { /** * Creates an instance with the specified initial capacity. + * <p> * Other properties take default values: * <ul> * <li>{@code expansionMode = MULTIPLICATIVE}</li> @@ -180,8 +165,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @param initialCapacity Initial size of the internal storage array. * @throws MathIllegalArgumentException if {@code initialCapacity <= 0}. 
*/ - public ResizableDoubleArray(int initialCapacity) - throws MathIllegalArgumentException { + public ResizableDoubleArray(int initialCapacity) throws MathIllegalArgumentException { this(initialCapacity, DEFAULT_EXPANSION_FACTOR); } @@ -189,6 +173,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * Creates an instance from an existing {@code double[]} with the * initial capacity and numElements corresponding to the size of * the supplied {@code double[]} array. + * <p> * If the supplied array is null, a new empty array with the default * initial capacity will be created. * The input array is copied, not referenced. @@ -207,177 +192,66 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { this(DEFAULT_INITIAL_CAPACITY, DEFAULT_EXPANSION_FACTOR, DEFAULT_CONTRACTION_DELTA + DEFAULT_EXPANSION_FACTOR, - ExpansionMode.MULTIPLICATIVE, + DEFAULT_EXPANSION_MODE, initialArray); } /** * Creates an instance with the specified initial capacity * and expansion factor. + * <p> * The remaining properties take default values: * <ul> * <li>{@code expansionMode = MULTIPLICATIVE}</li> * <li>{@code contractionCriterion = 0.5 + expansionFactor}</li> * </ul> - * <br/> - * Throws IllegalArgumentException if the following conditions are - * not met: - * <ul> - * <li>{@code initialCapacity > 0}</li> - * <li>{@code expansionFactor > 1}</li> - * </ul> - * - * @param initialCapacity Initial size of the internal storage array. - * @param expansionFactor The array will be expanded based on this - * parameter. - * @throws MathIllegalArgumentException if parameters are not valid. - * @deprecated As of 3.1. Please use - * {@link #ResizableDoubleArray(int,double)} instead. 
- */ - @Deprecated - public ResizableDoubleArray(int initialCapacity, - float expansionFactor) - throws MathIllegalArgumentException { - this(initialCapacity, - (double) expansionFactor); - } - - /** - * Creates an instance with the specified initial capacity - * and expansion factor. - * The remaining properties take default values: - * <ul> - * <li>{@code expansionMode = MULTIPLICATIVE}</li> - * <li>{@code contractionCriterion = 0.5 + expansionFactor}</li> - * </ul> - * <br/> - * Throws IllegalArgumentException if the following conditions are - * not met: + * <p> + * Throws MathIllegalArgumentException if the following conditions + * are not met: * <ul> * <li>{@code initialCapacity > 0}</li> * <li>{@code expansionFactor > 1}</li> * </ul> * * @param initialCapacity Initial size of the internal storage array. - * @param expansionFactor The array will be expanded based on this - * parameter. + * @param expansionFactor The array will be expanded based on this parameter. * @throws MathIllegalArgumentException if parameters are not valid. * @since 3.1 */ - public ResizableDoubleArray(int initialCapacity, - double expansionFactor) - throws MathIllegalArgumentException { - this(initialCapacity, - expansionFactor, - DEFAULT_CONTRACTION_DELTA + expansionFactor); - } - - /** - * Creates an instance with the specified initialCapacity, - * expansionFactor, and contractionCriterion. - * The expansion mode will default to {@code MULTIPLICATIVE}. - * <br/> - * Throws IllegalArgumentException if the following conditions are - * not met: - * <ul> - * <li>{@code initialCapacity > 0}</li> - * <li>{@code expansionFactor > 1}</li> - * <li>{@code contractionCriterion >= expansionFactor}</li> - * </ul> - * - * @param initialCapacity Initial size of the internal storage array.. - * @param expansionFactor The array will be expanded based on this - * parameter. - * @param contractionCriteria Contraction criteria. - * @throws MathIllegalArgumentException if parameters are not valid. 
- * @deprecated As of 3.1. Please use - * {@link #ResizableDoubleArray(int,double,double)} instead. - */ - @Deprecated - public ResizableDoubleArray(int initialCapacity, - float expansionFactor, - float contractionCriteria) - throws MathIllegalArgumentException { - this(initialCapacity, - (double) expansionFactor, - (double) contractionCriteria); + public ResizableDoubleArray(int initialCapacity, double expansionFactor) throws MathIllegalArgumentException { + this(initialCapacity, expansionFactor, DEFAULT_CONTRACTION_DELTA + expansionFactor); } /** * Creates an instance with the specified initial capacity, * expansion factor, and contraction criteria. + * <p> * The expansion mode will default to {@code MULTIPLICATIVE}. - * <br/> - * Throws IllegalArgumentException if the following conditions are - * not met: + * <p> + * Throws MathIllegalArgumentException if the following conditions + * are not met: * <ul> * <li>{@code initialCapacity > 0}</li> * <li>{@code expansionFactor > 1}</li> * <li>{@code contractionCriterion >= expansionFactor}</li> * </ul> * - * @param initialCapacity Initial size of the internal storage array.. - * @param expansionFactor The array will be expanded based on this - * parameter. + * @param initialCapacity Initial size of the internal storage array. + * @param expansionFactor The array will be expanded based on this parameter. * @param contractionCriterion Contraction criterion. * @throws MathIllegalArgumentException if the parameters are not valid. 
* @since 3.1 */ - public ResizableDoubleArray(int initialCapacity, - double expansionFactor, - double contractionCriterion) + public ResizableDoubleArray(int initialCapacity, double expansionFactor, double contractionCriterion) throws MathIllegalArgumentException { - this(initialCapacity, - expansionFactor, - contractionCriterion, - ExpansionMode.MULTIPLICATIVE, - null); - } - - /** - * <p> - * Create a ResizableArray with the specified properties.</p> - * <p> - * Throws IllegalArgumentException if the following conditions are - * not met: - * <ul> - * <li><code>initialCapacity > 0</code></li> - * <li><code>expansionFactor > 1</code></li> - * <li><code>contractionFactor >= expansionFactor</code></li> - * <li><code>expansionMode in {MULTIPLICATIVE_MODE, ADDITIVE_MODE}</code> - * </li> - * </ul></p> - * - * @param initialCapacity the initial size of the internal storage array - * @param expansionFactor the array will be expanded based on this - * parameter - * @param contractionCriteria the contraction Criteria - * @param expansionMode the expansion mode - * @throws MathIllegalArgumentException if parameters are not valid - * @deprecated As of 3.1. Please use - * {@link #ResizableDoubleArray(int,double,double,ExpansionMode,double[])} - * instead. - */ - @Deprecated - public ResizableDoubleArray(int initialCapacity, float expansionFactor, - float contractionCriteria, int expansionMode) throws MathIllegalArgumentException { - this(initialCapacity, - expansionFactor, - contractionCriteria, - expansionMode == ADDITIVE_MODE ? - ExpansionMode.ADDITIVE : - ExpansionMode.MULTIPLICATIVE, - null); - // XXX Just ot retain the expected failure in a unit test. - // With the new "enum", that test will become obsolete. - setExpansionMode(expansionMode); + this(initialCapacity, expansionFactor, contractionCriterion, DEFAULT_EXPANSION_MODE, null); } /** * Creates an instance with the specified properties. 
* <br/> - * Throws MathIllegalArgumentException if the following conditions are - * not met: + * Throws MathIllegalArgumentException if the following conditions + * are not met: * <ul> * <li>{@code initialCapacity > 0}</li> * <li>{@code expansionFactor > 1}</li> @@ -385,12 +259,12 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * </ul> * * @param initialCapacity Initial size of the internal storage array. - * @param expansionFactor The array will be expanded based on this - * parameter. + * @param expansionFactor The array will be expanded based on this parameter. * @param contractionCriterion Contraction criteria. * @param expansionMode Expansion mode. * @param data Initial contents of the array. * @throws MathIllegalArgumentException if the parameters are not valid. + * @throws NullArgumentException if expansionMode is null */ public ResizableDoubleArray(int initialCapacity, double expansionFactor, @@ -403,6 +277,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { initialCapacity); } checkContractExpand(contractionCriterion, expansionFactor); + MathUtils.checkNotNull(expansionMode); this.expansionFactor = expansionFactor; this.contractionCriterion = contractionCriterion; @@ -417,19 +292,25 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Copy constructor. Creates a new ResizableDoubleArray that is a deep, - * fresh copy of the original. Needs to acquire synchronization lock - * on original. Original may not be null; otherwise a {@link NullArgumentException} - * is thrown. + * Copy constructor. + * <p> + * Creates a new ResizableDoubleArray that is a deep, fresh copy of the original. + * Original may not be null; otherwise a {@link NullArgumentException} is thrown. 
* * @param original array to copy * @exception NullArgumentException if original is null * @since 2.0 */ - public ResizableDoubleArray(ResizableDoubleArray original) + public ResizableDoubleArray(final ResizableDoubleArray original) throws NullArgumentException { MathUtils.checkNotNull(original); - copy(original, this); + this.contractionCriterion = original.contractionCriterion; + this.expansionFactor = original.expansionFactor; + this.expansionMode = original.expansionMode; + this.internalArray = new double[original.internalArray.length]; + System.arraycopy(original.internalArray, 0, this.internalArray, 0, this.internalArray.length); + this.numElements = original.numElements; + this.startIndex = original.startIndex; } /** @@ -438,7 +319,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @param value Value to be added to end of array. */ @Override - public synchronized void addElement(double value) { + public void addElement(final double value) { if (internalArray.length <= startIndex + numElements) { expand(); } @@ -452,7 +333,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @since 2.2 */ @Override - public synchronized void addElements(double[] values) { + public void addElements(final double[] values) { final double[] tempArray = new double[numElements + values.length + 1]; System.arraycopy(internalArray, startIndex, tempArray, 0, numElements); System.arraycopy(values, 0, tempArray, numElements, values.length); @@ -462,23 +343,21 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * <p> * Adds an element to the end of the array and removes the first * element in the array. Returns the discarded first element. + * <p> * The effect is similar to a push operation in a FIFO queue. 
- * </p> * <p> * Example: If the array contains the elements 1, 2, 3, 4 (in that order) * and addElementRolling(5) is invoked, the result is an array containing * the entries 2, 3, 4, 5 and the value returned is 1. - * </p> * * @param value Value to be added to the array. * @return the value which has been discarded or "pushed" out of the array * by this rolling insert. */ @Override - public synchronized double addElementRolling(double value) { + public double addElementRolling(double value) { double discarded = internalArray[startIndex]; if ((startIndex + (numElements + 1)) > internalArray.length) { @@ -498,20 +377,19 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Substitutes <code>value</code> for the most recently added value. + * Substitutes {@code value} for the most recently added value. + * <p> * Returns the value that has been replaced. If the array is empty (i.e. - * if {@link #numElements} is zero), an IllegalStateException is thrown. + * if {@link #numElements} is zero), an MathIllegalStateException is thrown. * * @param value New value to substitute for the most recently added value * @return the value that has been replaced in the array. 
* @throws MathIllegalStateException if the array is empty * @since 2.0 */ - public synchronized double substituteMostRecentElement(double value) - throws MathIllegalStateException { + public double substituteMostRecentElement(double value) throws MathIllegalStateException { if (numElements < 1) { - throw new MathIllegalStateException( - LocalizedFormats.CANNOT_SUBSTITUTE_ELEMENT_FROM_EMPTY_ARRAY); + throw new MathIllegalStateException(LocalizedFormats.CANNOT_SUBSTITUTE_ELEMENT_FROM_EMPTY_ARRAY); } final int substIndex = startIndex + (numElements - 1); @@ -523,25 +401,6 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Checks the expansion factor and the contraction criterion and throws an - * IllegalArgumentException if the contractionCriteria is less than the - * expansionCriteria - * - * @param expansion factor to be checked - * @param contraction criteria to be checked - * @throws MathIllegalArgumentException if the contractionCriteria is less than - * the expansionCriteria. - * @deprecated As of 3.1. Please use - * {@link #checkContractExpand(double,double)} instead. - */ - @Deprecated - protected void checkContractExpand(float contraction, float expansion) - throws MathIllegalArgumentException { - checkContractExpand((double) contraction, - (double) expansion); - } - - /** * Checks the expansion factor and the contraction criterion and raises * an exception if the contraction criterion is smaller than the * expansion criterion. @@ -553,9 +412,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @throws NumberIsTooSmallException if {@code expansion <= 1 }. 
* @since 3.1 */ - protected void checkContractExpand(double contraction, - double expansion) - throws NumberIsTooSmallException { + protected void checkContractExpand(double contraction, double expansion) throws NumberIsTooSmallException { if (contraction < expansion) { final NumberIsTooSmallException e = new NumberIsTooSmallException(contraction, 1, true); e.getContext().addMessage(LocalizedFormats.CONTRACTION_CRITERIA_SMALLER_THAN_EXPANSION_FACTOR, @@ -582,17 +439,16 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * Clear the array contents, resetting the number of elements to zero. */ @Override - public synchronized void clear() { + public void clear() { numElements = 0; startIndex = 0; } /** - * Contracts the storage array to the (size of the element set) + 1 - to - * avoid a zero length array. This function also resets the startIndex to - * zero. + * Contracts the storage array to the (size of the element set) + 1 - to avoid + * a zero length array. This function also resets the startIndex to zero. */ - public synchronized void contract() { + public void contract() { final double[] tempArray = new double[numElements + 1]; // Copy and swap - copy only the element array from the src array. @@ -604,48 +460,45 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Discards the <code>i</code> initial elements of the array. For example, - * if the array contains the elements 1,2,3,4, invoking - * <code>discardFrontElements(2)</code> will cause the first two elements - * to be discarded, leaving 3,4 in the array. Throws illegalArgumentException - * if i exceeds numElements. + * Discards the {@code i} initial elements of the array. + * <p> + * For example, if the array contains the elements 1,2,3,4, invoking + * {@code discardFrontElements(2)} will cause the first two elements + * to be discarded, leaving 3,4 in the array. 
* * @param i the number of elements to discard from the front of the array * @throws MathIllegalArgumentException if i is greater than numElements. * @since 2.0 */ - public synchronized void discardFrontElements(int i) - throws MathIllegalArgumentException { + public void discardFrontElements(int i) throws MathIllegalArgumentException { discardExtremeElements(i,true); } /** - * Discards the <code>i</code> last elements of the array. For example, - * if the array contains the elements 1,2,3,4, invoking - * <code>discardMostRecentElements(2)</code> will cause the last two elements - * to be discarded, leaving 1,2 in the array. Throws illegalArgumentException - * if i exceeds numElements. + * Discards the {@code i} last elements of the array. + * <p> + * For example, if the array contains the elements 1,2,3,4, invoking + * {@code discardMostRecentElements(2)} will cause the last two elements + * to be discarded, leaving 1,2 in the array. * * @param i the number of elements to discard from the end of the array * @throws MathIllegalArgumentException if i is greater than numElements. * @since 2.0 */ - public synchronized void discardMostRecentElements(int i) - throws MathIllegalArgumentException { + public void discardMostRecentElements(int i) throws MathIllegalArgumentException { discardExtremeElements(i,false); } /** - * Discards the <code>i</code> first or last elements of the array, - * depending on the value of <code>front</code>. + * Discards the {@code i} first or last elements of the array, + * depending on the value of {@code front}. + * <p> * For example, if the array contains the elements 1,2,3,4, invoking - * <code>discardExtremeElements(2,false)</code> will cause the last two elements + * {@code discardExtremeElements(2,false)} will cause the last two elements * to be discarded, leaving 1,2 in the array. 
* For example, if the array contains the elements 1,2,3,4, invoking - * <code>discardExtremeElements(2,true)</code> will cause the first two elements + * {@code discardExtremeElements(2,true)} will cause the first two elements * to be discarded, leaving 3,4 in the array. - * Throws illegalArgumentException - * if i exceeds numElements. * * @param i the number of elements to discard from the front/end of the array * @param front true if elements are to be discarded from the front @@ -654,9 +507,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @throws MathIllegalArgumentException if i is greater than numElements. * @since 2.0 */ - private synchronized void discardExtremeElements(int i, - boolean front) - throws MathIllegalArgumentException { + private void discardExtremeElements(int i, boolean front) throws MathIllegalArgumentException { if (i > numElements) { throw new MathIllegalArgumentException( LocalizedFormats.TOO_MANY_ELEMENTS_TO_DISCARD_FROM_ARRAY, @@ -680,13 +531,12 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { /** * Expands the internal storage array using the expansion factor. * <p> - * if <code>expansionMode</code> is set to MULTIPLICATIVE_MODE, - * the new array size will be <code>internalArray.length * expansionFactor.</code> - * If <code>expansionMode</code> is set to ADDITIVE_MODE, the length - * after expansion will be <code>internalArray.length + expansionFactor</code> - * </p> + * If {@code expansionMode} is set to MULTIPLICATIVE, + * the new array size will be {@code internalArray.length * expansionFactor}. + * If {@code expansionMode} is set to ADDITIVE, the length + * after expansion will be {@code internalArray.length + expansionFactor}. */ - protected synchronized void expand() { + protected void expand() { // notice the use of FastMath.ceil(), this guarantees that we will always // have an array of at least currentSize + 1. 
Assume that the // current initial capacity is 1 and the expansion factor @@ -710,7 +560,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * * @param size Size of the new internal storage array. */ - private synchronized void expandTo(int size) { + private void expandTo(int size) { final double[] tempArray = new double[size]; // Copy and swap System.arraycopy(internalArray, 0, tempArray, 0, internalArray.length); @@ -718,33 +568,14 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * The contraction criteria defines when the internal array will contract - * to store only the number of elements in the element array. - * If the <code>expansionMode</code> is <code>MULTIPLICATIVE_MODE</code>, - * contraction is triggered when the ratio between storage array length - * and <code>numElements</code> exceeds <code>contractionFactor</code>. - * If the <code>expansionMode</code> is <code>ADDITIVE_MODE</code>, the - * number of excess storage locations is compared to - * <code>contractionFactor.</code> - * - * @return the contraction criteria used to reclaim memory. - * @deprecated As of 3.1. Please use {@link #getContractionCriterion()} - * instead. - */ - @Deprecated - public float getContractionCriteria() { - return (float) getContractionCriterion(); - } - - /** * The contraction criterion defines when the internal array will contract * to store only the number of elements in the element array. - * If the <code>expansionMode</code> is <code>MULTIPLICATIVE_MODE</code>, + * <p> + * If the {@code expansionMode} is {@code MULTIPLICATIVE}, * contraction is triggered when the ratio between storage array length - * and <code>numElements</code> exceeds <code>contractionFactor</code>. - * If the <code>expansionMode</code> is <code>ADDITIVE_MODE</code>, the - * number of excess storage locations is compared to - * <code>contractionFactor.</code> + * and {@code numElements} exceeds {@code contractionFactor}. 
+ * If the {@code expansionMode} is {@code ADDITIVE}, the + * number of excess storage locations is compared to {@code contractionFactor}. * * @return the contraction criterion used to reclaim memory. * @since 3.1 @@ -754,15 +585,15 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Returns the element at the specified index + * Returns the element at the specified index. * * @param index index to fetch a value from * @return value stored at the specified index - * @throws ArrayIndexOutOfBoundsException if <code>index</code> is less than - * zero or is greater than <code>getNumElements() - 1</code>. + * @throws ArrayIndexOutOfBoundsException if {@code index} is less than + * zero or is greater than {@code getNumElements() - 1}. */ @Override - public synchronized double getElement(int index) { + public double getElement(int index) { if (index >= numElements) { throw new ArrayIndexOutOfBoundsException(index); } else if (index >= 0) { @@ -773,14 +604,15 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Returns a double array containing the elements of this - * <code>ResizableArray</code>. This method returns a copy, not a - * reference to the underlying array, so that changes made to the returned - * array have no effect on this <code>ResizableArray.</code> + * Returns a double array containing the elements of this ResizableArray. + * <p> + * This method returns a copy, not a reference to the underlying array, + * so that changes made to the returned array have no effect on this ResizableArray. + * * @return the double array. 
*/ @Override - public synchronized double[] getElements() { + public double[] getElements() { final double[] elementArray = new double[numElements]; System.arraycopy(internalArray, startIndex, elementArray, 0, numElements); return elementArray; @@ -788,20 +620,18 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { /** * The expansion factor controls the size of a new array when an array - * needs to be expanded. The <code>expansionMode</code> - * determines whether the size of the array is multiplied by the - * <code>expansionFactor</code> (MULTIPLICATIVE_MODE) or if - * the expansion is additive (ADDITIVE_MODE -- <code>expansionFactor</code> - * storage locations added). The default <code>expansionMode</code> is - * MULTIPLICATIVE_MODE and the default <code>expansionFactor</code> - * is 2.0. + * needs to be expanded. + * <p> + * The {@code expansionMode} determines whether the size of the array + * is multiplied by the {@code expansionFactor} (MULTIPLICATIVE) or if + * the expansion is additive (ADDITIVE -- {@code expansionFactor} + * storage locations added). The default {@code expansionMode} is + * MULTIPLICATIVE and the default {@code expansionFactor} is 2.0. * * @return the expansion factor of this expandable double array - * @deprecated As of 3.1. Return type will be changed to "double" in 4.0. */ - @Deprecated - public float getExpansionFactor() { - return (float) expansionFactor; + public double getExpansionFactor() { + return expansionFactor; } /** @@ -809,33 +639,9 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * array grows additively or multiplicatively when it is expanded. * * @return the expansion mode. - * @deprecated As of 3.1. Return value to be changed to - * {@link ExpansionMode} in 4.0. 
- */ - @Deprecated - public int getExpansionMode() { - switch (expansionMode) { - case MULTIPLICATIVE: - return MULTIPLICATIVE_MODE; - case ADDITIVE: - return ADDITIVE_MODE; - default: - throw new MathInternalError(); // Should never happen. - } - } - - /** - * Notice the package scope on this method. This method is simply here - * for the JUnit test, it allows us check if the expansion is working - * properly after a number of expansions. This is not meant to be a part - * of the public interface of this class. - * - * @return the length of the internal storage array. - * @deprecated As of 3.1. Please use {@link #getCapacity()} instead. */ - @Deprecated - synchronized int getInternalLength() { - return internalArray.length; + public ExpansionMode getExpansionMode() { + return expansionMode; } /** @@ -858,41 +664,23 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @return the number of elements. */ @Override - public synchronized int getNumElements() { + public int getNumElements() { return numElements; } /** - * Returns the internal storage array. Note that this method returns - * a reference to the internal storage array, not a copy, and to correctly - * address elements of the array, the <code>startIndex</code> is - * required (available via the {@link #start} method). This method should - * only be used in cases where copying the internal array is not practical. - * The {@link #getElements} method should be used in all other cases. - * - * - * @return the internal storage array used by this object - * @since 2.0 - * @deprecated As of 3.1. - */ - @Deprecated - public synchronized double[] getInternalValues() { - return internalArray; - } - - /** * Provides <em>direct</em> access to the internal storage array. * Please note that this method returns a reference to this object's * storage array, not a copy. 
- * <br/> + * <p> * To correctly address elements of the array, the "start index" is * required (available via the {@link #getStartIndex() getStartIndex} * method. - * <br/> + * <p> * This method should only be used to avoid copying the internal array. * The returned value <em>must</em> be used for reading only; other * uses could lead to this object becoming inconsistent. - * <br/> + * <p> * The {@link #getElements} method has no such limitation since it * returns a copy of this array's addressable elements. * @@ -907,6 +695,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * Returns the "start index" of the internal array. * This index is the position of the first addressable element in the * internal storage array. + * <p> * The addressable elements in the array are at indices contained in * the interval [{@link #getStartIndex()}, * {@link #getStartIndex()} + {@link #getNumElements()} - 1]. @@ -919,23 +708,6 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Sets the contraction criteria. - * - * @param contractionCriteria contraction criteria - * @throws MathIllegalArgumentException if the contractionCriteria is less than - * the expansionCriteria. - * @deprecated As of 3.1 (to be removed in 4.0 as field will become "final"). - */ - @Deprecated - public void setContractionCriteria(float contractionCriteria) - throws MathIllegalArgumentException { - checkContractExpand(contractionCriteria, getExpansionFactor()); - synchronized(this) { - this.contractionCriterion = contractionCriteria; - } - } - - /** * Performs an operation on the addressable elements of the array. * * @param f Function to be applied on this array. 
@@ -943,30 +715,24 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @since 3.1 */ public double compute(MathArrays.Function f) { - final double[] array; - final int start; - final int num; - synchronized(this) { - array = internalArray; - start = startIndex; - num = numElements; - } - return f.evaluate(array, start, num); + return f.evaluate(internalArray, startIndex, numElements); } /** - * Sets the element at the specified index. If the specified index is greater than - * <code>getNumElements() - 1</code>, the <code>numElements</code> property - * is increased to <code>index +1</code> and additional storage is allocated - * (if necessary) for the new element and all (uninitialized) elements - * between the new element and the previous end of the array). + * Sets the element at the specified index. + * <p> + * If the specified index is greater than {@code getNumElements() - 1}, + * the {@code numElements} property is increased to {@code index +1} + * and additional storage is allocated (if necessary) for the new element and + * all (uninitialized) elements between the new element and the previous end + * of the array). * * @param index index to store a value in * @param value value to store at the specified index * @throws ArrayIndexOutOfBoundsException if {@code index < 0}. */ @Override - public synchronized void setElement(int index, double value) { + public void setElement(int index, double value) { if (index < 0) { throw new ArrayIndexOutOfBoundsException(index); } @@ -980,92 +746,17 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Sets the expansionFactor. Throws IllegalArgumentException if the - * the following conditions are not met: - * <ul> - * <li><code>expansionFactor > 1</code></li> - * <li><code>contractionFactor >= expansionFactor</code></li> - * </ul> - * @param expansionFactor the new expansion factor value. 
- * @throws MathIllegalArgumentException if expansionFactor is <= 1 or greater - * than contractionFactor - * @deprecated As of 3.1 (to be removed in 4.0 as field will become "final"). - */ - @Deprecated - public void setExpansionFactor(float expansionFactor) throws MathIllegalArgumentException { - checkContractExpand(getContractionCriterion(), expansionFactor); - // The check above verifies that the expansion factor is > 1.0; - synchronized(this) { - this.expansionFactor = expansionFactor; - } - } - - /** - * Sets the <code>expansionMode</code>. The specified value must be one of - * ADDITIVE_MODE, MULTIPLICATIVE_MODE. - * - * @param expansionMode The expansionMode to set. - * @throws MathIllegalArgumentException if the specified mode value is not valid. - * @deprecated As of 3.1. Please use {@link #setExpansionMode(ExpansionMode)} instead. - */ - @Deprecated - public void setExpansionMode(int expansionMode) - throws MathIllegalArgumentException { - if (expansionMode != MULTIPLICATIVE_MODE && - expansionMode != ADDITIVE_MODE) { - throw new MathIllegalArgumentException(LocalizedFormats.UNSUPPORTED_EXPANSION_MODE, expansionMode, - MULTIPLICATIVE_MODE, "MULTIPLICATIVE_MODE", - ADDITIVE_MODE, "ADDITIVE_MODE"); - } - synchronized(this) { - if (expansionMode == MULTIPLICATIVE_MODE) { - setExpansionMode(ExpansionMode.MULTIPLICATIVE); - } else if (expansionMode == ADDITIVE_MODE) { - setExpansionMode(ExpansionMode.ADDITIVE); - } - } - } - - /** - * Sets the {@link ExpansionMode expansion mode}. - * - * @param expansionMode Expansion mode to use for resizing the array. - * @deprecated As of 3.1 (to be removed in 4.0 as field will become "final"). - */ - @Deprecated - public void setExpansionMode(ExpansionMode expansionMode) { - this.expansionMode = expansionMode; - } - - /** - * Sets the initial capacity. Should only be invoked by constructors. 
- * - * @param initialCapacity of the array - * @throws MathIllegalArgumentException if <code>initialCapacity</code> is not - * positive. - * @deprecated As of 3.1, this is a no-op. - */ - @Deprecated - protected void setInitialCapacity(int initialCapacity) - throws MathIllegalArgumentException { - // Body removed in 3.1. - } - - /** * This function allows you to control the number of elements contained * in this array, and can be used to "throw out" the last n values in an * array. This function will also expand the internal array as needed. * * @param i a new number of elements - * @throws MathIllegalArgumentException if <code>i</code> is negative. + * @throws MathIllegalArgumentException if {@code i} is negative. */ - public synchronized void setNumElements(int i) - throws MathIllegalArgumentException { + public void setNumElements(int i) throws MathIllegalArgumentException { // If index is negative thrown an error. if (i < 0) { - throw new MathIllegalArgumentException( - LocalizedFormats.INDEX_NOT_POSITIVE, - i); + throw new MathIllegalArgumentException(LocalizedFormats.INDEX_NOT_POSITIVE, i); } // Test the new num elements, check to see if the array needs to be @@ -1085,7 +776,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * * @return true if array satisfies the contraction criteria */ - private synchronized boolean shouldContract() { + private boolean shouldContract() { if (expansionMode == ExpansionMode.MULTIPLICATIVE) { return (internalArray.length / ((float) numElements)) > contractionCriterion; } else { @@ -1094,57 +785,6 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { } /** - * Returns the starting index of the internal array. The starting index is - * the position of the first addressable element in the internal storage - * array. 
The addressable elements in the array are <code> - * internalArray[startIndex],...,internalArray[startIndex + numElements -1] - * </code> - * - * @return the starting index. - * @deprecated As of 3.1. - */ - @Deprecated - public synchronized int start() { - return startIndex; - } - - /** - * <p>Copies source to dest, copying the underlying data, so dest is - * a new, independent copy of source. Does not contract before - * the copy.</p> - * - * <p>Obtains synchronization locks on both source and dest - * (in that order) before performing the copy.</p> - * - * <p>Neither source nor dest may be null; otherwise a {@link NullArgumentException} - * is thrown</p> - * - * @param source ResizableDoubleArray to copy - * @param dest ResizableArray to replace with a copy of the source array - * @exception NullArgumentException if either source or dest is null - * @since 2.0 - * - */ - public static void copy(ResizableDoubleArray source, - ResizableDoubleArray dest) - throws NullArgumentException { - MathUtils.checkNotNull(source); - MathUtils.checkNotNull(dest); - synchronized(source) { - synchronized(dest) { - dest.contractionCriterion = source.contractionCriterion; - dest.expansionFactor = source.expansionFactor; - dest.expansionMode = source.expansionMode; - dest.internalArray = new double[source.internalArray.length]; - System.arraycopy(source.internalArray, 0, dest.internalArray, - 0, dest.internalArray.length); - dest.numElements = source.numElements; - dest.startIndex = source.startIndex; - } - } - } - - /** * Returns a copy of the ResizableDoubleArray. Does not contract before * the copy, so the returned object is an exact copy of this. 
* @@ -1152,10 +792,8 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * properties as this * @since 2.0 */ - public synchronized ResizableDoubleArray copy() { - final ResizableDoubleArray result = new ResizableDoubleArray(); - copy(this, result); - return result; + public ResizableDoubleArray copy() { + return new ResizableDoubleArray(this); } /** @@ -1175,21 +813,17 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { if (object instanceof ResizableDoubleArray == false) { return false; } - synchronized(this) { - synchronized(object) { - boolean result = true; - final ResizableDoubleArray other = (ResizableDoubleArray) object; - result = result && (other.contractionCriterion == contractionCriterion); - result = result && (other.expansionFactor == expansionFactor); - result = result && (other.expansionMode == expansionMode); - result = result && (other.numElements == numElements); - result = result && (other.startIndex == startIndex); - if (!result) { - return false; - } else { - return Arrays.equals(internalArray, other.internalArray); - } - } + boolean result = true; + final ResizableDoubleArray other = (ResizableDoubleArray) object; + result = result && (other.contractionCriterion == contractionCriterion); + result = result && (other.expansionFactor == expansionFactor); + result = result && (other.expansionMode == expansionMode); + result = result && (other.numElements == numElements); + result = result && (other.startIndex == startIndex); + if (!result) { + return false; + } else { + return Arrays.equals(internalArray, other.internalArray); } } @@ -1200,7 +834,7 @@ public class ResizableDoubleArray implements DoubleArray, Serializable { * @since 2.0 */ @Override - public synchronized int hashCode() { + public int hashCode() { final int[] hashData = new int[6]; hashData[0] = Double.valueOf(expansionFactor).hashCode(); hashData[1] = Double.valueOf(contractionCriterion).hashCode();
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-757_76b7413d.diff
bugs-dot-jar_data_MATH-405_784e4f69
--- BugID: MATH-405 Summary: Inconsistent result from Levenberg-Marquardt Description: Levenberg-Marquardt (its method doOptimize) returns a VectorialPointValuePair. However, the class holds the optimum point, the vector of the objective function, the cost and residuals. The value returns by doOptimize does not always corresponds to the point which leads to the residuals and cost diff --git a/src/main/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java index 152c30c..c4b1985 100644 --- a/src/main/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java +++ b/src/main/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java @@ -247,12 +247,7 @@ public abstract class AbstractLeastSquaresOptimizer implements DifferentiableMul * @return chi-square value */ public double getChiSquare() { - double chiSquare = 0; - for (int i = 0; i < rows; ++i) { - final double residual = residuals[i]; - chiSquare += residual * residual * residualsWeights[i]; - } - return chiSquare; + return cost*cost; } /** diff --git a/src/main/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java index 4cf4ee0..b41456e 100644 --- a/src/main/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java +++ b/src/main/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java @@ -255,6 +255,8 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { double[] diag = new double[cols]; double[] oldX = new double[cols]; double[] oldRes = new double[rows]; + double[] oldObj = new double[rows]; + double[] qtf = new double[rows]; double[] work1 = new double[cols]; double[] work2 = new double[cols]; double[] work3 = new double[cols]; @@ 
-267,7 +269,9 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { boolean firstIteration = true; VectorialPointValuePair current = new VectorialPointValuePair(point, objective); while (true) { - + for (int i=0;i<rows;i++) { + qtf[i]=residuals[i]; + } incrementIterationsCounter(); // compute the Q.R. decomposition of the jacobian matrix @@ -276,8 +280,7 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { qrDecomposition(); // compute Qt.res - qTy(residuals); - + qTy(qtf); // now we don't need Q anymore, // so let jacobian contain the R matrix with its diagonal elements for (int k = 0; k < solvedCols; ++k) { @@ -315,7 +318,7 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { if (s != 0) { double sum = 0; for (int i = 0; i <= j; ++i) { - sum += jacobian[i][pj] * residuals[i]; + sum += jacobian[i][pj] * qtf[i]; } maxCosine = Math.max(maxCosine, Math.abs(sum) / (s * cost)); } @@ -323,6 +326,8 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { } if (maxCosine <= orthoTolerance) { // convergence has been reached + updateResidualsAndCost(); + current = new VectorialPointValuePair(point, objective); return current; } @@ -343,9 +348,12 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { double[] tmpVec = residuals; residuals = oldRes; oldRes = tmpVec; + tmpVec = objective; + objective = oldObj; + oldObj = tmpVec; // determine the Levenberg-Marquardt parameter - determineLMParameter(oldRes, delta, diag, work1, work2, work3); + determineLMParameter(qtf, delta, diag, work1, work2, work3); // compute the new point and the norm of the evolution direction double lmNorm = 0; @@ -357,7 +365,6 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { lmNorm += s * s; } lmNorm = Math.sqrt(lmNorm); - // on the first iteration, adjust the initial step bound. 
if (firstIteration) { delta = Math.min(delta, lmNorm); @@ -365,7 +372,6 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { // evaluate the function at x + p and calculate its norm updateResidualsAndCost(); - current = new VectorialPointValuePair(point, objective); // compute the scaled actual reduction double actRed = -1.0; @@ -421,6 +427,15 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { xNorm += xK * xK; } xNorm = Math.sqrt(xNorm); + current = new VectorialPointValuePair(point, objective); + + // tests for convergence. + if (checker != null) { + // we use the vectorial convergence checker + if (checker.converged(getIterations(), previous, current)) { + return current; + } + } } else { // failed iteration, reset the previous values cost = previousCost; @@ -431,24 +446,18 @@ public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer { tmpVec = residuals; residuals = oldRes; oldRes = tmpVec; + tmpVec = objective; + objective = oldObj; + oldObj = tmpVec; } - - // tests for convergence. - if (checker != null) { - // we use the vectorial convergence checker - if (checker.converged(getIterations(), previous, current)) { - return current; - } - } else { - // we use the Levenberg-Marquardt specific convergence parameters - if (((Math.abs(actRed) <= costRelativeTolerance) && - (preRed <= costRelativeTolerance) && - (ratio <= 2.0)) || - (delta <= parRelativeTolerance * xNorm)) { - return current; - } + if (checker==null) { + if (((Math.abs(actRed) <= costRelativeTolerance) && + (preRed <= costRelativeTolerance) && + (ratio <= 2.0)) || + (delta <= parRelativeTolerance * xNorm)) { + return current; + } } - // tests for termination and stringent tolerances // (2.2204e-16 is the machine epsilon for IEEE754) if ((Math.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-405_784e4f69.diff
bugs-dot-jar_data_MATH-318_83f18d52
--- BugID: MATH-318 Summary: wrong result in eigen decomposition Description: "Some results computed by EigenDecompositionImpl are wrong. The following case computed by Fortran Lapack fails with version 2.0\n{code}\n public void testMathpbx02() {\n\n double[] mainTridiagonal = {\n \t 7484.860960227216, 18405.28129035345, 13855.225609560746,\n \t 10016.708722343366, 559.8117399576674, 6750.190788301587, \n \t 71.21428769782159\n };\n double[] secondaryTridiagonal = {\n \t -4175.088570476366,1975.7955858241994,5193.178422374075, \n \t 1995.286659169179,75.34535882933804,-234.0808002076056\n };\n\n \ // the reference values have been computed using routine DSTEMR\n // from the fortran library LAPACK version 3.2.1\n double[] refEigenValues = {\n \t\t20654.744890306974412,16828.208208485466457,\n \t\t6893.155912634994820,6757.083016675340332,\n \ \t\t5887.799885688558788,64.309089923240379,\n \t\t57.992628792736340\n \ };\n RealVector[] refEigenVectors = {\n \t\tnew ArrayRealVector(new double[] {-0.270356342026904, 0.852811091326997, 0.399639490702077, 0.198794657813990, 0.019739323307666, 0.000106983022327, -0.000001216636321}),\n \t\tnew ArrayRealVector(new double[] {0.179995273578326,-0.402807848153042,0.701870993525734,0.555058211014888,0.068079148898236,0.000509139115227,-0.000007112235617}),\n \ \t\tnew ArrayRealVector(new double[] {-0.399582721284727,-0.056629954519333,-0.514406488522827,0.711168164518580,0.225548081276367,0.125943999652923,-0.004321507456014}),\n \ \t\tnew ArrayRealVector(new double[] {0.058515721572821,0.010200130057739,0.063516274916536,-0.090696087449378,-0.017148420432597,0.991318870265707,-0.034707338554096}),\n \ \t\tnew ArrayRealVector(new double[] {0.855205995537564,0.327134656629775,-0.265382397060548,0.282690729026706,0.105736068025572,-0.009138126622039,0.000367751821196}),\n \ \t\tnew ArrayRealVector(new double[] 
{-0.002913069901144,-0.005177515777101,0.041906334478672,-0.109315918416258,0.436192305456741,0.026307315639535,0.891797507436344}),\n \ \t\tnew ArrayRealVector(new double[] {-0.005738311176435,-0.010207611670378,0.082662420517928,-0.215733886094368,0.861606487840411,-0.025478530652759,-0.451080697503958})\n \ };\n\n // the following line triggers the exception\n EigenDecomposition decomposition =\n new EigenDecompositionImpl(mainTridiagonal, secondaryTridiagonal, MathUtils.SAFE_MIN);\n\n double[] eigenValues = decomposition.getRealEigenvalues();\n \ for (int i = 0; i < refEigenValues.length; ++i) {\n assertEquals(refEigenValues[i], eigenValues[i], 1.0e-3);\n if (refEigenVectors[i].dotProduct(decomposition.getEigenvector(i)) < 0) {\n assertEquals(0, refEigenVectors[i].add(decomposition.getEigenvector(i)).getNorm(), 1.0e-5);\n } else {\n assertEquals(0, refEigenVectors[i].subtract(decomposition.getEigenvector(i)).getNorm(), 1.0e-5);\n }\n }\n\n }\n{code}" diff --git a/src/main/java/org/apache/commons/math/linear/EigenDecompositionImpl.java b/src/main/java/org/apache/commons/math/linear/EigenDecompositionImpl.java index 3fc328d..9d1b797 100644 --- a/src/main/java/org/apache/commons/math/linear/EigenDecompositionImpl.java +++ b/src/main/java/org/apache/commons/math/linear/EigenDecompositionImpl.java @@ -1132,7 +1132,7 @@ public class EigenDecompositionImpl implements EigenDecomposition { private boolean flipIfWarranted(final int n, final int step) { if (1.5 * work[pingPong] < work[4 * (n - 1) + pingPong]) { // flip array - int j = 4 * n - 1; + int j = 4 * (n - 1); for (int i = 0; i < j; i += 4) { for (int k = 0; k < 4; k += step) { final double tmp = work[i + k];
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-318_83f18d52.diff
bugs-dot-jar_data_MATH-723_1352a70f
--- BugID: MATH-723 Summary: BitStreamGenerators (MersenneTwister, Well generators) do not clear normal deviate cache on setSeed Description: The BitStream generators generate normal deviates (for nextGaussian) in pairs, caching the last value generated. When reseeded, the cache should be cleared; otherwise seeding two generators with the same value is not guaranteed to generate the same sequence. diff --git a/src/main/java/org/apache/commons/math/random/ISAACRandom.java b/src/main/java/org/apache/commons/math/random/ISAACRandom.java index 3db6de1..011a704 100644 --- a/src/main/java/org/apache/commons/math/random/ISAACRandom.java +++ b/src/main/java/org/apache/commons/math/random/ISAACRandom.java @@ -235,6 +235,7 @@ public class ISAACRandom extends BitsStreamGenerator implements Serializable { } isaac(); count = SIZE - 1; + clear(); } /** Shuffle array. */
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-723_1352a70f.diff
bugs-dot-jar_data_MATH-935_48dde378
--- BugID: MATH-935 Summary: DerivativeStructure.atan2(y,x) does not handle special cases properly Description: The four special cases +/-0 for both x and y should give the same values as Math.atan2 and FastMath.atan2. However, they give NaN for the value in all cases. diff --git a/src/main/java/org/apache/commons/math3/analysis/differentiation/DSCompiler.java b/src/main/java/org/apache/commons/math3/analysis/differentiation/DSCompiler.java index 9d08555..d5a3c18 100644 --- a/src/main/java/org/apache/commons/math3/analysis/differentiation/DSCompiler.java +++ b/src/main/java/org/apache/commons/math3/analysis/differentiation/DSCompiler.java @@ -1415,6 +1415,9 @@ public class DSCompiler { } + // fix value to take special cases (+0/+0, +0/-0, -0/+0, -0/-0, +/-infinity) correctly + result[resultOffset] = FastMath.atan2(y[yOffset], x[xOffset]); + } /** Compute hyperbolic cosine of a derivative structure.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-935_48dde378.diff
bugs-dot-jar_data_MATH-413_51aa6e6c
--- BugID: MATH-413 Summary: Miscellaneous issues concerning the "optimization" package Description: | Revision 990792 contains changes triggered the following issues: * [MATH-394|https://issues.apache.org/jira/browse/MATH-394] * [MATH-397|https://issues.apache.org/jira/browse/MATH-397] * [MATH-404|https://issues.apache.org/jira/browse/MATH-404] This issue collects the currently still unsatisfactory code (not necessarily sorted in order of annoyance): # "BrentOptimizer": a specific convergence checker must be used. "LevenbergMarquardtOptimizer" also has specific convergence checks. # Trying to make convergence checking independent of the optimization algorithm creates problems (conceptual and practical): ** See "BrentOptimizer" and "LevenbergMarquardtOptimizer", the algorithm passes "points" to the convergence checker, but the actual meaning of the points can very well be different in the caller (optimization algorithm) and the callee (convergence checker). ** In "PowellOptimizer" the line search ("BrentOptimizer") tolerances depend on the tolerances within the main algorithm. Since tolerances come with "ConvergenceChecker" and so can be changed at any time, it is awkward to adapt the values within the line search optimizer without exposing its internals ("BrentOptimizer" field) to the enclosing class ("PowellOptimizer"). # Given the numerous changes, some Javadoc comments might be out-of-sync, although I did try to update them all. # Class "DirectSearchOptimizer" (in package "optimization.direct") inherits from class "AbstractScalarOptimizer" (in package "optimization.general"). # Some interfaces are defined in package "optimization" but their base implementations (abstract class that contain the boiler-plate code) are in package "optimization.general" (e.g. "DifferentiableMultivariateVectorialOptimizer" and "BaseAbstractVectorialOptimizer"). # No check is performed to ensure the the convergence checker has been set (see e.g. 
"BrentOptimizer" and "PowellOptimizer"); if it hasn't there will be a NPE. The alternative is to initialize a default checker that will never be used in case the user had intended to explicitly sets the checker. # "NonLinearConjugateGradientOptimizer": Ugly workaround for the checked "ConvergenceException". # Everywhere, we trail the checked "FunctionEvaluationException" although it is never used. # There remains some duplicate code (such as the "multi-start loop" in the various "MultiStart..." implementations). # The "ConvergenceChecker" interface is very general (the "converged" method can take any number of "...PointValuePair"). However there remains a "semantic" problem: One cannot be sure that the list of points means the same thing for the caller of "converged" and within the implementation of the "ConvergenceChecker" that was independently set. # It is not clear whether it is wise to aggregate the counter of gradient evaluations to the function evaluation counter. In "LevenbergMarquartdOptimizer" for example, it would be unfair to do so. Currently I had to remove all tests referring to gradient and Jacobian evaluations. # In "AbstractLeastSquaresOptimizer" and "LevenbergMarquardtOptimizer", occurences of "OptimizationException" were replaced by the unchecked "ConvergenceException" but in some cases it might not be the most appropriate one. # "MultiStartUnivariateRealOptimizer": in the other classes ("MultiStartMultivariate...") similar to this one, the randomization is on the firts-guess value while in this class, it is on the search interval. I think that here also we should randomly choose the start value (within the user-selected interval). # The Javadoc utility raises warnings (see output of "mvn site") which I couldn't figure out how to correct. 
# Some previously existing classes and interfaces have become no more than a specialisation of new "generics" classes; it might be interesting to remove them in order to reduce the number of classes and thus limit the potential for confusion. diff --git a/src/main/java/org/apache/commons/math/optimization/univariate/MultiStartUnivariateRealOptimizer.java b/src/main/java/org/apache/commons/math/optimization/univariate/MultiStartUnivariateRealOptimizer.java index aa14ce6..952e565 100644 --- a/src/main/java/org/apache/commons/math/optimization/univariate/MultiStartUnivariateRealOptimizer.java +++ b/src/main/java/org/apache/commons/math/optimization/univariate/MultiStartUnivariateRealOptimizer.java @@ -143,18 +143,22 @@ public class MultiStartUnivariateRealOptimizer<FUNC extends UnivariateRealFuncti final GoalType goal, final double min, final double max) throws FunctionEvaluationException { + return optimize(f, goal, min, max, min + 0.5 * (max - min)); + } + /** {@inheritDoc} */ + public UnivariateRealPointValuePair optimize(final FUNC f, final GoalType goal, + final double min, final double max, + final double startValue) + throws FunctionEvaluationException { optima = new UnivariateRealPointValuePair[starts]; totalEvaluations = 0; // Multi-start loop. for (int i = 0; i < starts; ++i) { try { - final double bound1 = (i == 0) ? min : min + generator.nextDouble() * (max - min); - final double bound2 = (i == 0) ? max : min + generator.nextDouble() * (max - min); - optima[i] = optimizer.optimize(f, goal, - FastMath.min(bound1, bound2), - FastMath.max(bound1, bound2)); + final double s = (i == 0) ? 
startValue : min + generator.nextDouble() * (max - min); + optima[i] = optimizer.optimize(f, goal, min, max, s); } catch (FunctionEvaluationException fee) { optima[i] = null; } catch (ConvergenceException ce) { @@ -177,16 +181,6 @@ public class MultiStartUnivariateRealOptimizer<FUNC extends UnivariateRealFuncti return optima[0]; } - /** {@inheritDoc} */ - public UnivariateRealPointValuePair optimize(final FUNC f, final GoalType goalType, - final double min, final double max, - final double startValue) - throws FunctionEvaluationException { - // XXX Main code should be here, using "startValue" for the first start. - // XXX This method should set "startValue" to min + 0.5 * (max - min) - return optimize(f, goalType, min, max); - } - /** * Sort the optima from best to worst, followed by {@code null} elements. *
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-413_51aa6e6c.diff
bugs-dot-jar_data_MATH-1058_4ebd967c
--- BugID: MATH-1058 Summary: Beta, LogNormalDistribution, WeibullDistribution give slightly wrong answer for extremely small args due to log/exp inaccuracy Description: |- Background for those who aren't familiar: math libs like Math and FastMath have two mysterious methods, log1p and expm1. log1p(x) = log(1+x) and expm1(x) = exp(x)-1 mathetmatically, but can return a correct answer even when x was small, where floating-point error due to the addition/subtraction introduces a relatively large error. There are three instances in the code that can employ these specialized methods and gain a measurable improvement in accuracy. See patch and tests for an example -- try the tests without the code change to see the error. diff --git a/src/main/java/org/apache/commons/math3/distribution/LogNormalDistribution.java b/src/main/java/org/apache/commons/math3/distribution/LogNormalDistribution.java index 810c9e4..ab7a13b 100644 --- a/src/main/java/org/apache/commons/math3/distribution/LogNormalDistribution.java +++ b/src/main/java/org/apache/commons/math3/distribution/LogNormalDistribution.java @@ -289,7 +289,7 @@ public class LogNormalDistribution extends AbstractRealDistribution { public double getNumericalVariance() { final double s = shape; final double ss = s * s; - return (FastMath.exp(ss) - 1) * FastMath.exp(2 * scale + ss); + return (FastMath.expm1(ss)) * FastMath.exp(2 * scale + ss); } /** diff --git a/src/main/java/org/apache/commons/math3/distribution/WeibullDistribution.java b/src/main/java/org/apache/commons/math3/distribution/WeibullDistribution.java index ec34b50..6e69bc9 100644 --- a/src/main/java/org/apache/commons/math3/distribution/WeibullDistribution.java +++ b/src/main/java/org/apache/commons/math3/distribution/WeibullDistribution.java @@ -221,7 +221,7 @@ public class WeibullDistribution extends AbstractRealDistribution { } else if (p == 1) { ret = Double.POSITIVE_INFINITY; } else { - ret = scale * FastMath.pow(-FastMath.log(1.0 - p), 1.0 / shape); + ret = 
scale * FastMath.pow(-FastMath.log1p(-p), 1.0 / shape); } return ret; } diff --git a/src/main/java/org/apache/commons/math3/special/Beta.java b/src/main/java/org/apache/commons/math3/special/Beta.java index 59f696a..c6091b4 100644 --- a/src/main/java/org/apache/commons/math3/special/Beta.java +++ b/src/main/java/org/apache/commons/math3/special/Beta.java @@ -218,7 +218,7 @@ public class Beta { return 1.0; } }; - ret = FastMath.exp((a * FastMath.log(x)) + (b * FastMath.log(1.0 - x)) - + ret = FastMath.exp((a * FastMath.log(x)) + (b * FastMath.log1p(-x)) - FastMath.log(a) - logBeta(a, b)) * 1.0 / fraction.evaluate(x, epsilon, maxIterations); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1058_4ebd967c.diff
bugs-dot-jar_data_MATH-618_2123f780
--- BugID: MATH-618 Summary: Complex Add and Subtract handle NaN arguments differently, but javadoc contracts are the same Description: | For both Complex add and subtract, the javadoc states that {code} * If either this or <code>rhs</code> has a NaN value in either part, * {@link #NaN} is returned; otherwise Inifinite and NaN values are * returned in the parts of the result according to the rules for * {@link java.lang.Double} arithmetic {code} Subtract includes an isNaN test and returns Complex.NaN if either complex argument isNaN; but add omits this test. The test should be added to the add implementation (actually restored, since this looks like a code merge problem going back to 1.1). diff --git a/src/main/java/org/apache/commons/math/complex/Complex.java b/src/main/java/org/apache/commons/math/complex/Complex.java index e0a8e97..ab58c78 100644 --- a/src/main/java/org/apache/commons/math/complex/Complex.java +++ b/src/main/java/org/apache/commons/math/complex/Complex.java @@ -150,6 +150,9 @@ public class Complex implements FieldElement<Complex>, Serializable { public Complex add(Complex rhs) throws NullArgumentException { MathUtils.checkNotNull(rhs); + if (isNaN || rhs.isNaN) { + return NaN; + } return createComplex(real + rhs.getReal(), imaginary + rhs.getImaginary()); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-618_2123f780.diff
bugs-dot-jar_data_MATH-727_d2777388
--- BugID: MATH-727 Summary: too large first step with embedded Runge-Kutta integrators (Dormand-Prince 8(5,3) ...) Description: |- Adaptive step size integrators compute the first step size by themselves if it is not provided. For embedded Runge-Kutta type, this step size is not checked against the integration range, so if the integration range is extremely short, this step size may evaluate the function out of the range (and in fact it tries afterward to go back, and fails to stop). Gragg-Bulirsch-Stoer integrators do not have this problem, the step size is checked and truncated if needed. diff --git a/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java b/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java index 13ced27..d2bbf67 100644 --- a/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java @@ -247,6 +247,15 @@ public abstract class EmbeddedRungeKuttaIntegrator } stepSize = hNew; + if (forward) { + if (stepStart + stepSize >= t) { + stepSize = t - stepStart; + } + } else { + if (stepStart + stepSize <= t) { + stepSize = t - stepStart; + } + } // next stages for (int k = 1; k < stages; ++k) {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-727_d2777388.diff
bugs-dot-jar_data_MATH-738_f64b6a90
--- BugID: MATH-738 Summary: Incomplete beta function I(x, a, b) is inaccurate for large values of a and/or b Description: "This was first reported in MATH-718. The result of the current implementation of the incomplete beta function I(x, a, b) is inaccurate when a and/or b are large-ish. \r\n\r\nI've skimmed through [slatec|http://www.netlib.org/slatec/fnlib/betai.f], GSL, [Boost|http://www.boost.org/doc/libs/1_38_0/libs/math/doc/sf_and_dist/html/math_toolkit/special/sf_beta/ibeta_function.html] as well as NR. At first sight, neither uses the same method to compute this function. I think [TOMS-708|http://www.netlib.org/toms/708] is probably the best option.\r\n\r\n_Issue moved from MATH project on January 27, 2018 (concerned implementation was moved to module {{commons-numbers-gamma}} of \"Commons Numbers\")._" diff --git a/src/main/java/org/apache/commons/math3/special/Beta.java b/src/main/java/org/apache/commons/math3/special/Beta.java index 2420791..6e15fa3 100644 --- a/src/main/java/org/apache/commons/math3/special/Beta.java +++ b/src/main/java/org/apache/commons/math3/special/Beta.java @@ -16,12 +16,38 @@ */ package org.apache.commons.math3.special; +import org.apache.commons.math3.exception.NumberIsTooSmallException; +import org.apache.commons.math3.exception.OutOfRangeException; import org.apache.commons.math3.util.ContinuedFraction; import org.apache.commons.math3.util.FastMath; /** + * <p> * This is a utility class that provides computation methods related to the * Beta family of functions. 
+ * </p> + * <p> + * Implementation of {@link #logBeta(double, double)} is based on the + * algorithms described in + * <ul> + * <li><a href="http://dx.doi.org/10.1145/22721.23109">Didonato and Morris + * (1986)</a>, <em>Computation of the Incomplete Gamma Function Ratios + * and their Inverse</em>, TOMS 12(4), 377-393,</li> + * <li><a href="http://dx.doi.org/10.1145/131766.131776">Didonato and Morris + * (1992)</a>, <em>Algorithm 708: Significant Digit Computation of the + * Incomplete Beta Function Ratios</em>, TOMS 18(3), 360-373,</li> + * </ul> + * and implemented in the + * <a href="http://www.dtic.mil/docs/citations/ADA476840">NSWC Library of Mathematical Functions</a>, + * available + * <a href="http://www.ualberta.ca/CNS/RESEARCH/Software/NumericalNSWC/site.html">here</a>. + * This library is "approved for public release", and the + * <a href="http://www.dtic.mil/dtic/pdf/announcements/CopyrightGuidance.pdf">Copyright guidance</a> + * indicates that unless otherwise stated in the code, all FORTRAN functions in + * this library are license free. Since no such notice appears in the code these + * functions can safely be ported to Commons-Math. + * </p> + * * * @version $Id$ */ @@ -29,6 +55,47 @@ public class Beta { /** Maximum allowed numerical error. */ private static final double DEFAULT_EPSILON = 1E-14; + /** The constant value of ½log 2π. */ + private static final double HALF_LOG_TWO_PI = .9189385332046727; + + /** + * <p> + * The coefficients of the series expansion of the Δ function. This function + * is defined as follows + * </p> + * <center>Δ(x) = log Γ(x) - (x - 0.5) log a + a - 0.5 log 2π,</center> + * <p> + * see equation (23) in Didonato and Morris (1992). 
The series expansion, + * which applies for x ≥ 10, reads + * </p> + * <pre> + * 14 + * ==== + * 1 \ 2 n + * Δ(x) = --- > d (10 / x) + * x / n + * ==== + * n = 0 + * <pre> + */ + private static final double[] DELTA = { + .833333333333333333333333333333E-01, + -.277777777777777777777777752282E-04, + .793650793650793650791732130419E-07, + -.595238095238095232389839236182E-09, + .841750841750832853294451671990E-11, + -.191752691751854612334149171243E-12, + .641025640510325475730918472625E-14, + -.295506514125338232839867823991E-15, + .179643716359402238723287696452E-16, + -.139228964661627791231203060395E-17, + .133802855014020915603275339093E-18, + -.154246009867966094273710216533E-19, + .197701992980957427278370133333E-20, + -.234065664793997056856992426667E-21, + .171348014966398575409015466667E-22 + }; + /** * Default constructor. Prohibit instantiation. */ @@ -162,17 +229,6 @@ public class Beta { /** * Returns the natural logarithm of the beta function B(a, b). * - * @param a Parameter {@code a}. - * @param b Parameter {@code b}. - * @return log(B(a, b)). - */ - public static double logBeta(double a, double b) { - return logBeta(a, b, DEFAULT_EPSILON, Integer.MAX_VALUE); - } - - /** - * Returns the natural logarithm of the beta function B(a, b). - * * The implementation of this method is based on: * <ul> * <li><a href="http://mathworld.wolfram.com/BetaFunction.html"> @@ -181,27 +237,272 @@ public class Beta { * * @param a Parameter {@code a}. * @param b Parameter {@code b}. - * @param epsilon When the absolute value of the nth item in the - * series is less than epsilon the approximation ceases to calculate - * further elements in the series. - * @param maxIterations Maximum number of "iterations" to complete. + * @param epsilon This parameter is ignored. + * @param maxIterations This parameter is ignored. * @return log(B(a, b)). + * @deprecated as of version 3.1, this method is deprecated as the + * computation of the beta function is no longer iterative. 
This method + * internally calls {@link #logBeta(double, double)}. */ + @Deprecated public static double logBeta(double a, double b, double epsilon, int maxIterations) { - double ret; - if (Double.isNaN(a) || - Double.isNaN(b) || - a <= 0.0 || - b <= 0.0) { - ret = Double.NaN; + return logBeta(a, b); + } + + + /** + * Returns the value of log Γ(a + b) for 1 ≤ a, b ≤ 2. Based on the + * <em>NSWC Library of Mathematics Subroutines</em> double precision + * implementation, {@code DGSMLN}. + * + * @param a First argument. + * @param b Second argument. + * @return the value of {@code log(Gamma(a + b))}. + * @throws OutOfRangeException if {@code a} or {@code b} is lower than + * {@code 1.0} or greater than {@code 2.0}. + */ + private static double logGammaSum(final double a, final double b) + throws OutOfRangeException { + + if ((a < 1.0) || (a > 2.0)) { + throw new OutOfRangeException(a, 1.0, 2.0); + } + if ((b < 1.0) || (b > 2.0)) { + throw new OutOfRangeException(b, 1.0, 2.0); + } + + final double x = (a - 1.0) + (b - 1.0); + if (x <= 0.5) { + return Gamma.logGamma1p(1.0 + x); + } else if (x <= 1.5) { + return Gamma.logGamma1p(x) + FastMath.log1p(x); } else { - ret = Gamma.logGamma(a) + Gamma.logGamma(b) - - Gamma.logGamma(a + b); + return Gamma.logGamma1p(x - 1.0) + FastMath.log(x * (1.0 + x)); } + } - return ret; + /** + * Returns the value of log[Γ(b) / Γ(a + b)] for a ≥ 0 and b ≥ 10. Based on + * the <em>NSWC Library of Mathematics Subroutines</em> double precision + * implementation, {@code DLGDIV}. + * + * @param a First argument. + * @param b Second argument. + * @return the value of {@code log(Gamma(b) / Gamma(a + b))}. + * @throws NumberIsTooSmallException if {@code a < 0.0} or {@code b < 10.0}. 
+ */ + private static double logGammaMinusLogGammaSum(final double a, + final double b) + throws NumberIsTooSmallException { + + if (a < 0.0) { + throw new NumberIsTooSmallException(a, 0.0, true); + } + if (b < 10.0) { + throw new NumberIsTooSmallException(b, 10.0, true); + } + + /* + * p = a / (a + b), q = b / (a + b), d = a + b - 0.5 + */ + final double p; + final double q; + final double d; + final double w; + if (a <= b) { + d = b + (a - 0.5); + w = deltaMinusDeltaSum(a, b); + } else { + d = a + (b - 0.5); + w = deltaMinusDeltaSum(b, a); + } + + final double u = d * FastMath.log1p(a / b); + final double v = a * (FastMath.log(b) - 1.0); + + return u <= v ? (w - u) - v : (w - v) - u; + } + + /** + * Returns the value of Δ(b) - Δ(a + b), with 0 ≤ a ≤ b and b ≥ 10. Based + * on equations (26), (27) and (28) in Didonato and Morris (1992). + * + * @param a First argument. + * @param b Second argument. + * @return the value of {@code Delta(b) - Delta(a + b)} + * @throws OutOfRangeException if {@code a < 0} or {@code a > b} + * @throws NumberIsTooSmallException if {@code b < 10} + */ + private static double deltaMinusDeltaSum(final double a, + final double b) + throws OutOfRangeException, NumberIsTooSmallException { + + if ((a < 0) || (a > b)) { + throw new OutOfRangeException(a, 0, b); + } + if (b < 10) { + throw new NumberIsTooSmallException(b, 10, true); + } + + final double h = a / b; + final double p = h / (1.0 + h); + final double q = 1.0 / (1.0 + h); + final double q2 = q * q; + /* + * s[i] = 1 + q + ... 
- q**(2 * i) + */ + final double[] s = new double[DELTA.length]; + s[0] = 1.0; + for (int i = 1; i < s.length; i++) { + s[i] = 1.0 + (q + q2 * s[i - 1]); + } + /* + * w = Delta(b) - Delta(a + b) + */ + final double sqrtT = 10.0 / b; + final double t = sqrtT * sqrtT; + double w = DELTA[DELTA.length - 1] * s[s.length - 1]; + for (int i = DELTA.length - 2; i >= 0; i--) { + w = t * w + DELTA[i] * s[i]; + } + return w * p / b; + } + + /** + * Returns the value of Δ(p) + Δ(q) - Δ(p + q), with p, q ≥ 10. Based on + * the <em>NSWC Library of Mathematics Subroutines</em> double precision + * implementation, {@code DBCORR}. + * + * @param p First argument. + * @param q Second argument. + * @return the value of {@code Delta(p) + Delta(q) - Delta(p + q)}. + * @throws NumberIsTooSmallException if {@code p < 10.0} or {@code q < 10.0}. + */ + private static double sumDeltaMinusDeltaSum(final double p, + final double q) { + + if (p < 10.0) { + throw new NumberIsTooSmallException(p, 10.0, true); + } + if (q < 10.0) { + throw new NumberIsTooSmallException(q, 10.0, true); + } + + final double a = FastMath.min(p, q); + final double b = FastMath.max(p, q); + final double sqrtT = 10.0 / a; + final double t = sqrtT * sqrtT; + double z = DELTA[DELTA.length - 1]; + for (int i = DELTA.length - 2; i >= 0; i--) { + z = t * z + DELTA[i]; + } + return z / a + deltaMinusDeltaSum(a, b); + } + + /** + * Returns the value of log B(p, q) for 0 ≤ x ≤ 1 and p, q > 0. Based on the + * <em>NSWC Library of Mathematics Subroutines</em> implementation, + * {@code DBETLN}. + * + * @param p First argument. + * @param q Second argument. + * @return the value of {@code log(Beta(p, q))}, {@code NaN} if + * {@code p <= 0} or {@code q <= 0}. 
+ */ + public static final double logBeta(final double p, final double q) { + + if (Double.isNaN(p) || Double.isNaN(q) || (p <= 0.0) || (q <= 0.0)) { + return Double.NaN; + } + + final double a = FastMath.min(p, q); + final double b = FastMath.max(p, q); + if (a >= 10.0) { + final double w = sumDeltaMinusDeltaSum(a, b); + final double h = a / b; + final double c = h / (1.0 + h); + final double u = -(a - 0.5) * FastMath.log(c); + final double v = b * FastMath.log1p(h); + if (u <= v) { + return (((-0.5 * FastMath.log(b) + HALF_LOG_TWO_PI) + w) - u) - v; + } else { + return (((-0.5 * FastMath.log(b) + HALF_LOG_TWO_PI) + w) - v) - u; + } + } else if (a > 2.0) { + if (b > 1000.0) { + final int n = (int) FastMath.floor(a - 1.0); + double prod = 1.0; + double ared = a; + for (int i = 0; i < n; i++) { + ared -= 1.0; + prod *= ared / (1.0 + ared / b); + } + return (FastMath.log(prod) - n * FastMath.log(b)) + + (Gamma.logGamma(ared) + + logGammaMinusLogGammaSum(ared, b)); + } else { + double prod1 = 1.0; + double ared = a; + while (ared > 2.0) { + ared -= 1.0; + final double h = ared / b; + prod1 *= h / (1.0 + h); + } + if (b < 10.0) { + double prod2 = 1.0; + double bred = b; + while (bred > 2.0) { + bred -= 1.0; + prod2 *= bred / (ared + bred); + } + return FastMath.log(prod1) + + FastMath.log(prod2) + + (Gamma.logGamma(ared) + + (Gamma.logGamma(bred) - + logGammaSum(ared, bred))); + } else { + return FastMath.log(prod1) + + Gamma.logGamma(ared) + + logGammaMinusLogGammaSum(ared, b); + } + } + } else if (a >= 1.0) { + if (b > 2.0) { + if (b < 10.0) { + double prod = 1.0; + double bred = b; + while (bred > 2.0) { + bred -= 1.0; + prod *= bred / (a + bred); + } + return FastMath.log(prod) + + (Gamma.logGamma(a) + + (Gamma.logGamma(bred) - + logGammaSum(a, bred))); + } else { + return Gamma.logGamma(a) + + logGammaMinusLogGammaSum(a, b); + } + } else { + return Gamma.logGamma(a) + + Gamma.logGamma(b) - + logGammaSum(a, b); + } + } else { + if (b >= 10.0) { + return 
Gamma.logGamma(a) + + logGammaMinusLogGammaSum(a, b); + } else { + // The following command is the original NSWC implementation. + // return Gamma.logGamma(a) + + // (Gamma.logGamma(b) - Gamma.logGamma(a + b)); + // The following command turns out to be more accurate. + return FastMath.log(Gamma.gamma(a) * Gamma.gamma(b) / + Gamma.gamma(a + b)); + } + } } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-738_f64b6a90.diff
bugs-dot-jar_data_MATH-305_ef9b639a
--- BugID: MATH-305 Summary: NPE in KMeansPlusPlusClusterer unittest Description: "When running this unittest, I am facing this NPE:\njava.lang.NullPointerException\n\tat org.apache.commons.math.stat.clustering.KMeansPlusPlusClusterer.assignPointsToClusters(KMeansPlusPlusClusterer.java:91)\n\nThis is the unittest:\n\n\npackage org.fao.fisheries.chronicles.calcuation.cluster;\n\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertTrue;\n\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport org.apache.commons.math.stat.clustering.Cluster;\nimport org.apache.commons.math.stat.clustering.EuclideanIntegerPoint;\nimport org.apache.commons.math.stat.clustering.KMeansPlusPlusClusterer;\nimport org.fao.fisheries.chronicles.input.CsvImportProcess;\nimport org.fao.fisheries.chronicles.input.Top200Csv;\nimport org.junit.Test;\n\npublic class ClusterAnalysisTest {\n\n\n\t@Test\n\tpublic void testPerformClusterAnalysis2() {\n\t\tKMeansPlusPlusClusterer<EuclideanIntegerPoint> transformer = new KMeansPlusPlusClusterer<EuclideanIntegerPoint>(\n\t\t\t\tnew Random(1746432956321l));\n\t\tEuclideanIntegerPoint[] points = new EuclideanIntegerPoint[] {\n\t\t\t\tnew EuclideanIntegerPoint(new int[] { 1959, 325100 }),\n\t\t\t\tnew EuclideanIntegerPoint(new int[] { 1960, 373200 }), };\n\t\tList<Cluster<EuclideanIntegerPoint>> clusters = transformer.cluster(Arrays.asList(points), 1, 1);\n\t\tassertEquals(1, clusters.size());\n\n\t}\n\n}\n" diff --git a/src/main/java/org/apache/commons/math/util/MathUtils.java b/src/main/java/org/apache/commons/math/util/MathUtils.java index 46368dc..ad77a56 100644 --- a/src/main/java/org/apache/commons/math/util/MathUtils.java +++ b/src/main/java/org/apache/commons/math/util/MathUtils.java @@ -1621,9 +1621,9 @@ public final class MathUtils { * @return the L<sub>2</sub> distance between the two points */ public static double distance(int[] p1, int[] p2) { - int sum = 0; + double sum = 0; for (int 
i = 0; i < p1.length; i++) { - final int dp = p1[i] - p2[i]; + final double dp = p1[i] - p2[i]; sum += dp * dp; } return Math.sqrt(sum);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-305_ef9b639a.diff
bugs-dot-jar_data_MATH-471_a4b1948b
--- BugID: MATH-471 Summary: MathUtils.equals(double, double) does not work properly for floats Description: | MathUtils.equals(double, double) does not work properly for floats. There is no equals(float,float) so float parameters are automatically promoted to double. However, that is not necessarily appropriate, given that the ULP for a double is much smaller than the ULP for a float. So for example: {code} double oneDouble = 1.0d; assertTrue(MathUtils.equals(oneDouble, Double.longBitsToDouble(1 + Double.doubleToLongBits(oneDouble)))); // OK float oneFloat = 1.0f; assertTrue(MathUtils.equals(oneFloat, Float.intBitsToFloat(1 + Float.floatToIntBits(oneFloat)))); // FAILS float f1 = 333.33334f; double d1 = 333.33334d; assertTrue(MathUtils.equals(d1, f1)); // FAILS {code} I think the equals() methods need to be duplicated with the appropriate changes for floats to avoid any problems with the promotion of floats. diff --git a/src/main/java/org/apache/commons/math/util/MathUtils.java b/src/main/java/org/apache/commons/math/util/MathUtils.java index bd68523..f015cdb 100644 --- a/src/main/java/org/apache/commons/math/util/MathUtils.java +++ b/src/main/java/org/apache/commons/math/util/MathUtils.java @@ -82,6 +82,9 @@ public final class MathUtils { /** Offset to order signed double numbers lexicographically. */ private static final long SGN_MASK = 0x8000000000000000L; + /** Offset to order signed double numbers lexicographically. */ + private static final int SGN_MASK_FLOAT = 0x80000000; + /** All long-representable factorials */ private static final long[] FACTORIALS = new long[] { 1l, 1l, 2l, @@ -416,6 +419,160 @@ public final class MathUtils { /** * Returns true iff they are equal as defined by + * {@link #equals(float,float,int) equals(x, y, 1)}. + * + * @param x first value + * @param y second value + * @return {@code true} if the values are equal. 
+ */ + public static boolean equals(float x, float y) { + return equals(x, y, 1); + } + + /** + * Returns true if both arguments are NaN or neither is NaN and they are + * equal as defined by {@link #equals(float,float) this method}. + * + * @param x first value + * @param y second value + * @return {@code true} if the values are equal or both are NaN. + */ + public static boolean equalsIncludingNaN(float x, float y) { + return (Float.isNaN(x) && Float.isNaN(y)) || equals(x, y, 1); + } + + /** + * Returns true if both arguments are equal or within the range of allowed + * error (inclusive). + * + * @param x first value + * @param y second value + * @param eps the amount of absolute error to allow. + * @return {@code true} if the values are equal or within range of each other. + */ + public static boolean equals(float x, float y, float eps) { + return equals(x, y, 1) || FastMath.abs(y - x) <= eps; + } + + /** + * Returns true if both arguments are NaN or are equal or within the range + * of allowed error (inclusive). + * + * @param x first value + * @param y second value + * @param eps the amount of absolute error to allow. + * @return {@code true} if the values are equal or within range of each other, + * or both are NaN. + */ + public static boolean equalsIncludingNaN(float x, float y, float eps) { + return equalsIncludingNaN(x, y) || (FastMath.abs(y - x) <= eps); + } + + /** + * Returns true if both arguments are equal or within the range of allowed + * error (inclusive). + * Two float numbers are considered equal if there are {@code (maxUlps - 1)} + * (or fewer) floating point numbers between them, i.e. two adjacent floating + * point numbers are considered equal. + * Adapted from <a + * href="http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm"> + * Bruce Dawson</a> + * + * @param x first value + * @param y second value + * @param maxUlps {@code (maxUlps - 1)} is the number of floating point + * values between {@code x} and {@code y}. 
+ * @return {@code true} if there are fewer than {@code maxUlps} floating + * point values between {@code x} and {@code y}. + */ + public static boolean equals(float x, float y, int maxUlps) { + // Check that "maxUlps" is non-negative and small enough so that + // NaN won't compare as equal to anything (except another NaN). + assert maxUlps > 0 && maxUlps < NAN_GAP; + + int xInt = Float.floatToIntBits(x); + int yInt = Float.floatToIntBits(y); + + // Make lexicographically ordered as a two's-complement integer. + if (xInt < 0) { + xInt = SGN_MASK_FLOAT - xInt; + } + if (yInt < 0) { + yInt = SGN_MASK_FLOAT - yInt; + } + + final boolean isEqual = FastMath.abs(xInt - yInt) <= maxUlps; + + return isEqual && !Float.isNaN(x) && !Float.isNaN(y); + } + + /** + * Returns true if both arguments are NaN or if they are equal as defined + * by {@link #equals(float,float,int) this method}. + * + * @param x first value + * @param y second value + * @param maxUlps {@code (maxUlps - 1)} is the number of floating point + * values between {@code x} and {@code y}. + * @return {@code true} if both arguments are NaN or if there are less than + * {@code maxUlps} floating point values between {@code x} and {@code y}. + */ + public static boolean equalsIncludingNaN(float x, float y, int maxUlps) { + return (Float.isNaN(x) && Float.isNaN(y)) || equals(x, y, maxUlps); + } + + /** + * Returns true iff both arguments are null or have same dimensions and all + * their elements are equal as defined by + * {@link #equals(float,float) this method}. + * + * @param x first array + * @param y second array + * @return true if the values are both null or have same dimension + * and equal elements. 
+ */ + public static boolean equals(float[] x, float[] y) { + if ((x == null) || (y == null)) { + return !((x == null) ^ (y == null)); + } + if (x.length != y.length) { + return false; + } + for (int i = 0; i < x.length; ++i) { + if (!equals(x[i], y[i])) { + return false; + } + } + return true; + } + + /** + * Returns true iff both arguments are null or have same dimensions and all + * their elements are equal as defined by + * {@link #equalsIncludingNaN(double,double) this method}. + * + * @param x first array + * @param y second array + * @return true if the values are both null or have same dimension and + * equal elements + */ + public static boolean equalsIncludingNaN(float[] x, float[] y) { + if ((x == null) || (y == null)) { + return !((x == null) ^ (y == null)); + } + if (x.length != y.length) { + return false; + } + for (int i = 0; i < x.length; ++i) { + if (!equalsIncludingNaN(x[i], y[i])) { + return false; + } + } + return true; + } + + /** + * Returns true iff they are equal as defined by * {@link #equals(double,double,int) equals(x, y, 1)}. * * @param x first value
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-471_a4b1948b.diff
bugs-dot-jar_data_MATH-1070_8e5867ed
--- BugID: MATH-1070 Summary: Incorrect rounding of float Description: "package org.apache.commons.math3.util \nexample of usage of round functions of Precision class:\n\nPrecision.round(0.0f, 2, BigDecimal.ROUND_UP) = 0.01\nPrecision.round((float)0.0, 2, BigDecimal.ROUND_UP) = 0.01\nPrecision.round((float) 0.0, 2) = 0.0\nPrecision.round(0.0, 2, BigDecimal.ROUND_UP) = 0.0\n\nSeems the reason is usage of extending float to double inside round functions and getting influence of memory trash as value.\n\nI think, same problem will be found at usage of other round modes.\n" diff --git a/src/main/java/org/apache/commons/math3/util/Precision.java b/src/main/java/org/apache/commons/math3/util/Precision.java index ef3b57f..f0b0c4f 100644 --- a/src/main/java/org/apache/commons/math3/util/Precision.java +++ b/src/main/java/org/apache/commons/math3/util/Precision.java @@ -491,8 +491,7 @@ public class Precision { unscaled = FastMath.floor(unscaled); } else { // The following equality test is intentional and needed for rounding purposes - if (FastMath.floor(unscaled) / 2.0 == FastMath.floor(Math - .floor(unscaled) / 2.0)) { // even + if (FastMath.floor(unscaled) / 2.0 == FastMath.floor(FastMath.floor(unscaled) / 2.0)) { // even unscaled = FastMath.floor(unscaled); } else { // odd unscaled = FastMath.ceil(unscaled); @@ -516,7 +515,10 @@ public class Precision { } break; case BigDecimal.ROUND_UP : - unscaled = FastMath.ceil(FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY)); + // do not round if the discarded fraction is equal to zero + if (unscaled != FastMath.floor(unscaled)) { + unscaled = FastMath.ceil(FastMath.nextAfter(unscaled, Double.POSITIVE_INFINITY)); + } break; default : throw new MathIllegalArgumentException(LocalizedFormats.INVALID_ROUNDING_METHOD,
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1070_8e5867ed.diff
bugs-dot-jar_data_MATH-801_118e94b5
--- BugID: MATH-801 Summary: Quaternion not normalized after construction Description: "The use of the Rotation(Vector3D u1,Vector3D u2,Vector3D v1,Vector3D v2) constructor with normalized angle can apparently lead to un-normalized quaternion.\nThis case appeared to me with the following data :\nu1 = (0.9999988431610581, -0.0015210774290851095, 0.0)\nu2 = (0.0, 0.0, 1.0)\nand \nv1 = (0.9999999999999999, 0.0, 0.0)\nv2 = (0.0, 0.0, -1.0)\n\nThis lead to the following quaternion :\nq0 = 225783.35177064248\nq1 = 0.0\nq2 = 0.0\nq3 = -3.3684446110762543E-9\n\nI was expecting to have a normalized quaternion, as input vector's are normalized. Does the quaternion shouldn't be normalized ?\nI've joined the corresponding piece of code as JUnit Test case" diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Rotation.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Rotation.java index d5cfb9b..51ffce2 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Rotation.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Rotation.java @@ -22,6 +22,7 @@ import java.io.Serializable; import org.apache.commons.math3.exception.MathIllegalArgumentException; import org.apache.commons.math3.exception.util.LocalizedFormats; import org.apache.commons.math3.util.FastMath; +import org.apache.commons.math3.util.MathArrays; /** * This class implements rotations in a three-dimensional space. @@ -241,54 +242,72 @@ public class Rotation implements Serializable { det); } - // There are different ways to compute the quaternions elements - // from the matrix. They all involve computing one element from - // the diagonal of the matrix, and computing the three other ones - // using a formula involving a division by the first element, - // which unfortunately can be zero. 
Since the norm of the - // quaternion is 1, we know at least one element has an absolute - // value greater or equal to 0.5, so it is always possible to - // select the right formula and avoid division by zero and even - // numerical inaccuracy. Checking the elements in turn and using - // the first one greater than 0.45 is safe (this leads to a simple - // test since qi = 0.45 implies 4 qi^2 - 1 = -0.19) - double s = ort[0][0] + ort[1][1] + ort[2][2]; - if (s > -0.19) { - // compute q0 and deduce q1, q2 and q3 - q0 = 0.5 * FastMath.sqrt(s + 1.0); - double inv = 0.25 / q0; - q1 = inv * (ort[1][2] - ort[2][1]); - q2 = inv * (ort[2][0] - ort[0][2]); - q3 = inv * (ort[0][1] - ort[1][0]); - } else { - s = ort[0][0] - ort[1][1] - ort[2][2]; + double[] quat = mat2quat(ort); + q0 = quat[0]; + q1 = quat[1]; + q2 = quat[2]; + q3 = quat[3]; + + } + + /** Convert an orthogonal rotation matrix to a quaternion. + * @param ort orthogonal rotation matrix + * @return quaternion corresponding to the matrix + */ + private static double[] mat2quat(final double[][] ort) { + + final double[] quat = new double[4]; + + // There are different ways to compute the quaternions elements + // from the matrix. They all involve computing one element from + // the diagonal of the matrix, and computing the three other ones + // using a formula involving a division by the first element, + // which unfortunately can be zero. Since the norm of the + // quaternion is 1, we know at least one element has an absolute + // value greater or equal to 0.5, so it is always possible to + // select the right formula and avoid division by zero and even + // numerical inaccuracy. 
Checking the elements in turn and using + // the first one greater than 0.45 is safe (this leads to a simple + // test since qi = 0.45 implies 4 qi^2 - 1 = -0.19) + double s = ort[0][0] + ort[1][1] + ort[2][2]; if (s > -0.19) { - // compute q1 and deduce q0, q2 and q3 - q1 = 0.5 * FastMath.sqrt(s + 1.0); - double inv = 0.25 / q1; - q0 = inv * (ort[1][2] - ort[2][1]); - q2 = inv * (ort[0][1] + ort[1][0]); - q3 = inv * (ort[0][2] + ort[2][0]); + // compute q0 and deduce q1, q2 and q3 + quat[0] = 0.5 * FastMath.sqrt(s + 1.0); + double inv = 0.25 / quat[0]; + quat[1] = inv * (ort[1][2] - ort[2][1]); + quat[2] = inv * (ort[2][0] - ort[0][2]); + quat[3] = inv * (ort[0][1] - ort[1][0]); } else { - s = ort[1][1] - ort[0][0] - ort[2][2]; - if (s > -0.19) { - // compute q2 and deduce q0, q1 and q3 - q2 = 0.5 * FastMath.sqrt(s + 1.0); - double inv = 0.25 / q2; - q0 = inv * (ort[2][0] - ort[0][2]); - q1 = inv * (ort[0][1] + ort[1][0]); - q3 = inv * (ort[2][1] + ort[1][2]); - } else { - // compute q3 and deduce q0, q1 and q2 - s = ort[2][2] - ort[0][0] - ort[1][1]; - q3 = 0.5 * FastMath.sqrt(s + 1.0); - double inv = 0.25 / q3; - q0 = inv * (ort[0][1] - ort[1][0]); - q1 = inv * (ort[0][2] + ort[2][0]); - q2 = inv * (ort[2][1] + ort[1][2]); - } + s = ort[0][0] - ort[1][1] - ort[2][2]; + if (s > -0.19) { + // compute q1 and deduce q0, q2 and q3 + quat[1] = 0.5 * FastMath.sqrt(s + 1.0); + double inv = 0.25 / quat[1]; + quat[0] = inv * (ort[1][2] - ort[2][1]); + quat[2] = inv * (ort[0][1] + ort[1][0]); + quat[3] = inv * (ort[0][2] + ort[2][0]); + } else { + s = ort[1][1] - ort[0][0] - ort[2][2]; + if (s > -0.19) { + // compute q2 and deduce q0, q1 and q3 + quat[2] = 0.5 * FastMath.sqrt(s + 1.0); + double inv = 0.25 / quat[2]; + quat[0] = inv * (ort[2][0] - ort[0][2]); + quat[1] = inv * (ort[0][1] + ort[1][0]); + quat[3] = inv * (ort[2][1] + ort[1][2]); + } else { + // compute q3 and deduce q0, q1 and q2 + s = ort[2][2] - ort[0][0] - ort[1][1]; + quat[3] = 0.5 * FastMath.sqrt(s + 
1.0); + double inv = 0.25 / quat[3]; + quat[0] = inv * (ort[0][1] - ort[1][0]); + quat[1] = inv * (ort[0][2] + ort[2][0]); + quat[2] = inv * (ort[2][1] + ort[1][2]); + } + } } - } + + return quat; } @@ -308,85 +327,48 @@ public class Rotation implements Serializable { * @param u2 second vector of the origin pair * @param v1 desired image of u1 by the rotation * @param v2 desired image of u2 by the rotation - * @exception MathIllegalArgumentException if the norm of one of the vectors is zero + * @exception MathIllegalArgumentException if the norm of one of the vectors is zero, + * or if one of the pair is degenerated (i.e. the vectors of the pair are colinear) */ - public Rotation(Vector3D u1, Vector3D u2, Vector3D v1, Vector3D v2) { - - // norms computation - double u1u1 = u1.getNormSq(); - double u2u2 = u2.getNormSq(); - double v1v1 = v1.getNormSq(); - double v2v2 = v2.getNormSq(); - if ((u1u1 == 0) || (u2u2 == 0) || (v1v1 == 0) || (v2v2 == 0)) { - throw new MathIllegalArgumentException(LocalizedFormats.ZERO_NORM_FOR_ROTATION_DEFINING_VECTOR); - } - - // normalize v1 in order to have (v1'|v1') = (u1|u1) - v1 = new Vector3D(FastMath.sqrt(u1u1 / v1v1), v1); - - // adjust v2 in order to have (u1|u2) = (v1'|v2') and (v2'|v2') = (u2|u2) - double u1u2 = u1.dotProduct(u2); - double v1v2 = v1.dotProduct(v2); - double coeffU = u1u2 / u1u1; - double coeffV = v1v2 / u1u1; - double beta = FastMath.sqrt((u2u2 - u1u2 * coeffU) / (v2v2 - v1v2 * coeffV)); - double alpha = coeffU - beta * coeffV; - v2 = new Vector3D(alpha, v1, beta, v2); - - // preliminary computation - Vector3D uRef = u1; - Vector3D vRef = v1; - Vector3D v1Su1 = v1.subtract(u1); - Vector3D v2Su2 = v2.subtract(u2); - Vector3D k = v1Su1.crossProduct(v2Su2); - Vector3D u3 = u1.crossProduct(u2); - double c = k.dotProduct(u3); - final double inPlaneThreshold = 0.001; - if (c <= inPlaneThreshold * k.getNorm() * u3.getNorm()) { - // the (q1, q2, q3) vector is close to the (u1, u2) plane - // we try other vectors - 
Vector3D v3 = Vector3D.crossProduct(v1, v2); - Vector3D v3Su3 = v3.subtract(u3); - k = v1Su1.crossProduct(v3Su3); - Vector3D u2Prime = u1.crossProduct(u3); - c = k.dotProduct(u2Prime); - - if (c <= inPlaneThreshold * k.getNorm() * u2Prime.getNorm()) { - // the (q1, q2, q3) vector is also close to the (u1, u3) plane, - // it is almost aligned with u1: we try (u2, u3) and (v2, v3) - k = v2Su2.crossProduct(v3Su3); - c = k.dotProduct(u2.crossProduct(u3)); - - if (c <= 0) { - // the (q1, q2, q3) vector is aligned with everything - // this is really the identity rotation - q0 = 1.0; - q1 = 0.0; - q2 = 0.0; - q3 = 0.0; - return; - } - - // we will have to use u2 and v2 to compute the scalar part - uRef = u2; - vRef = v2; - - } - - } + public Rotation(Vector3D u1, Vector3D u2, Vector3D v1, Vector3D v2) + throws MathIllegalArgumentException { + + // build orthonormalized base from u1, u2 + // this fails when vectors are null or colinear, which is forbidden to define a rotation + final Vector3D u3 = u1.crossProduct(u2).normalize(); + u2 = u3.crossProduct(u1).normalize(); + u1 = u1.normalize(); + + // build an orthonormalized base from v1, v2 + // this fails when vectors are null or colinear, which is forbidden to define a rotation + final Vector3D v3 = v1.crossProduct(v2).normalize(); + v2 = v3.crossProduct(v1).normalize(); + v1 = v1.normalize(); + + // buid a matrix transforming the first base into the second one + final double[][] m = new double[][] { + { + MathArrays.linearCombination(u1.getX(), v1.getX(), u2.getX(), v2.getX(), u3.getX(), v3.getX()), + MathArrays.linearCombination(u1.getY(), v1.getX(), u2.getY(), v2.getX(), u3.getY(), v3.getX()), + MathArrays.linearCombination(u1.getZ(), v1.getX(), u2.getZ(), v2.getX(), u3.getZ(), v3.getX()) + }, + { + MathArrays.linearCombination(u1.getX(), v1.getY(), u2.getX(), v2.getY(), u3.getX(), v3.getY()), + MathArrays.linearCombination(u1.getY(), v1.getY(), u2.getY(), v2.getY(), u3.getY(), v3.getY()), + 
MathArrays.linearCombination(u1.getZ(), v1.getY(), u2.getZ(), v2.getY(), u3.getZ(), v3.getY()) + }, + { + MathArrays.linearCombination(u1.getX(), v1.getZ(), u2.getX(), v2.getZ(), u3.getX(), v3.getZ()), + MathArrays.linearCombination(u1.getY(), v1.getZ(), u2.getY(), v2.getZ(), u3.getY(), v3.getZ()), + MathArrays.linearCombination(u1.getZ(), v1.getZ(), u2.getZ(), v2.getZ(), u3.getZ(), v3.getZ()) + } + }; - // compute the vectorial part - c = FastMath.sqrt(c); - double inv = 1.0 / (c + c); - q1 = inv * k.getX(); - q2 = inv * k.getY(); - q3 = inv * k.getZ(); - - // compute the scalar part - k = new Vector3D(uRef.getY() * q3 - uRef.getZ() * q2, - uRef.getZ() * q1 - uRef.getX() * q3, - uRef.getX() * q2 - uRef.getY() * q1); - q0 = vRef.dotProduct(k) / (2 * k.getNormSq()); + double[] quat = mat2quat(m); + q0 = quat[0]; + q1 = quat[1]; + q2 = quat[2]; + q3 = quat[3]; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-801_118e94b5.diff
bugs-dot-jar_data_MATH-1115_2a6c6409
--- BugID: MATH-1115 Summary: Constructor of PolyhedronsSet throws NullPointerException Description: |- The following statement throws a NullPointerException: new org.apache.commons.math3.geometry.euclidean.threed.PolyhedronsSet(0.0d, 0.0d, 0.0d, 0.0d, 0.0d, 0.0d); I found that other numbers also produce that effect. The stack trace: java.lang.NullPointerException at org.apache.commons.math3.geometry.partitioning.BSPTree.fitToCell(BSPTree.java:297) at org.apache.commons.math3.geometry.partitioning.BSPTree.insertCut(BSPTree.java:155) at org.apache.commons.math3.geometry.partitioning.RegionFactory.buildConvex(RegionFactory.java:55) at org.apache.commons.math3.geometry.euclidean.threed.PolyhedronsSet.buildBoundary(PolyhedronsSet.java:119) at org.apache.commons.math3.geometry.euclidean.threed.PolyhedronsSet.<init>(PolyhedronsSet.java:97) diff --git a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/PolyhedronsSet.java b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/PolyhedronsSet.java index 31b78a6..0c5a9a7 100644 --- a/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/PolyhedronsSet.java +++ b/src/main/java/org/apache/commons/math3/geometry/euclidean/threed/PolyhedronsSet.java @@ -59,6 +59,16 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { * cells). In order to avoid building too many small objects, it is * recommended to use the predefined constants * {@code Boolean.TRUE} and {@code Boolean.FALSE}</p> + * <p> + * This constructor is aimed at expert use, as building the tree may + * be a difficult taks. It is not intended for general use and for + * performances reasons does not check thoroughly its input, as this would + * require walking the full tree each time. Failing to provide a tree with + * the proper attributes, <em>will</em> therefore generate problems like + * {@link NullPointerException} or {@link ClassCastException} only later on. 
+ * This limitation is known and explains why this constructor is for expert + * use only. The caller does have the responsibility to provided correct arguments. + * </p> * @param tree inside/outside BSP tree representing the region * @param tolerance tolerance below which points are considered identical * @since 3.3 @@ -190,6 +200,10 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { final double yMin, final double yMax, final double zMin, final double zMax, final double tolerance) { + if ((xMin >= xMax - tolerance) || (yMin >= yMax - tolerance) || (zMin >= zMax - tolerance)) { + // too thin box, build an empty polygons set + return new BSPTree<Euclidean3D>(Boolean.FALSE); + } final Plane pxMin = new Plane(new Vector3D(xMin, 0, 0), Vector3D.MINUS_I, tolerance); final Plane pxMax = new Plane(new Vector3D(xMax, 0, 0), Vector3D.PLUS_I, tolerance); final Plane pyMin = new Plane(new Vector3D(0, yMin, 0), Vector3D.MINUS_J, tolerance);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1115_2a6c6409.diff
bugs-dot-jar_data_MATH-704_3f645310
--- BugID: MATH-704 Summary: One of Variance.evaluate() methods does not work correctly Description: |- The method org.apache.commons.math.stat.descriptive.moment.Variance.evaluate(double[] values, double[] weights, double mean, int begin, int length) does not work properly. Looks loke it ignores the length parameter and grabs the whole dataset. Similar method in Mean class seems to work. I did not check other methods taking the part of the array; they may have the same problem. Workaround: I had to shrink my arrays and use the method without the length. diff --git a/src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java b/src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java index e5518e3..1de139f 100644 --- a/src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java +++ b/src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java @@ -517,7 +517,7 @@ public class Variance extends AbstractStorelessUnivariateStatistic implements Se } double sumWts = 0; - for (int i = 0; i < weights.length; i++) { + for (int i = begin; i < begin + length; i++) { sumWts += weights[i]; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-704_3f645310.diff
bugs-dot-jar_data_MATH-329_6dd3724b
--- BugID: MATH-329 Summary: 'In stat.Frequency, getPct(Object) uses getCumPct(Comparable) instead of getPct(Comparable) ' Description: |- Drop in Replacement of 1.2 with 2.0 not possible because all getPct calls will be cummulative without code change Frequency.java /** * Returns the percentage of values that are equal to v * @deprecated replaced by {@link #getPct(Comparable)} as of 2.0 */ @Deprecated public double getPct(Object v) { return getCumPct((Comparable<?>) v); } diff --git a/src/main/java/org/apache/commons/math/stat/Frequency.java b/src/main/java/org/apache/commons/math/stat/Frequency.java index c45d728..68c8bf2 100644 --- a/src/main/java/org/apache/commons/math/stat/Frequency.java +++ b/src/main/java/org/apache/commons/math/stat/Frequency.java @@ -300,7 +300,7 @@ public class Frequency implements Serializable { */ @Deprecated public double getPct(Object v) { - return getCumPct((Comparable<?>) v); + return getPct((Comparable<?>) v); } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-329_6dd3724b.diff
bugs-dot-jar_data_MATH-776_b9ca51f0
--- BugID: MATH-776 Summary: Need range checks for elitismRate in ElitisticListPopulation constructors. Description: There is a range check for setting the elitismRate via ElitisticListPopulation's setElitismRate method, but not via the constructors. diff --git a/src/main/java/org/apache/commons/math3/genetics/ElitisticListPopulation.java b/src/main/java/org/apache/commons/math3/genetics/ElitisticListPopulation.java index a309080..829c97d 100644 --- a/src/main/java/org/apache/commons/math3/genetics/ElitisticListPopulation.java +++ b/src/main/java/org/apache/commons/math3/genetics/ElitisticListPopulation.java @@ -24,7 +24,7 @@ import org.apache.commons.math3.exception.util.LocalizedFormats; import org.apache.commons.math3.util.FastMath; /** - * Population of chromosomes which uses elitism (certain percentace of the best + * Population of chromosomes which uses elitism (certain percentage of the best * chromosomes is directly copied to the next generation). * * @version $Id$ @@ -42,12 +42,13 @@ public class ElitisticListPopulation extends ListPopulation { * @param populationLimit maximal size of the population * @param elitismRate how many best chromosomes will be directly transferred to the * next generation [in %] + * @throws OutOfRangeException if the elitism rate is outside the [0, 1] range */ public ElitisticListPopulation(final List<Chromosome> chromosomes, final int populationLimit, final double elitismRate) { super(chromosomes, populationLimit); - this.elitismRate = elitismRate; + setElitismRate(elitismRate); } /** @@ -57,10 +58,11 @@ public class ElitisticListPopulation extends ListPopulation { * @param populationLimit maximal size of the population * @param elitismRate how many best chromosomes will be directly transferred to the * next generation [in %] + * @throws OutOfRangeException if the elitism rate is outside the [0, 1] range */ public ElitisticListPopulation(final int populationLimit, final double elitismRate) { super(populationLimit); - 
this.elitismRate = elitismRate; + setElitismRate(elitismRate); } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-776_b9ca51f0.diff
bugs-dot-jar_data_MATH-781_3c4cb189
--- BugID: MATH-781 Summary: SimplexSolver gives bad results Description: "Methode SimplexSolver.optimeze(...) gives bad results with commons-math3-3.0\nin a simple test problem. It works well in commons-math-2.2. " diff --git a/src/main/java/org/apache/commons/math3/optimization/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math3/optimization/linear/SimplexTableau.java index 9a6993a..327b2ae 100644 --- a/src/main/java/org/apache/commons/math3/optimization/linear/SimplexTableau.java +++ b/src/main/java/org/apache/commons/math3/optimization/linear/SimplexTableau.java @@ -335,7 +335,7 @@ class SimplexTableau implements Serializable { // positive cost non-artificial variables for (int i = getNumObjectiveFunctions(); i < getArtificialVariableOffset(); i++) { final double entry = tableau.getEntry(0, i); - if (Precision.compareTo(entry, 0d, maxUlps) > 0) { + if (Precision.compareTo(entry, 0d, epsilon) > 0) { columnsToDrop.add(i); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-781_3c4cb189.diff
bugs-dot-jar_data_MATH-812_607c9ec6
--- BugID: MATH-812 Summary: In RealVector, dotProduct and outerProduct return wrong results due to misuse of sparse iterators Description: |- In class {{RealVector}}, the default implementation of {{RealMatrix outerProduct(RealVector)}} uses sparse iterators on the entries of the two vectors. The rationale behind this is that {{0d * x == 0d}} is {{true}} for all {{double x}}. This assumption is in fact false, since {{0d * NaN == NaN}}. Proposed fix is to loop through *all* entries of both vectors. This can have a significant impact on the CPU cost, but robustness should probably be preferred over speed in default implementations. Same issue occurs with {{double dotProduct(RealVector)}}, which uses sparse iterators for {{this}} only. Another option would be to through an exception if {{isNaN()}} is {{true}}, in which case caching could be used for both {{isNaN()}} and {{isInfinite()}}. diff --git a/src/main/java/org/apache/commons/math3/linear/RealVector.java b/src/main/java/org/apache/commons/math3/linear/RealVector.java index 89611c3..f05ea4d 100644 --- a/src/main/java/org/apache/commons/math3/linear/RealVector.java +++ b/src/main/java/org/apache/commons/math3/linear/RealVector.java @@ -642,27 +642,20 @@ public abstract class RealVector { * @return the matrix outer product between this instance and {@code v}. 
*/ public RealMatrix outerProduct(RealVector v) { - RealMatrix product; + final int m = this.getDimension(); + final int n = v.getDimension(); + final RealMatrix product; if (v instanceof SparseRealVector || this instanceof SparseRealVector) { - product = new OpenMapRealMatrix(this.getDimension(), - v.getDimension()); + product = new OpenMapRealMatrix(m, n); } else { - product = new Array2DRowRealMatrix(this.getDimension(), - v.getDimension()); + product = new Array2DRowRealMatrix(m, n); } - Iterator<Entry> thisIt = sparseIterator(); - while (thisIt.hasNext()) { - final Entry thisE = thisIt.next(); - Iterator<Entry> otherIt = v.sparseIterator(); - while (otherIt.hasNext()) { - final Entry otherE = otherIt.next(); - product.setEntry(thisE.getIndex(), otherE.getIndex(), - thisE.getValue() * otherE.getValue()); + for (int i = 0; i < m; i++) { + for (int j = 0; j < n; j++) { + product.setEntry(i, j, this.getEntry(i) * v.getEntry(j)); } } - return product; - } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-812_607c9ec6.diff
bugs-dot-jar_data_MATH-939_49444ee6
--- BugID: MATH-939 Summary: stat.correlation.Covariance should allow one-column matrices Description: Currently (rev 1453206), passing 1-by-M matrix to the Covariance constructor throws IllegalArgumentException. For consistency, the Covariance class should work for a single-column matrix (i.e., for a N-dimensional random variable with N=1) and it should return 1-by-1 covariance matrix with the variable's variance in its only element. diff --git a/src/main/java/org/apache/commons/math3/stat/correlation/Covariance.java b/src/main/java/org/apache/commons/math3/stat/correlation/Covariance.java index ba62463..ace3205 100644 --- a/src/main/java/org/apache/commons/math3/stat/correlation/Covariance.java +++ b/src/main/java/org/apache/commons/math3/stat/correlation/Covariance.java @@ -17,6 +17,7 @@ package org.apache.commons.math3.stat.correlation; import org.apache.commons.math3.exception.MathIllegalArgumentException; +import org.apache.commons.math3.exception.NotStrictlyPositiveException; import org.apache.commons.math3.exception.util.LocalizedFormats; import org.apache.commons.math3.linear.RealMatrix; import org.apache.commons.math3.linear.BlockRealMatrix; @@ -70,16 +71,18 @@ public class Covariance { * <p>The <code>biasCorrected</code> parameter determines whether or not * covariance estimates are bias-corrected.</p> * - * <p>The input array must be rectangular with at least two columns + * <p>The input array must be rectangular with at least one column * and two rows.</p> * * @param data rectangular array with columns representing covariates * @param biasCorrected true means covariances are bias-corrected * @throws MathIllegalArgumentException if the input data array is not - * rectangular with at least two rows and two columns. + * rectangular with at least two rows and one column. + * @throws NotStrictlyPositiveException if the input data array is not + * rectangular with at least one row and one column. 
*/ public Covariance(double[][] data, boolean biasCorrected) - throws MathIllegalArgumentException { + throws MathIllegalArgumentException, NotStrictlyPositiveException { this(new BlockRealMatrix(data), biasCorrected); } @@ -87,14 +90,17 @@ public class Covariance { * Create a Covariance matrix from a rectangular array * whose columns represent covariates. * - * <p>The input array must be rectangular with at least two columns + * <p>The input array must be rectangular with at least one column * and two rows</p> * * @param data rectangular array with columns representing covariates * @throws MathIllegalArgumentException if the input data array is not - * rectangular with at least two rows and two columns. + * rectangular with at least two rows and one column. + * @throws NotStrictlyPositiveException if the input data array is not + * rectangular with at least one row and one column. */ - public Covariance(double[][] data) throws MathIllegalArgumentException { + public Covariance(double[][] data) + throws MathIllegalArgumentException, NotStrictlyPositiveException { this(data, true); } @@ -105,12 +111,12 @@ public class Covariance { * <p>The <code>biasCorrected</code> parameter determines whether or not * covariance estimates are bias-corrected.</p> * - * <p>The matrix must have at least two columns and two rows</p> + * <p>The matrix must have at least one column and two rows</p> * * @param matrix matrix with columns representing covariates * @param biasCorrected true means covariances are bias-corrected * @throws MathIllegalArgumentException if the input matrix does not have - * at least two rows and two columns + * at least two rows and one column */ public Covariance(RealMatrix matrix, boolean biasCorrected) throws MathIllegalArgumentException { @@ -123,11 +129,11 @@ public class Covariance { * Create a covariance matrix from a matrix whose columns * represent covariates. 
* - * <p>The matrix must have at least two columns and two rows</p> + * <p>The matrix must have at least one column and two rows</p> * * @param matrix matrix with columns representing covariates * @throws MathIllegalArgumentException if the input matrix does not have - * at least two rows and two columns + * at least two rows and one column */ public Covariance(RealMatrix matrix) throws MathIllegalArgumentException { this(matrix, true); @@ -154,7 +160,7 @@ public class Covariance { /** * Compute a covariance matrix from a matrix whose columns represent * covariates. - * @param matrix input matrix (must have at least two columns and two rows) + * @param matrix input matrix (must have at least one column and two rows) * @param biasCorrected determines whether or not covariance estimates are bias-corrected * @return covariance matrix * @throws MathIllegalArgumentException if the matrix does not contain sufficient data @@ -178,7 +184,7 @@ public class Covariance { /** * Create a covariance matrix from a matrix whose columns represent * covariates. Covariances are computed using the bias-corrected formula. - * @param matrix input matrix (must have at least two columns and two rows) + * @param matrix input matrix (must have at least one column and two rows) * @return covariance matrix * @throws MathIllegalArgumentException if matrix does not contain sufficient data * @see #Covariance @@ -191,26 +197,31 @@ public class Covariance { /** * Compute a covariance matrix from a rectangular array whose columns represent * covariates. 
- * @param data input array (must have at least two columns and two rows) + * @param data input array (must have at least one column and two rows) * @param biasCorrected determines whether or not covariance estimates are bias-corrected * @return covariance matrix * @throws MathIllegalArgumentException if the data array does not contain sufficient * data + * @throws NotStrictlyPositiveException if the input data array is not + * rectangular with at least one row and one column. */ protected RealMatrix computeCovarianceMatrix(double[][] data, boolean biasCorrected) - throws MathIllegalArgumentException { + throws MathIllegalArgumentException, NotStrictlyPositiveException { return computeCovarianceMatrix(new BlockRealMatrix(data), biasCorrected); } /** * Create a covariance matrix from a rectangular array whose columns represent * covariates. Covariances are computed using the bias-corrected formula. - * @param data input array (must have at least two columns and two rows) + * @param data input array (must have at least one column and two rows) * @return covariance matrix * @throws MathIllegalArgumentException if the data array does not contain sufficient data + * @throws NotStrictlyPositiveException if the input data array is not + * rectangular with at least one row and one column. * @see #Covariance */ - protected RealMatrix computeCovarianceMatrix(double[][] data) throws MathIllegalArgumentException { + protected RealMatrix computeCovarianceMatrix(double[][] data) + throws MathIllegalArgumentException, NotStrictlyPositiveException { return computeCovarianceMatrix(data, true); } @@ -268,7 +279,7 @@ public class Covariance { /** * Throws MathIllegalArgumentException if the matrix does not have at least - * two columns and two rows. + * one column and two rows. 
* @param matrix matrix to check * @throws MathIllegalArgumentException if the matrix does not contain sufficient data * to compute covariance @@ -276,7 +287,7 @@ public class Covariance { private void checkSufficientData(final RealMatrix matrix) throws MathIllegalArgumentException { int nRows = matrix.getRowDimension(); int nCols = matrix.getColumnDimension(); - if (nRows < 2 || nCols < 2) { + if (nRows < 2 || nCols < 1) { throw new MathIllegalArgumentException( LocalizedFormats.INSUFFICIENT_ROWS_AND_COLUMNS, nRows, nCols);
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-939_49444ee6.diff
bugs-dot-jar_data_MATH-716_faa77857
--- BugID: MATH-716 Summary: BracketingNthOrderBrentSolver exceeds maxIterationCount while updating always the same boundary Description: |- In some cases, the aging feature in BracketingNthOrderBrentSolver fails. It attempts to balance the bracketing points by targeting a non-zero value instead of the real root. However, the chosen target is too close too zero, and the inverse polynomial approximation is always on the same side, thus always updates the same bracket. In the real used case for a large program, I had a bracket point xA = 12500.0, yA = 3.7e-16, agingA = 0, which is the (really good) estimate of the zero on one side of the root and xB = 12500.03, yB = -7.0e-5, agingB = 97. This shows that the bracketing interval is completely unbalanced, and we never succeed to rebalance it as we always updates (xA, yA) and never updates (xB, yB). diff --git a/src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java b/src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java index 93dd3bb..59dc461 100644 --- a/src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java +++ b/src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java @@ -232,10 +232,16 @@ public class BracketingNthOrderBrentSolver double targetY; if (agingA >= MAXIMAL_AGING) { // we keep updating the high bracket, try to compensate this - targetY = -REDUCTION_FACTOR * yB; + final int p = agingA - MAXIMAL_AGING; + final double weightA = (1 << p) - 1; + final double weightB = p + 1; + targetY = (weightA * yA - weightB * REDUCTION_FACTOR * yB) / (weightA + weightB); } else if (agingB >= MAXIMAL_AGING) { // we keep updating the low bracket, try to compensate this - targetY = -REDUCTION_FACTOR * yA; + final int p = agingB - MAXIMAL_AGING; + final double weightA = p + 1; + final double weightB = (1 << p) - 1; + targetY = (weightB * yB - weightA * REDUCTION_FACTOR * yA) / (weightA + 
weightB); } else { // bracketing is balanced, try to find the root itself targetY = 0;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-716_faa77857.diff
bugs-dot-jar_data_MATH-1257_03178c8b
--- BugID: MATH-1257 Summary: NormalDistribution.cumulativeProbability() suffers from cancellation Description: "I see the following around line 194:\n{noformat}\n return 0.5 * (1 + Erf.erf(dev / (standardDeviation * SQRT2)));\n{noformat}\n\nWhen erf() returns a very small value, this cancels in the addition with the \"1.0\" which leads to poor precision in the results.\n\nI would suggest changing this line to read more like:\n{noformat}\nreturn 0.5 * Erf.erfc( -dev / standardDeviation * SQRT2 );\n{noformat} \n\nShould you want some test cases for \"extreme values\" (one might argue that within 10 standard deviations isn't all that extreme) then you can check the following: http://www.jstatsoft.org/v52/i07/ then look in the v52i07-xls.zip at replication-01-distribution-standard-normal.xls\n\nI think you will also find that evaluation of expressions such as {noformat}NormalDistribution( 0, 1 ).cumulativeProbability( -10.0 );{noformat}\nare pretty far off." diff --git a/src/main/java/org/apache/commons/math4/distribution/NormalDistribution.java b/src/main/java/org/apache/commons/math4/distribution/NormalDistribution.java index 6313ef0..5216867 100644 --- a/src/main/java/org/apache/commons/math4/distribution/NormalDistribution.java +++ b/src/main/java/org/apache/commons/math4/distribution/NormalDistribution.java @@ -193,7 +193,7 @@ public class NormalDistribution extends AbstractRealDistribution { if (FastMath.abs(dev) > 40 * standardDeviation) { return dev < 0 ? 0.0d : 1.0d; } - return 0.5 * (1 + Erf.erf(dev / (standardDeviation * SQRT2))); + return 0.5 * Erf.erfc(-dev / (standardDeviation * SQRT2)); } /** {@inheritDoc}
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1257_03178c8b.diff
bugs-dot-jar_data_MATH-891_2b852d79
--- BugID: MATH-891 Summary: SpearmansCorrelation fails when using NaturalRanking together with NaNStrategy.REMOVED Description: |- As reported by Martin Rosellen on the users mailinglist: Using a NaturalRanking with a REMOVED NaNStrategy can result in an exception when NaN are contained in the input arrays. The current implementation just removes the NaN values where they occur, without taken care to remove the corresponding values in the other array. diff --git a/src/main/java/org/apache/commons/math3/stat/correlation/SpearmansCorrelation.java b/src/main/java/org/apache/commons/math3/stat/correlation/SpearmansCorrelation.java index 2cd4151..14c48c6 100644 --- a/src/main/java/org/apache/commons/math3/stat/correlation/SpearmansCorrelation.java +++ b/src/main/java/org/apache/commons/math3/stat/correlation/SpearmansCorrelation.java @@ -17,27 +17,32 @@ package org.apache.commons.math3.stat.correlation; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + import org.apache.commons.math3.exception.DimensionMismatchException; import org.apache.commons.math3.exception.MathIllegalArgumentException; import org.apache.commons.math3.exception.util.LocalizedFormats; import org.apache.commons.math3.linear.BlockRealMatrix; import org.apache.commons.math3.linear.RealMatrix; +import org.apache.commons.math3.stat.ranking.NaNStrategy; import org.apache.commons.math3.stat.ranking.NaturalRanking; import org.apache.commons.math3.stat.ranking.RankingAlgorithm; /** - * <p>Spearman's rank correlation. This implementation performs a rank + * Spearman's rank correlation. This implementation performs a rank * transformation on the input data and then computes {@link PearsonsCorrelation} - * on the ranked data.</p> - * - * <p>By default, ranks are computed using {@link NaturalRanking} with default + * on the ranked data. 
+ * <p> + * By default, ranks are computed using {@link NaturalRanking} with default * strategies for handling NaNs and ties in the data (NaNs maximal, ties averaged). - * The ranking algorithm can be set using a constructor argument.</p> + * The ranking algorithm can be set using a constructor argument. * * @since 2.0 * @version $Id$ */ - public class SpearmansCorrelation { /** Input data */ @@ -58,6 +63,9 @@ public class SpearmansCorrelation { /** * Create a SpearmansCorrelation with the given ranking algorithm. + * <p> + * From version 4.0 onwards this constructor will throw an exception + * if the provided {@link NaturalRanking} uses a {@link NaNStrategy#REMOVED} strategy. * * @param rankingAlgorithm ranking algorithm * @since 3.1 @@ -81,15 +89,17 @@ public class SpearmansCorrelation { /** * Create a SpearmansCorrelation with the given input data matrix * and ranking algorithm. + * <p> + * From version 4.0 onwards this constructor will throw an exception + * if the provided {@link NaturalRanking} uses a {@link NaNStrategy#REMOVED} strategy. 
* * @param dataMatrix matrix of data with columns representing * variables to correlate * @param rankingAlgorithm ranking algorithm */ public SpearmansCorrelation(final RealMatrix dataMatrix, final RankingAlgorithm rankingAlgorithm) { - this.data = dataMatrix.copy(); this.rankingAlgorithm = rankingAlgorithm; - rankTransform(data); + this.data = rankTransform(dataMatrix); rankCorrelation = new PearsonsCorrelation(data); } @@ -125,9 +135,8 @@ public class SpearmansCorrelation { * @param matrix matrix with columns representing variables to correlate * @return correlation matrix */ - public RealMatrix computeCorrelationMatrix(RealMatrix matrix) { - RealMatrix matrixCopy = matrix.copy(); - rankTransform(matrixCopy); + public RealMatrix computeCorrelationMatrix(final RealMatrix matrix) { + final RealMatrix matrixCopy = rankTransform(matrix); return new PearsonsCorrelation().computeCorrelationMatrix(matrixCopy); } @@ -139,7 +148,7 @@ public class SpearmansCorrelation { * @param matrix matrix with columns representing variables to correlate * @return correlation matrix */ - public RealMatrix computeCorrelationMatrix(double[][] matrix) { + public RealMatrix computeCorrelationMatrix(final double[][] matrix) { return computeCorrelationMatrix(new BlockRealMatrix(matrix)); } @@ -159,20 +168,93 @@ public class SpearmansCorrelation { throw new MathIllegalArgumentException(LocalizedFormats.INSUFFICIENT_DIMENSION, xArray.length, 2); } else { - return new PearsonsCorrelation().correlation(rankingAlgorithm.rank(xArray), - rankingAlgorithm.rank(yArray)); + double[] x = xArray; + double[] y = yArray; + if (rankingAlgorithm instanceof NaturalRanking && + NaNStrategy.REMOVED == ((NaturalRanking) rankingAlgorithm).getNanStrategy()) { + final Set<Integer> nanPositions = new HashSet<Integer>(); + + nanPositions.addAll(getNaNPositions(xArray)); + nanPositions.addAll(getNaNPositions(yArray)); + + x = removeValues(xArray, nanPositions); + y = removeValues(yArray, nanPositions); + } + return 
new PearsonsCorrelation().correlation(rankingAlgorithm.rank(x), rankingAlgorithm.rank(y)); } } /** * Applies rank transform to each of the columns of <code>matrix</code> - * using the current <code>rankingAlgorithm</code> + * using the current <code>rankingAlgorithm</code>. * * @param matrix matrix to transform + * @return a rank-transformed matrix */ - private void rankTransform(RealMatrix matrix) { - for (int i = 0; i < matrix.getColumnDimension(); i++) { - matrix.setColumn(i, rankingAlgorithm.rank(matrix.getColumn(i))); + private RealMatrix rankTransform(final RealMatrix matrix) { + RealMatrix transformed = null; + + if (rankingAlgorithm instanceof NaturalRanking && + ((NaturalRanking) rankingAlgorithm).getNanStrategy() == NaNStrategy.REMOVED) { + final Set<Integer> nanPositions = new HashSet<Integer>(); + for (int i = 0; i < matrix.getColumnDimension(); i++) { + nanPositions.addAll(getNaNPositions(matrix.getColumn(i))); + } + + // if we have found NaN values, we have to update the matrix size + if (!nanPositions.isEmpty()) { + transformed = new BlockRealMatrix(matrix.getRowDimension() - nanPositions.size(), + matrix.getColumnDimension()); + for (int i = 0; i < transformed.getColumnDimension(); i++) { + transformed.setColumn(i, removeValues(matrix.getColumn(i), nanPositions)); + } + } + } + + if (transformed == null) { + transformed = matrix.copy(); + } + + for (int i = 0; i < transformed.getColumnDimension(); i++) { + transformed.setColumn(i, rankingAlgorithm.rank(transformed.getColumn(i))); + } + + return transformed; + } + + /** + * Returns a list containing the indices of NaN values in the input array. 
+ * + * @param input the input array + * @return a list of NaN positions in the input array + */ + private List<Integer> getNaNPositions(final double[] input) { + final List<Integer> positions = new ArrayList<Integer>(); + for (int i = 0; i < input.length; i++) { + if (Double.isNaN(input[i])) { + positions.add(i); + } + } + return positions; + } + + /** + * Removes all values from the input array at the specified indices. + * + * @param input the input array + * @param indices a set containing the indices to be removed + * @return the input array without the values at the specified indices + */ + private double[] removeValues(final double[] input, final Set<Integer> indices) { + if (indices.isEmpty()) { + return input; + } + final double[] result = new double[input.length - indices.size()]; + for (int i = 0, j = 0; i < input.length; i++) { + if (!indices.contains(i)) { + result[j++] = input[i]; + } } + return result; } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-891_2b852d79.diff
bugs-dot-jar_data_MATH-1232_8f35fcb8
--- BugID: MATH-1232 Summary: UknownParameterException message prints {0} instead of parameter name Description: |- The constructor for UnknownParameterException stores the parameter name internally but does not forward it to the base class which creates the error message. diff --git a/src/main/java/org/apache/commons/math4/ode/UnknownParameterException.java b/src/main/java/org/apache/commons/math4/ode/UnknownParameterException.java index 75c6210..bbbaa4a 100644 --- a/src/main/java/org/apache/commons/math4/ode/UnknownParameterException.java +++ b/src/main/java/org/apache/commons/math4/ode/UnknownParameterException.java @@ -38,7 +38,7 @@ public class UnknownParameterException extends MathIllegalArgumentException { * @param name parameter name. */ public UnknownParameterException(final String name) { - super(LocalizedFormats.UNKNOWN_PARAMETER); + super(LocalizedFormats.UNKNOWN_PARAMETER, name); this.name = name; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1232_8f35fcb8.diff
bugs-dot-jar_data_MATH-1067_aff82362
--- BugID: MATH-1067 Summary: Stack overflow in Beta.regularizedBeta Description: "In org.apache.commons.math3.special.Beta.regularizedBeta(double,double,double,double,int), the case\n\n } else if (x > (a + 1.0) / (a + b + 2.0)) {\n ret = 1.0 - regularizedBeta(1.0 - x, b, a, epsilon, maxIterations);\n} \n\nis prone to infinite recursion: If x is approximately the tested value, then 1-x is approximately the tested value in the recursion. Thus, due to loss of precision after the subtraction, this condition can be true for the recursive call as well.\n\nExample:\ndouble x= Double.longBitsToDouble(4597303555101269224L);\ndouble a= Double.longBitsToDouble(4634227472812299606L);\ndouble b = Double.longBitsToDouble(4642050131540049920L);\nSystem.out.println(x > (a + 1.0) / (a + b + 2.0));\nSystem.out.println(1-x>(b + 1.0) / (b + a + 2.0));\nSystem.out.println(1-(1-x)>(a + 1.0) / (a + b + 2.0));\n\nPossible solution: change the condition to\nx > (a + 1.0) / (a + b + 2.0) && 1-x<=(b + 1.0) / (b + a + 2.0)" diff --git a/src/main/java/org/apache/commons/math3/special/Beta.java b/src/main/java/org/apache/commons/math3/special/Beta.java index c6091b4..04696f9 100644 --- a/src/main/java/org/apache/commons/math3/special/Beta.java +++ b/src/main/java/org/apache/commons/math3/special/Beta.java @@ -189,11 +189,12 @@ public class Beta { Double.isNaN(b) || x < 0 || x > 1 || - a <= 0.0 || - b <= 0.0) { + a <= 0 || + b <= 0) { ret = Double.NaN; - } else if (x > (a + 1.0) / (a + b + 2.0)) { - ret = 1.0 - regularizedBeta(1.0 - x, b, a, epsilon, maxIterations); + } else if (x > (a + 1) / (2 + b + a) && + 1 - x <= (b + 1) / (2 + b + a)) { + ret = 1 - regularizedBeta(1 - x, b, a, epsilon, maxIterations); } else { ContinuedFraction fraction = new ContinuedFraction() {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1067_aff82362.diff
bugs-dot-jar_data_MATH-1005_91d280b7
--- BugID: MATH-1005 Summary: ArrayIndexOutOfBoundsException in MathArrays.linearCombination Description: |- When MathArrays.linearCombination is passed arguments with length 1, it throws an ArrayOutOfBoundsException. This is caused by this line: double prodHighNext = prodHigh[1]; linearCombination should check the length of the arguments and fall back to simple multiplication if length == 1. diff --git a/src/main/java/org/apache/commons/math3/util/MathArrays.java b/src/main/java/org/apache/commons/math3/util/MathArrays.java index 1aa56cc..8f83d96 100644 --- a/src/main/java/org/apache/commons/math3/util/MathArrays.java +++ b/src/main/java/org/apache/commons/math3/util/MathArrays.java @@ -818,6 +818,11 @@ public class MathArrays { throw new DimensionMismatchException(len, b.length); } + if (len == 1) { + // Revert to scalar multiplication. + return a[0] * b[0]; + } + final double[] prodHigh = new double[len]; double prodLowSum = 0;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1005_91d280b7.diff
bugs-dot-jar_data_MATH-789_621bbb8f
--- BugID: MATH-789 Summary: Correlated random vector generator fails (silently) when faced with zero rows in covariance matrix Description: "The following three matrices (which are basically permutations of each other) produce different results when sampling a multi-variate Gaussian with the help of CorrelatedRandomVectorGenerator (sample covariances calculated in R, based on 10,000 samples):\n\nArray2DRowRealMatrix{\n{0.0,0.0,0.0,0.0,0.0},\n{0.0,0.013445532,0.01039469,0.009881156,0.010499559},\n{0.0,0.01039469,0.023006616,0.008196856,0.010732709},\n{0.0,0.009881156,0.008196856,0.019023866,0.009210099},\n{0.0,0.010499559,0.010732709,0.009210099,0.019107243}}\n\n> cov(data1)\n V1 V2 V3 V4 V5\nV1 0 0.000000000 0.00000000 0.000000000 0.000000000\nV2 0 0.013383931 0.01034401 0.009913271 0.010506733\nV3 0 0.010344006 0.02309479 0.008374730 0.010759306\nV4 0 0.009913271 0.00837473 0.019005488 0.009187287\nV5 0 0.010506733 0.01075931 0.009187287 0.019021483\n\nArray2DRowRealMatrix{\n{0.013445532,0.01039469,0.0,0.009881156,0.010499559},\n{0.01039469,0.023006616,0.0,0.008196856,0.010732709},\n{0.0,0.0,0.0,0.0,0.0},\n{0.009881156,0.008196856,0.0,0.019023866,0.009210099},\n{0.010499559,0.010732709,0.0,0.009210099,0.019107243}}\n\n> cov(data2)\n V1 V2 V3 V4 V5\nV1 0.006922905 0.010507692 0 0.005817399 0.010330529\nV2 0.010507692 0.023428918 0 0.008273152 0.010735568\nV3 0.000000000 0.000000000 0 0.000000000 0.000000000\nV4 0.005817399 0.008273152 0 0.004929843 0.009048759\nV5 0.010330529 0.010735568 0 0.009048759 0.018683544 \n\nArray2DRowRealMatrix{\n{0.013445532,0.01039469,0.009881156,0.010499559},\n{0.01039469,0.023006616,0.008196856,0.010732709},\n{0.009881156,0.008196856,0.019023866,0.009210099},\n{0.010499559,0.010732709,0.009210099,0.019107243}}\n\n> cov(data3)\n V1 V2 V3 V4\nV1 0.013445047 0.010478862 0.009955904 0.010529542\nV2 0.010478862 0.022910522 0.008610113 0.011046353\nV3 0.009955904 0.008610113 0.019250975 0.009464442\nV4 0.010529542 0.011046353 0.009464442 
0.019260317\n\n\nI've traced this back to the RectangularCholeskyDecomposition, which does not seem to handle the second matrix very well (decompositions in the same order as the matrices above):\n\nCorrelatedRandomVectorGenerator.getRootMatrix() = \nArray2DRowRealMatrix{{0.0,0.0,0.0,0.0,0.0},{0.0759577418122063,0.0876125188474239,0.0,0.0,0.0},{0.07764443622513505,0.05132821221460752,0.11976381821791235,0.0,0.0},{0.06662930527909404,0.05501661744114585,0.0016662506519307997,0.10749324207653632,0.0},{0.13822895138139477,0.0,0.0,0.0,0.0}}\nCorrelatedRandomVectorGenerator.getRank() = 5\n\nCorrelatedRandomVectorGenerator.getRootMatrix() = \nArray2DRowRealMatrix{{0.0759577418122063,0.034512751379448724,0.0},{0.07764443622513505,0.13029949164628746,0.0},{0.0,0.0,0.0},{0.06662930527909404,0.023203936694855674,0.0},{0.13822895138139477,0.0,0.0}}\nCorrelatedRandomVectorGenerator.getRank() = 3\n\nCorrelatedRandomVectorGenerator.getRootMatrix() = \nArray2DRowRealMatrix{{0.0759577418122063,0.034512751379448724,0.033913748226348225,0.07303890149947785},{0.07764443622513505,0.13029949164628746,0.0,0.0},{0.06662930527909404,0.023203936694855674,0.11851573313229945,0.0},{0.13822895138139477,0.0,0.0,0.0}}\nCorrelatedRandomVectorGenerator.getRank() = 4\n\nClearly, the rank of each of these matrices should be 4. The first matrix does not lead to incorrect results, but the second one does. Unfortunately, I don't know enough about the Cholesky decomposition to find the flaw in the implementation, and I could not find documentation for the \"rectangular\" variant (also not at the links provided in the javadoc)." 
diff --git a/src/main/java/org/apache/commons/math3/linear/RectangularCholeskyDecomposition.java b/src/main/java/org/apache/commons/math3/linear/RectangularCholeskyDecomposition.java index 38584d4..aba7b98 100644 --- a/src/main/java/org/apache/commons/math3/linear/RectangularCholeskyDecomposition.java +++ b/src/main/java/org/apache/commons/math3/linear/RectangularCholeskyDecomposition.java @@ -62,11 +62,10 @@ public class RectangularCholeskyDecomposition { public RectangularCholeskyDecomposition(RealMatrix matrix, double small) throws NonPositiveDefiniteMatrixException { - int order = matrix.getRowDimension(); - double[][] c = matrix.getData(); - double[][] b = new double[order][order]; + final int order = matrix.getRowDimension(); + final double[][] c = matrix.getData(); + final double[][] b = new double[order][order]; - int[] swap = new int[order]; int[] index = new int[order]; for (int i = 0; i < order; ++i) { index[i] = i; @@ -76,21 +75,24 @@ public class RectangularCholeskyDecomposition { for (boolean loop = true; loop;) { // find maximal diagonal element - swap[r] = r; + int swapR = r; for (int i = r + 1; i < order; ++i) { int ii = index[i]; - int isi = index[swap[i]]; - if (c[ii][ii] > c[isi][isi]) { - swap[r] = i; + int isr = index[swapR]; + if (c[ii][ii] > c[isr][isr]) { + swapR = i; } } // swap elements - if (swap[r] != r) { - int tmp = index[r]; - index[r] = index[swap[r]]; - index[swap[r]] = tmp; + if (swapR != r) { + final int tmpIndex = index[r]; + index[r] = index[swapR]; + index[swapR] = tmpIndex; + final double[] tmpRow = b[r]; + b[r] = b[swapR]; + b[swapR] = tmpRow; } // check diagonal element @@ -118,17 +120,18 @@ public class RectangularCholeskyDecomposition { } else { // transform the matrix - double sqrt = FastMath.sqrt(c[ir][ir]); + final double sqrt = FastMath.sqrt(c[ir][ir]); b[r][r] = sqrt; - double inverse = 1 / sqrt; + final double inverse = 1 / sqrt; + final double inverse2 = 1 / c[ir][ir]; for (int i = r + 1; i < order; ++i) { - int ii 
= index[i]; - double e = inverse * c[ii][ir]; + final int ii = index[i]; + final double e = inverse * c[ii][ir]; b[i][r] = e; - c[ii][ii] -= e * e; + c[ii][ii] -= c[ii][ir] * c[ii][ir] * inverse2; for (int j = r + 1; j < i; ++j) { - int ij = index[j]; - double f = c[ii][ij] - e * b[j][r]; + final int ij = index[j]; + final double f = c[ii][ij] - e * b[j][r]; c[ii][ij] = f; c[ij][ii] = f; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-789_621bbb8f.diff
bugs-dot-jar_data_MATH-640_98556fed
--- BugID: MATH-640 Summary: AbstractRandomGenerator nextInt() and nextLong() default implementations generate only positive values Description: The javadoc for these methods (and what is specified in the RandomGenerator interface) says that all int / long values should be in the range of these methods. The default implementations provided in this class do not generate negative values. diff --git a/src/main/java/org/apache/commons/math/random/AbstractRandomGenerator.java b/src/main/java/org/apache/commons/math/random/AbstractRandomGenerator.java index 8576847..10c8f38 100644 --- a/src/main/java/org/apache/commons/math/random/AbstractRandomGenerator.java +++ b/src/main/java/org/apache/commons/math/random/AbstractRandomGenerator.java @@ -134,7 +134,7 @@ public abstract class AbstractRandomGenerator implements RandomGenerator { * value from this random number generator's sequence */ public int nextInt() { - return (int) (nextDouble() * Integer.MAX_VALUE); + return (int) ((2d * nextDouble() - 1d) * Integer.MAX_VALUE); } /** @@ -176,7 +176,7 @@ public abstract class AbstractRandomGenerator implements RandomGenerator { *value from this random number generator's sequence */ public long nextLong() { - return (long) (nextDouble() * Long.MAX_VALUE); + return (long) ((2d * nextDouble() - 1d) * Long.MAX_VALUE); } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-640_98556fed.diff
bugs-dot-jar_data_MATH-1141_2f2a2dda
--- BugID: MATH-1141 Summary: UniformIntegerDistribution should make constructer a exclusive bound or made parameter check more relax Description: "UniformIntegerDistribution constructer public UniformIntegerDistribution(RandomGenerator rng,\n int lower,\n int upper) \nthe lower and the upper all inclusive. but the parameter check made a if (lower >= upper) {\n throw new NumberIsTooLargeException(\n LocalizedFormats.LOWER_BOUND_NOT_BELOW_UPPER_BOUND,\n \ lower, upper, false);\ncheck, i think it is too strict\nto construct UniformIntegerDistribution (0,0) \nthis should make it possible" diff --git a/src/main/java/org/apache/commons/math3/distribution/UniformIntegerDistribution.java b/src/main/java/org/apache/commons/math3/distribution/UniformIntegerDistribution.java index 3c26fdf..c3a55b0 100644 --- a/src/main/java/org/apache/commons/math3/distribution/UniformIntegerDistribution.java +++ b/src/main/java/org/apache/commons/math3/distribution/UniformIntegerDistribution.java @@ -59,7 +59,7 @@ public class UniformIntegerDistribution extends AbstractIntegerDistribution { * @param rng Random number generator. * @param lower Lower bound (inclusive) of this distribution. * @param upper Upper bound (inclusive) of this distribution. - * @throws NumberIsTooLargeException if {@code lower >= upper}. + * @throws NumberIsTooLargeException if {@code lower > upper}. * @since 3.1 */ public UniformIntegerDistribution(RandomGenerator rng, @@ -68,10 +68,10 @@ public class UniformIntegerDistribution extends AbstractIntegerDistribution { throws NumberIsTooLargeException { super(rng); - if (lower >= upper) { + if (lower > upper) { throw new NumberIsTooLargeException( LocalizedFormats.LOWER_BOUND_NOT_BELOW_UPPER_BOUND, - lower, upper, false); + lower, upper, true); } this.lower = lower; this.upper = upper;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1141_2f2a2dda.diff
bugs-dot-jar_data_MATH-904_6844aba9
--- BugID: MATH-904 Summary: 'FastMath.pow deviates from Math.pow for negative, finite base values with an exponent 2^52 < y < 2^53 ' Description: |- As reported by Jeff Hain: pow(double,double): Math.pow(-1.0,5.000000000000001E15) = -1.0 FastMath.pow(-1.0,5.000000000000001E15) = 1.0 ===> This is due to considering that power is an even integer if it is >= 2^52, while you need to test that it is >= 2^53 for it. ===> replace "if (y >= TWO_POWER_52 || y <= -TWO_POWER_52)" with "if (y >= 2*TWO_POWER_52 || y <= -2*TWO_POWER_52)" and that solves it. diff --git a/src/main/java/org/apache/commons/math3/util/FastMath.java b/src/main/java/org/apache/commons/math3/util/FastMath.java index fd1ce2d..9dc84c6 100644 --- a/src/main/java/org/apache/commons/math3/util/FastMath.java +++ b/src/main/java/org/apache/commons/math3/util/FastMath.java @@ -309,6 +309,8 @@ public class FastMath { /** 2^52 - double numbers this large must be integral (no fraction) or NaN or Infinite */ private static final double TWO_POWER_52 = 4503599627370496.0; + /** 2^53 - double numbers this large must be even. */ + private static final double TWO_POWER_53 = 2 * TWO_POWER_52; /** Constant: {@value}. */ private static final double F_1_3 = 1d / 3d; @@ -1537,7 +1539,7 @@ public class FastMath { /* Handle special case x<0 */ if (x < 0) { // y is an even integer in this case - if (y >= TWO_POWER_52 || y <= -TWO_POWER_52) { + if (y >= TWO_POWER_53 || y <= -TWO_POWER_53) { return pow(-x, y); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-904_6844aba9.diff
bugs-dot-jar_data_MATH-778_5b9302d5
--- BugID: MATH-778 Summary: Dfp Dfp.multiply(int x) does not comply with the general contract FieldElement.multiply(int n) Description: In class {{org.apache.commons.math3.Dfp}}, the method {{multiply(int n)}} is limited to {{0 <= n <= 9999}}. This is not consistent with the general contract of {{FieldElement.multiply(int n)}}, where there should be no limitation on the values of {{n}}. diff --git a/src/main/java/org/apache/commons/math3/dfp/Dfp.java b/src/main/java/org/apache/commons/math3/dfp/Dfp.java index d278a31..3a91982 100644 --- a/src/main/java/org/apache/commons/math3/dfp/Dfp.java +++ b/src/main/java/org/apache/commons/math3/dfp/Dfp.java @@ -1595,12 +1595,24 @@ public class Dfp implements FieldElement<Dfp> { } - /** Multiply this by a single digit 0&lt;=x&lt;radix. - * There are speed advantages in this special case + /** Multiply this by a single digit x. * @param x multiplicand * @return product of this and x */ public Dfp multiply(final int x) { + if (x >= 0 && x < RADIX) { + return multiplyFast(x); + } else { + return multiply(newInstance(x)); + } + } + + /** Multiply this by a single digit 0&lt;=x&lt;radix. + * There are speed advantages in this special case. + * @param x multiplicand + * @return product of this and x + */ + private Dfp multiplyFast(final int x) { Dfp result = newInstance(this); /* handle special cases */
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-778_5b9302d5.diff
bugs-dot-jar_data_MATH-1088_63d88c74
--- BugID: MATH-1088 Summary: MultidimensionalCounter does not throw "NoSuchElementException" Description: 'The iterator should throw when "next()" is called even though "hasNext()" would return false. ' diff --git a/src/main/java/org/apache/commons/math3/util/MultidimensionalCounter.java b/src/main/java/org/apache/commons/math3/util/MultidimensionalCounter.java index 0553c6b..3c87fba 100644 --- a/src/main/java/org/apache/commons/math3/util/MultidimensionalCounter.java +++ b/src/main/java/org/apache/commons/math3/util/MultidimensionalCounter.java @@ -17,6 +17,7 @@ package org.apache.commons.math3.util; +import java.util.NoSuchElementException; import org.apache.commons.math3.exception.DimensionMismatchException; import org.apache.commons.math3.exception.NotStrictlyPositiveException; import org.apache.commons.math3.exception.OutOfRangeException; @@ -77,6 +78,10 @@ public class MultidimensionalCounter implements Iterable<Integer> { * Unidimensional counter. */ private int count = -1; + /** + * Maximum value for {@link #count}. + */ + private final int maxCount = totalSize - 1; /** * Create an iterator @@ -90,19 +95,20 @@ public class MultidimensionalCounter implements Iterable<Integer> { * {@inheritDoc} */ public boolean hasNext() { - for (int i = 0; i < dimension; i++) { - if (counter[i] != size[i] - 1) { - return true; - } - } - return false; + return count < maxCount; } /** * @return the unidimensional count after the counter has been * incremented by {@code 1}. + * @throws NoSuchElementException if {@link #hasNext()} would have + * returned {@code false}. */ public Integer next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + for (int i = last; i >= 0; i--) { if (counter[i] == size[i] - 1) { counter[i] = 0;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1088_63d88c74.diff
bugs-dot-jar_data_MATH-358_061f5017
--- BugID: MATH-358 Summary: ODE integrator goes past specified end of integration range Description: "End of integration range in ODE solving is handled as an event.\nIn some cases, numerical accuracy in events detection leads to error in events location.\nThe following test case shows the end event is not handled properly and an integration that should cover a 60s range in fact covers a 160s range, more than twice the specified range.\n{code}\n public void testMissedEvent() throws IntegratorException, DerivativeException {\n final double t0 = 1878250320.0000029;\n final double t = 1878250379.9999986;\n \ FirstOrderDifferentialEquations ode = new FirstOrderDifferentialEquations() {\n \n public int getDimension() {\n return 1;\n }\n \n public void computeDerivatives(double t, double[] y, double[] yDot)\n throws DerivativeException {\n yDot[0] = y[0] * 1.0e-6;\n }\n };\n\n DormandPrince853Integrator integrator = new DormandPrince853Integrator(0.0, 100.0,\n 1.0e-10, 1.0e-10);\n\n double[] y = { 1.0 };\n integrator.setInitialStepSize(60.0);\n \ double finalT = integrator.integrate(ode, t0, y, t, y);\n Assert.assertEquals(t, finalT, 1.0e-6);\n }\n\n{code}" diff --git a/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsBashforthIntegrator.java b/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsBashforthIntegrator.java index 935bb8b..6ee9bd5 100644 --- a/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsBashforthIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsBashforthIntegrator.java @@ -271,8 +271,16 @@ public class AdamsBashforthIntegrator extends AdamsIntegrator { if (manager.evaluateStep(interpolatorTmp)) { final double dt = manager.getEventTime() - stepStart; if (Math.abs(dt) <= Math.ulp(stepStart)) { - // rejecting the step would lead to a too small next step, we accept it - loop = false; + // we cannot simply truncate the step, reject the current computation + // and let the loop compute another state with the 
truncated step. + // it is so small (much probably exactly 0 due to limited accuracy) + // that the code above would fail handling it. + // So we set up an artificial 0 size step by copying states + interpolator.storeTime(stepStart); + System.arraycopy(y, 0, yTmp, 0, y0.length); + hNew = 0; + stepSize = 0; + loop = false; } else { // reject the step to match exactly the next switch time hNew = dt; diff --git a/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsMoultonIntegrator.java b/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsMoultonIntegrator.java index 27ade7b..e0e2f0d 100644 --- a/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsMoultonIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/nonstiff/AdamsMoultonIntegrator.java @@ -289,8 +289,16 @@ public class AdamsMoultonIntegrator extends AdamsIntegrator { if (manager.evaluateStep(interpolatorTmp)) { final double dt = manager.getEventTime() - stepStart; if (Math.abs(dt) <= Math.ulp(stepStart)) { - // rejecting the step would lead to a too small next step, we accept it - loop = false; + // we cannot simply truncate the step, reject the current computation + // and let the loop compute another state with the truncated step. + // it is so small (much probably exactly 0 due to limited accuracy) + // that the code above would fail handling it. 
+ // So we set up an artificial 0 size step by copying states + interpolator.storeTime(stepStart); + System.arraycopy(y, 0, yTmp, 0, y0.length); + hNew = 0; + stepSize = 0; + loop = false; } else { // reject the step to match exactly the next switch time hNew = dt; diff --git a/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java b/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java index 34b3dc1..e03be9e 100644 --- a/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java @@ -292,8 +292,16 @@ public abstract class EmbeddedRungeKuttaIntegrator if (manager.evaluateStep(interpolator)) { final double dt = manager.getEventTime() - stepStart; if (Math.abs(dt) <= Math.ulp(stepStart)) { - // rejecting the step would lead to a too small next step, we accept it - loop = false; + // we cannot simply truncate the step, reject the current computation + // and let the loop compute another state with the truncated step. + // it is so small (much probably exactly 0 due to limited accuracy) + // that the code above would fail handling it. 
+ // So we set up an artificial 0 size step by copying states + interpolator.storeTime(stepStart); + System.arraycopy(y, 0, yTmp, 0, y0.length); + hNew = 0; + stepSize = 0; + loop = false; } else { // reject the step to match exactly the next switch time hNew = dt; diff --git a/src/main/java/org/apache/commons/math/ode/nonstiff/RungeKuttaIntegrator.java b/src/main/java/org/apache/commons/math/ode/nonstiff/RungeKuttaIntegrator.java index 3227b98..b61b0b1 100644 --- a/src/main/java/org/apache/commons/math/ode/nonstiff/RungeKuttaIntegrator.java +++ b/src/main/java/org/apache/commons/math/ode/nonstiff/RungeKuttaIntegrator.java @@ -172,8 +172,15 @@ public abstract class RungeKuttaIntegrator extends AbstractIntegrator { if (manager.evaluateStep(interpolator)) { final double dt = manager.getEventTime() - stepStart; if (Math.abs(dt) <= Math.ulp(stepStart)) { - // rejecting the step would lead to a too small next step, we accept it - loop = false; + // we cannot simply truncate the step, reject the current computation + // and let the loop compute another state with the truncated step. + // it is so small (much probably exactly 0 due to limited accuracy) + // that the code above would fail handling it. + // So we set up an artificial 0 size step by copying states + interpolator.storeTime(stepStart); + System.arraycopy(y, 0, yTmp, 0, y0.length); + stepSize = 0; + loop = false; } else { // reject the step to match exactly the next switch time stepSize = dt;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-358_061f5017.diff
bugs-dot-jar_data_MATH-555_328513f3
--- BugID: MATH-555 Summary: MathUtils round method should propagate rather than wrap Runitme exceptions Description: MathUtils.round(double, int, int) can generate IllegalArgumentException or ArithmeticException. Instead of wrapping these exceptions in MathRuntimeException, the conditions under which these exceptions can be thrown should be documented and the exceptions should be propagated directly to the caller. diff --git a/src/main/java/org/apache/commons/math/util/MathUtils.java b/src/main/java/org/apache/commons/math/util/MathUtils.java index 85d9a04..52ebeb6 100644 --- a/src/main/java/org/apache/commons/math/util/MathUtils.java +++ b/src/main/java/org/apache/commons/math/util/MathUtils.java @@ -33,7 +33,6 @@ import org.apache.commons.math.exception.NullArgumentException; import org.apache.commons.math.exception.NotPositiveException; import org.apache.commons.math.exception.MathArithmeticException; import org.apache.commons.math.exception.MathIllegalArgumentException; -import org.apache.commons.math.exception.MathRuntimeException; import org.apache.commons.math.exception.NumberIsTooLargeException; import org.apache.commons.math.exception.NotFiniteNumberException; @@ -1333,15 +1332,22 @@ public final class MathUtils { } /** - * Round the given value to the specified number of decimal places. The + * <p>Round the given value to the specified number of decimal places. The * value is rounded using the given method which is any method defined in - * {@link BigDecimal}. + * {@link BigDecimal}.</p> + * + * <p>If {@code x} is infinite or NaN, then the value of {@code x} is + * returned unchanged, regardless of the other parameters.</p> * * @param x the value to round. * @param scale the number of digits to the right of the decimal point. * @param roundingMethod the rounding method as defined in * {@link BigDecimal}. * @return the rounded value. 
+ * @throws ArithmeticException if roundingMethod==ROUND_UNNECESSARY and the + * specified scaling operation would require rounding. + * @throws IllegalArgumentException if roundingMethod does not represent a + * valid rounding mode. * @since 1.1 */ public static double round(double x, int scale, int roundingMethod) { @@ -1356,8 +1362,6 @@ public final class MathUtils { } else { return Double.NaN; } - } catch (RuntimeException ex) { - throw new MathRuntimeException(ex); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-555_328513f3.diff
bugs-dot-jar_data_MATH-1136_cc4ab51e
--- BugID: MATH-1136 Summary: BinomialDistribution deals with degenerate cases incorrectly Description: |- The following calculation returns false results: {{new BinomialDistribution(0, 0.01).logProbability(0)}} It evaluates to Double.NaN when it should be 0 (cf., for example, "dbinom(0, 0, 0.01, log=T)" in R). I attach a patch dealing with the problem. The patch also adds a test for this bug. diff --git a/src/main/java/org/apache/commons/math3/distribution/BinomialDistribution.java b/src/main/java/org/apache/commons/math3/distribution/BinomialDistribution.java index 15747e1..9a882a0 100644 --- a/src/main/java/org/apache/commons/math3/distribution/BinomialDistribution.java +++ b/src/main/java/org/apache/commons/math3/distribution/BinomialDistribution.java @@ -106,6 +106,9 @@ public class BinomialDistribution extends AbstractIntegerDistribution { /** {@inheritDoc} **/ @Override public double logProbability(int x) { + if (numberOfTrials == 0) { + return (x == 0) ? 0. : Double.NEGATIVE_INFINITY; + } double ret; if (x < 0 || x > numberOfTrials) { ret = Double.NEGATIVE_INFINITY;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1136_cc4ab51e.diff
bugs-dot-jar_data_MATH-996_86545dab
--- BugID: MATH-996 Summary: Fraction specified with maxDenominator and a value very close to a simple fraction should not throw an overflow exception Description: |- An overflow exception is thrown when a Fraction is initialized with a maxDenominator from a double that is very close to a simple fraction. For example: double d = 0.5000000001; Fraction f = new Fraction(d, 10); Patch with unit test on way. diff --git a/src/main/java/org/apache/commons/math3/fraction/BigFraction.java b/src/main/java/org/apache/commons/math3/fraction/BigFraction.java index 5b0a6b4..4ca33eb 100644 --- a/src/main/java/org/apache/commons/math3/fraction/BigFraction.java +++ b/src/main/java/org/apache/commons/math3/fraction/BigFraction.java @@ -301,6 +301,11 @@ public class BigFraction p2 = (a1 * p1) + p0; q2 = (a1 * q1) + q0; if ((p2 > overflow) || (q2 > overflow)) { + // in maxDenominator mode, if the last fraction was very close to the actual value + // q2 may overflow in the next iteration; in this case return the last one. + if (epsilon == 0.0 && FastMath.abs(q1) < maxDenominator) { + break; + } throw new FractionConversionException(value, p2, q2); } diff --git a/src/main/java/org/apache/commons/math3/fraction/Fraction.java b/src/main/java/org/apache/commons/math3/fraction/Fraction.java index 08d3b95..002dae9 100644 --- a/src/main/java/org/apache/commons/math3/fraction/Fraction.java +++ b/src/main/java/org/apache/commons/math3/fraction/Fraction.java @@ -83,6 +83,9 @@ public class Fraction /** Serializable version identifier */ private static final long serialVersionUID = 3698073679419233275L; + /** The default epsilon used for convergence. */ + private static final double DEFAULT_EPSILON = 1e-5; + /** The denominator. */ private final int denominator; @@ -96,7 +99,7 @@ public class Fraction * converge. 
*/ public Fraction(double value) throws FractionConversionException { - this(value, 1.0e-5, 100); + this(value, DEFAULT_EPSILON, 100); } /** @@ -182,8 +185,7 @@ public class Fraction throw new FractionConversionException(value, a0, 1l); } - // check for (almost) integer arguments, which should not go - // to iterations. + // check for (almost) integer arguments, which should not go to iterations. if (FastMath.abs(a0 - value) < epsilon) { this.numerator = (int) a0; this.denominator = 1; @@ -206,7 +208,13 @@ public class Fraction long a1 = (long)FastMath.floor(r1); p2 = (a1 * p1) + p0; q2 = (a1 * q1) + q0; + if ((FastMath.abs(p2) > overflow) || (FastMath.abs(q2) > overflow)) { + // in maxDenominator mode, if the last fraction was very close to the actual value + // q2 may overflow in the next iteration; in this case return the last one. + if (epsilon == 0.0 && FastMath.abs(q1) < maxDenominator) { + break; + } throw new FractionConversionException(value, p2, q2); }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-996_86545dab.diff
bugs-dot-jar_data_MATH-1045_a4ffd393
--- BugID: MATH-1045 Summary: EigenDecomposition.Solver should consider tiny values 0 for purposes of determining singularity Description: |- EigenDecomposition.Solver tests for singularity by comparing eigenvalues to 0 for exact equality. Elsewhere in the class and in the code, of course, very small values are considered 0. This causes the solver to consider some singular matrices as non-singular. The patch here includes a test as well showing the behavior -- the matrix is clearly singular but isn't considered as such since one eigenvalue are ~1e-14 rather than exactly 0. (What I am not sure of is whether we should really be evaluating the *norm* of the imaginary eigenvalues rather than real/imag components separately. But the javadoc says the solver only supports real eigenvalues anyhow, so it's kind of moot since imag=0 for all eigenvalues.) diff --git a/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java b/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java index 4652f61..b5e93ce 100644 --- a/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java +++ b/src/main/java/org/apache/commons/math3/linear/EigenDecomposition.java @@ -513,9 +513,16 @@ public class EigenDecomposition { * @return true if the decomposed matrix is non-singular. */ public boolean isNonSingular() { + // The eigenvalues are sorted by size, descending + double largestEigenvalueNorm = eigenvalueNorm(0); + // Corner case: zero matrix, all exactly 0 eigenvalues + if (largestEigenvalueNorm == 0.0) { + return false; + } for (int i = 0; i < realEigenvalues.length; ++i) { - if (realEigenvalues[i] == 0 && - imagEigenvalues[i] == 0) { + // Looking for eigenvalues that are 0, where we consider anything much much smaller + // than the largest eigenvalue to be effectively 0. 
+ if (Precision.equals(eigenvalueNorm(i) / largestEigenvalueNorm, 0, EPSILON)) { return false; } } @@ -523,6 +530,16 @@ public class EigenDecomposition { } /** + * @param i which eigenvalue to find the norm of + * @return the norm of ith (complex) eigenvalue. + */ + private double eigenvalueNorm(int i) { + final double re = realEigenvalues[i]; + final double im = imagEigenvalues[i]; + return FastMath.sqrt(re * re + im * im); + } + + /** * Get the inverse of the decomposed matrix. * * @return the inverse matrix.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1045_a4ffd393.diff
bugs-dot-jar_data_MATH-554_fbbb96eb
--- BugID: MATH-554 Summary: Vector3D.crossProduct is sensitive to numerical cancellation Description: |- Cross product implementation uses the naive formulas (y1 z2 - y2 z1, ...). These formulas fail when vectors are almost colinear, like in the following example: {code} Vector3D v1 = new Vector3D(9070467121.0, 4535233560.0, 1); Vector3D v2 = new Vector3D(9070467123.0, 4535233561.0, 1); System.out.println(Vector3D.crossProduct(v1, v2)); {code} The previous code displays { -1, 2, 0 } instead of the correct answer { -1, 2, 1 } diff --git a/src/main/java/org/apache/commons/math/geometry/Vector3D.java b/src/main/java/org/apache/commons/math/geometry/Vector3D.java index 0a4adb8..2d915e5 100644 --- a/src/main/java/org/apache/commons/math/geometry/Vector3D.java +++ b/src/main/java/org/apache/commons/math/geometry/Vector3D.java @@ -454,10 +454,41 @@ public class Vector3D implements Serializable { * @param v2 second vector * @return the cross product v1 ^ v2 as a new Vector */ - public static Vector3D crossProduct(Vector3D v1, Vector3D v2) { - return new Vector3D(v1.y * v2.z - v1.z * v2.y, - v1.z * v2.x - v1.x * v2.z, - v1.x * v2.y - v1.y * v2.x); + public static Vector3D crossProduct(final Vector3D v1, final Vector3D v2) { + + final double n1 = v1.getNormSq(); + final double n2 = v2.getNormSq(); + if ((n1 * n2) < MathUtils.SAFE_MIN) { + return ZERO; + } + + // rescale both vectors without losing precision, + // to ensure their norm are the same order of magnitude + final int deltaExp = (FastMath.getExponent(n1) - FastMath.getExponent(n2)) / 4; + final double x1 = FastMath.scalb(v1.x, -deltaExp); + final double y1 = FastMath.scalb(v1.y, -deltaExp); + final double z1 = FastMath.scalb(v1.z, -deltaExp); + final double x2 = FastMath.scalb(v2.x, deltaExp); + final double y2 = FastMath.scalb(v2.y, deltaExp); + final double z2 = FastMath.scalb(v2.z, deltaExp); + + // we reduce cancellation errors by preconditioning, + // we replace v1 by v3 = v1 - rho v2 with rho chosen in order 
to compute + // v3 without loss of precision. See Kahan lecture + // "Computing Cross-Products and Rotations in 2- and 3-Dimensional Euclidean Spaces" + // available at http://www.cs.berkeley.edu/~wkahan/MathH110/Cross.pdf + + // compute rho as an 8 bits approximation of v1.v2 / v2.v2 + final double ratio = (x1 * x2 + y1 * y2 + z1 * z2) / FastMath.scalb(n2, 2 * deltaExp); + final double rho = FastMath.rint(256 * ratio) / 256; + + final double x3 = x1 - rho * x2; + final double y3 = y1 - rho * y2; + final double z3 = z1 - rho * z2; + + // compute cross product from v3 and v2 instead of v1 and v2 + return new Vector3D(y3 * z2 - z3 * y2, z3 * x2 - x3 * z2, x3 * y2 - y3 * x2); + } /** Compute the distance between two vectors according to the L<sub>1</sub> norm.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-554_fbbb96eb.diff
bugs-dot-jar_data_MATH-639_8b418000
--- BugID: MATH-639 Summary: numerical problems in rotation creation Description: | building a rotation from the following vector pairs leads to NaN: u1 = -4921140.837095533, -2.1512094250440013E7, -890093.279426377 u2 = -2.7238580938724895E9, -2.169664921341876E9, 6.749688708885301E10 v1 = 1, 0, 0 v2 = 0, 0, 1 The constructor first changes the (v1, v2) pair into (v1', v2') ensuring the following scalar products hold: <v1'|v1'> == <u1|u1> <v2'|v2'> == <u2|u2> <u1 |u2> == <v1'|v2'> Once the (v1', v2') pair has been computed, we compute the cross product: k = (v1' - u1)^(v2' - u2) and the scalar product: c = <k | (u1^u2)> By construction, c is positive or null and the quaternion axis we want to build is q = k/[2*sqrt(c)]. c should be null only if some of the vectors are aligned, and this is dealt with later in the algorithm. However, there are numerical problems with the vector above with the way these computations are done, as shown by the following comparisons, showing the result we get from our Java code and the result we get from manual computation with the same formulas but with enhanced precision: commons math: k = 38514476.5, -84., -1168590144 high precision: k = 38514410.36093388..., -0.374075245201180409222711..., -1168590152.10599715208... and it becomes worse when computing c because the vectors are almost orthogonal to each other, hence inducing additional cancellations. We get: commons math c = -1.2397173627587605E20 high precision: c = 558382746168463196.7079627... We have lost ALL significant digits in cancellations, and even the sign is wrong! 
diff --git a/src/main/java/org/apache/commons/math/geometry/euclidean/threed/Rotation.java b/src/main/java/org/apache/commons/math/geometry/euclidean/threed/Rotation.java index bfa7f26..ada0a8f 100644 --- a/src/main/java/org/apache/commons/math/geometry/euclidean/threed/Rotation.java +++ b/src/main/java/org/apache/commons/math/geometry/euclidean/threed/Rotation.java @@ -313,92 +313,51 @@ public class Rotation implements Serializable { public Rotation(Vector3D u1, Vector3D u2, Vector3D v1, Vector3D v2) { // norms computation - double u1u1 = Vector3D.dotProduct(u1, u1); - double u2u2 = Vector3D.dotProduct(u2, u2); - double v1v1 = Vector3D.dotProduct(v1, v1); - double v2v2 = Vector3D.dotProduct(v2, v2); + double u1u1 = u1.getNormSq(); + double u2u2 = u2.getNormSq(); + double v1v1 = v1.getNormSq(); + double v2v2 = v2.getNormSq(); if ((u1u1 == 0) || (u2u2 == 0) || (v1v1 == 0) || (v2v2 == 0)) { throw MathRuntimeException.createIllegalArgumentException(LocalizedFormats.ZERO_NORM_FOR_ROTATION_DEFINING_VECTOR); } - double u1x = u1.getX(); - double u1y = u1.getY(); - double u1z = u1.getZ(); - - double u2x = u2.getX(); - double u2y = u2.getY(); - double u2z = u2.getZ(); - // normalize v1 in order to have (v1'|v1') = (u1|u1) - double coeff = FastMath.sqrt (u1u1 / v1v1); - double v1x = coeff * v1.getX(); - double v1y = coeff * v1.getY(); - double v1z = coeff * v1.getZ(); - v1 = new Vector3D(v1x, v1y, v1z); - - // adjust v2 in order to have (u1|u2) = (v1|v2) and (v2'|v2') = (u2|u2) - double u1u2 = Vector3D.dotProduct(u1, u2); - double v1v2 = Vector3D.dotProduct(v1, v2); + v1 = new Vector3D(FastMath.sqrt(u1u1 / v1v1), v1); + + // adjust v2 in order to have (u1|u2) = (v1'|v2') and (v2'|v2') = (u2|u2) + double u1u2 = u1.dotProduct(u2); + double v1v2 = v1.dotProduct(v2); double coeffU = u1u2 / u1u1; double coeffV = v1v2 / u1u1; double beta = FastMath.sqrt((u2u2 - u1u2 * coeffU) / (v2v2 - v1v2 * coeffV)); double alpha = coeffU - beta * coeffV; - double v2x = alpha * v1x + beta * 
v2.getX(); - double v2y = alpha * v1y + beta * v2.getY(); - double v2z = alpha * v1z + beta * v2.getZ(); - v2 = new Vector3D(v2x, v2y, v2z); - - // preliminary computation (we use explicit formulation instead - // of relying on the Vector3D class in order to avoid building lots - // of temporary objects) - Vector3D uRef = u1; - Vector3D vRef = v1; - double dx1 = v1x - u1.getX(); - double dy1 = v1y - u1.getY(); - double dz1 = v1z - u1.getZ(); - double dx2 = v2x - u2.getX(); - double dy2 = v2y - u2.getY(); - double dz2 = v2z - u2.getZ(); - Vector3D k = new Vector3D(dy1 * dz2 - dz1 * dy2, - dz1 * dx2 - dx1 * dz2, - dx1 * dy2 - dy1 * dx2); - double c = k.getX() * (u1y * u2z - u1z * u2y) + - k.getY() * (u1z * u2x - u1x * u2z) + - k.getZ() * (u1x * u2y - u1y * u2x); - - if (c == 0) { - // the (q1, q2, q3) vector is in the (u1, u2) plane + v2 = new Vector3D(alpha, v1, beta, v2); + + // preliminary computation + Vector3D uRef = u1; + Vector3D vRef = v1; + Vector3D v1Su1 = v1.subtract(u1); + Vector3D v2Su2 = v2.subtract(u2); + Vector3D k = v1Su1.crossProduct(v2Su2); + Vector3D u3 = u1.crossProduct(u2); + double c = k.dotProduct(u3); + final double inPlaneThreshold = 0.001; + if (c <= inPlaneThreshold * k.getNorm() * u3.getNorm()) { + // the (q1, q2, q3) vector is close to the (u1, u2) plane // we try other vectors - Vector3D u3 = Vector3D.crossProduct(u1, u2); Vector3D v3 = Vector3D.crossProduct(v1, v2); - double u3x = u3.getX(); - double u3y = u3.getY(); - double u3z = u3.getZ(); - double v3x = v3.getX(); - double v3y = v3.getY(); - double v3z = v3.getZ(); - - double dx3 = v3x - u3x; - double dy3 = v3y - u3y; - double dz3 = v3z - u3z; - k = new Vector3D(dy1 * dz3 - dz1 * dy3, - dz1 * dx3 - dx1 * dz3, - dx1 * dy3 - dy1 * dx3); - c = k.getX() * (u1y * u3z - u1z * u3y) + - k.getY() * (u1z * u3x - u1x * u3z) + - k.getZ() * (u1x * u3y - u1y * u3x); - - if (c == 0) { - // the (q1, q2, q3) vector is aligned with u1: - // we try (u2, u3) and (v2, v3) - k = new Vector3D(dy2 * dz3 - 
dz2 * dy3, - dz2 * dx3 - dx2 * dz3, - dx2 * dy3 - dy2 * dx3); - c = k.getX() * (u2y * u3z - u2z * u3y) + - k.getY() * (u2z * u3x - u2x * u3z) + - k.getZ() * (u2x * u3y - u2y * u3x); - - if (c == 0) { + Vector3D v3Su3 = v3.subtract(u3); + k = v1Su1.crossProduct(v3Su3); + Vector3D u2Prime = u1.crossProduct(u3); + c = k.dotProduct(u2Prime); + + if (c <= inPlaneThreshold * k.getNorm() * u2Prime.getNorm()) { + // the (q1, q2, q3) vector is also close to the (u1, u3) plane, + // it is almost aligned with u1: we try (u2, u3) and (v2, v3) + k = v2Su2.crossProduct(v3Su3);; + c = k.dotProduct(u2.crossProduct(u3));; + + if (c <= 0) { // the (q1, q2, q3) vector is aligned with everything // this is really the identity rotation q0 = 1.0; @@ -427,8 +386,7 @@ public class Rotation implements Serializable { k = new Vector3D(uRef.getY() * q3 - uRef.getZ() * q2, uRef.getZ() * q1 - uRef.getX() * q3, uRef.getX() * q2 - uRef.getY() * q1); - c = Vector3D.dotProduct(k, k); - q0 = Vector3D.dotProduct(vRef, k) / (c + c); + q0 = vRef.dotProduct(k) / (2 * k.getNormSq()); } @@ -452,7 +410,7 @@ public class Rotation implements Serializable { throw MathRuntimeException.createIllegalArgumentException(LocalizedFormats.ZERO_NORM_FOR_ROTATION_DEFINING_VECTOR); } - double dot = Vector3D.dotProduct(u, v); + double dot = u.dotProduct(v); if (dot < ((2.0e-15 - 1.0) * normProduct)) { // special case u = -v: we select a PI angle rotation around @@ -467,9 +425,10 @@ public class Rotation implements Serializable { // the shortest possible rotation: axis orthogonal to this plane q0 = FastMath.sqrt(0.5 * (1.0 + dot / normProduct)); double coeff = 1.0 / (2.0 * q0 * normProduct); - q1 = coeff * (v.getY() * u.getZ() - v.getZ() * u.getY()); - q2 = coeff * (v.getZ() * u.getX() - v.getX() * u.getZ()); - q3 = coeff * (v.getX() * u.getY() - v.getY() * u.getX()); + Vector3D q = v.crossProduct(u); + q1 = coeff * q.getX(); + q2 = coeff * q.getY(); + q3 = coeff * q.getZ(); } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-639_8b418000.diff
bugs-dot-jar_data_MATH-1089_e91d0f05
--- BugID: MATH-1089 Summary: Precision.round() returns different results when provided negative zero as double or float Description: |- Precision.round(-0.0d, x) = 0.0 Precision.round(-0.0f, x) = -0.0 After discussion on the mailinglist, the result should always be -0.0. diff --git a/src/main/java/org/apache/commons/math3/util/Precision.java b/src/main/java/org/apache/commons/math3/util/Precision.java index f0b0c4f..441e015 100644 --- a/src/main/java/org/apache/commons/math3/util/Precision.java +++ b/src/main/java/org/apache/commons/math3/util/Precision.java @@ -392,10 +392,11 @@ public class Precision { */ public static double round(double x, int scale, int roundingMethod) { try { - return (new BigDecimal - (Double.toString(x)) + final double rounded = (new BigDecimal(Double.toString(x)) .setScale(scale, roundingMethod)) .doubleValue(); + // MATH-1089: negative values rounded to zero should result in negative zero + return rounded == 0.0 ? rounded * FastMath.copySign(1d, x) : rounded; } catch (NumberFormatException ex) { if (Double.isInfinite(x)) { return x;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1089_e91d0f05.diff
bugs-dot-jar_data_MATH-949_f83bbc1d
--- BugID: MATH-949 Summary: LevenbergMarquardtOptimizer reports 0 iterations Description: |- The method LevenbergMarquardtOptimizer.getIterations() does not report the correct number of iterations; It always returns 0. A quick look at the code shows that only SimplexOptimizer calls BaseOptimizer.incrementEvaluationsCount() I've put a test case below. Notice how the evaluations count is correctly incremented, but the iterations count is not. {noformat} @Test public void testGetIterations() { // setup LevenbergMarquardtOptimizer otim = new LevenbergMarquardtOptimizer(); // action otim.optimize(new MaxEval(100), new Target(new double[] { 1 }), new Weight(new double[] { 1 }), new InitialGuess( new double[] { 3 }), new ModelFunction( new MultivariateVectorFunction() { @Override public double[] value(double[] point) throws IllegalArgumentException { return new double[] { FastMath.pow(point[0], 4) }; } }), new ModelFunctionJacobian( new MultivariateMatrixFunction() { @Override public double[][] value(double[] point) throws IllegalArgumentException { return new double[][] { { 0.25 * FastMath.pow( point[0], 3) } }; } })); // verify assertThat(otim.getEvaluations(), greaterThan(1)); assertThat(otim.getIterations(), greaterThan(1)); } {noformat} diff --git a/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java b/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java index 75c9757..927e17f 100644 --- a/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/BaseOptimizer.java @@ -48,7 +48,7 @@ public abstract class BaseOptimizer<PAIR> { this.checker = checker; evaluations = new Incrementor(0, new MaxEvalCallback()); - iterations = new Incrementor(0, new MaxIterCallback()); + iterations = new Incrementor(Integer.MAX_VALUE, new MaxIterCallback()); } /** diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java 
b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java index e010781..bd12b54 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/gradient/NonLinearConjugateGradientOptimizer.java @@ -211,16 +211,15 @@ public class NonLinearConjugateGradientOptimizer } PointValuePair current = null; - int iter = 0; int maxEval = getMaxEvaluations(); while (true) { - ++iter; + incrementIterationCount(); final double objective = computeObjectiveValue(point); PointValuePair previous = current; current = new PointValuePair(point, objective); if (previous != null) { - if (checker.converged(iter, previous, current)) { + if (checker.converged(getIterations(), previous, current)) { // We have found an optimum. return current; } @@ -274,7 +273,7 @@ public class NonLinearConjugateGradientOptimizer steepestDescent = newSteepestDescent; // Compute conjugate search direction. - if (iter % n == 0 || + if (getIterations() % n == 0 || beta < 0) { // Break conjugation: reset search direction. 
searchDirection = steepestDescent.clone(); diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java index c7216f9..fed67b1 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/CMAESOptimizer.java @@ -385,6 +385,8 @@ public class CMAESOptimizer generationLoop: for (iterations = 1; iterations <= maxIterations; iterations++) { + incrementIterationCount(); + // Generate and evaluate lambda offspring final RealMatrix arz = randn1(dimension, lambda); final RealMatrix arx = zeros(dimension, lambda); diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java index 9572820..afe8d2f 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/PowellOptimizer.java @@ -188,9 +188,8 @@ public class PowellOptimizer double[] x = guess; double fVal = computeObjectiveValue(x); double[] x1 = x.clone(); - int iter = 0; while (true) { - ++iter; + incrementIterationCount(); double fX = fVal; double fX2 = 0; @@ -224,7 +223,7 @@ public class PowellOptimizer final PointValuePair current = new PointValuePair(x, fVal); if (!stop) { // User-defined stopping criteria. 
if (checker != null) { - stop = checker.converged(iter, previous, current); + stop = checker.converged(getIterations(), previous, current); } } if (stop) { diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java index 3d16aa8..0dd644e 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/scalar/noderiv/SimplexOptimizer.java @@ -155,7 +155,7 @@ public class SimplexOptimizer extends MultivariateOptimizer { int iteration = 0; final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker(); while (true) { - if (iteration > 0) { + if (getIterations() > 0) { boolean converged = true; for (int i = 0; i < simplex.getSize(); i++) { PointValuePair prev = previous[i]; @@ -171,7 +171,8 @@ public class SimplexOptimizer extends MultivariateOptimizer { // We still need to search. 
previous = simplex.getPoints(); simplex.iterate(evalFunc, comparator); - ++iteration; + + incrementIterationCount(); } } diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java index a2834f2..844ed22 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/GaussNewtonOptimizer.java @@ -103,9 +103,8 @@ public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer { // iterate until convergence is reached PointVectorValuePair current = null; - int iter = 0; for (boolean converged = false; !converged;) { - ++iter; + incrementIterationCount(); // evaluate the objective function and its jacobian PointVectorValuePair previous = current; @@ -157,7 +156,7 @@ public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer { // Check convergence. if (previous != null) { - converged = checker.converged(iter, previous, current); + converged = checker.converged(getIterations(), previous, current); if (converged) { setCost(computeCost(currentResiduals)); return current; diff --git a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java index ca2d138..4016131 100644 --- a/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java @@ -319,10 +319,10 @@ public class LevenbergMarquardtOptimizer // Outer loop. 
lmPar = 0; boolean firstIteration = true; - int iter = 0; final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker(); while (true) { - ++iter; + incrementIterationCount(); + final PointVectorValuePair previous = current; // QR decomposition of the jacobian matrix @@ -486,7 +486,7 @@ public class LevenbergMarquardtOptimizer // tests for convergence. if (checker != null) { // we use the vectorial convergence checker - if (checker.converged(iter, previous, current)) { + if (checker.converged(getIterations(), previous, current)) { setCost(currentCost); return current; }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-949_f83bbc1d.diff
bugs-dot-jar_data_MATH-1203_4aa4c6d3
--- BugID: MATH-1203 Summary: getKernel fails for buckets with only multiple instances of the same value in random.EmpiricalDistribution Description: |- After loading a set of values into an EmpericalDistribution, assume that there's a case where a single bin ONLY contains multiple instances of the same value. In this case the standard deviation will equal zero. This will fail when getKernel attempts to create a NormalDistribution. The other case where stddev=0 is when there is only a single value in the bin, and this is handled by returning a ConstantRealDistribution rather than a NormalDistrbution. See: https://issues.apache.org/jira/browse/MATH-984 diff --git a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java index 9458289..3b3a864 100644 --- a/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java +++ b/src/main/java/org/apache/commons/math4/random/EmpiricalDistribution.java @@ -799,7 +799,7 @@ public class EmpiricalDistribution extends AbstractRealDistribution { * @return within-bin kernel parameterized by bStats */ protected RealDistribution getKernel(SummaryStatistics bStats) { - if (bStats.getN() == 1) { + if (bStats.getN() == 1 || bStats.getVariance() == 0) { return new ConstantRealDistribution(bStats.getMean()); } else { return new NormalDistribution(randomData.getRandomGenerator(),
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1203_4aa4c6d3.diff
bugs-dot-jar_data_MATH-294_2c8a114f
--- BugID: MATH-294 Summary: RandomDataImpl.nextPoisson fails for means in range 6.0 - 19.99 Description: "math.random.RandomDataImpl.nextPoisson(double mean) fails frequently (but not always) for values of mean between 6.0 and 19.99 inclusive. For values below 6.0 (where I see there is a branch in the logic) and above 20.0 it seems to be okay (though I've only randomly sampled the space and run a million trials for the values I've tried)\n\nWhen it fails, the exception is as follows (this for a mean of 6.0)\n\norg.apache.commons.math.MathRuntimeException$4: must have n >= 0 for n!, got n = -2\n\tat org.apache.commons.math.MathRuntimeException.createIllegalArgumentException(MathRuntimeException.java:282)\n\tat org.apache.commons.math.util.MathUtils.factorialLog(MathUtils.java:561)\n\tat org.apache.commons.math.random.RandomDataImpl.nextPoisson(RandomDataImpl.java:434) \n\nie MathUtils.factorialLog is being called with a negative input\n\nTo reproduce:\n\n \ JDKRandomGenerator random = new JDKRandomGenerator();\n random.setSeed(123456);\n \ RandomData randomData = new RandomDataImpl(random);\n\n for (int i=0; i< 1000000; i++){\n randomData.nextPoisson(6.0);\n }\n" diff --git a/src/main/java/org/apache/commons/math/random/RandomDataImpl.java b/src/main/java/org/apache/commons/math/random/RandomDataImpl.java index 18097bc..d5d2474 100644 --- a/src/main/java/org/apache/commons/math/random/RandomDataImpl.java +++ b/src/main/java/org/apache/commons/math/random/RandomDataImpl.java @@ -322,30 +322,17 @@ public class RandomDataImpl implements RandomData, Serializable { /** * {@inheritDoc} * <p> - * <strong>Algorithm Description</strong>: For small means, uses simulation - * of a Poisson process using Uniform deviates, as described <a - * href="http://irmi.epfl.ch/cmos/Pmmi/interactive/rng7.htm"> here.</a> - * </p> - * <p> - * The Poisson process (and hence value returned) is bounded by 1000 * mean. 
- * </p> - * - * <p> - * For large means, uses a reject method as described in <a - * href="http://cg.scs.carleton.ca/~luc/rnbookindex.html">Non-Uniform Random - * Variate Generation</a> - * </p> + * <strong>Algorithm Description</strong>: + * <ul><li> For small means, uses simulation of a Poisson process + * using Uniform deviates, as described + * <a href="http://irmi.epfl.ch/cmos/Pmmi/interactive/rng7.htm"> here.</a> + * The Poisson process (and hence value returned) is bounded by 1000 * mean.</li> * - * <p> - * References: - * <ul> - * <li>Devroye, Luc. (1986). <i>Non-Uniform Random Variate Generation</i>. - * New York, NY. Springer-Verlag</li> - * </ul> - * </p> + * <li> For large means, uses the rejection algorithm described in <br/> + * Devroye, Luc. (1981).<i>The Computer Generation of Poisson Random Variables</i> + * <strong>Computing</strong> vol. 26 pp. 197-207.</li></ul></p> * - * @param mean - * mean of the Poisson distribution. + * @param mean mean of the Poisson distribution. * @return the random Poisson value. 
*/ public long nextPoisson(double mean) { @@ -356,7 +343,7 @@ public class RandomDataImpl implements RandomData, Serializable { final RandomGenerator generator = getRan(); - double pivot = 6.0; + final double pivot = 40.0d; if (mean < pivot) { double p = Math.exp(-mean); long n = 0; @@ -374,68 +361,70 @@ public class RandomDataImpl implements RandomData, Serializable { } return n; } else { - double mu = Math.floor(mean); - double delta = Math.floor(pivot + (mu - pivot) / 2.0); // integer - // between 6 - // and mean - double mu2delta = 2.0 * mu + delta; - double muDeltaHalf = mu + delta / 2.0; - double logMeanMu = Math.log(mean / mu); - - double muFactorialLog = MathUtils.factorialLog((int) mu); - - double c1 = Math.sqrt(Math.PI * mu / 2.0); - double c2 = c1 + - Math.sqrt(Math.PI * muDeltaHalf / - (2.0 * Math.exp(1.0 / mu2delta))); - double c3 = c2 + 2.0; - double c4 = c3 + Math.exp(1.0 / 78.0); - double c = c4 + 2.0 / delta * mu2delta * - Math.exp(-delta / mu2delta * (1.0 + delta / 2.0)); - - double y = 0.0; - double x = 0.0; - double w = Double.POSITIVE_INFINITY; - - boolean accept = false; - while (!accept) { - double u = nextUniform(0.0, c); - double e = nextExponential(mean); - - if (u <= c1) { - double z = nextGaussian(0.0, 1.0); - y = -Math.abs(z) * Math.sqrt(mu) - 1.0; - x = Math.floor(y); - w = -z * z / 2.0 - e - x * logMeanMu; - if (x < -mu) { - w = Double.POSITIVE_INFINITY; + final double lambda = Math.floor(mean); + final double lambdaFractional = mean - lambda; + final double logLambda = Math.log(lambda); + final double logLambdaFactorial = MathUtils.factorialLog((int) lambda); + final long y2 = lambdaFractional < Double.MIN_VALUE ? 
0 : nextPoisson(lambdaFractional); + final double delta = Math.sqrt(lambda * Math.log(32 * lambda / Math.PI + 1)); + final double halfDelta = delta / 2; + final double twolpd = 2 * lambda + delta; + final double a1 = Math.sqrt(Math.PI * twolpd) * Math.exp(1 / 8 * lambda); + final double a2 = (twolpd / delta) * Math.exp(-delta * (1 + delta) / twolpd); + final double aSum = a1 + a2 + 1; + final double p1 = a1 / aSum; + final double p2 = a2 / aSum; + final double c1 = 1 / (8 * lambda); + + double x = 0; + double y = 0; + double v = 0; + int a = 0; + double t = 0; + double qr = 0; + double qa = 0; + for (;;) { + final double u = nextUniform(0.0, 1); + if (u <= p1) { + final double n = nextGaussian(0d, 1d); + x = n * Math.sqrt(lambda + halfDelta) - 0.5d; + if (x > delta || x < -lambda) { + continue; } - } else if (c1 < u && u <= c2) { - double z = nextGaussian(0.0, 1.0); - y = 1.0 + Math.abs(z) * Math.sqrt(muDeltaHalf); - x = Math.ceil(y); - w = (-y * y + 2.0 * y) / mu2delta - e - x * logMeanMu; - if (x > delta) { - w = Double.POSITIVE_INFINITY; + y = x < 0 ? Math.floor(x) : Math.ceil(x); + final double e = nextExponential(1d); + v = -e - (n * n / 2) + c1; + } else { + if (u > p1 + p2) { + y = lambda; + break; + } else { + x = delta + (twolpd / delta) * nextExponential(1d); + y = Math.ceil(x); + v = -nextExponential(1d) - delta * (x + 1) / twolpd; } - } else if (c2 < u && u <= c3) { - x = 0.0; - w = -e; - } else if (c3 < u && u <= c4) { - x = 1.0; - w = -e - logMeanMu; - } else if (c4 < u) { - double v = nextExponential(mean); - y = delta + v * 2.0 / delta * mu2delta; - x = Math.ceil(y); - w = -delta / mu2delta * (1.0 + y / 2.0) - e - x * logMeanMu; } - accept = w <= x * Math.log(mu) - - MathUtils.factorialLog((int) (mu + x)) / muFactorialLog; + a = x < 0 ? 
1 : 0; + t = y * (y + 1) / (2 * lambda); + if (v < -t && a == 0) { + y = lambda + y; + break; + } + qr = t * ((2 * y + 1) / (6 * lambda) - 1); + qa = qr - (t * t) / (3 * (lambda + a * (y + 1))); + if (v < qa) { + y = lambda + y; + break; + } + if (v > qr) { + continue; + } + if (v < y * logLambda - MathUtils.factorialLog((int) (y + lambda)) + logLambdaFactorial) { + y = lambda + y; + break; + } } - // cast to long is acceptable because both x and mu are whole - // numbers. - return (long) (x + mu); + return y2 + (long) y; } }
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-294_2c8a114f.diff
bugs-dot-jar_data_MATH-1269_a94ff90a
--- BugID: MATH-1269 Summary: FastMath.exp may return NaN for non-NaN arguments Description: I have observed that FastMath.exp(709.8125) returns NaN. However, the exponential function must never return NaN (if the argument is not NaN). The result must always be non-negative or positive infinity. diff --git a/src/main/java/org/apache/commons/math4/util/FastMath.java b/src/main/java/org/apache/commons/math4/util/FastMath.java index 3d6d27d..2532c62 100644 --- a/src/main/java/org/apache/commons/math4/util/FastMath.java +++ b/src/main/java/org/apache/commons/math4/util/FastMath.java @@ -968,6 +968,13 @@ public class FastMath { much larger than the others. If there are extra bits specified from the pow() function, use them. */ final double tempC = tempB + tempA; + + // If tempC is positive infinite, the evaluation below could result in NaN, + // because z could be negative at the same time. + if (tempC == Double.POSITIVE_INFINITY) { + return Double.POSITIVE_INFINITY; + } + final double result; if (extra != 0.0) { result = tempC*extra*z + tempC*extra + tempC*z + tempB + tempA;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1269_a94ff90a.diff
bugs-dot-jar_data_MATH-1230_96eb80ef
--- BugID: MATH-1230 Summary: SimplexSolver returning wrong answer from optimize Description: |- SimplexSolver fails for the following linear program: min 2x1 +15x2 +18x3 Subject to -x1 +2x2 -6x3 <=-10 x2 +2x3 <= 6 2x1 +10x3 <= 19 -x1 +x2 <= -2 x1,x2,x3 >= 0 Solution should be x1 = 7 x2 = 0 x3 = 1/2 Objective function = 23 Instead, it is returning x1 = 9.5 x2 = 1/8 x3 = 0 Objective function = 20.875 Constraint number 1 is violated by this answer diff --git a/src/main/java/org/apache/commons/math4/optim/linear/SimplexSolver.java b/src/main/java/org/apache/commons/math4/optim/linear/SimplexSolver.java index d4b4259..743fe9b 100644 --- a/src/main/java/org/apache/commons/math4/optim/linear/SimplexSolver.java +++ b/src/main/java/org/apache/commons/math4/optim/linear/SimplexSolver.java @@ -19,6 +19,7 @@ package org.apache.commons.math4.optim.linear; import java.util.ArrayList; import java.util.List; +import org.apache.commons.math4.exception.DimensionMismatchException; import org.apache.commons.math4.exception.TooManyIterationsException; import org.apache.commons.math4.optim.OptimizationData; import org.apache.commons.math4.optim.PointValuePair; @@ -146,6 +147,8 @@ public class SimplexSolver extends LinearOptimizer { * * @return {@inheritDoc} * @throws TooManyIterationsException if the maximal number of iterations is exceeded. + * @throws DimensionMismatchException if the dimension of the constraints does not match the + * dimension of the objective function */ @Override public PointValuePair optimize(OptimizationData... 
optData) diff --git a/src/main/java/org/apache/commons/math4/optim/linear/SimplexTableau.java b/src/main/java/org/apache/commons/math4/optim/linear/SimplexTableau.java index e869a74..f0a842f 100644 --- a/src/main/java/org/apache/commons/math4/optim/linear/SimplexTableau.java +++ b/src/main/java/org/apache/commons/math4/optim/linear/SimplexTableau.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Set; import java.util.TreeSet; +import org.apache.commons.math4.exception.DimensionMismatchException; import org.apache.commons.math4.linear.Array2DRowRealMatrix; import org.apache.commons.math4.linear.MatrixUtils; import org.apache.commons.math4.linear.RealVector; @@ -112,6 +113,8 @@ class SimplexTableau implements Serializable { * or {@link GoalType#MINIMIZE}. * @param restrictToNonNegative Whether to restrict the variables to non-negative values. * @param epsilon Amount of error to accept when checking for optimality. + * @throws DimensionMismatchException if the dimension of the constraints does not match the + * dimension of the objective function */ SimplexTableau(final LinearObjectiveFunction f, final Collection<LinearConstraint> constraints, @@ -129,13 +132,16 @@ class SimplexTableau implements Serializable { * @param restrictToNonNegative whether to restrict the variables to non-negative values * @param epsilon amount of error to accept when checking for optimality * @param maxUlps amount of error to accept in floating point comparisons + * @throws DimensionMismatchException if the dimension of the constraints does not match the + * dimension of the objective function */ SimplexTableau(final LinearObjectiveFunction f, final Collection<LinearConstraint> constraints, final GoalType goalType, final boolean restrictToNonNegative, final double epsilon, - final int maxUlps) { + final int maxUlps) throws DimensionMismatchException { + checkDimensions(f, constraints); this.f = f; this.constraints = normalizeConstraints(constraints); this.restrictToNonNegative 
= restrictToNonNegative; @@ -154,6 +160,23 @@ class SimplexTableau implements Serializable { } /** + * Checks that the dimensions of the objective function and the constraints match. + * @param f the objective function + * @param constraints the set of constraints + * @throws DimensionMismatchException if the constraint dimensions do not match with the + * dimension of the objective function + */ + private void checkDimensions(final LinearObjectiveFunction f, + final Collection<LinearConstraint> constraints) { + final int dimension = f.getCoefficients().getDimension(); + for (final LinearConstraint constraint : constraints) { + final int constraintDimension = constraint.getCoefficients().getDimension(); + if (constraintDimension != dimension) { + throw new DimensionMismatchException(constraintDimension, dimension); + } + } + } + /** * Initialize the labels for the columns. */ protected void initializeColumnLabels() {
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1230_96eb80ef.diff
bugs-dot-jar_data_MATH-1211_a06a1584
--- BugID: MATH-1211 Summary: PolyhedronsSet.firstIntersection(Vector3D point, Line line) sometimes reports intersections on wrong end of line Description: |2 I constructed a PolyhedronsSet from a list of triangular faces representing an icosphere (using the instructions found at https://mail-archives.apache.org/mod_mbox/commons-user/201208.mbox/<[email protected]>). This seems to produce correct INSIDE/OUTSIDE results for randomly chosen points. I think my mesh triangles are defined appropriately. However, using PolyhedronsSet.firstIntersection(Vector3D point, Line line) to shoot randomly oriented rays from the origin sometimes gives a wrong mesh intersection point "behind" the origin. The intersection algorithm is sometimes picking up faces of the sphere-shaped mesh on the wrong semi-infinite portion of the line, i.e. meshIntersectionPoint.subtract(point).dotProduct(line.getDirection())<0 where point is the Vector3D at center of the sphere and line extends outward through the mesh. I think the dot product above should always be positive. If multiple intersections exist along a "whole" line then the first one in "front" of the line's origin should be returned. This makes ray tracing with a PolyhedronsSet possible. diff --git a/src/main/java/org/apache/commons/math4/geometry/euclidean/threed/PolyhedronsSet.java b/src/main/java/org/apache/commons/math4/geometry/euclidean/threed/PolyhedronsSet.java index 9c46ae3..2f3bfa9 100644 --- a/src/main/java/org/apache/commons/math4/geometry/euclidean/threed/PolyhedronsSet.java +++ b/src/main/java/org/apache/commons/math4/geometry/euclidean/threed/PolyhedronsSet.java @@ -240,9 +240,9 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { /** Get the first sub-hyperplane crossed by a semi-infinite line. 
* @param point start point of the part of the line considered * @param line line to consider (contains point) - * @return the first sub-hyperplaned crossed by the line after the + * @return the first sub-hyperplane crossed by the line after the * given point, or null if the line does not intersect any - * sub-hyperplaned + * sub-hyperplane */ public SubHyperplane<Euclidean3D> firstIntersection(final Vector3D point, final Line line) { return recurseFirstIntersection(getTree(true), point, line); @@ -252,9 +252,9 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { * @param node current node * @param point start point of the part of the line considered * @param line line to consider (contains point) - * @return the first sub-hyperplaned crossed by the line after the + * @return the first sub-hyperplane crossed by the line after the * given point, or null if the line does not intersect any - * sub-hyperplaned + * sub-hyperplane */ private SubHyperplane<Euclidean3D> recurseFirstIntersection(final BSPTree<Euclidean3D> node, final Vector3D point, @@ -266,11 +266,11 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { } final BSPTree<Euclidean3D> minus = node.getMinus(); final BSPTree<Euclidean3D> plus = node.getPlus(); - final Plane plane = (Plane) cut.getHyperplane(); + final Plane plane = (Plane) cut.getHyperplane(); // establish search order final double offset = plane.getOffset((Point<Euclidean3D>) point); - final boolean in = FastMath.abs(offset) < 1.0e-10; + final boolean in = FastMath.abs(offset) < getTolerance(); final BSPTree<Euclidean3D> near; final BSPTree<Euclidean3D> far; if (offset < 0) { @@ -298,7 +298,7 @@ public class PolyhedronsSet extends AbstractRegion<Euclidean3D, Euclidean2D> { if (!in) { // search in the cut hyperplane final Vector3D hit3D = plane.intersection(line); - if (hit3D != null) { + if (hit3D != null && line.getAbscissa(hit3D) > line.getAbscissa(point)) { final 
SubHyperplane<Euclidean3D> facet = boundaryFacet(hit3D, node); if (facet != null) { return facet;
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1211_a06a1584.diff
bugs-dot-jar_data_MATH-1127_ba62c59d
--- BugID: MATH-1127 Summary: 2.0 equal to -2.0 Description: | The following test fails: {code} @Test public void testMath1127() { Assert.assertFalse(Precision.equals(2.0, -2.0, 1)); } {code} diff --git a/src/main/java/org/apache/commons/math3/util/Precision.java b/src/main/java/org/apache/commons/math3/util/Precision.java index d089476..fa938c0 100644 --- a/src/main/java/org/apache/commons/math3/util/Precision.java +++ b/src/main/java/org/apache/commons/math3/util/Precision.java @@ -62,6 +62,14 @@ public class Precision { private static final int SGN_MASK_FLOAT = 0x80000000; /** Positive zero. */ private static final double POSITIVE_ZERO = 0d; + /** Positive zero bits. */ + private static final long POSITIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(+0.0); + /** Negative zero bits. */ + private static final long NEGATIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(-0.0); + /** Positive zero bits. */ + private static final int POSITIVE_ZERO_FLOAT_BITS = Float.floatToRawIntBits(+0.0f); + /** Negative zero bits. */ + private static final int NEGATIVE_ZERO_FLOAT_BITS = Float.floatToRawIntBits(-0.0f); static { /* @@ -109,7 +117,7 @@ public class Precision { * (or fewer) floating point numbers between them, i.e. two adjacent floating * point numbers are considered equal. * Adapted from <a - * href="http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm"> + * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/"> * Bruce Dawson</a> * * @param x first value @@ -190,7 +198,7 @@ public class Precision { * (or fewer) floating point numbers between them, i.e. two adjacent floating * point numbers are considered equal. 
* Adapted from <a - * href="http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm"> + * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/"> * Bruce Dawson</a> * * @param x first value @@ -201,21 +209,37 @@ public class Precision { * point values between {@code x} and {@code y}. * @since 2.2 */ - public static boolean equals(float x, float y, int maxUlps) { - int xInt = Float.floatToIntBits(x); - int yInt = Float.floatToIntBits(y); + public static boolean equals(final float x, final float y, final int maxUlps) { + + final int xInt = Float.floatToRawIntBits(x); + final int yInt = Float.floatToRawIntBits(y); + + final boolean isEqual; + if (((xInt ^ yInt) & SGN_MASK_FLOAT) == 0) { + // number have same sign, there is no risk of overflow + isEqual = FastMath.abs(xInt - yInt) <= maxUlps; + } else { + // number have opposite signs, take care of overflow + final int deltaPlus; + final int deltaMinus; + if (xInt < yInt) { + deltaPlus = yInt - POSITIVE_ZERO_FLOAT_BITS; + deltaMinus = xInt - NEGATIVE_ZERO_FLOAT_BITS; + } else { + deltaPlus = xInt - POSITIVE_ZERO_FLOAT_BITS; + deltaMinus = yInt - NEGATIVE_ZERO_FLOAT_BITS; + } - // Make lexicographically ordered as a two's-complement integer. - if (xInt < 0) { - xInt = SGN_MASK_FLOAT - xInt; - } - if (yInt < 0) { - yInt = SGN_MASK_FLOAT - yInt; - } + if (deltaPlus > maxUlps) { + isEqual = false; + } else { + isEqual = deltaMinus <= (maxUlps - deltaPlus); + } - final boolean isEqual = FastMath.abs(xInt - yInt) <= maxUlps; + } return isEqual && !Float.isNaN(x) && !Float.isNaN(y); + } /** @@ -315,12 +339,16 @@ public class Precision { /** * Returns true if both arguments are equal or within the range of allowed * error (inclusive). + * <p> * Two float numbers are considered equal if there are {@code (maxUlps - 1)} - * (or fewer) floating point numbers between them, i.e. two adjacent floating - * point numbers are considered equal. 
+ * (or fewer) floating point numbers between them, i.e. two adjacent + * floating point numbers are considered equal. + * </p> + * <p> * Adapted from <a - * href="http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm"> + * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/"> * Bruce Dawson</a> + * </p> * * @param x first value * @param y second value @@ -329,21 +357,37 @@ public class Precision { * @return {@code true} if there are fewer than {@code maxUlps} floating * point values between {@code x} and {@code y}. */ - public static boolean equals(double x, double y, int maxUlps) { - long xInt = Double.doubleToLongBits(x); - long yInt = Double.doubleToLongBits(y); + public static boolean equals(final double x, final double y, final int maxUlps) { + + final long xInt = Double.doubleToRawLongBits(x); + final long yInt = Double.doubleToRawLongBits(y); + + final boolean isEqual; + if (((xInt ^ yInt) & SGN_MASK) == 0l) { + // number have same sign, there is no risk of overflow + isEqual = FastMath.abs(xInt - yInt) <= maxUlps; + } else { + // number have opposite signs, take care of overflow + final long deltaPlus; + final long deltaMinus; + if (xInt < yInt) { + deltaPlus = yInt - POSITIVE_ZERO_DOUBLE_BITS; + deltaMinus = xInt - NEGATIVE_ZERO_DOUBLE_BITS; + } else { + deltaPlus = xInt - POSITIVE_ZERO_DOUBLE_BITS; + deltaMinus = yInt - NEGATIVE_ZERO_DOUBLE_BITS; + } - // Make lexicographically ordered as a two's-complement integer. - if (xInt < 0) { - xInt = SGN_MASK - xInt; - } - if (yInt < 0) { - yInt = SGN_MASK - yInt; - } + if (deltaPlus > maxUlps) { + isEqual = false; + } else { + isEqual = deltaMinus <= (maxUlps - deltaPlus); + } - final boolean isEqual = FastMath.abs(xInt - yInt) <= maxUlps; + } return isEqual && !Double.isNaN(x) && !Double.isNaN(y); + } /**
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-1127_ba62c59d.diff
bugs-dot-jar_data_MATH-865_b55e0206
--- BugID: MATH-865 Summary: Wide bounds to CMAESOptimizer result in NaN parameters passed to fitness function Description: If you give large values as lower/upper bounds (for example -Double.MAX_VALUE as a lower bound), the optimizer can call the fitness function with parameters set to NaN. My guess is this is due to FitnessFunction.encode/decode generating NaN when normalizing/denormalizing parameters. For example, if the difference between the lower and upper bound is greater than Double.MAX_VALUE, encode could divide infinity by infinity. diff --git a/src/main/java/org/apache/commons/math3/optimization/direct/CMAESOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/direct/CMAESOptimizer.java index b54cb37..f6a4f28 100644 --- a/src/main/java/org/apache/commons/math3/optimization/direct/CMAESOptimizer.java +++ b/src/main/java/org/apache/commons/math3/optimization/direct/CMAESOptimizer.java @@ -24,8 +24,8 @@ import java.util.List; import org.apache.commons.math3.analysis.MultivariateFunction; import org.apache.commons.math3.exception.DimensionMismatchException; import org.apache.commons.math3.exception.MathUnsupportedOperationException; -import org.apache.commons.math3.exception.MathIllegalStateException; import org.apache.commons.math3.exception.NotPositiveException; +import org.apache.commons.math3.exception.NumberIsTooLargeException; import org.apache.commons.math3.exception.OutOfRangeException; import org.apache.commons.math3.exception.TooManyEvaluationsException; import org.apache.commons.math3.exception.util.LocalizedFormats; @@ -78,6 +78,12 @@ import org.apache.commons.math3.util.MathArrays; * <li><a href="http://en.wikipedia.org/wiki/CMA-ES">Wikipedia</a></li> * </ul> * + * When simple constraints (boundaries) are used, care must be taken that the + * difference between the upper and lower bounds does not overflow; should it + * be the case, a {@link NumberIsTooLargeException} will be thrown by the + * {@link 
BaseAbstractMultivariateSimpleBoundsOptimizer#optimize(int, + * MultivariateFunction,GoalType,double[],double[],double[]) optimize} method. + * * @version $Id$ * @since 3.0 */ @@ -529,6 +535,21 @@ public class CMAESOptimizer boundaries = new double[2][]; boundaries[0] = lB; boundaries[1] = uB; + + // Abort early if the normalization will overflow (cf. "encode" method). + for (int i = 0; i < lB.length; i++) { + if (Double.isInfinite(boundaries[1][i] - boundaries[0][i])) { + final double max = Double.MAX_VALUE + boundaries[0][i]; + final NumberIsTooLargeException e + = new NumberIsTooLargeException(boundaries[1][i], + max, + true); + e.getContext().addMessage(LocalizedFormats.OVERFLOW); + e.getContext().addMessage(LocalizedFormats.INDEX, i); + + throw e; + } + } } } else { // Convert API to internal handling of boundaries.
bugs-dot-jar/commons-math_extracted_diff/developer-patch_bugs-dot-jar_MATH-865_b55e0206.diff
bugs-dot-jar_data_FLINK-2484_d738430c
--- BugID: FLINK-2484 Summary: BarrierBuffer does not properly clean up temp files Description: diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/BarrierBuffer.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/BarrierBuffer.java index b7766ee..fd896c9 100644 --- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/BarrierBuffer.java +++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/BarrierBuffer.java @@ -76,7 +76,7 @@ public class BarrierBuffer implements CheckpointBarrierHandler { /** * - * @param inputGate Teh input gate to draw the buffers and events from. + * @param inputGate The input gate to draw the buffers and events from. * @param ioManager The I/O manager that gives access to the temp directories. * * @throws IOException Thrown, when the spilling to temp files cannot be initialized. @@ -102,6 +102,7 @@ public class BarrierBuffer implements CheckpointBarrierHandler { if (currentBuffered != null) { next = currentBuffered.getNext(); if (next == null) { + currentBuffered.cleanup(); currentBuffered = queuedBuffered.pollFirst(); if (currentBuffered != null) { currentBuffered.open();
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2484_d738430c.diff
bugs-dot-jar_data_FLINK-2460_a17d4e82
--- BugID: FLINK-2460 Summary: ReduceOnNeighborsWithExceptionITCase failure Description: |- I noticed a build error due to failure on this case. It was on a branch of my fork, which didn't actually have anything to do with the failed test or the runtime system at all. Here's the error log: https://s3.amazonaws.com/archive.travis-ci.org/jobs/73695554/log.txt diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java index 931790a..3b7a2a6 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PipelinedSubpartition.java @@ -41,7 +41,7 @@ class PipelinedSubpartition extends ResultSubpartition { private boolean isFinished; /** Flag indicating whether the subpartition has been released. */ - private boolean isReleased; + private volatile boolean isReleased; /** * A data availability listener. 
Registered, when the consuming task is faster than the @@ -167,6 +167,11 @@ class PipelinedSubpartition extends ResultSubpartition { } @Override + public boolean isReleased() { + return isReleased; + } + + @Override public PipelinedSubpartitionView createReadView(BufferProvider bufferProvider) { synchronized (buffers) { if (readView != null) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java index e9dfe32..b7ca9c4 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ResultSubpartition.java @@ -81,4 +81,6 @@ public abstract class ResultSubpartition { abstract int releaseMemory() throws IOException; + abstract public boolean isReleased(); + } diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java index 4a18691..21e9cc6 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java @@ -59,7 +59,7 @@ class SpillableSubpartition extends ResultSubpartition { private boolean isFinished; /** Flag indicating whether the subpartition has been released. */ - boolean isReleased; + private volatile boolean isReleased; /** The read view to consume this subpartition. 
*/ private ResultSubpartitionView readView; @@ -168,6 +168,11 @@ class SpillableSubpartition extends ResultSubpartition { } @Override + public boolean isReleased() { + return isReleased; + } + + @Override public ResultSubpartitionView createReadView(BufferProvider bufferProvider) throws IOException { synchronized (buffers) { if (!isFinished) { diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java index 972e34b..c9da40a 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java @@ -73,7 +73,7 @@ class SpillableSubpartitionView implements ResultSubpartitionView { // 1) In-memory synchronized (parent.buffers) { - if (parent.isReleased) { + if (parent.isReleased()) { return null; } @@ -162,7 +162,7 @@ class SpillableSubpartitionView implements ResultSubpartitionView { @Override public boolean isReleased() { - return isReleased.get(); + return parent.isReleased() || isReleased.get(); } @Override diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewAsyncIO.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewAsyncIO.java index ea5c20b..052a7cd 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewAsyncIO.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewAsyncIO.java @@ -187,7 +187,7 @@ class SpilledSubpartitionViewAsyncIO implements ResultSubpartitionView { @Override public boolean isReleased() { - return isReleased; + return parent.isReleased() || isReleased; } @Override diff --git 
a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewSyncIO.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewSyncIO.java index 24099a7..5b91668 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewSyncIO.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpilledSubpartitionViewSyncIO.java @@ -108,7 +108,7 @@ class SpilledSubpartitionViewSyncIO implements ResultSubpartitionView { @Override public boolean isReleased() { - return isReleased.get(); + return parent.isReleased() || isReleased.get(); } @Override
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2460_a17d4e82.diff
bugs-dot-jar_data_FLINK-2442_30761572
--- BugID: FLINK-2442 Summary: 'PojoType fields not supported by field position keys ' Description: "Tuple fields which are Pojos (or any other non-tuple composite type) cannot be selected as keys by field position keys.\n\nSomething like \n\n{code}\nDataSet<Tuple2<Integer, MyPojo>> data = ...\ndata.groupBy(1).reduce(...)\n{code}\n\nfails with an exception." diff --git a/flink-java/src/main/java/org/apache/flink/api/java/operators/Keys.java b/flink-java/src/main/java/org/apache/flink/api/java/operators/Keys.java index 69d306f..09874e5 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/operators/Keys.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/operators/Keys.java @@ -223,43 +223,43 @@ public abstract class Keys<T> { } else { groupingFields = rangeCheckFields(groupingFields, type.getArity() -1); } - CompositeType<?> compositeType = (CompositeType<?>) type; Preconditions.checkArgument(groupingFields.length > 0, "Grouping fields can not be empty at this point"); keyFields = new ArrayList<FlatFieldDescriptor>(type.getTotalFields()); // for each key, find the field: for(int j = 0; j < groupingFields.length; j++) { + int keyPos = groupingFields[j]; + + int offset = 0; for(int i = 0; i < type.getArity(); i++) { - TypeInformation<?> fieldType = compositeType.getTypeAt(i); - - if(groupingFields[j] == i) { // check if user set the key - int keyId = countNestedElementsBefore(compositeType, i) + i; - if(fieldType instanceof TupleTypeInfoBase) { - TupleTypeInfoBase<?> tupleFieldType = (TupleTypeInfoBase<?>) fieldType; - tupleFieldType.addAllFields(keyId, keyFields); - } else { - Preconditions.checkArgument(fieldType instanceof AtomicType, "Wrong field type"); - keyFields.add(new FlatFieldDescriptor(keyId, fieldType)); + + TypeInformation fieldType = ((CompositeType<?>) type).getTypeAt(i); + if(i < keyPos) { + // not yet there, increment key offset + offset += fieldType.getTotalFields(); + } + else { + // arrived at key position + if(fieldType 
instanceof CompositeType) { + // add all nested fields of composite type + ((CompositeType) fieldType).getFlatFields("*", offset, keyFields); } - + else if(fieldType instanceof AtomicType) { + // add atomic type field + keyFields.add(new FlatFieldDescriptor(offset, fieldType)); + } + else { + // type should either be composite or atomic + throw new InvalidProgramException("Field type is neither CompositeType nor AtomicType: "+fieldType); + } + // go to next key + break; } } } keyFields = removeNullElementsFromList(keyFields); } - - private static int countNestedElementsBefore(CompositeType<?> compositeType, int pos) { - if( pos == 0) { - return 0; - } - int ret = 0; - for (int i = 0; i < pos; i++) { - TypeInformation<?> fieldType = compositeType.getTypeAt(i); - ret += fieldType.getTotalFields() -1; - } - return ret; - } - + public static <R> List<R> removeNullElementsFromList(List<R> in) { List<R> elements = new ArrayList<R>(); for(R e: in) { diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TupleTypeInfoBase.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TupleTypeInfoBase.java index 3314ca9..881e690 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TupleTypeInfoBase.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TupleTypeInfoBase.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.flink.api.common.typeinfo.AtomicType; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.common.typeutils.CompositeType; import org.apache.flink.api.java.operators.Keys.ExpressionKeys; @@ -88,25 +87,6 @@ public abstract class TupleTypeInfoBase<T> extends CompositeType<T> { return tupleType; } - /** - * Recursively add all fields in this tuple type. We need this in particular to get all - * the types. 
- * @param startKeyId - * @param keyFields - */ - public void addAllFields(int startKeyId, List<FlatFieldDescriptor> keyFields) { - for(int i = 0; i < this.getArity(); i++) { - TypeInformation<?> type = this.types[i]; - if(type instanceof AtomicType) { - keyFields.add(new FlatFieldDescriptor(startKeyId, type)); - } else if(type instanceof TupleTypeInfoBase<?>) { - TupleTypeInfoBase<?> ttb = (TupleTypeInfoBase<?>) type; - ttb.addAllFields(startKeyId, keyFields); - } - startKeyId += type.getTotalFields(); - } - } - @Override public void getFlatFields(String fieldExpression, int offset, List<FlatFieldDescriptor> result) {
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2442_30761572.diff
bugs-dot-jar_data_FLINK-3052_8dc70f2e
--- BugID: FLINK-3052 Summary: Optimizer does not push properties out of bulk iterations Description: "Flink's optimizer should be able to reuse interesting properties from outside the loop. In order to do that it is sometimes necessary to append a NoOp node to the step function which recomputes the required properties.\n\nThis is currently not working for {{BulkIterations}}, because the plans with the appended NoOp nodes are not added to the overall list of candidates.\n\nThis not only leads to sub-optimal plan selection but sometimes to the rejection of valid jobs. The following job, for example, will be falsely rejected by flink.\n\n{code}\nExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();\n\n\t\tDataSet<Tuple1<Long>> input1 = env.generateSequence(1, 10).map(new MapFunction<Long, Tuple1<Long>>() {\n\t\t\t@Override\n\t\t\tpublic Tuple1<Long> map(Long value) throws Exception {\n\t\t\t\treturn new Tuple1<>(value);\n\t\t\t}\n\t\t});\n\n\t\tDataSet<Tuple1<Long>> input2 = env.generateSequence(1, 10).map(new MapFunction<Long, Tuple1<Long>>() {\n\t\t\t@Override\n\t\t\tpublic Tuple1<Long> map(Long value) throws Exception {\n\t\t\t\treturn new Tuple1<>(value);\n\t\t\t}\n\t\t});\n\n\t\tDataSet<Tuple1<Long>> distinctInput = input1.distinct();\n\n\t\tIterativeDataSet<Tuple1<Long>> iteration = distinctInput.iterate(10);\n\n\t\tDataSet<Tuple1<Long>> iterationStep = iteration\n\t\t\t\t.coGroup(input2)\n\t\t\t\t.where(0)\n\t\t\t\t.equalTo(0)\n\t\t\t\t.with(new CoGroupFunction<Tuple1<Long>, Tuple1<Long>, Tuple1<Long>>() {\n\t\t\t\t\t@Override\n\t\t\t\t\tpublic void coGroup(\n\t\t\t\t\t\t\tIterable<Tuple1<Long>> first,\n\t\t\t\t\t\t\tIterable<Tuple1<Long>> second,\n\t\t\t\t\t\t\tCollector<Tuple1<Long>> out) throws Exception {\n\t\t\t\t\t\tIterator<Tuple1<Long>> it = first.iterator();\n\n\t\t\t\t\t\tif (it.hasNext()) {\n\t\t\t\t\t\t\tout.collect(it.next());\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\tDataSet<Tuple1<Long>> iterationResult = 
iteration.closeWith(iterationStep);\n\n\t\titerationResult.output(new DiscardingOutputFormat<Tuple1<Long>>());\n{code}" diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/BulkIterationNode.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/BulkIterationNode.java index 3d95c22..556e2e3 100644 --- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/BulkIterationNode.java +++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/BulkIterationNode.java @@ -28,7 +28,6 @@ import org.apache.flink.api.common.ExecutionMode; import org.apache.flink.api.common.operators.SemanticProperties; import org.apache.flink.api.common.operators.SemanticProperties.EmptySemanticProperties; import org.apache.flink.api.common.operators.base.BulkIterationBase; -import org.apache.flink.api.common.operators.util.FieldList; import org.apache.flink.optimizer.CompilerException; import org.apache.flink.optimizer.DataStatistics; import org.apache.flink.optimizer.traversals.InterestingPropertyVisitor; @@ -48,6 +47,7 @@ import org.apache.flink.optimizer.plan.NamedChannel; import org.apache.flink.optimizer.plan.PlanNode; import org.apache.flink.optimizer.plan.SingleInputPlanNode; import org.apache.flink.optimizer.plan.PlanNode.FeedbackPropertiesMeetRequirementsReport; +import org.apache.flink.optimizer.util.NoOpUnaryUdfOp; import org.apache.flink.runtime.operators.DriverStrategy; import org.apache.flink.util.Visitor; @@ -273,7 +273,7 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode this.openBranches = (result == null || result.isEmpty()) ? Collections.<UnclosedBranchDescriptor>emptyList() : result; } - + @SuppressWarnings("unchecked") @Override protected void instantiateCandidate(OperatorDescriptorSingle dps, Channel in, List<Set<? 
extends NamedChannel>> broadcastPlanChannels, List<PlanNode> target, CostEstimator estimator, RequestedGlobalProperties globPropsReq, RequestedLocalProperties locPropsReq) @@ -321,8 +321,10 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode Channel toNoOp = new Channel(candidate); globPropsReq.parameterizeChannel(toNoOp, false, rootConnection.getDataExchangeMode(), false); locPropsReq.parameterizeChannel(toNoOp); - - UnaryOperatorNode rebuildPropertiesNode = new UnaryOperatorNode("Rebuild Partial Solution Properties", FieldList.EMPTY_LIST); + + NoOpUnaryUdfOp noOpUnaryUdfOp = new NoOpUnaryUdfOp<>(); + noOpUnaryUdfOp.setInput(candidate.getProgramOperator()); + UnaryOperatorNode rebuildPropertiesNode = new UnaryOperatorNode("Rebuild Partial Solution Properties", noOpUnaryUdfOp, true); rebuildPropertiesNode.setParallelism(candidate.getParallelism()); SingleInputPlanNode rebuildPropertiesPlanNode = new SingleInputPlanNode(rebuildPropertiesNode, "Rebuild Partial Solution Properties", toNoOp, DriverStrategy.UNARY_NO_OP); @@ -343,8 +345,10 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode planDeleter.remove(); } } + + candidates.addAll(newCandidates); } - + if (candidates.isEmpty()) { return; } diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/UnaryOperatorNode.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/UnaryOperatorNode.java index 0c48033..0ec0264 100644 --- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/UnaryOperatorNode.java +++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/UnaryOperatorNode.java @@ -18,10 +18,12 @@ package org.apache.flink.optimizer.dag; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.flink.api.common.operators.SemanticProperties; +import org.apache.flink.api.common.operators.SingleInputOperator; import 
org.apache.flink.api.common.operators.SingleInputSemanticProperties; import org.apache.flink.api.common.operators.util.FieldSet; import org.apache.flink.optimizer.DataStatistics; @@ -30,11 +32,17 @@ import org.apache.flink.optimizer.operators.OperatorDescriptorSingle; public class UnaryOperatorNode extends SingleInputNode { - private final List<OperatorDescriptorSingle> operator; + private final List<OperatorDescriptorSingle> operators; private final String name; + public UnaryOperatorNode(String name, SingleInputOperator<?, ?, ?> operator, boolean onDynamicPath) { + super(operator); + this.name = name; + this.operators = new ArrayList<>(); + this.onDynamicPath = onDynamicPath; + } public UnaryOperatorNode(String name, FieldSet keys, OperatorDescriptorSingle ... operators) { this(name, keys, Arrays.asList(operators)); @@ -43,13 +51,13 @@ public class UnaryOperatorNode extends SingleInputNode { public UnaryOperatorNode(String name, FieldSet keys, List<OperatorDescriptorSingle> operators) { super(keys); - this.operator = operators; + this.operators = operators; this.name = name; } @Override protected List<OperatorDescriptorSingle> getPossibleProperties() { - return this.operator; + return this.operators; } @Override diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/WorksetIterationNode.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/WorksetIterationNode.java index 15b9a50..7969a94 100644 --- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/WorksetIterationNode.java +++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/dag/WorksetIterationNode.java @@ -52,6 +52,7 @@ import org.apache.flink.optimizer.plan.WorksetIterationPlanNode; import org.apache.flink.optimizer.plan.WorksetPlanNode; import org.apache.flink.optimizer.plan.PlanNode.FeedbackPropertiesMeetRequirementsReport; import org.apache.flink.optimizer.util.NoOpBinaryUdfOp; +import org.apache.flink.optimizer.util.NoOpUnaryUdfOp; import 
org.apache.flink.runtime.operators.DriverStrategy; import org.apache.flink.runtime.operators.shipping.ShipStrategyType; import org.apache.flink.runtime.operators.util.LocalStrategy; @@ -307,7 +308,8 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode this.nextWorkset.accept(InterestingPropertiesClearer.INSTANCE); this.solutionSetDelta.accept(InterestingPropertiesClearer.INSTANCE); } - + + @SuppressWarnings("unchecked") @Override protected void instantiate(OperatorDescriptorDual operator, Channel solutionSetIn, Channel worksetIn, List<Set<? extends NamedChannel>> broadcastPlanChannels, List<PlanNode> target, CostEstimator estimator, @@ -367,9 +369,14 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode globPropsReqWorkset.parameterizeChannel(toNoOp, false, nextWorksetRootConnection.getDataExchangeMode(), false); locPropsReqWorkset.parameterizeChannel(toNoOp); - - UnaryOperatorNode rebuildWorksetPropertiesNode = new UnaryOperatorNode("Rebuild Workset Properties", - FieldList.EMPTY_LIST); + + NoOpUnaryUdfOp noOpUnaryUdfOp = new NoOpUnaryUdfOp<>(); + noOpUnaryUdfOp.setInput(candidate.getProgramOperator()); + + UnaryOperatorNode rebuildWorksetPropertiesNode = new UnaryOperatorNode( + "Rebuild Workset Properties", + noOpUnaryUdfOp, + true); rebuildWorksetPropertiesNode.setParallelism(candidate.getParallelism()); diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/util/NoOpUnaryUdfOp.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/util/NoOpUnaryUdfOp.java index cc4a4d6..8537b9c 100644 --- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/util/NoOpUnaryUdfOp.java +++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/util/NoOpUnaryUdfOp.java @@ -36,7 +36,7 @@ public class NoOpUnaryUdfOp<OUT> extends SingleInputOperator<OUT, OUT, NoOpFunct @SuppressWarnings("rawtypes") public static final NoOpUnaryUdfOp INSTANCE = new NoOpUnaryUdfOp(); - private NoOpUnaryUdfOp() { 
+ public NoOpUnaryUdfOp() { // pass null here because we override getOutputType to return type // of input operator super(new UserCodeClassWrapper<NoOpFunction>(NoOpFunction.class), null, "");
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3052_8dc70f2e.diff
bugs-dot-jar_data_FLINK-3314_8fc7e7af
--- BugID: FLINK-3314 Summary: Early cancel calls can cause Tasks to not cancel properly Description: | When a task receives the "cancel()" call before the operators are properly instantiated, it can be that the operator never receives a cancel call. In certain cases, this causes the operator to hang. diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java index 9ab6c10..c9624fc 100644 --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java @@ -146,6 +146,9 @@ public abstract class StreamTask<OUT, Operator extends StreamOperator<OUT>> /** Flag to mark the task "in operation", in which case check * needs to be initialized to true, so that early cancel() before invoke() behaves correctly */ private volatile boolean isRunning; + + /** Flag to mark this task as canceled */ + private volatile boolean canceled; private long recoveryTimestamp; @@ -191,6 +194,11 @@ public abstract class StreamTask<OUT, Operator extends StreamOperator<OUT>> // task specific initialization init(); + // save the work of reloadig state, etc, if the task is already canceled + if (canceled) { + throw new CancelTaskException(); + } + // -------- Invoke -------- LOG.debug("Invoking {}", getName()); @@ -205,7 +213,12 @@ public abstract class StreamTask<OUT, Operator extends StreamOperator<OUT>> openAllOperators(); } - // let the task do its work + // final check to exit early before starting to run + if (canceled) { + throw new CancelTaskException(); + } + + // let the task do its work isRunning = true; run(); isRunning = false; @@ -290,6 +303,7 @@ public abstract class StreamTask<OUT, Operator extends StreamOperator<OUT>> @Override public final void cancel() throws Exception { isRunning = false; + canceled = true; 
cancelTask(); } @@ -297,6 +311,10 @@ public abstract class StreamTask<OUT, Operator extends StreamOperator<OUT>> return isRunning; } + public final boolean isCanceled() { + return canceled; + } + private void openAllOperators() throws Exception { for (StreamOperator<?> operator : operatorChain.getAllOperators()) { if (operator != null) {
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3314_8fc7e7af.diff
bugs-dot-jar_data_FLINK-1761_380ef878
--- BugID: FLINK-1761 Summary: IndexOutOfBoundsException when receiving empty buffer at remote channel Description: "Receiving buffers from remote input channels with size 0 results in an {{IndexOutOfBoundsException}}.\n\n{code}\nCaused by: java.lang.IndexOutOfBoundsException: index: 30 (expected: range(0, 30))\n\tat io.netty.buffer.AbstractByteBuf.checkIndex(AbstractByteBuf.java:1123)\n\tat io.netty.buffer.PooledUnsafeDirectByteBuf.getBytes(PooledUnsafeDirectByteBuf.java:156)\n\tat io.netty.buffer.PooledUnsafeDirectByteBuf.getBytes(PooledUnsafeDirectByteBuf.java:151)\n\tat io.netty.buffer.SlicedByteBuf.getBytes(SlicedByteBuf.java:179)\n\tat io.netty.buffer.AbstractByteBuf.readBytes(AbstractByteBuf.java:717)\n\tat org.apache.flink.runtime.io.network.netty.PartitionRequestClientHandler.decodeBufferOrEvent(PartitionRequestClientHandler.java:205)\n\tat org.apache.flink.runtime.io.network.netty.PartitionRequestClientHandler.decodeMsg(PartitionRequestClientHandler.java:164)\n\tat org.apache.flink.runtime.io.network.netty.PartitionRequestClientHandler.channelRead(PartitionRequestClientHandler.java:118)\n{code}" diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java index fef21ce..12ed140 100644 --- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java +++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestClientHandler.java @@ -133,7 +133,10 @@ class PartitionRequestClientHandler extends ChannelInboundHandlerAdapter { } inputChannels.clear(); - ctx.close(); + + if (ctx != null) { + ctx.close(); + } } } @@ -187,6 +190,13 @@ class PartitionRequestClientHandler extends ChannelInboundHandlerAdapter { try { if (bufferOrEvent.isBuffer()) { // ---- Buffer ------------------------------------------------ + + // Early return 
for empty buffers. Otherwise Netty's readBytes() throws an + // IndexOutOfBoundsException. + if (bufferOrEvent.getSize() == 0) { + return true; + } + BufferProvider bufferProvider = inputChannel.getBufferProvider(); if (bufferProvider == null) { @@ -216,7 +226,6 @@ class PartitionRequestClientHandler extends ChannelInboundHandlerAdapter { } else { // ---- Event ------------------------------------------------- - // TODO We can just keep the serialized data in the Netty buffer and release it later at the reader byte[] byteArray = new byte[bufferOrEvent.getSize()]; bufferOrEvent.getNettyBuffer().readBytes(byteArray);
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1761_380ef878.diff
bugs-dot-jar_data_FLINK-1531_21f47d9c
--- BugID: FLINK-1531 Summary: Custom Kryo Serializer fails in itertation scenario Description: "When using iterations with a custom serializer for a domain object, the iteration will fail.\n\n{code:java}\norg.apache.flink.runtime.client.JobExecutionException: com.esotericsoftware.kryo.KryoException: Buffer underflow\n\tat org.apache.flink.api.java.typeutils.runtime.NoFetchingInput.require(NoFetchingInput.java:76)\n\tat com.esotericsoftware.kryo.io.Input.readVarInt(Input.java:355)\n\tat com.esotericsoftware.kryo.util.DefaultClassResolver.readClass(DefaultClassResolver.java:109)\n\tat com.esotericsoftware.kryo.Kryo.readClass(Kryo.java:641)\n\tat com.esotericsoftware.kryo.Kryo.readClassAndObject(Kryo.java:752)\n\tat org.apache.flink.api.java.typeutils.runtime.KryoSerializer.deserialize(KryoSerializer.java:198)\n\tat org.apache.flink.api.java.typeutils.runtime.KryoSerializer.deserialize(KryoSerializer.java:203)\n\tat org.apache.flink.runtime.io.disk.InputViewIterator.next(InputViewIterator.java:43)\n\tat org.apache.flink.runtime.iterative.task.IterationHeadPactTask.streamOutFinalOutputBulk(IterationHeadPactTask.java:404)\n\tat org.apache.flink.runtime.iterative.task.IterationHeadPactTask.run(IterationHeadPactTask.java:377)\n\tat org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:360)\n\tat org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:204)\n\tat java.lang.Thread.run(Thread.java:745)\n{code}" diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoSerializer.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoSerializer.java index 133dd57..d8411a0 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoSerializer.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/KryoSerializer.java @@ -195,7 +195,18 @@ public class KryoSerializer<T> extends TypeSerializer<T> { input = new 
NoFetchingInput(inputStream); previousIn = source; } - return (T) kryo.readClassAndObject(input); + + try { + return (T) kryo.readClassAndObject(input); + } catch (KryoException ke) { + Throwable cause = ke.getCause(); + + if(cause instanceof EOFException) { + throw (EOFException) cause; + } else { + throw ke; + } + } } @Override diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/NoFetchingInput.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/NoFetchingInput.java index 524347c..0f4fe94 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/NoFetchingInput.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/NoFetchingInput.java @@ -21,6 +21,7 @@ package org.apache.flink.api.java.typeutils.runtime; import com.esotericsoftware.kryo.KryoException; import com.esotericsoftware.kryo.io.Input; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; @@ -73,7 +74,7 @@ public class NoFetchingInput extends Input { count = fill(buffer, bytesRead, required - bytesRead); if(count == -1){ - throw new KryoException("Buffer underflow"); + throw new KryoException(new EOFException("No more bytes left.")); } bytesRead += count; @@ -121,7 +122,7 @@ public class NoFetchingInput extends Input { c = inputStream.read(bytes, offset+bytesRead, count-bytesRead); if(c == -1){ - throw new KryoException("Buffer underflow"); + throw new KryoException(new EOFException("No more bytes left.")); } bytesRead += c;
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1531_21f47d9c.diff
bugs-dot-jar_data_FLINK-1686_1f726e48
--- BugID: FLINK-1686 Summary: Streaming iteration heads cannot be instantiated Description: "It looks that streaming jobs with iterations and dop > 1 do not work currently. From what I see, when the TaskManager tries to instantiate a new RuntimeEnvironment for the iteration head tasks it fails since the following exception is being thrown:\n\njava.lang.Exception: Failed to deploy the task Map (2/8) - execution #0 to slot SimpleSlot (0)(1) - 0e39fcabcab3e8543cc2d8320f9de783 - ALLOCATED/ALIVE: java.lang.Exception: Error setting up runtime environment: java.lang.RuntimeException: Could not register the given element, broker slot is already occupied.\n\tat org.apache.flink.runtime.execution.RuntimeEnvironment.<init>(RuntimeEnvironment.java:174)\n\tat org.apache.flink.runtime.taskmanager.TaskManager.org$apache$flink$runtime$taskmanager$TaskManager$$submitTask(TaskManager.scala:432)\n.....\n.....\nCaused by: java.lang.RuntimeException: java.lang.RuntimeException: Could not register the given element, broker slot is already occupied.\n\tat org.apache.flink.streaming.api.streamvertex.StreamIterationHead.setInputsOutputs(StreamIterationHead.java:64)\n\tat org.apache.flink.streaming.api.streamvertex.StreamVertex.registerInputOutput(StreamVertex.java:86)\n\tat org.apache.flink.runtime.execution.RuntimeEnvironment.<init>(RuntimeEnvironment.java:171)\n\t... 20 more\nCaused by: java.lang.RuntimeException: Could not register the given element, broker slot is already occupied.\n\tat org.apache.flink.runtime.iterative.concurrent.Broker.handIn(Broker.java:39)\n\tat org.apache.flink.streaming.api.streamvertex.StreamIterationHead.setInputsOutputs(StreamIterationHead.java:62)\n\nThe IterateTest passed since it is using a dop of 1 but for higher parallelism it fails. Also, the IterateExample fails as well if you try to run it. \n\nI will debug this once I find some time so any ideas of what could possible cause this are more than welcome. 
" diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/streamvertex/StreamIterationTail.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/streamvertex/StreamIterationTail.java index 7b654be..ab09aff 100755 --- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/streamvertex/StreamIterationTail.java +++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/streamvertex/StreamIterationTail.java @@ -49,7 +49,7 @@ public class StreamIterationTail<IN> extends StreamVertex<IN, IN> { iterationId = configuration.getIterationId(); iterationWaitTime = configuration.getIterationWaitTime(); shouldWait = iterationWaitTime > 0; - BlockingQueueBroker.instance().get(iterationId.toString()+"-" + dataChannel = BlockingQueueBroker.instance().get(iterationId.toString()+"-" +getEnvironment().getIndexInSubtaskGroup()); } catch (Exception e) { throw new StreamVertexException(String.format(
bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1686_1f726e48.diff