id | content | max_stars_repo_path
---|---|---|
bugs-dot-jar_data_FLINK-2874_17e7b423 | ---
BugID: FLINK-2874
Summary: Certain Avro generated getters/setters not recognized
Description: "For Avro schemas where value null is not allowed, the field is unboxed
e.g. int but the getter/setter methods provide the boxed Integer as interface:\n\n{code}\n{\n
\"fields\": [\n {\n \"type\": \"double\", \n \"name\": \"time\"\n }, \n}\n{code}\n\nThis
results in Java\n\n{code}\n private double time;\n\n public java.lang.Double getTime()
{\n return time;\n }\n\n public void setTime(java.lang.Double value) {\n this.time
= value;\n }\n{code}\n\nThere is also a problem when there is an underscore in
the Avro schema, e.g.:\n\n{code}\n {\n \"default\": null, \n \"type\": [\n
\ \"null\", \n \"long\"\n ], \n \"name\": \"conn_id\"\n }, \n{code}\n\nThis
results in Java:\n\n{code}\nprivate java.lang.Long conn_id;\n\n public java.lang.Long
getConnId() {\n return conn_id;\n }\n\n public void setConnId(java.lang.Long
value) {\n this.conn_id = value;\n }\n{code}"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 1dec90b..0281da6 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -30,6 +30,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.avro.specific.SpecificRecordBase;
+import org.apache.commons.lang3.ClassUtils;
import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.common.functions.CrossFunction;
import org.apache.flink.api.common.functions.FlatJoinFunction;
@@ -1299,22 +1300,26 @@ public class TypeExtractor {
return true;
} else {
boolean hasGetter = false, hasSetter = false;
- final String fieldNameLow = f.getName().toLowerCase();
-
+ final String fieldNameLow = f.getName().toLowerCase().replaceAll("_", "");
+
Type fieldType = f.getGenericType();
+ Class<?> fieldTypeWrapper = ClassUtils.primitiveToWrapper(f.getType());
+
TypeVariable<?> fieldTypeGeneric = null;
if(fieldType instanceof TypeVariable) {
fieldTypeGeneric = (TypeVariable<?>) fieldType;
fieldType = materializeTypeVariable(typeHierarchy, (TypeVariable<?>)fieldType);
}
for(Method m : clazz.getMethods()) {
+ final String methodNameLow = m.getName().toLowerCase().replaceAll("_", "");
+
// check for getter
if( // The name should be "get<FieldName>" or "<fieldName>" (for scala) or "is<fieldName>" for boolean fields.
- (m.getName().toLowerCase().equals("get"+fieldNameLow) || m.getName().toLowerCase().equals("is"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow)) &&
+ (methodNameLow.equals("get"+fieldNameLow) || methodNameLow.equals("is"+fieldNameLow) || methodNameLow.equals(fieldNameLow)) &&
// no arguments for the getter
m.getParameterTypes().length == 0 &&
// return type is same as field type (or the generic variant of it)
- (m.getGenericReturnType().equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric)) )
+ (m.getGenericReturnType().equals( fieldType ) || (fieldTypeWrapper != null && m.getReturnType().equals( fieldTypeWrapper )) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric)) )
) {
if(hasGetter) {
throw new IllegalStateException("Detected more than one getter");
@@ -1322,9 +1327,9 @@ public class TypeExtractor {
hasGetter = true;
}
// check for setters (<FieldName>_$eq for scala)
- if((m.getName().toLowerCase().equals("set"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow+"_$eq")) &&
+ if((methodNameLow.equals("set"+fieldNameLow) || methodNameLow.equals(fieldNameLow+"_$eq")) &&
m.getParameterTypes().length == 1 && // one parameter of the field's type
- ( m.getGenericParameterTypes()[0].equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericParameterTypes()[0].equals(fieldTypeGeneric) ) )&&
+ (m.getGenericParameterTypes()[0].equals( fieldType ) || (fieldTypeWrapper != null && m.getParameterTypes()[0].equals( fieldTypeWrapper )) || (fieldTypeGeneric != null && m.getGenericParameterTypes()[0].equals(fieldTypeGeneric) ) )&&
// return type is void.
m.getReturnType().equals(Void.TYPE)
) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2874_17e7b423.diff |
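The fix above relies on two ideas: normalize field and method names (lower-case, underscores removed) before comparing them, and accept an accessor whose boxed return or parameter type corresponds to a primitive field via Apache Commons Lang's ClassUtils.primitiveToWrapper. A minimal standalone sketch of that matching logic follows; AvroLikeRecord and the matching loop are illustrative stand-ins, not Flink's actual TypeExtractor code.

{code}
import org.apache.commons.lang3.ClassUtils;

import java.lang.reflect.Field;
import java.lang.reflect.Method;

// Hypothetical class mimicking Avro-generated code: primitive field with boxed accessors,
// plus a field name containing an underscore.
class AvroLikeRecord {
    private double time;
    private Long conn_id;

    public Double getTime() { return time; }
    public void setTime(Double value) { this.time = value; }
    public Long getConnId() { return conn_id; }
    public void setConnId(Long value) { this.conn_id = value; }
}

public class GetterMatchingSketch {
    public static void main(String[] args) {
        for (Field f : AvroLikeRecord.class.getDeclaredFields()) {
            // normalize as in the patch: lower-case and drop underscores
            String fieldNameLow = f.getName().toLowerCase().replaceAll("_", "");
            Class<?> wrapper = ClassUtils.primitiveToWrapper(f.getType());

            for (Method m : AvroLikeRecord.class.getMethods()) {
                String methodNameLow = m.getName().toLowerCase().replaceAll("_", "");
                boolean isGetter = methodNameLow.equals("get" + fieldNameLow)
                        && m.getParameterTypes().length == 0
                        && (m.getReturnType().equals(f.getType()) || m.getReturnType().equals(wrapper));
                if (isGetter) {
                    System.out.println("getter " + m.getName() + " matches field " + f.getName());
                }
            }
        }
    }
}
{code}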
bugs-dot-jar_data_FLINK-2817_5dfc897b | ---
BugID: FLINK-2817
Summary: FileMonitoring function throws NPE when location is empty
Description: |-
{{StreamExecutionEnvironment.readFileStream()}} does not handle a missing location properly. I would suggest logging that the location is empty and continuing to run the job.
A test covering the correct behavior is also needed.
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/functions/source/FileMonitoringFunction.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/functions/source/FileMonitoringFunction.java
index 2c85650..a217923 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/functions/source/FileMonitoringFunction.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/functions/source/FileMonitoringFunction.java
@@ -17,13 +17,6 @@
package org.apache.flink.streaming.api.functions.source;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
@@ -31,6 +24,13 @@ import org.apache.flink.core.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
public class FileMonitoringFunction implements SourceFunction<Tuple3<String, Long, Long>> {
private static final long serialVersionUID = 1L;
@@ -95,16 +95,21 @@ public class FileMonitoringFunction implements SourceFunction<Tuple3<String, Lon
FileStatus[] statuses = fileSystem.listStatus(new Path(path));
- for (FileStatus status : statuses) {
- Path filePath = status.getPath();
- String fileName = filePath.getName();
- long modificationTime = status.getModificationTime();
-
- if (!isFiltered(fileName, modificationTime)) {
- files.add(filePath.toString());
- modificationTimes.put(fileName, modificationTime);
+ if (statuses == null) {
+ LOG.warn("Path does not exist: {}", path);
+ } else {
+ for (FileStatus status : statuses) {
+ Path filePath = status.getPath();
+ String fileName = filePath.getName();
+ long modificationTime = status.getModificationTime();
+
+ if (!isFiltered(fileName, modificationTime)) {
+ files.add(filePath.toString());
+ modificationTimes.put(fileName, modificationTime);
+ }
}
}
+
return files;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2817_5dfc897b.diff |
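The essence of the fix is a null check: FileSystem.listStatus() may return null for a path that does not exist, so the function logs a warning and keeps running instead of iterating blindly. A minimal sketch of that defensive pattern, assuming Flink's core FileSystem API and a hypothetical listFiles helper (System.out stands in for the logger):

{code}
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class ListFilesSketch {
    // Guard against a null listing (missing path) before iterating, as the patch does.
    static List<String> listFiles(FileSystem fs, String path) throws IOException {
        List<String> files = new ArrayList<String>();
        FileStatus[] statuses = fs.listStatus(new Path(path));
        if (statuses == null) {
            System.out.println("Path does not exist: " + path); // log and keep the job running
        } else {
            for (FileStatus status : statuses) {
                files.add(status.getPath().toString());
            }
        }
        return files;
    }
}
{code}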
bugs-dot-jar_data_FLINK-2802_88a97768 | ---
BugID: FLINK-2802
Summary: Watermark triggered operators cannot progress with cyclic flows
Description: |-
The problem is that we can easily create a cyclic watermark (time) dependency in the stream graph which will result in a deadlock for watermark triggered operators such as the `WindowOperator`.
A solution to this could be to emit a Long.MAX_VALUE watermark from the iteration sources.
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/StreamInputProcessor.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/StreamInputProcessor.java
index f50ddcd..80563b8 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/StreamInputProcessor.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/io/StreamInputProcessor.java
@@ -79,7 +79,7 @@ public class StreamInputProcessor<IN> {
private final DeserializationDelegate<StreamElement> deserializationDelegate;
- @SuppressWarnings({"unchecked", "rawtypes"})
+ @SuppressWarnings("unchecked")
public StreamInputProcessor(InputGate[] inputGates, TypeSerializer<IN> inputSerializer,
EventListener<CheckpointBarrier> checkpointListener,
CheckpointingMode checkpointMode,
@@ -125,7 +125,6 @@ public class StreamInputProcessor<IN> {
lastEmittedWatermark = Long.MIN_VALUE;
}
- @SuppressWarnings("unchecked")
public boolean processInput(OneInputStreamOperator<IN, ?> streamOperator, Object lock) throws Exception {
if (isFinished) {
return false;
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java
index 2ad2d2d..c937e51 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamIterationHead.java
@@ -23,10 +23,10 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.JobID;
+import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.runtime.io.RecordWriterOutput;
import org.apache.flink.streaming.runtime.io.BlockingQueueBroker;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,6 +64,13 @@ public class StreamIterationHead<OUT> extends OneInputStreamTask<OUT, OUT> {
Collection<RecordWriterOutput<OUT>> outputs =
(Collection<RecordWriterOutput<OUT>>) (Collection<?>) outputHandler.getOutputs();
+ // If timestamps are enabled we make sure to remove cyclic watermark dependencies
+ if (getExecutionConfig().areTimestampsEnabled()) {
+ for (RecordWriterOutput<OUT> output : outputs) {
+ output.emitWatermark(new Watermark(Long.MAX_VALUE));
+ }
+ }
+
while (running) {
StreamRecord<OUT> nextRecord = shouldWait ?
dataChannel.poll(iterationWaitTime, TimeUnit.MILLISECONDS) :
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2802_88a97768.diff |
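The patch breaks the cyclic time dependency by emitting a Long.MAX_VALUE watermark from the iteration head before it starts forwarding feedback records, so watermark-triggered operators downstream never wait on the feedback edge. A compact sketch of that idea, using a hypothetical WatermarkOutput interface rather than Flink's RecordWriterOutput:

{code}
import java.util.List;

// Hypothetical output abstraction; emitWatermark(Long.MAX_VALUE) means "no more event time
// will come from this channel", which is exactly what the feedback edge should advertise.
interface WatermarkOutput<T> {
    void emitWatermark(long timestamp);
    void emitRecord(T record);
}

class IterationHeadSketch<T> {
    void run(List<WatermarkOutput<T>> outputs, Iterable<T> feedbackRecords, boolean timestampsEnabled) {
        if (timestampsEnabled) {
            for (WatermarkOutput<T> out : outputs) {
                out.emitWatermark(Long.MAX_VALUE); // remove the cyclic watermark dependency
            }
        }
        for (T record : feedbackRecords) {
            for (WatermarkOutput<T> out : outputs) {
                out.emitRecord(record);
            }
        }
    }
}
{code}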
bugs-dot-jar_data_FLINK-996_32a003d5 | ---
BugID: FLINK-996
Summary: NullPointerException while translating union node
Description: The NepheleJobGraphGenerator throws a NullPointerException when translating
a binary union operator. The BinaryUnionPlanNode is not replaced by a NAryUnionPlanNode
and thus is still treated as a DualInputVertex. Accessing the driver code of the
BinaryUnionPlanNode causes then the NullPointerException.
diff --git a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
index bf3d6af..5a0a3e1 100644
--- a/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
+++ b/stratosphere-compiler/src/main/java/eu/stratosphere/compiler/PactCompiler.java
@@ -1195,7 +1195,7 @@ public class PactCompiler {
private static final class BinaryUnionReplacer implements Visitor<PlanNode> {
private final Set<PlanNode> seenBefore = new HashSet<PlanNode>();
-
+
@Override
public boolean preVisit(PlanNode visitable) {
if (this.seenBefore.add(visitable)) {
@@ -1217,43 +1217,20 @@ public class PactCompiler {
final Channel in2 = unionNode.getInput2();
PlanNode newUnionNode;
-
- // if any input is cached, we keep this as a binary union and do not collapse it into a
- // n-ary union
-// if (in1.getTempMode().isCached() || in2.getTempMode().isCached()) {
-// // replace this node by an explicit operator
-// Channel cached, pipelined;
-// if (in1.getTempMode().isCached()) {
-// cached = in1;
-// pipelined = in2;
-// } else {
-// cached = in2;
-// pipelined = in1;
-// }
-//
-// newUnionNode = new DualInputPlanNode(unionNode.getOriginalOptimizerNode(), cached, pipelined,
-// DriverStrategy.UNION_WITH_CACHED);
-// newUnionNode.initProperties(unionNode.getGlobalProperties(), new LocalProperties());
-//
-// in1.setTarget(newUnionNode);
-// in2.setTarget(newUnionNode);
-// } else {
- // collect the union inputs to collapse this operator with
- // its collapsed predecessors. check whether an input is materialized to prevent
- // collapsing
- List<Channel> inputs = new ArrayList<Channel>();
- collect(in1, inputs);
- collect(in2, inputs);
-
- newUnionNode = new NAryUnionPlanNode(unionNode.getOptimizerNode(), inputs, unionNode.getGlobalProperties());
-
- // adjust the input channels to have their target point to the new union node
- for (Channel c : inputs) {
- c.setTarget(newUnionNode);
- }
-// }
-
- unionNode.getOutgoingChannels().get(0).swapUnionNodes(newUnionNode);
+
+ List<Channel> inputs = new ArrayList<Channel>();
+ collect(in1, inputs);
+ collect(in2, inputs);
+
+ newUnionNode = new NAryUnionPlanNode(unionNode.getOptimizerNode(), inputs, unionNode.getGlobalProperties());
+
+ for (Channel c : inputs) {
+ c.setTarget(newUnionNode);
+ }
+
+ for(Channel channel : unionNode.getOutgoingChannels()){
+ channel.swapUnionNodes(newUnionNode);
+ }
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-996_32a003d5.diff |
bugs-dot-jar_data_FLINK-3513_d90672fd | ---
BugID: FLINK-3513
Summary: Fix interplay of automatic Operator UID and Changing name of WindowOperator
Description: |-
WindowOperator can have a changing name because it has the TypeSerializer.toString() output in its name. For type serializers that don't implement toString(), this means that the name changes.
This means that savepoint restore does not work for the automatically generated UID.
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
index da46424..e3e1ac6 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
@@ -736,8 +736,6 @@ public class StreamingJobGraphGenerator {
hasher.putInt(node.getParallelism());
- hasher.putString(node.getOperatorName(), Charset.forName("UTF-8"));
-
if (node.getOperator() instanceof AbstractUdfStreamOperator) {
String udfClassName = ((AbstractUdfStreamOperator<?, ?>) node.getOperator())
.getUserFunction().getClass().getName();
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3513_d90672fd.diff |
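Conceptually, the fix makes the automatically generated operator UID depend only on attributes that are stable across runs (node id, parallelism, UDF class name) and drops the operator display name, which can embed a changing TypeSerializer.toString(). The sketch below illustrates that principle with Guava hashing; it is not the exact field set or hash function used by StreamingJobGraphGenerator.

{code}
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

import java.nio.charset.StandardCharsets;

public class StableUidSketch {
    // Hash only run-stable attributes; the (possibly changing) operator display name is omitted.
    static byte[] stableHash(int nodeId, int parallelism, String udfClassName) {
        Hasher hasher = Hashing.murmur3_128().newHasher();
        hasher.putInt(nodeId);
        hasher.putInt(parallelism);
        hasher.putString(udfClassName, StandardCharsets.UTF_8);
        return hasher.hash().asBytes();
    }
}
{code}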
bugs-dot-jar_data_FLINK-1985_495a5c3c | ---
BugID: FLINK-1985
Summary: Streaming does not correctly forward ExecutionConfig to runtime
Description: |-
When running streaming jobs you see this log entry:
"Environment did not contain an ExecutionConfig - using a default config."
Some parts of the code use an ExecutionConfig at runtime. This will be a default config without registered serializers and other user settings.
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
index 6bad4c8..d16ee58 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/graph/StreamingJobGraphGenerator.java
@@ -17,6 +17,7 @@
package org.apache.flink.streaming.api.graph;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -26,6 +27,7 @@ import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.lang.StringUtils;
+import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.AbstractJobVertex;
import org.apache.flink.runtime.jobgraph.DistributionPattern;
@@ -43,6 +45,7 @@ import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner;
import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner.PartitioningStrategy;
import org.apache.flink.streaming.runtime.tasks.StreamIterationHead;
import org.apache.flink.streaming.runtime.tasks.StreamIterationTail;
+import org.apache.flink.util.InstantiationUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -93,6 +96,12 @@ public class StreamingJobGraphGenerator {
configureCheckpointing();
+ try {
+ InstantiationUtil.writeObjectToConfig(this.streamGraph.getExecutionConfig(), this.jobGraph.getJobConfiguration(), ExecutionConfig.CONFIG_KEY);
+ } catch (IOException e) {
+ throw new RuntimeException("Config object could not be written to Job Configuration: ", e);
+ }
+
return jobGraph;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1985_495a5c3c.diff |
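The fix serializes the ExecutionConfig into the job configuration under a well-known key when the JobGraph is generated, so the runtime can restore the user's settings instead of falling back to a default config. The sketch below shows only the round-trip idea, with plain Java serialization as a stand-in; it is not Flink's InstantiationUtil.

{code}
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class ConfigRoundTripSketch {
    // Client side: turn the (serializable) config object into bytes stored in the job configuration.
    static byte[] serialize(Serializable config) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(config);
        }
        return bos.toByteArray();
    }

    // Runtime side: read the bytes back instead of creating a default config.
    static Object deserialize(byte[] bytes) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            return ois.readObject();
        }
    }
}
{code}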
bugs-dot-jar_data_FLINK-1922_ccd574a4 | ---
BugID: FLINK-1922
Summary: Failed task deployment causes NPE on input split assignment
Description: "The input split assignment code is returning {null} if the Task has
failed, which is causing a NPE.\n\nWe should improve our error handling / reporting
in that situation.\n\n{code}\n13:12:31,002 INFO org.apache.flink.yarn.ApplicationMaster$$anonfun$2$$anon$1
\ - Status of job c0b47ce41e9a85a628a628a3977705ef (Flink Java Job at Tue Apr
21 13:10:36 UTC 2015) changed to FAILING Cannot deploy task - TaskManager not responding..\n....\n13:12:47,591
ERROR org.apache.flink.runtime.operators.RegularPactTask - Error in task
code: CHAIN DataSource (at userMethod (org.apache.flink.api.java.io.AvroInputFormat))
-> FlatMap (FlatMap at main(UserClass.java:111)) (20/50)\njava.lang.RuntimeException:
Requesting the next InputSplit failed.\n\tat org.apache.flink.runtime.taskmanager.TaskInputSplitProvider.getNextInputSplit(TaskInputSplitProvider.java:88)\n\tat
org.apache.flink.runtime.operators.DataSourceTask$1.hasNext(DataSourceTask.java:337)\n\tat
org.apache.flink.runtime.operators.DataSourceTask.invoke(DataSourceTask.java:136)\n\tat
org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:217)\n\tat
java.lang.Thread.run(Thread.java:744)\nCaused by: java.lang.NullPointerException\n\tat
java.io.ByteArrayInputStream.<init>(ByteArrayInputStream.java:106)\n\tat org.apache.flink.util.InstantiationUtil.deserializeObject(InstantiationUtil.java:301)\n\tat
org.apache.flink.runtime.taskmanager.TaskInputSplitProvider.getNextInputSplit(TaskInputSplitProvider.java:83)\n\t...
4 more\n13:12:47,595 INFO org.apache.flink.runtime.taskmanager.Task -
CHAIN DataSource (at SomeMethod (org.apache.flink.api.java.io.AvroInputFormat))
-> FlatMap (FlatMap at main(SomeClass.java:111)) (20/50) switched to FAILED : java.lang.RuntimeException:
Requesting the next InputSplit failed.\n\tat org.apache.flink.runtime.taskmanager.TaskInputSplitProvider.getNextInputSplit(TaskInputSplitProvider.java:88)\n\tat
org.apache.flink.runtime.operators.DataSourceTask$1.hasNext(DataSourceTask.java:337)\n\tat
org.apache.flink.runtime.operators.DataSourceTask.invoke(DataSourceTask.java:136)\n\tat
org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:217)\n\tat
java.lang.Thread.run(Thread.java:744)\nCaused by: java.lang.NullPointerException\n\tat
java.io.ByteArrayInputStream.<init>(ByteArrayInputStream.java:106)\n\tat org.apache.flink.util.InstantiationUtil.deserializeObject(InstantiationUtil.java:301)\n\tat
org.apache.flink.runtime.taskmanager.TaskInputSplitProvider.getNextInputSplit(TaskInputSplitProvider.java:83)\n\t...
4 more\n{code}"
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index 3ba378c..baed947 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -328,7 +328,7 @@ public class Execution implements Serializable {
// register this execution at the execution graph, to receive call backs
vertex.getExecutionGraph().registerExecution(this);
- Instance instance = slot.getInstance();
+ final Instance instance = slot.getInstance();
Future<Object> deployAction = Patterns.ask(instance.getTaskManager(),
new SubmitTask(deployment), new Timeout(timeout));
@@ -338,7 +338,9 @@ public class Execution implements Serializable {
public void onComplete(Throwable failure, Object success) throws Throwable {
if (failure != null) {
if (failure instanceof TimeoutException) {
- markFailed(new Exception("Cannot deploy task - TaskManager not responding.", failure));
+ markFailed(new Exception(
+ "Cannot deploy task - TaskManager " + instance + " not responding.",
+ failure));
}
else {
markFailed(failure);
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskInputSplitProvider.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskInputSplitProvider.java
index 1bdc346..5a69850 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskInputSplitProvider.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/TaskInputSplitProvider.java
@@ -68,10 +68,6 @@ public class TaskInputSplitProvider implements InputSplitProvider {
final Object result = Await.result(response, timeout.duration());
- if (result == null) {
- return null;
- }
-
if(!(result instanceof JobManagerMessages.NextInputSplit)){
throw new RuntimeException("RequestNextInputSplit requires a response of type " +
"NextInputSplit. Instead response is of type " + result.getClass() + ".");
@@ -80,9 +76,14 @@ public class TaskInputSplitProvider implements InputSplitProvider {
(JobManagerMessages.NextInputSplit) result;
byte[] serializedData = nextInputSplit.splitData();
- Object deserialized = InstantiationUtil.deserializeObject(serializedData,
- usercodeClassLoader);
- return (InputSplit) deserialized;
+
+ if(serializedData == null) {
+ return null;
+ } else {
+ Object deserialized = InstantiationUtil.deserializeObject(serializedData,
+ usercodeClassLoader);
+ return (InputSplit) deserialized;
+ }
}
} catch (Exception e) {
throw new RuntimeException("Requesting the next InputSplit failed.", e);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1922_ccd574a4.diff |
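On the TaskManager side, the fix changes where null is allowed: the JobManager response itself must be a NextInputSplit message, but the split data inside it may be null, meaning "no more input splits", and only non-null data is deserialized. A small sketch of that control flow, with a hypothetical Deserializer stand-in for InstantiationUtil.deserializeObject:

{code}
public class NextInputSplitSketch {

    // Hypothetical stand-in for InstantiationUtil.deserializeObject(bytes, classLoader).
    interface Deserializer<T> {
        T deserialize(byte[] bytes) throws Exception;
    }

    // Null split data now means "no further input splits"; anything else is deserialized.
    static <T> T deserializeOrNull(byte[] serializedData, Deserializer<T> deserializer) throws Exception {
        if (serializedData == null) {
            return null;
        }
        return deserializer.deserialize(serializedData);
    }
}
{code}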
bugs-dot-jar_data_FLINK-1382_9cd96df7 | ---
BugID: FLINK-1382
Summary: Void is not added to TypeInfoParser
Description: |-
List l = Arrays.asList(new Tuple2<Void,Long>(null, 1L));
TypeInformation t = TypeInfoParser.parse("Tuple2<Void,Long>");
DataSet<Tuple2<Void,Long>> data = env.fromCollection(l, t);
data.print();
Throws:
Exception in thread "main" java.lang.IllegalArgumentException: String could not be parsed: Class 'Void' could not be found for use as custom object. Please note that inner classes must be declared static.
at org.apache.flink.api.java.typeutils.TypeInfoParser.parse(TypeInfoParser.java:90)
at org.apache.flink.hadoopcompatibility.mapreduce.example.ParquetOutput.main(ParquetOutput.java:92)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:134)
Caused by: java.lang.IllegalArgumentException: Class 'Void' could not be found for use as custom object. Please note that inner classes must be declared static.
at org.apache.flink.api.java.typeutils.TypeInfoParser.parse(TypeInfoParser.java:290)
at org.apache.flink.api.java.typeutils.TypeInfoParser.parse(TypeInfoParser.java:133)
at org.apache.flink.api.java.typeutils.TypeInfoParser.parse(TypeInfoParser.java:88)
... 6 more
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeInfoParser.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeInfoParser.java
index e9d5dac..98373da 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeInfoParser.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeInfoParser.java
@@ -40,8 +40,9 @@ public class TypeInfoParser {
private static final Pattern writablePattern = Pattern.compile("^((" + WRITABLE_PACKAGE.replaceAll("\\.", "\\\\.") + "\\.)?Writable)<([^\\s,>]*)(,|>|$)");
private static final Pattern enumPattern = Pattern.compile("^((java\\.lang\\.)?Enum)<([^\\s,>]*)(,|>|$)");
private static final Pattern basicTypePattern = Pattern
- .compile("^((java\\.lang\\.)?(String|Integer|Byte|Short|Character|Double|Float|Long|Boolean))(,|>|$)");
- private static final Pattern basicType2Pattern = Pattern.compile("^(int|byte|short|char|double|float|long|boolean)(,|>|$)");
+ .compile("^((java\\.lang\\.)?(String|Integer|Byte|Short|Character|Double|Float|Long|Boolean|Void))(,|>|$)");
+ private static final Pattern basicTypeDatePattern = Pattern.compile("^((java\\.util\\.)?Date)(,|>|$)");
+ private static final Pattern basicType2Pattern = Pattern.compile("^(int|byte|short|char|double|float|long|boolean|void)(,|>|$)");
private static final Pattern valueTypePattern = Pattern.compile("^((" + VALUE_PACKAGE.replaceAll("\\.", "\\\\.")
+ "\\.)?(String|Int|Byte|Short|Char|Double|Float|Long|Boolean|List|Map|Null))Value(,|>|$)");
private static final Pattern basicArrayTypePattern = Pattern
@@ -105,6 +106,7 @@ public class TypeInfoParser {
final Matcher enumMatcher = enumPattern.matcher(infoString);
final Matcher basicTypeMatcher = basicTypePattern.matcher(infoString);
+ final Matcher basicTypeDateMatcher = basicTypeDatePattern.matcher(infoString);
final Matcher basicType2Matcher = basicType2Pattern.matcher(infoString);
final Matcher valueTypeMatcher = valueTypePattern.matcher(infoString);
@@ -194,6 +196,19 @@ public class TypeInfoParser {
}
returnType = BasicTypeInfo.getInfoFor(clazz);
}
+ // special basic type "Date"
+ else if (basicTypeDateMatcher.find()) {
+ String className = basicTypeDateMatcher.group(1);
+ sb.delete(0, className.length());
+ Class<?> clazz;
+ // check if fully qualified
+ if (className.startsWith("java.util")) {
+ clazz = Class.forName(className);
+ } else {
+ clazz = Class.forName("java.util." + className);
+ }
+ returnType = BasicTypeInfo.getInfoFor(clazz);
+ }
// basic type of primitives
else if (basicType2Matcher.find()) {
String className = basicType2Matcher.group(1);
@@ -216,6 +231,8 @@ public class TypeInfoParser {
clazz = Long.class;
} else if (className.equals("boolean")) {
clazz = Boolean.class;
+ } else if (className.equals("void")) {
+ clazz = Void.class;
}
returnType = BasicTypeInfo.getInfoFor(clazz);
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1382_9cd96df7.diff |
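The core of the patch is a widened regular expression that accepts Void (and primitive void) as a basic type. A quick standalone check of the updated pattern, copied from the diff:

{code}
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class BasicTypePatternSketch {
    private static final Pattern BASIC_TYPE = Pattern.compile(
            "^((java\\.lang\\.)?(String|Integer|Byte|Short|Character|Double|Float|Long|Boolean|Void))(,|>|$)");

    public static void main(String[] args) {
        Matcher m = BASIC_TYPE.matcher("Void,Long>");
        System.out.println(m.find() ? "matched: " + m.group(1) : "no match"); // prints "matched: Void"
    }
}
{code}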
bugs-dot-jar_data_FLINK-2800_b654e989 | ---
BugID: FLINK-2800
Summary: kryo serialization problem
Description: "Performing a cross of two dataset of POJOs I have got the exception
below. The first time I run the process, there was no problem. When I run it the
second time, I have got the exception. My guess is that it could be a race condition
related to the reuse of the Kryo serializer object. However, it could also be \"a
bug where type registrations are not properly forwarded to all Serializers\", as
suggested by Stephan.\n\n------------------------------------------------------------------------\n2015-10-01
18:18:21 INFO JobClient:161 - 10/01/2015 18:18:21\tCross(Cross at main(FlinkMongoHadoop2LinkPOI2CDA.java:160))(3/4)
switched to FAILED \ncom.esotericsoftware.kryo.KryoException: Encountered unregistered
class ID: 114\n\tat com.esotericsoftware.kryo.util.DefaultClassResolver.readClass(DefaultClassResolver.java:119)\n\tat
com.esotericsoftware.kryo.Kryo.readClass(Kryo.java:641)\n\tat com.esotericsoftware.kryo.Kryo.readClassAndObject(Kryo.java:752)\n\tat
org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer.deserialize(KryoSerializer.java:210)\n\tat
org.apache.flink.api.java.typeutils.runtime.TupleSerializer.deserialize(TupleSerializer.java:127)\n\tat
org.apache.flink.api.java.typeutils.runtime.TupleSerializer.deserialize(TupleSerializer.java:30)\n\tat
org.apache.flink.runtime.operators.resettable.AbstractBlockResettableIterator.getNextRecord(AbstractBlockResettableIterator.java:180)\n\tat
org.apache.flink.runtime.operators.resettable.BlockResettableMutableObjectIterator.next(BlockResettableMutableObjectIterator.java:111)\n\tat
org.apache.flink.runtime.operators.CrossDriver.runBlockedOuterSecond(CrossDriver.java:309)\n\tat
org.apache.flink.runtime.operators.CrossDriver.run(CrossDriver.java:162)\n\tat org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:489)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:354)\n\tat
org.apache.flink.runtime.taskmanager.Task.run(Task.java:581)\n\tat java.lang.Thread.run(Thread.java:745)"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
index f825fc6..8549e26 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
@@ -58,7 +58,7 @@ import java.util.Objects;
* @param <T> The type to be serialized.
*/
public class KryoSerializer<T> extends TypeSerializer<T> {
-
+
private static final long serialVersionUID = 3L;
// ------------------------------------------------------------------------
@@ -82,7 +82,7 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
private transient Input input;
private transient Output output;
-
+
// ------------------------------------------------------------------------
public KryoSerializer(Class<T> type, ExecutionConfig executionConfig){
@@ -182,11 +182,22 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
previousOut = target;
}
+ // Sanity check: Make sure that the output is cleared/has been flushed by the last call
+ // otherwise data might be written multiple times in case of a previous EOFException
+ if (output.position() != 0) {
+ throw new IllegalStateException("The Kryo Output still contains data from a previous " +
+ "serialize call. It has to be flushed or cleared at the end of the serialize call.");
+ }
+
try {
kryo.writeClassAndObject(output, record);
output.flush();
}
catch (KryoException ke) {
+ // make sure that the Kryo output buffer is cleared in case that we can recover from
+ // the exception (e.g. EOFException which denotes buffer full)
+ output.clear();
+
Throwable cause = ke.getCause();
if (cause instanceof EOFException) {
throw (EOFException) cause;
@@ -212,7 +223,7 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
} catch (KryoException ke) {
Throwable cause = ke.getCause();
- if(cause instanceof EOFException) {
+ if (cause instanceof EOFException) {
throw (EOFException) cause;
} else {
throw ke;
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2800_b654e989.diff |
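The defensive pattern introduced here is: assert the Kryo Output buffer is empty before serializing, and clear it whenever a KryoException escapes (for example one wrapping an EOFException that signals a full buffer), so that a retried serialize call cannot flush stale bytes a second time. A minimal sketch against the Kryo API:

{code}
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.io.Output;

import java.io.EOFException;

public class KryoWriteSketch {
    static void write(Kryo kryo, Output output, Object record) throws EOFException {
        // Sanity check: a non-empty buffer means a previous call did not flush or clear it.
        if (output.position() != 0) {
            throw new IllegalStateException("Output still contains data from a previous serialize call");
        }
        try {
            kryo.writeClassAndObject(output, record);
            output.flush();
        } catch (KryoException ke) {
            output.clear(); // drop partially written data so a retry starts from a clean buffer
            if (ke.getCause() instanceof EOFException) {
                throw (EOFException) ke.getCause();
            }
            throw ke;
        }
    }
}
{code}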
bugs-dot-jar_data_FLINK-2658_ce68cbd9 | ---
BugID: FLINK-2658
Summary: fieldsGrouping for multiple output streams fails
Description: "If a Spout or Bolt declares multiple output streams and another Bolt
connects to one of those streams via \"fieldsGrouping\", the call to {{FlinkTopologyBuilder.createTopology()}}
fails with the following exception:\n\n{noformat}\norg.apache.flink.api.common.InvalidProgramException:
Specifying keys via field positions is only valid for tuple data types. Type: PojoType<org.apache.flink.stormcompatibility.util.SplitStreamType,
fields = [streamId: String, value: GenericType<java.lang.Object>]>\n\tat org.apache.flink.api.java.operators.Keys$ExpressionKeys.<init>(Keys.java:209)\n\tat
org.apache.flink.api.java.operators.Keys$ExpressionKeys.<init>(Keys.java:203)\n\tat
org.apache.flink.streaming.api.datastream.DataStream.groupBy(DataStream.java:285)\n\tat
org.apache.flink.stormcompatibility.api.FlinkTopologyBuilder.createTopology(FlinkTopologyBuilder.java:200)\n\tat
org.apache.flink.stormcompatibility.api.FlinkTopologyBuilderTest.testFieldsGroupingOnMultipleBoltOutputStreams(FlinkTopologyBuilderTest.java:73)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat
java.lang.reflect.Method.invoke(Method.java:606)\n\tat org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)\n\tat
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)\n\tat
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)\n\tat
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)\n\tat
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:271)\n\tat org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:70)\n\tat
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)\n\tat
org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)\n\tat org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)\n\tat
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)\n\tat org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)\n\tat
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)\n\tat org.junit.runners.ParentRunner.run(ParentRunner.java:309)\n\tat
org.eclipse.jdt.internal.junit4.runner.JUnit4TestReference.run(JUnit4TestReference.java:50)\n\tat
org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197)\n{noformat}\n\nFix:
either introduce a mapper that \"flattens\" the {{SplitStreamType}} into the regular
tuple type that is nested inside, or provide a custom {{KeySelector}}."
diff --git a/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/FlinkTopologyBuilder.java b/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/FlinkTopologyBuilder.java
index a739c23..e4d880f 100644
--- a/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/FlinkTopologyBuilder.java
+++ b/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/FlinkTopologyBuilder.java
@@ -78,8 +78,8 @@ public class FlinkTopologyBuilder {
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public FlinkTopology createTopology() {
- final StormTopology stormTopolgoy = this.stormBuilder.createTopology();
- final FlinkTopology env = new FlinkTopology(stormTopolgoy);
+ final StormTopology stormTopology = this.stormBuilder.createTopology();
+ final FlinkTopology env = new FlinkTopology(stormTopology);
env.setParallelism(1);
final HashMap<String, HashMap<String, DataStream>> availableInputs = new HashMap<String, HashMap<String, DataStream>>();
@@ -121,7 +121,7 @@ public class FlinkTopologyBuilder {
availableInputs.put(spoutId, outputStreams);
int dop = 1;
- final ComponentCommon common = stormTopolgoy.get_spouts().get(spoutId).get_common();
+ final ComponentCommon common = stormTopology.get_spouts().get(spoutId).get_common();
if (common.is_set_parallelism_hint()) {
dop = common.get_parallelism_hint();
source.setParallelism(dop);
@@ -155,7 +155,7 @@ public class FlinkTopologyBuilder {
final String boltId = bolt.getKey();
final IRichBolt userBolt = bolt.getValue();
- final ComponentCommon common = stormTopolgoy.get_bolts().get(boltId).get_common();
+ final ComponentCommon common = stormTopology.get_bolts().get(boltId).get_common();
Set<Entry<GlobalStreamId, Grouping>> unprocessedInputs = unprocessdInputsPerBolt.get(boltId);
if (unprocessedInputs == null) {
@@ -194,9 +194,17 @@ public class FlinkTopologyBuilder {
final List<String> fields = grouping.get_fields();
if (fields.size() > 0) {
FlinkOutputFieldsDeclarer prodDeclarer = this.declarers.get(producerId);
- inputStream = inputStream.groupBy(prodDeclarer
- .getGroupingFieldIndexes(inputStreamId,
- grouping.get_fields()));
+ if (producer.size() == 1) {
+ inputStream = inputStream.groupBy(prodDeclarer
+ .getGroupingFieldIndexes(inputStreamId,
+ grouping.get_fields()));
+ } else {
+ inputStream = inputStream
+ .groupBy(new SplitStreamTypeKeySelector(
+ prodDeclarer.getGroupingFieldIndexes(
+ inputStreamId,
+ grouping.get_fields())));
+ }
} else {
inputStream = inputStream.global();
}
diff --git a/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/SplitStreamTypeKeySelector.java b/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/SplitStreamTypeKeySelector.java
new file mode 100644
index 0000000..30227b8
--- /dev/null
+++ b/flink-contrib/flink-storm-compatibility/flink-storm-compatibility-core/src/main/java/org/apache/flink/stormcompatibility/api/SplitStreamTypeKeySelector.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.flink.stormcompatibility.api;
+
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.stormcompatibility.util.SplitStreamType;
+import org.apache.flink.streaming.util.keys.KeySelectorUtil;
+import org.apache.flink.streaming.util.keys.KeySelectorUtil.ArrayKeySelector;
+
+/**
+ * {@link SplitStreamTypeKeySelector} is a specific grouping key selector for streams that are selected via
+ * {@link FlinkStormStreamSelector} from a Spout or Bolt that declares multiple output streams.
+ *
+ * It extracts the wrapped {@link Tuple} type from the {@link SplitStreamType} tuples and applies a regular
+ * {@link ArrayKeySelector} on it.
+ */
+public class SplitStreamTypeKeySelector implements KeySelector<SplitStreamType<Tuple>, Tuple> {
+ private static final long serialVersionUID = 4672434660037669254L;
+
+ private final ArrayKeySelector<Tuple> selector;
+
+ public SplitStreamTypeKeySelector(int... fields) {
+ this.selector = new KeySelectorUtil.ArrayKeySelector<Tuple>(fields);
+ }
+
+ @Override
+ public Tuple getKey(SplitStreamType<Tuple> value) throws Exception {
+ return selector.getKey(value.value);
+ }
+
+}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2658_ce68cbd9.diff |
bugs-dot-jar_data_FLINK-1848_7164b2b6 | ---
BugID: FLINK-1848
Summary: Paths containing a Windows drive letter cannot be used in FileOutputFormats
Description: |-
Paths that contain a Windows drive letter such as {{file:///c:/my/directory}} cannot be used as output path for {{FileOutputFormat}}.
If done, the following exception is thrown:
{code}
Caused by: java.lang.IllegalArgumentException: java.net.URISyntaxException: Relative path in absolute URI: file:c:
at org.apache.flink.core.fs.Path.initialize(Path.java:242)
at org.apache.flink.core.fs.Path.<init>(Path.java:225)
at org.apache.flink.core.fs.Path.<init>(Path.java:138)
at org.apache.flink.core.fs.local.LocalFileSystem.pathToFile(LocalFileSystem.java:147)
at org.apache.flink.core.fs.local.LocalFileSystem.mkdirs(LocalFileSystem.java:232)
at org.apache.flink.core.fs.local.LocalFileSystem.mkdirs(LocalFileSystem.java:233)
at org.apache.flink.core.fs.local.LocalFileSystem.mkdirs(LocalFileSystem.java:233)
at org.apache.flink.core.fs.local.LocalFileSystem.mkdirs(LocalFileSystem.java:233)
at org.apache.flink.core.fs.local.LocalFileSystem.mkdirs(LocalFileSystem.java:233)
at org.apache.flink.core.fs.FileSystem.initOutPathLocalFS(FileSystem.java:603)
at org.apache.flink.api.common.io.FileOutputFormat.open(FileOutputFormat.java:233)
at org.apache.flink.api.java.io.CsvOutputFormat.open(CsvOutputFormat.java:158)
at org.apache.flink.runtime.operators.DataSinkTask.invoke(DataSinkTask.java:183)
at org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:217)
at java.lang.Thread.run(Unknown Source)
Caused by: java.net.URISyntaxException: Relative path in absolute URI: file:c:
at java.net.URI.checkPath(Unknown Source)
at java.net.URI.<init>(Unknown Source)
at org.apache.flink.core.fs.Path.initialize(Path.java:240)
... 14 more
{code}
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
index 75155eb..c47bc0d 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
@@ -260,7 +260,11 @@ public class Path implements IOReadableWritable, Serializable {
path = path.replaceAll("/+", "/");
// remove tailing separator
- if(!path.equals(SEPARATOR) && path.endsWith(SEPARATOR)) {
+ if(!path.equals(SEPARATOR) && // UNIX root path
+ !path.matches("/\\p{Alpha}+:/") && // Windows root path
+ path.endsWith(SEPARATOR))
+ {
+ // remove tailing slash
path = path.substring(0, path.length() - SEPARATOR.length());
}
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
index 2313a41..9dd9e30 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/local/LocalFileSystem.java
@@ -228,8 +228,13 @@ public class LocalFileSystem extends FileSystem {
*/
public boolean mkdirs(final Path f) throws IOException {
- final Path parent = f.getParent();
final File p2f = pathToFile(f);
+
+ if(p2f.isDirectory()) {
+ return true;
+ }
+
+ final Path parent = f.getParent();
return (parent == null || mkdirs(parent)) && (p2f.mkdir() || p2f.isDirectory());
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1848_7164b2b6.diff |
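The path normalization fix hinges on one extra regular expression: a Windows root path such as /c:/ must keep its trailing separator, because stripping it produces the invalid URI file:c:. A tiny check of that regex, copied from the patch:

{code}
public class WindowsRootPathSketch {
    public static void main(String[] args) {
        // Windows root path: the trailing slash must be kept.
        System.out.println("/c:/".matches("/\\p{Alpha}+:/"));            // true
        // Ordinary directory: the trailing slash may still be stripped.
        System.out.println("/my/directory/".matches("/\\p{Alpha}+:/"));  // false
    }
}
{code}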
bugs-dot-jar_data_FLINK-1290_45fb6d82 | ---
BugID: FLINK-1290
Summary: Optimizer prunes all candidates when unable to reuse sort properties
Description: |-
Programs fail with an exception that no plan could be created.
The bug can be reproduced by the following code:
{code}
val data : DataSet[(Long, Long)] = ...
data.distinct(0, 1).groupBy(0).reduceGroup(...)
{code}
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/operators/AbstractJoinDescriptor.java b/flink-compiler/src/main/java/org/apache/flink/compiler/operators/AbstractJoinDescriptor.java
index cb0e61c..d8f7746 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/operators/AbstractJoinDescriptor.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/operators/AbstractJoinDescriptor.java
@@ -19,6 +19,7 @@
package org.apache.flink.compiler.operators;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import org.apache.flink.api.common.functions.Partitioner;
@@ -62,6 +63,33 @@ public abstract class AbstractJoinDescriptor extends OperatorDescriptorDual {
if (repartitionAllowed) {
// partition both (hash or custom)
+ if (this.customPartitioner == null) {
+
+ // we accept compatible partitionings of any type
+ RequestedGlobalProperties partitioned_left_any = new RequestedGlobalProperties();
+ RequestedGlobalProperties partitioned_right_any = new RequestedGlobalProperties();
+ partitioned_left_any.setAnyPartitioning(this.keys1);
+ partitioned_right_any.setAnyPartitioning(this.keys2);
+ pairs.add(new GlobalPropertiesPair(partitioned_left_any, partitioned_right_any));
+
+ // we also explicitly add hash partitioning, as a fallback, if the any-pairs do not match
+ RequestedGlobalProperties partitioned_left_hash = new RequestedGlobalProperties();
+ RequestedGlobalProperties partitioned_right_hash = new RequestedGlobalProperties();
+ partitioned_left_hash.setHashPartitioned(this.keys1);
+ partitioned_right_hash.setHashPartitioned(this.keys2);
+ pairs.add(new GlobalPropertiesPair(partitioned_left_hash, partitioned_right_hash));
+ }
+ else {
+ RequestedGlobalProperties partitioned_left = new RequestedGlobalProperties();
+ partitioned_left.setCustomPartitioned(this.keys1, this.customPartitioner);
+
+ RequestedGlobalProperties partitioned_right = new RequestedGlobalProperties();
+ partitioned_right.setCustomPartitioned(this.keys2, this.customPartitioner);
+
+ return Collections.singletonList(new GlobalPropertiesPair(partitioned_left, partitioned_right));
+ }
+
+
RequestedGlobalProperties partitioned1 = new RequestedGlobalProperties();
if (customPartitioner == null) {
partitioned1.setAnyPartitioning(this.keys1);
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/operators/CoGroupDescriptor.java b/flink-compiler/src/main/java/org/apache/flink/compiler/operators/CoGroupDescriptor.java
index 14f40f3..bc83c51 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/operators/CoGroupDescriptor.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/operators/CoGroupDescriptor.java
@@ -18,6 +18,7 @@
package org.apache.flink.compiler.operators;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -98,21 +99,29 @@ public class CoGroupDescriptor extends OperatorDescriptorDual {
@Override
protected List<GlobalPropertiesPair> createPossibleGlobalProperties() {
- RequestedGlobalProperties partitioned1 = new RequestedGlobalProperties();
if (this.customPartitioner == null) {
- partitioned1.setAnyPartitioning(this.keys1);
- } else {
- partitioned1.setCustomPartitioned(this.keys1, this.customPartitioner);
+ RequestedGlobalProperties partitioned_left_any = new RequestedGlobalProperties();
+ RequestedGlobalProperties partitioned_left_hash = new RequestedGlobalProperties();
+ partitioned_left_any.setAnyPartitioning(this.keys1);
+ partitioned_left_hash.setHashPartitioned(this.keys1);
+
+ RequestedGlobalProperties partitioned_right_any = new RequestedGlobalProperties();
+ RequestedGlobalProperties partitioned_right_hash = new RequestedGlobalProperties();
+ partitioned_right_any.setAnyPartitioning(this.keys2);
+ partitioned_right_hash.setHashPartitioned(this.keys2);
+
+ return Arrays.asList(new GlobalPropertiesPair(partitioned_left_any, partitioned_right_any),
+ new GlobalPropertiesPair(partitioned_left_hash, partitioned_right_hash));
}
-
- RequestedGlobalProperties partitioned2 = new RequestedGlobalProperties();
- if (this.customPartitioner == null) {
- partitioned2.setAnyPartitioning(this.keys2);
- } else {
- partitioned2.setCustomPartitioned(this.keys2, this.customPartitioner);
+ else {
+ RequestedGlobalProperties partitioned_left = new RequestedGlobalProperties();
+ partitioned_left.setCustomPartitioned(this.keys1, this.customPartitioner);
+
+ RequestedGlobalProperties partitioned_right = new RequestedGlobalProperties();
+ partitioned_right.setCustomPartitioned(this.keys2, this.customPartitioner);
+
+ return Collections.singletonList(new GlobalPropertiesPair(partitioned_left, partitioned_right));
}
-
- return Collections.singletonList(new GlobalPropertiesPair(partitioned1, partitioned2));
}
@Override
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1290_45fb6d82.diff |
bugs-dot-jar_data_FLINK-3342_8e3e2f8f | ---
BugID: FLINK-3342
Summary: 'Operator checkpoint statistics state size overflow '
Description: State sizes ({{long}}) of checkpoint stats overflow when summing them
up per operator, because the sum is stored in an {{int}}.
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
index fba3f22..5ee4fc3 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
@@ -136,7 +136,7 @@ public class SimpleCheckpointStatsTracker implements CheckpointStatsTracker {
}
synchronized (statsLock) {
- int overallStateSize = 0;
+ long overallStateSize = 0;
// Operator stats
Map<JobVertexID, long[][]> statsForSubTasks = new HashMap<>();
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3342_8e3e2f8f.diff |
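The one-word fix (int to long) avoids a classic narrowing overflow when per-subtask state sizes are summed. A tiny illustration of the failure mode:

{code}
public class OverflowSketch {
    public static void main(String[] args) {
        long[] stateSizes = {1_500_000_000L, 1_500_000_000L}; // ~1.5 GB per subtask

        int intSum = 0;
        long longSum = 0;
        for (long size : stateSizes) {
            intSum += size;   // compound assignment silently narrows to int and wraps around
            longSum += size;
        }
        System.out.println(intSum);  // negative: -1294967296
        System.out.println(longSum); // correct: 3000000000
    }
}
{code}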
bugs-dot-jar_data_FLINK-1951_adb321d6 | ---
BugID: FLINK-1951
Summary: NullPointerException in DeltaIteration when no ForwardedFields
Description: "The following exception is thrown by the Connected Components example,
if the @ForwardedFieldsFirst(\"*\") annotation from the ComponentIdFilter join is
removed:\n\nCaused by: java.lang.NullPointerException\n\tat org.apache.flink.examples.java.graph.ConnectedComponents$ComponentIdFilter.join(ConnectedComponents.java:186)\n\tat
org.apache.flink.examples.java.graph.ConnectedComponents$ComponentIdFilter.join(ConnectedComponents.java:1)\n\tat
org.apache.flink.runtime.operators.JoinWithSolutionSetSecondDriver.run(JoinWithSolutionSetSecondDriver.java:198)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:496)\n\tat
org.apache.flink.runtime.iterative.task.AbstractIterativePactTask.run(AbstractIterativePactTask.java:139)\n\tat
org.apache.flink.runtime.iterative.task.IterationIntermediatePactTask.run(IterationIntermediatePactTask.java:92)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:362)\n\tat
org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:217)\n\tat
java.lang.Thread.run(Thread.java:745)\n\n[Code | https://github.com/vasia/flink/tree/cc-test]
and [dataset | http://snap.stanford.edu/data/com-DBLP.html] to reproduce."
diff --git a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
index dc21c13..2630019 100644
--- a/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
+++ b/flink-optimizer/src/main/java/org/apache/flink/optimizer/plantranslate/JobGraphGenerator.java
@@ -1163,8 +1163,9 @@ public class JobGraphGenerator implements Visitor<PlanNode> {
final TempMode tm = channel.getTempMode();
boolean needsMemory = false;
- // Don't add a pipeline breaker if the data exchange is already blocking.
- if (tm.breaksPipeline() && channel.getDataExchangeMode() != DataExchangeMode.BATCH) {
+ // Don't add a pipeline breaker if the data exchange is already blocking, EXCEPT the channel is within an iteration.
+ if (tm.breaksPipeline() &&
+ (channel.isOnDynamicPath() || channel.getDataExchangeMode() != DataExchangeMode.BATCH) ) {
config.setInputAsynchronouslyMaterialized(inputNum, true);
needsMemory = true;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1951_adb321d6.diff |
bugs-dot-jar_data_FLINK-1133_27e40205 | ---
BugID: FLINK-1133
Summary: Type extractor cannot determine type of function
Description: "This function fails in the type extractor.\n\n{code}\npublic static
final class DuplicateValue<T> implements MapFunction<Tuple1<T>, Tuple2<T, T>> {\n\t\t\n\t@Override\n\tpublic
Tuple2<T, T> map(Tuple1<T> vertex) {\n\t\treturn new Tuple2<T, T>(vertex.f0, vertex.f0);\n\t}\n}\n{code}"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index d8e1aed..d5f3619 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -201,7 +201,7 @@ public class TypeExtractor {
// return type is a variable -> try to get the type info from the input directly
if (returnType instanceof TypeVariable<?>) {
- typeInfo = (TypeInformation<OUT>) createTypeInfoFromInput((TypeVariable<?>) returnType, typeHierarchy, in1Type, in2Type);
+ typeInfo = (TypeInformation<OUT>) createTypeInfoFromInputs((TypeVariable<?>) returnType, typeHierarchy, in1Type, in2Type);
if (typeInfo != null) {
return typeInfo;
@@ -280,7 +280,7 @@ public class TypeExtractor {
// sub type could not be determined with materializing
// try to derive the type info of the TypeVariable from the immediate base child input as a last attempt
if (subtypes[i] instanceof TypeVariable<?>) {
- tupleSubTypes[i] = createTypeInfoFromInput((TypeVariable<?>) subtypes[i], typeHierarchy, in1Type, in2Type);
+ tupleSubTypes[i] = createTypeInfoFromInputs((TypeVariable<?>) subtypes[i], typeHierarchy, in1Type, in2Type);
// variable could not be determined
if (tupleSubTypes[i] == null) {
@@ -315,7 +315,7 @@ public class TypeExtractor {
}
// try to derive the type info of the TypeVariable from the immediate base child input as a last attempt
else {
- TypeInformation<OUT> typeInfo = (TypeInformation<OUT>) createTypeInfoFromInput((TypeVariable<?>) t, typeHierarchy, in1Type, in2Type);
+ TypeInformation<OUT> typeInfo = (TypeInformation<OUT>) createTypeInfoFromInputs((TypeVariable<?>) t, typeHierarchy, in1Type, in2Type);
if (typeInfo != null) {
return typeInfo;
} else {
@@ -371,11 +371,11 @@ public class TypeExtractor {
throw new InvalidTypesException("Type Information could not be created.");
}
- private <IN1, IN2> TypeInformation<?> createTypeInfoFromInput(TypeVariable<?> returnTypeVar, ArrayList<Type> returnTypeHierarchy,
+ private <IN1, IN2> TypeInformation<?> createTypeInfoFromInputs(TypeVariable<?> returnTypeVar, ArrayList<Type> returnTypeHierarchy,
TypeInformation<IN1> in1TypeInfo, TypeInformation<IN2> in2TypeInfo) {
-
+
Type matReturnTypeVar = materializeTypeVariable(returnTypeHierarchy, returnTypeVar);
-
+
// variable could be resolved
if (!(matReturnTypeVar instanceof TypeVariable)) {
return createTypeInfoWithTypeHierarchy(returnTypeHierarchy, matReturnTypeVar, in1TypeInfo, in2TypeInfo);
@@ -383,35 +383,56 @@ public class TypeExtractor {
else {
returnTypeVar = (TypeVariable<?>) matReturnTypeVar;
}
-
+
TypeInformation<?> info = null;
if (in1TypeInfo != null) {
// find the deepest type variable that describes the type of input 1
- ParameterizedType baseClass = (ParameterizedType) returnTypeHierarchy.get(returnTypeHierarchy.size() - 1 );
+ ParameterizedType baseClass = (ParameterizedType) returnTypeHierarchy.get(returnTypeHierarchy.size() - 1);
Type in1Type = baseClass.getActualTypeArguments()[0];
- if (in1Type instanceof TypeVariable) {
- in1Type = materializeTypeVariable(returnTypeHierarchy, (TypeVariable<?>) in1Type);
- info = findCorrespondingInfo(returnTypeVar, in1Type, in1TypeInfo);
- }
+
+ info = createTypeInfoFromInput(returnTypeVar, returnTypeHierarchy, in1Type, in1TypeInfo);
}
-
+
if (info == null && in2TypeInfo != null) {
// find the deepest type variable that describes the type of input 2
- ParameterizedType baseClass = (ParameterizedType) returnTypeHierarchy.get(returnTypeHierarchy.size() - 1 );
+ ParameterizedType baseClass = (ParameterizedType) returnTypeHierarchy.get(returnTypeHierarchy.size() - 1);
Type in2Type = baseClass.getActualTypeArguments()[1];
- if (in2Type instanceof TypeVariable) {
- in2Type = materializeTypeVariable(returnTypeHierarchy, (TypeVariable<?>) in2Type);
- info = findCorrespondingInfo(returnTypeVar, in2Type, in2TypeInfo);
- }
+
+ info = createTypeInfoFromInput(returnTypeVar, returnTypeHierarchy, in2Type, in2TypeInfo);
}
-
+
if (info != null) {
return info;
}
-
+
return null;
}
+ private <IN1> TypeInformation<?> createTypeInfoFromInput(TypeVariable<?> returnTypeVar, ArrayList<Type> returnTypeHierarchy,
+ Type inType, TypeInformation<IN1> inTypeInfo) {
+ TypeInformation<?> info = null;
+ // the input is a type variable
+ if (inType instanceof TypeVariable) {
+ inType = materializeTypeVariable(returnTypeHierarchy, (TypeVariable<?>) inType);
+ info = findCorrespondingInfo(returnTypeVar, inType, inTypeInfo);
+ }
+ // the input is a tuple that may contains type variables
+ else if (inType instanceof ParameterizedType && Tuple.class.isAssignableFrom(((Class<?>)((ParameterizedType) inType).getRawType()))) {
+ Type[] tupleElements = ((ParameterizedType) inType).getActualTypeArguments();
+ // go thru all tuple elements and search for type variables
+ for(int i = 0; i < tupleElements.length; i++) {
+ if(tupleElements[i] instanceof TypeVariable) {
+ inType = materializeTypeVariable(returnTypeHierarchy, (TypeVariable<?>) tupleElements[i]);
+ info = findCorrespondingInfo(returnTypeVar, inType, ((TupleTypeInfo<?>) inTypeInfo).getTypeAt(i));
+ if(info != null) {
+ break;
+ }
+ }
+ }
+ }
+ return info;
+ }
+
// --------------------------------------------------------------------------------------------
// Extract type parameters
// --------------------------------------------------------------------------------------------
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1133_27e40205.diff |
bugs-dot-jar_data_FLINK-3267_ed3810b1 | ---
BugID: FLINK-3267
Summary: Disable reference tracking in Kryo fallback serializer
Description: |-
  Kryo runs extra logic to track and resolve repeated references to the same object (similar to Java serialization).
  We should disable reference tracking, because:
  - reference tracking is costly
  - it is virtually always unnecessary for the data types used in Flink
  - most importantly, it is inconsistent with Flink's own serialization (which does not do reference tracking)
  - it may cause problems if elements are read in a different order than they are written
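For context, here is a minimal stand-alone sketch (not part of the patch below, and not Flink code) of the Kryo setting the fix applies. It assumes the plain Kryo 2.x API (com.esotericsoftware.kryo) that Flink's fallback serializer builds on.
{code}
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
import java.util.List;

public class KryoReferenceTrackingSketch {

	public static void main(String[] args) {
		Kryo kryo = new Kryo();
		// the setting added by the patch: do not record back-references while
		// writing; this skips the identity-map bookkeeping and matches the
		// behavior of Flink's own serializers
		kryo.setReferences(false);

		List<String> original = new ArrayList<>();
		original.add("a");
		original.add("b");

		ByteArrayOutputStream bos = new ByteArrayOutputStream();
		Output output = new Output(bos);
		kryo.writeObject(output, original);
		output.close();

		Input input = new Input(new ByteArrayInputStream(bos.toByteArray()));
		@SuppressWarnings("unchecked")
		List<String> copy = kryo.readObject(input, ArrayList.class);
		input.close();

		System.out.println(copy); // prints [a, b]
	}
}
{code}
The trade-off is that with references disabled, cyclic object graphs can no longer be serialized by the fallback serializer, which matches the issue's assumption that the data types used in Flink do not need it.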
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
index f1287fa..276ffc4 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
@@ -323,6 +323,10 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
if (this.kryo == null) {
this.kryo = getKryoInstance();
+ // disable reference tracking. reference tracking is costly, usually unnecessary, and
+ // inconsistent with Flink's own serialization (which does not do reference tracking)
+ kryo.setReferences(false);
+
// Throwable and all subclasses should be serialized via java serialization
kryo.addDefaultSerializer(Throwable.class, new JavaSerializer());
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3267_ed3810b1.diff |
bugs-dot-jar_data_FLINK-1333_63ef8e86 | ---
BugID: FLINK-1333
Summary: Getter/Setter recognition for POJO fields with generics is not working
Description: |-
  Fields like
  {code}
  private List<Contributors> contributors;
  {code}
  are not recognized correctly, even if they have getters and setters.
  Workaround: make them public.
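To make the failing case concrete, here is a hypothetical POJO of the kind described above (class and field names are illustrative, not taken from the issue). With the patch below, the TypeExtractor compares the generic return and parameter types of the accessors, so this class is analyzed as a POJO even though the field stays private.
{code}
import java.util.List;

public class Tweet {

	// private field with a generic type; before the fix such a field was only
	// recognized when it was made public
	private List<String> contributors;

	public List<String> getContributors() {
		return contributors;
	}

	public void setContributors(List<String> contributors) {
		this.contributors = contributors;
	}
}
{code}
Before the fix, the getter check compared the raw Class of the return type against the field's generic type, which can never match for a parameterized field such as List<String>, hence the "make it public" workaround.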
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index e52e2af..b528d00 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -989,12 +989,12 @@ public class TypeExtractor {
}
for(Method m : clazz.getMethods()) {
// check for getter
- if( // The name should be "get<FieldName>" or "<fieldName>" (for scala).
- (m.getName().toLowerCase().equals("get"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow)) &&
+ if( // The name should be "get<FieldName>" or "<fieldName>" (for scala) or "is<fieldName>" for boolean fields.
+ (m.getName().toLowerCase().equals("get"+fieldNameLow) || m.getName().toLowerCase().equals("is"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow)) &&
// no arguments for the getter
m.getParameterTypes().length == 0 &&
// return type is same as field type (or the generic variant of it)
- (m.getReturnType().equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric)) )
+ (m.getGenericReturnType().equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric)) )
) {
if(hasGetter) {
throw new IllegalStateException("Detected more than one getter");
@@ -1004,7 +1004,7 @@ public class TypeExtractor {
// check for setters (<FieldName>_$eq for scala)
if((m.getName().toLowerCase().equals("set"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow+"_$eq")) &&
m.getParameterTypes().length == 1 && // one parameter of the field's type
- ( m.getParameterTypes()[0].equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericParameterTypes()[0].equals(fieldTypeGeneric) ) )&&
+ ( m.getGenericParameterTypes()[0].equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericParameterTypes()[0].equals(fieldTypeGeneric) ) )&&
// return type is void.
m.getReturnType().equals(Void.TYPE)
) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1333_63ef8e86.diff |
bugs-dot-jar_data_FLINK-3534_734ba01d | ---
BugID: FLINK-3534
Summary: Cancelling a running job can lead to restart instead of stopping
Description: "I just tried cancelling a regularly running job. Instead of the job
stopping, it restarted.\n\n\n{code}\n2016-02-29 10:39:28,415 INFO org.apache.flink.yarn.YarnJobManager
\ - Trying to cancel job with ID 5c0604694c8469cfbb89daaa990068df.\n2016-02-29
10:39:28,416 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
Source: Out of order data generator -> (Flat Map, Timestamps/Watermarks) (1/1) (e3b05555ab0e373defb925898de9f200)
switched from RUNNING to CANCELING\n....\n2016-02-29 10:39:28,488 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - TriggerWindow(TumblingTimeWindows(60000), FoldingStateDescriptor{name=window-contents,
defaultValue=(0,9223372036854775807,0), serializer=null}, EventTimeTrigger(), WindowedStream.apply(WindowedStream.java:397))
(19/24) (c1be31b0be596d2521073b2d78ffa60a) switched from CANCELING to CANCELED\n2016-02-29
10:40:08,468 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
Source: Out of order data generator -> (Flat Map, Timestamps/Watermarks) (1/1) (e3b05555ab0e373defb925898de9f200)
switched from CANCELING to FAILED\n2016-02-29 10:40:08,468 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - TriggerWindow(TumblingTimeWindows(60000), FoldingStateDescriptor{name=window-contents,
defaultValue=(0,9223372036854775807,0), serializer=null}, EventTimeTrigger(), WindowedStream.apply(WindowedStream.java:397))
(1/24) (5ad172ec9932b24d5a98377a2c82b0b3) switched from CANCELING to FAILED\n2016-02-29
10:40:08,472 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
TriggerWindow(TumblingTimeWindows(60000), FoldingStateDescriptor{name=window-contents,
defaultValue=(0,9223372036854775807,0), serializer=null}, EventTimeTrigger(), WindowedStream.apply(WindowedStream.java:397))
(2/24) (5404ca28ac7cf23b67dff30ef2309078) switched from CANCELING to FAILED\n2016-02-29
10:40:08,473 INFO org.apache.flink.yarn.YarnJobManager -
Status of job 5c0604694c8469cfbb89daaa990068df (Event counter: {auto.offset.reset=earliest,
rocksdb=hdfs:///user/robert/rocksdb, generateInPlace=soTrue, parallelism=24, bootstrap.servers=cdh544-worker-0:9092,
topic=eventsGenerator, eventsPerKeyPerGenerator=2, numKeys=1000000000, zookeeper.connect=cdh544-worker-0:2181,
timeSliceSize=60000, eventsKerPey=1, genPar=1}) changed to FAILING.\njava.lang.Exception:
Task could not be canceled.\n\tat org.apache.flink.runtime.executiongraph.Execution$5.onComplete(Execution.java:902)\n\tat
akka.dispatch.OnComplete.internal(Future.scala:246)\n\tat akka.dispatch.OnComplete.internal(Future.scala:244)\n\tat
akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)\n\tat akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)\n\tat
scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)\n\tat scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: akka.pattern.AskTimeoutException: Ask timed out on [Actor[akka.tcp://[email protected]:50119/user/taskmanager#640539146]]
after [10000 ms]\n\tat akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:333)\n\tat
akka.actor.Scheduler$$anon$7.run(Scheduler.scala:117)\n\tat scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)\n\tat
scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:691)\n\tat
akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(Scheduler.scala:467)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.executeBucket$1(Scheduler.scala:419)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.nextTick(Scheduler.scala:423)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.run(Scheduler.scala:375)\n\tat java.lang.Thread.run(Thread.java:745)\n2016-02-29
10:40:08,477 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
TriggerWindow(TumblingTimeWindows(60000), FoldingStateDescriptor{name=window-contents,
defaultValue=(0,9223372036854775807,0), serializer=null}, EventTimeTrigger(), WindowedStream.apply(WindowedStream.java:397))
(3/24) (fc527d65ec8df3ccf68f882d968e776e) switched from CANCELING to FAILED\n2016-02-29
10:40:08,487 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
TriggerWindow(TumblingTimeWindows(60000), FoldingStateDescriptor{name=window-contents,
defaultValue=(0,9223372036854775807,0), serializer=null}, EventTimeTrigger(), WindowedStream.apply(WindowedStream.java:397))
(4/24) (afb1aa3c2d8acdee0f138cf344238e4e) switched from CANCELING to FAILED\n2016-02-29
10:40:08,488 INFO org.apache.flink.runtime.executiongraph.restart.FixedDelayRestartStrategy
\ - Delaying retry of job execution for 3000 ms ...\n2016-02-29 10:40:08,488 INFO
\ org.apache.flink.yarn.YarnJobManager - Status of job
5c0604694c8469cfbb89daaa990068df (Event counter: {auto.offset.reset=earliest, rocksdb=hdfs:///user/robert/rocksdb,
generateInPlace=soTrue, parallelism=24, bootstrap.servers=cdh544-worker-0:9092,
topic=eventsGenerator, eventsPerKeyPerGenerator=2, numKeys=1000000000, zookeeper.connect=cdh544-worker-0:2181,
timeSliceSize=60000, eventsKerPey=1, genPar=1}) changed to RESTARTING.\n2016-02-29
10:40:11,490 INFO org.apache.flink.yarn.YarnJobManager -
Status of job 5c0604694c8469cfbb89daaa990068df (Event counter: {auto.offset.reset=earliest,
rocksdb=hdfs:///user/robert/rocksdb, generateInPlace=soTrue, parallelism=24, bootstrap.servers=cdh544-worker-0:9092,
topic=eventsGenerator, eventsPerKeyPerGenerator=2, numKeys=1000000000, zookeeper.connect=cdh544-worker-0:2181,
timeSliceSize=60000, eventsKerPey=1, genPar=1}) changed to CREATED.\n2016-02-29
10:40:11,490 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
Source: Out of order data generator -> (Flat Map, Timestamps/Watermarks) (1/1) (1319b2f44d78d99948ffde4350c052d9)
switched from CREATED to SCHEDULED\n2016-02-29 10:40:11,490 INFO org.apache.flink.yarn.YarnJobManager
\ - Status of job 5c0604694c8469cfbb89daaa990068df (Event
counter: {auto.offset.reset=earliest, rocksdb=hdfs:///user/robert/rocksdb, generateInPlace=soTrue,
parallelism=24, bootstrap.servers=cdh544-worker-0:9092, topic=eventsGenerator, eventsPerKeyPerGenerator=2,
numKeys=1000000000, zookeeper.connect=cdh544-worker-0:2181, timeSliceSize=60000,
eventsKerPey=1, genPar=1}) changed to RUNNING.\n{code}\n"
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index bc75664..6d5832b 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -797,6 +797,11 @@ public class Execution implements Serializable {
return false;
}
+ if (current == CANCELING) {
+ cancelingComplete();
+ return false;
+ }
+
if (transitionState(current, FAILED, t)) {
// success (in a manner of speaking)
this.failureCause = t;
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
index 0d6de98..ed50bea 100755
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
@@ -809,7 +809,7 @@ public class ExecutionGraph implements Serializable {
public void fail(Throwable t) {
while (true) {
JobStatus current = state;
- if (current == JobStatus.FAILED || current == JobStatus.FAILING) {
+ if (current == JobStatus.FAILING || current.isTerminalState()) {
return;
}
else if (transitionState(current, JobStatus.FAILING, t)) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3534_734ba01d.diff |
bugs-dot-jar_data_FLINK-2515_06e2da35 | ---
BugID: FLINK-2515
Summary: CheckpointCoordinator triggers checkpoints even if not all sources are running
any more
Description: When some sources finish early, they will not emit checkpoint barriers
any more. That means that pending checkpoint alignments will never be able to complete,
locking the flow.
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
index 9694132..de83ad9 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
@@ -23,6 +23,7 @@ import akka.actor.PoisonPill;
import akka.actor.Props;
import org.apache.flink.api.common.JobID;
+import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.Execution;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.executiongraph.ExecutionJobVertex;
@@ -224,7 +225,7 @@ public class CheckpointCoordinator {
ExecutionAttemptID[] triggerIDs = new ExecutionAttemptID[tasksToTrigger.length];
for (int i = 0; i < tasksToTrigger.length; i++) {
Execution ee = tasksToTrigger[i].getCurrentExecutionAttempt();
- if (ee != null) {
+ if (ee != null && ee.getState() == ExecutionState.RUNNING) {
triggerIDs[i] = ee.getAttemptId();
} else {
LOG.info("Checkpoint triggering task {} is not being executed at the moment. Aborting checkpoint.",
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2515_06e2da35.diff |
bugs-dot-jar_data_FLINK-1820_39d526e6 | ---
BugID: FLINK-1820
Summary: Bug in DoubleParser and FloatParser - empty String is not casted to 0
Description: "Hi,\n\nI found the bug, when I wanted to read a csv file, which had
a line like:\n\"||\\n\"\n\nIf I treat it as a Tuple2<Long,Long>, I get as expected
a tuple (0L,0L).\n\nBut if I want to read it into a Double-Tuple or a Float-Tuple,
I get the following error:\n\njava.lang.AssertionError: Test failed due to a org.apache.flink.api.common.io.ParseException:
Line could not be parsed: '||'\nParserError NUMERIC_VALUE_FORMAT_ERROR \n\nThis
error can be solved by adding an additional condition for empty strings in the FloatParser
/ DoubleParser.\n\nWe definitely need the CSVReader to be able to read \"empty values\".\n\nI
can fix it as described if there are no better ideas :)\n"
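As a plain-Java illustration of the underlying problem (a sketch, independent of Flink's parser classes): splitting a line such as "||" yields empty fields, and Double.parseDouble("") throws, so the field parsers have to handle the empty-field case explicitly instead of delegating it to the JDK parser.
{code}
public class EmptyFieldSketch {

	public static void main(String[] args) {
		String line = "||";
		// limit -1 keeps trailing empty fields: ["", "", ""]
		String[] fields = line.split("\\|", -1);

		for (String field : fields) {
			try {
				System.out.println(Double.parseDouble(field));
			} catch (NumberFormatException e) {
				// this is the case the CSV field parsers must map to a defined
				// error state (or a default value) instead of failing the record
				System.out.println("empty field -> must be handled explicitly");
			}
		}
	}
}
{code}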
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/ByteParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/ByteParser.java
index 5858da2..09e517a 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/ByteParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/ByteParser.java
@@ -21,22 +21,23 @@ package org.apache.flink.types.parser;
public class ByteParser extends FieldParser<Byte> {
-
+
private byte result;
-
+
@Override
public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Byte reusable) {
int val = 0;
boolean neg = false;
- final int delimLimit = limit-delimiter.length+1;
-
+ final int delimLimit = limit - delimiter.length + 1;
+
if (bytes[startPos] == '-') {
neg = true;
startPos++;
-
+
// check for empty field with only the sign
- if (startPos == limit || (startPos < delimLimit && delimiterNext(bytes, startPos, delimiter))) {
+ if (startPos == limit || (startPos < delimLimit && delimiterNext(bytes, startPos,
+ delimiter))) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ORPHAN_SIGN);
return -1;
}
@@ -44,6 +45,10 @@ public class ByteParser extends FieldParser<Byte> {
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
this.result = (byte) (neg ? -val : val);
return i + delimiter.length;
}
@@ -53,17 +58,17 @@ public class ByteParser extends FieldParser<Byte> {
}
val *= 10;
val += bytes[i] - 48;
-
+
if (val > Byte.MAX_VALUE && (!neg || val > -Byte.MIN_VALUE)) {
setErrorState(ParseErrorState.NUMERIC_VALUE_OVERFLOW_UNDERFLOW);
return -1;
}
}
-
+
this.result = (byte) (neg ? -val : val);
return limit;
}
-
+
@Override
public Byte createValue() {
return Byte.MIN_VALUE;
@@ -73,43 +78,40 @@ public class ByteParser extends FieldParser<Byte> {
public Byte getLastResult() {
return Byte.valueOf(this.result);
}
-
+
/**
- * Static utility to parse a field of type byte from a byte sequence that represents text characters
+ * Static utility to parse a field of type byte from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
- *
+ * @param length The length of the byte sequence (counting from the offset).
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final byte parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
- * Static utility to parse a field of type byte from a byte sequence that represents text characters
+ * Static utility to parse a field of type byte from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
- * @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
+ * @param startPos The offset to start the parsing.
+ * @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final byte parseField(byte[] bytes, int startPos, int length, char delimiter) {
- if (length <= 0) {
- throw new NumberFormatException("Invalid input: Empty string");
- }
long val = 0;
boolean neg = false;
-
+
if (bytes[startPos] == '-') {
neg = true;
startPos++;
@@ -118,17 +120,17 @@ public class ByteParser extends FieldParser<Byte> {
throw new NumberFormatException("Orphaned minus sign.");
}
}
-
+
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
- return (byte) (neg ? -val : val);
+ throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
-
+
if (val > Byte.MAX_VALUE && (!neg || val > -Byte.MIN_VALUE)) {
throw new NumberFormatException("Value overflow/underflow");
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/ByteValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/ByteValueParser.java
index f9b36e4..612a1cb 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/ByteValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/ByteValueParser.java
@@ -52,6 +52,10 @@ public class ByteValueParser extends FieldParser<ByteValue> {
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
reusable.setValue((byte) (neg ? -val : val));
return i + delimiter.length;
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/DoubleParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/DoubleParser.java
index 947fdfe..086c1f5 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/DoubleParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/DoubleParser.java
@@ -23,35 +23,39 @@ package org.apache.flink.types.parser;
* Parses a text field into a Double.
*/
public class DoubleParser extends FieldParser<Double> {
-
+
private static final Double DOUBLE_INSTANCE = Double.valueOf(0.0);
-
+
private double result;
-
+
@Override
public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Double reusable) {
int i = startPos;
- final int delimLimit = limit-delimiter.length+1;
-
+ final int delimLimit = limit - delimiter.length + 1;
+
while (i < limit) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
break;
}
i++;
}
-
- String str = new String(bytes, startPos, i-startPos);
+
+ String str = new String(bytes, startPos, i - startPos);
+ int len = str.length();
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ setErrorState(ParseErrorState.WHITESPACE_IN_NUMERIC_FIELD);
+ return -1;
+ }
try {
this.result = Double.parseDouble(str);
return (i == limit) ? limit : i + delimiter.length;
- }
- catch (NumberFormatException e) {
+ } catch (NumberFormatException e) {
setErrorState(ParseErrorState.NUMERIC_VALUE_FORMAT_ERROR);
return -1;
}
}
-
+
@Override
public Double createValue() {
return DOUBLE_INSTANCE;
@@ -61,35 +65,35 @@ public class DoubleParser extends FieldParser<Double> {
public Double getLastResult() {
return Double.valueOf(this.result);
}
-
+
/**
- * Static utility to parse a field of type double from a byte sequence that represents text characters
+ * Static utility to parse a field of type double from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
- *
+ * @param length The length of the byte sequence (counting from the offset).
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final double parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
- * Static utility to parse a field of type double from a byte sequence that represents text characters
+ * Static utility to parse a field of type double from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
- * @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
+ * @param startPos The offset to start the parsing.
+ * @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final double parseField(byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
@@ -97,12 +101,17 @@ public class DoubleParser extends FieldParser<Double> {
}
int i = 0;
final byte delByte = (byte) delimiter;
-
+
while (i < length && bytes[i] != delByte) {
i++;
}
-
- String str = new String(bytes, startPos, i);
+
+ String str = new String(bytes, startPos, i - startPos);
+ int len = str.length();
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ throw new NumberFormatException("There is leading or trailing whitespace in the " +
+ "numeric field: " + str);
+ }
return Double.parseDouble(str);
}
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/DoubleValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/DoubleValueParser.java
index e225c1f..7751831 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/DoubleValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/DoubleValueParser.java
@@ -33,7 +33,7 @@ public class DoubleValueParser extends FieldParser<DoubleValue> {
int i = startPos;
- final int delimLimit = limit-delimiter.length+1;
+ final int delimLimit = limit - delimiter.length + 1;
while (i < limit) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
@@ -42,7 +42,11 @@ public class DoubleValueParser extends FieldParser<DoubleValue> {
i++;
}
- String str = new String(bytes, startPos, i-startPos);
+ String str = new String(bytes, startPos, i - startPos);
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ setErrorState(ParseErrorState.WHITESPACE_IN_NUMERIC_FIELD);
+ return -1;
+ }
try {
double value = Double.parseDouble(str);
reusable.setValue(value);
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/FieldParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/FieldParser.java
index 33697fd..55e9915 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/FieldParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/FieldParser.java
@@ -64,7 +64,13 @@ public abstract class FieldParser<T> {
UNTERMINATED_QUOTED_STRING,
/** The parser found characters between the end of the quoted string and the delimiter. */
- UNQUOTED_CHARS_AFTER_QUOTED_STRING
+ UNQUOTED_CHARS_AFTER_QUOTED_STRING,
+
+ /** The string is empty. */
+ EMPTY_STRING,
+
+ /** There is whitespace in a numeric field. */
+ WHITESPACE_IN_NUMERIC_FIELD
}
private ParseErrorState errorState = ParseErrorState.NONE;
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/FloatParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/FloatParser.java
index 7d166c7..be98aa1 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/FloatParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/FloatParser.java
@@ -23,15 +23,16 @@ package org.apache.flink.types.parser;
* Parses a text field into a {@link Float}.
*/
public class FloatParser extends FieldParser<Float> {
-
+
private float result;
@Override
- public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Float reusable) {
-
+ public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Float
+ reusable) {
+
int i = startPos;
- final int delimLimit = limit-delimiter.length+1;
+ final int delimLimit = limit - delimiter.length + 1;
while (i < limit) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
@@ -39,18 +40,23 @@ public class FloatParser extends FieldParser<Float> {
}
i++;
}
-
- String str = new String(bytes, startPos, i-startPos);
+
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ setErrorState(ParseErrorState.WHITESPACE_IN_NUMERIC_FIELD);
+ return -1;
+ }
+
+ String str = new String(bytes, startPos, i - startPos);
+ int len = str.length();
try {
this.result = Float.parseFloat(str);
- return (i == limit) ? limit : i+ delimiter.length;
- }
- catch (NumberFormatException e) {
+ return (i == limit) ? limit : i + delimiter.length;
+ } catch (NumberFormatException e) {
setErrorState(ParseErrorState.NUMERIC_VALUE_FORMAT_ERROR);
return -1;
}
}
-
+
@Override
public Float createValue() {
return Float.MIN_VALUE;
@@ -60,35 +66,35 @@ public class FloatParser extends FieldParser<Float> {
public Float getLastResult() {
return Float.valueOf(this.result);
}
-
+
/**
- * Static utility to parse a field of type float from a byte sequence that represents text characters
+ * Static utility to parse a field of type float from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
- *
+ * @param length The length of the byte sequence (counting from the offset).
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final float parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
- * Static utility to parse a field of type float from a byte sequence that represents text characters
+ * Static utility to parse a field of type float from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
- * @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
+ * @param startPos The offset to start the parsing.
+ * @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final float parseField(byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
@@ -96,12 +102,17 @@ public class FloatParser extends FieldParser<Float> {
}
int i = 0;
final byte delByte = (byte) delimiter;
-
+
while (i < length && bytes[i] != delByte) {
i++;
}
- String str = new String(bytes, startPos, i);
+ String str = new String(bytes, startPos, i - startPos);
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ throw new NumberFormatException("There is leading or trailing whitespace in the " +
+ "numeric field: " + str);
+ }
+ int len = str.length();
return Float.parseFloat(str);
}
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/FloatValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/FloatValueParser.java
index af16d4c..e8caac2 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/FloatValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/FloatValueParser.java
@@ -33,7 +33,7 @@ public class FloatValueParser extends FieldParser<FloatValue> {
int i = startPos;
- final int delimLimit = limit-delimiter.length+1;
+ final int delimLimit = limit - delimiter.length + 1;
while (i < limit) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
@@ -42,7 +42,11 @@ public class FloatValueParser extends FieldParser<FloatValue> {
i++;
}
- String str = new String(bytes, startPos, i-startPos);
+ String str = new String(bytes, startPos, i - startPos);
+ if (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[Math.max(i - 1, 0)])) {
+ setErrorState(ParseErrorState.WHITESPACE_IN_NUMERIC_FIELD);
+ return -1;
+ }
try {
float value = Float.parseFloat(str);
reusable.setValue(value);
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/IntParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/IntParser.java
index c871f4a..dcd2ec2 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/IntParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/IntParser.java
@@ -25,32 +25,38 @@ package org.apache.flink.types.parser;
* The parser does not check for the maximum value.
*/
public class IntParser extends FieldParser<Integer> {
-
+
private static final long OVERFLOW_BOUND = 0x7fffffffL;
private static final long UNDERFLOW_BOUND = 0x80000000L;
private int result;
-
+
@Override
- public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Integer reusable) {
+ public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Integer
+ reusable) {
long val = 0;
boolean neg = false;
- final int delimLimit = limit-delimiter.length+1;
+ final int delimLimit = limit - delimiter.length + 1;
if (bytes[startPos] == '-') {
neg = true;
startPos++;
-
+
// check for empty field with only the sign
- if (startPos == limit || ( startPos < delimLimit && delimiterNext(bytes, startPos, delimiter))) {
+ if (startPos == limit || (startPos < delimLimit && delimiterNext(bytes, startPos,
+ delimiter))) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ORPHAN_SIGN);
return -1;
}
}
-
+
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
this.result = (int) (neg ? -val : val);
return i + delimiter.length;
}
@@ -60,17 +66,17 @@ public class IntParser extends FieldParser<Integer> {
}
val *= 10;
val += bytes[i] - 48;
-
+
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
setErrorState(ParseErrorState.NUMERIC_VALUE_OVERFLOW_UNDERFLOW);
return -1;
}
}
-
+
this.result = (int) (neg ? -val : val);
return limit;
}
-
+
@Override
public Integer createValue() {
return Integer.MIN_VALUE;
@@ -80,40 +86,37 @@ public class IntParser extends FieldParser<Integer> {
public Integer getLastResult() {
return Integer.valueOf(this.result);
}
-
+
/**
- * Static utility to parse a field of type int from a byte sequence that represents text characters
+ * Static utility to parse a field of type int from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
- *
+ * @param length The length of the byte sequence (counting from the offset).
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final int parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
- * Static utility to parse a field of type int from a byte sequence that represents text characters
+ * Static utility to parse a field of type int from a byte sequence that represents text
+ * characters
* (such as when read from a file stream).
- *
- * @param bytes The bytes containing the text data that should be parsed.
- * @param startPos The offset to start the parsing.
- * @param length The length of the byte sequence (counting from the offset).
+ *
+ * @param bytes The bytes containing the text data that should be parsed.
+ * @param startPos The offset to start the parsing.
+ * @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
* @return The parsed value.
- *
- * @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
+ * @throws NumberFormatException Thrown when the value cannot be parsed because the text
+ * represents not a correct number.
*/
public static final int parseField(byte[] bytes, int startPos, int length, char delimiter) {
- if (length <= 0) {
- throw new NumberFormatException("Invalid input: Empty string");
- }
long val = 0;
boolean neg = false;
@@ -125,17 +128,17 @@ public class IntParser extends FieldParser<Integer> {
throw new NumberFormatException("Orphaned minus sign.");
}
}
-
+
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
- return (int) (neg ? -val : val);
+ throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
-
+
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
throw new NumberFormatException("Value overflow/underflow");
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/IntValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/IntValueParser.java
index 8cb8176..abd8615 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/IntValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/IntValueParser.java
@@ -54,6 +54,10 @@ public class IntValueParser extends FieldParser<IntValue> {
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
reusable.setValue((int) (neg ? -val : val));
return i + delimiter.length;
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/LongParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/LongParser.java
index af17f15..bb6c7c9 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/LongParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/LongParser.java
@@ -24,9 +24,9 @@ package org.apache.flink.types.parser;
* Only characters '1' to '0' and '-' are allowed.
*/
public class LongParser extends FieldParser<Long> {
-
+
private long result;
-
+
@Override
public int parseField(byte[] bytes, int startPos, int limit, byte[] delimiter, Long reusable) {
long val = 0;
@@ -37,16 +37,20 @@ public class LongParser extends FieldParser<Long> {
if (bytes[startPos] == '-') {
neg = true;
startPos++;
-
+
// check for empty field with only the sign
if (startPos == limit || (startPos < delimLimit && delimiterNext(bytes, startPos, delimiter))) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ORPHAN_SIGN);
return -1;
}
}
-
+
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
this.result = neg ? -val : val;
return i + delimiter.length;
}
@@ -56,15 +60,15 @@ public class LongParser extends FieldParser<Long> {
}
val *= 10;
val += bytes[i] - 48;
-
+
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
if (neg && val == Long.MIN_VALUE) {
this.result = Long.MIN_VALUE;
-
+
if (i+1 >= limit) {
- return limit;
+ return limit;
} else if (i+1 < delimLimit && delimiterNext(bytes, i+1, delimiter)) {
return i + 1 + delimiter.length;
} else {
@@ -78,57 +82,54 @@ public class LongParser extends FieldParser<Long> {
}
}
}
-
+
this.result = neg ? -val : val;
return limit;
}
-
+
@Override
public Long createValue() {
return Long.MIN_VALUE;
}
-
+
@Override
public Long getLastResult() {
return Long.valueOf(this.result);
}
-
+
/**
* Static utility to parse a field of type long from a byte sequence that represents text characters
* (such as when read from a file stream).
- *
+ *
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
- *
+ *
* @return The parsed value.
- *
+ *
* @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
* Static utility to parse a field of type long from a byte sequence that represents text characters
* (such as when read from a file stream).
- *
+ *
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
+ *
* @return The parsed value.
- *
+ *
* @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
*/
public static final long parseField(byte[] bytes, int startPos, int length, char delimiter) {
- if (length <= 0) {
- throw new NumberFormatException("Invalid input: Empty string");
- }
long val = 0;
boolean neg = false;
-
+
if (bytes[startPos] == '-') {
neg = true;
startPos++;
@@ -137,17 +138,17 @@ public class LongParser extends FieldParser<Long> {
throw new NumberFormatException("Orphaned minus sign.");
}
}
-
+
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
- return neg ? -val : val;
+ throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
-
+
// check for overflow / underflow
if (val < 0) {
// this is an overflow/underflow, unless we hit exactly the Long.MIN_VALUE
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/LongValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/LongValueParser.java
index 8b697cc..a99a86e 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/LongValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/LongValueParser.java
@@ -51,6 +51,10 @@ public class LongValueParser extends FieldParser<LongValue> {
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
reusable.setValue(neg ? -val : val);
return i + delimiter.length;
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/ShortParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/ShortParser.java
index a6f9898..6e04d60 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/ShortParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/ShortParser.java
@@ -25,10 +25,10 @@ package org.apache.flink.types.parser;
* The parser does not check for the maximum value.
*/
public class ShortParser extends FieldParser<Short> {
-
+
private static final int OVERFLOW_BOUND = 0x7fff;
private static final int UNDERFLOW_BOUND = 0x8000;
-
+
private short result;
@Override
@@ -37,20 +37,24 @@ public class ShortParser extends FieldParser<Short> {
boolean neg = false;
final int delimLimit = limit-delimiter.length+1;
-
+
if (bytes[startPos] == '-') {
neg = true;
startPos++;
-
+
// check for empty field with only the sign
if (startPos == limit || (startPos < delimLimit && delimiterNext(bytes, startPos, delimiter))) {
setErrorState(ParseErrorState.NUMERIC_VALUE_ORPHAN_SIGN);
return -1;
}
}
-
+
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
this.result = (short) (neg ? -val : val);
return i + delimiter.length;
}
@@ -60,17 +64,17 @@ public class ShortParser extends FieldParser<Short> {
}
val *= 10;
val += bytes[i] - 48;
-
+
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
setErrorState(ParseErrorState.NUMERIC_VALUE_OVERFLOW_UNDERFLOW);
return -1;
}
}
-
+
this.result = (short) (neg ? -val : val);
return limit;
}
-
+
@Override
public Short createValue() {
return Short.MIN_VALUE;
@@ -80,43 +84,40 @@ public class ShortParser extends FieldParser<Short> {
public Short getLastResult() {
return Short.valueOf(this.result);
}
-
+
/**
* Static utility to parse a field of type short from a byte sequence that represents text characters
* (such as when read from a file stream).
- *
+ *
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
- *
+ *
* @return The parsed value.
- *
+ *
* @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
*/
public static final short parseField(byte[] bytes, int startPos, int length) {
return parseField(bytes, startPos, length, (char) 0xffff);
}
-
+
/**
* Static utility to parse a field of type short from a byte sequence that represents text characters
* (such as when read from a file stream).
- *
+ *
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
- *
+ *
* @return The parsed value.
- *
+ *
* @throws NumberFormatException Thrown when the value cannot be parsed because the text represents not a correct number.
*/
public static final short parseField(byte[] bytes, int startPos, int length, char delimiter) {
- if (length <= 0) {
- throw new NumberFormatException("Invalid input: Empty string");
- }
long val = 0;
boolean neg = false;
-
+
if (bytes[startPos] == '-') {
neg = true;
startPos++;
@@ -125,17 +126,17 @@ public class ShortParser extends FieldParser<Short> {
throw new NumberFormatException("Orphaned minus sign.");
}
}
-
+
for (; length > 0; startPos++, length--) {
if (bytes[startPos] == delimiter) {
- return (short) (neg ? -val : val);
+ throw new NumberFormatException("Empty field.");
}
if (bytes[startPos] < 48 || bytes[startPos] > 57) {
throw new NumberFormatException("Invalid character.");
}
val *= 10;
val += bytes[startPos] - 48;
-
+
if (val > OVERFLOW_BOUND && (!neg || val > UNDERFLOW_BOUND)) {
throw new NumberFormatException("Value overflow/underflow");
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/ShortValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/ShortValueParser.java
index f5168cc..4289d1a 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/ShortValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/ShortValueParser.java
@@ -54,6 +54,10 @@ public class ShortValueParser extends FieldParser<ShortValue> {
for (int i = startPos; i < limit; i++) {
if (i < delimLimit && delimiterNext(bytes, i, delimiter)) {
+ if (i == startPos) {
+ setErrorState(ParseErrorState.EMPTY_STRING);
+ return -1;
+ }
reusable.setValue((short) (neg ? -val : val));
return i + delimiter.length;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1820_39d526e6.diff |
bugs-dot-jar_data_FLINK-3566_434e88fd | ---
BugID: FLINK-3566
Summary: Input type validation often fails on custom TypeInfo implementations
Description: "Input type validation often fails when used with custom type infos.
One example of this behaviour can be reproduced by creating a custom type info with
our own field type:\n\nStreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();\n\nenv.generateSequence(1,
10).map(new MapFunction<Long, Tuple1<Optional<Long>>>() {\n\t\t\t@Override\n\t\t\tpublic
Tuple1<Optional<Long>> map(Long value) throws Exception {\n\t\t\t\treturn Tuple1.of(Optional.of(value));\n\t\t\t}\n\t\t}).returns(new
TupleTypeInfo<>(new OptionTypeInfo<Long>(BasicTypeInfo.LONG_TYPE_INFO)))\n\t\t\t\t.keyBy(new
KeySelector<Tuple1<Optional<Long>>, Optional<Long>>() {\n\n\t\t\t\t\t@Override\n\t\t\t\t\tpublic
Optional<Long> getKey(Tuple1<Optional<Long>> value) throws Exception {\n\t\t\t\t\t\treturn
value.f0;\n\t\t\t\t\t}\n\t\t\t\t});\n\nThis will fail on Input type validation at
the KeySelector (or any other function for example a mapper) with the following
exception:\n\nInput mismatch: Basic type expected."
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 01afe14..dd4b132 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -890,8 +890,8 @@ public class TypeExtractor {
}
if (!(type instanceof TypeVariable<?>)) {
- // check for basic type
- if (typeInfo.isBasicType()) {
+ // check for Java Basic Types
+ if (typeInfo instanceof BasicTypeInfo) {
TypeInformation<?> actual;
// check if basic type at all
@@ -904,8 +904,8 @@ public class TypeExtractor {
}
}
- // check for tuple
- else if (typeInfo.isTupleType()) {
+ // check for Java Tuples
+ else if (typeInfo instanceof TupleTypeInfo) {
// check if tuple at all
if (!(isClassType(type) && Tuple.class.isAssignableFrom(typeToClass(type)))) {
throw new InvalidTypesException("Tuple type expected.");
@@ -1079,9 +1079,9 @@ public class TypeExtractor {
// check for generic object
else if (typeInfo instanceof GenericTypeInfo<?>) {
Class<?> clazz = null;
- if (!(isClassType(type) && ((GenericTypeInfo<?>) typeInfo).getTypeClass() == (clazz = typeToClass(type)))) {
- throw new InvalidTypesException("Generic object type '"
- + ((GenericTypeInfo<?>) typeInfo).getTypeClass().getCanonicalName() + "' expected but was '"
+ if (!(isClassType(type) && (clazz = typeToClass(type)).isAssignableFrom(((GenericTypeInfo<?>) typeInfo).getTypeClass()))) {
+ throw new InvalidTypesException("Generic type '"
+ + ((GenericTypeInfo<?>) typeInfo).getTypeClass().getCanonicalName() + "' or a subclass of it expected but was '"
+ clazz.getCanonicalName() + "'.");
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3566_434e88fd.diff |
bugs-dot-jar_data_FLINK-2412_a56aad74 | ---
BugID: FLINK-2412
Summary: Race leading to IndexOutOfBoundsException when querying for buffer while
releasing SpillablePartition
Description: "When running a code as simple as: \n\n{noformat}\n\t\tExecutionEnvironment
env = ExecutionEnvironment.getExecutionEnvironment();\n\n\t\tDataSet<Edge<String,
NullValue>> edges = getEdgesDataSet(env);\n\t\tGraph<String, NullValue, NullValue>
graph = Graph.fromDataSet(edges, env);\n\n\t\tDataSet<Tuple2<String, Long>> degrees
= graph.getDegrees();\ndegrees.writeAsCsv(outputPath, \"\\n\", \" \");\n\t\t\tenv.execute();\n\non
the Friendster data set: https://snap.stanford.edu/data/com-Friendster.html; on
30 Wally nodes\n \nI get the following exception:\njava.lang.Exception: The data
preparation for task 'CoGroup (CoGroup at inDegrees(Graph.java:701))' , caused an
error: Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated
due to an exception: Fatal error at remote task manager 'wally028.cit.tu-berlin.de/130.149.249.38:53730'.\n\tat
org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:471)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:362)\n\tat
org.apache.flink.runtime.taskmanager.Task.run(Task.java:559)\n\tat java.lang.Thread.run(Thread.java:722)\nCaused
by: java.lang.RuntimeException: Error obtaining the sorted input: Thread 'SortMerger
Reading Thread' terminated due to an exception: Fatal error at remote task manager
'wally028.cit.tu-berlin.de/130.149.249.38:53730'.\n\tat org.apache.flink.runtime.operators.sort.UnilateralSortMerger.getIterator(UnilateralSortMerger.java:607)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.getInput(RegularPactTask.java:1145)\n\tat
org.apache.flink.runtime.operators.CoGroupDriver.prepare(CoGroupDriver.java:98)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:466)\n\t...
3 more\nCaused by: java.io.IOException: Thread 'SortMerger Reading Thread' terminated
due to an exception: Fatal error at remote task manager 'wally028.cit.tu-berlin.de/130.149.249.38:53730'.\n\tat
org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ThreadBase.run(UnilateralSortMerger.java:784)\nCaused
by: org.apache.flink.runtime.io.network.netty.exception.RemoteTransportException:
Fatal error at remote task manager 'wally028.cit.tu-berlin.de/130.149.249.38:53730'.\n\tat
org.apache.flink.runtime.io.network.netty.PartitionRequestClientHandler.decodeMsg(PartitionRequestClientHandler.java:227)\n\tat
org.apache.flink.runtime.io.network.netty.PartitionRequestClientHandler.channelRead(PartitionRequestClientHandler.java:162)\n\tat
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:339)\n\tat
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:324)\n\tat
io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103)\n\tat
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:339)\n\tat
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:324)\n\tat
io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:242)\n\tat
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:339)\n\tat
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:324)\n\tat
io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:847)\n\tat
io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:131)\n\tat
io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:511)\n\tat
io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:468)\n\tat
io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:382)\n\tat
io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:354)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:111)\n\tat
java.lang.Thread.run(Thread.java:722)\nCaused by: java.io.IOException: Index: 133,
Size: 0\n\n{noformat}\n\nThe code works fine for the Twitter data set, for instance,
which is bigger in size but contains fewer vertices. \n\n"
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java
index 91f2042..4a18691 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartition.java
@@ -59,7 +59,7 @@ class SpillableSubpartition extends ResultSubpartition {
private boolean isFinished;
/** Flag indicating whether the subpartition has been released. */
- private boolean isReleased;
+ boolean isReleased;
/** The read view to consume this subpartition. */
private ResultSubpartitionView readView;
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java
index d37f042..972e34b 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/SpillableSubpartitionView.java
@@ -73,6 +73,10 @@ class SpillableSubpartitionView implements ResultSubpartitionView {
// 1) In-memory
synchronized (parent.buffers) {
+ if (parent.isReleased) {
+ return null;
+ }
+
if (parent.spillWriter == null) {
if (currentQueuePosition < numberOfBuffers) {
Buffer buffer = parent.buffers.get(currentQueuePosition);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2412_a56aad74.diff |
bugs-dot-jar_data_FLINK-3684_e3759a5e | ---
BugID: FLINK-3684
Summary: CEP operator does not forward watermarks properly
Description: The CEP stream operator does not emit a proper watermark when using event
time.
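
For illustration, the contract the patch below restores: an operator that consumes watermarks must also forward them, otherwise downstream event-time logic (windows, timers) never advances. The following stand-alone sketch is illustrative only; the class and the callback types are simplified stand-ins, not the actual Flink operator API.

{code}
import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.function.Consumer;
import java.util.function.LongConsumer;

// Simplified model of an event-time operator: elements are buffered until the
// watermark passes their timestamp, and the watermark itself is then forwarded.
class BufferingOperator<T> {

    static final class Record<T> {
        final long timestamp;
        final T value;
        Record(long timestamp, T value) { this.timestamp = timestamp; this.value = value; }
    }

    private final PriorityQueue<Record<T>> buffer =
            new PriorityQueue<>(Comparator.comparingLong((Record<T> r) -> r.timestamp));
    private final Consumer<T> collect;          // stand-in for output.collect(...)
    private final LongConsumer emitWatermark;   // stand-in for output.emitWatermark(...)

    BufferingOperator(Consumer<T> collect, LongConsumer emitWatermark) {
        this.collect = collect;
        this.emitWatermark = emitWatermark;
    }

    void processElement(long timestamp, T value) {
        buffer.add(new Record<>(timestamp, value));
    }

    void processWatermark(long watermark) {
        // flush everything that is complete with respect to event time
        while (!buffer.isEmpty() && buffer.peek().timestamp <= watermark) {
            collect.accept(buffer.poll().value);
        }
        // the step the CEP operators were missing: forward the watermark
        emitWatermark.accept(watermark);
    }
}
{code}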
diff --git a/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/CEPPatternOperator.java b/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/CEPPatternOperator.java
index 153c9c9..7760817 100644
--- a/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/CEPPatternOperator.java
+++ b/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/CEPPatternOperator.java
@@ -87,6 +87,8 @@ public class CEPPatternOperator<IN> extends AbstractCEPPatternOperator<IN> {
processEvent(nfa, streamRecord.getValue(), streamRecord.getTimestamp());
}
+
+ output.emitWatermark(mark);
}
@Override
diff --git a/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/KeyedCEPPatternOperator.java b/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/KeyedCEPPatternOperator.java
index 5d754ce..5db8ef2 100644
--- a/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/KeyedCEPPatternOperator.java
+++ b/flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/operator/KeyedCEPPatternOperator.java
@@ -163,6 +163,8 @@ public class KeyedCEPPatternOperator<IN, KEY> extends AbstractCEPPatternOperator
processEvent(nfa, streamRecord.getValue(), streamRecord.getTimestamp());
}
}
+
+ output.emitWatermark(mark);
}
@Override
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3684_e3759a5e.diff |
bugs-dot-jar_data_FLINK-1458_91f9bfc7 | ---
BugID: FLINK-1458
Summary: Interfaces and abstract classes are not valid types
Description: "I don't know whether this is by design or is a bug, but I am having
trouble working with DataSet and traits in scala which is a major limitation. A
simple example is shown below. \n\nCompile time warning is 'Type Main.SimpleTrait
has no fields that are visible from Scala Type analysis. Falling back to Java Type
Analysis...'\n\nRun time error is 'Interfaces and abstract classes are not valid
types: interface Main$SimpleTrait'\n\nRegards, John\n\n\n val env = ExecutionEnvironment.getExecutionEnvironment\n\n
\ trait SimpleTrait {\n def contains(x: String): Boolean\n }\n\n class SimpleClass
extends SimpleTrait {\n def contains(x: String) = true\n }\n\n val data: DataSet[Double]
= env.fromElements(1.0, 2.0, 3.0, 4.0)\n\n def f(data: DataSet[Double]): DataSet[SimpleTrait]
= {\n\n data.mapPartition(iterator => {\n Iterator(new SimpleClass)\n })\n
\ }\n\n\n val g = f(data)\n g.print()\n\n\n env.execute(\"Simple example\")"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 99292a6..124055c 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -893,6 +893,10 @@ public class TypeExtractor {
while (!(isClassType(curT) && typeToClass(curT).equals(stopAtClass))) {
typeHierarchy.add(curT);
curT = typeToClass(curT).getGenericSuperclass();
+
+ if (curT == null) {
+ break;
+ }
}
return curT;
}
@@ -1090,11 +1094,6 @@ public class TypeExtractor {
ParameterizedType parameterizedType, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type) {
Validate.notNull(clazz);
- // check for abstract classes or interfaces
- if (!clazz.isPrimitive() && (Modifier.isInterface(clazz.getModifiers()) || (Modifier.isAbstract(clazz.getModifiers()) && !clazz.isArray()))) {
- throw new InvalidTypesException("Interfaces and abstract classes are not valid types: " + clazz);
- }
-
if (clazz.equals(Object.class)) {
return new GenericTypeInfo<OUT>(clazz);
}
@@ -1153,6 +1152,11 @@ public class TypeExtractor {
alreadySeen.add(clazz);
+ if (Modifier.isInterface(clazz.getModifiers())) {
+ // Interface has no members and is therefore not handled as POJO
+ return new GenericTypeInfo<OUT>(clazz);
+ }
+
if (clazz.equals(Class.class)) {
// special case handling for Class, this should not be handled by the POJO logic
return new GenericTypeInfo<OUT>(clazz);
@@ -1228,10 +1232,10 @@ public class TypeExtractor {
return true;
} else {
if(!hasGetter) {
- LOG.warn("Class "+clazz+" does not contain a getter for field "+f.getName() );
+ LOG.debug("Class "+clazz+" does not contain a getter for field "+f.getName() );
}
if(!hasSetter) {
- LOG.warn("Class "+clazz+" does not contain a setter for field "+f.getName() );
+ LOG.debug("Class "+clazz+" does not contain a setter for field "+f.getName() );
}
return false;
}
@@ -1251,11 +1255,16 @@ public class TypeExtractor {
}
List<Field> fields = getAllDeclaredFields(clazz);
+ if(fields.size() == 0) {
+ LOG.info("No fields detected for class " + clazz + ". Cannot be used as a PojoType. Will be handled as GenericType");
+ return new GenericTypeInfo<OUT>(clazz);
+ }
+
List<PojoField> pojoFields = new ArrayList<PojoField>();
for (Field field : fields) {
Type fieldType = field.getGenericType();
if(!isValidPojoField(field, clazz, typeHierarchy)) {
- LOG.warn("Class "+clazz+" is not a valid POJO type");
+ LOG.info("Class " + clazz + " is not a valid POJO type");
return null;
}
try {
@@ -1281,7 +1290,7 @@ public class TypeExtractor {
List<Method> methods = getAllDeclaredMethods(clazz);
for (Method method : methods) {
if (method.getName().equals("readObject") || method.getName().equals("writeObject")) {
- LOG.warn("Class "+clazz+" contains custom serialization methods we do not call.");
+ LOG.info("Class "+clazz+" contains custom serialization methods we do not call.");
return null;
}
}
@@ -1291,8 +1300,13 @@ public class TypeExtractor {
try {
clazz.getDeclaredConstructor();
} catch (NoSuchMethodException e) {
- LOG.warn("Class " + clazz + " must have a default constructor to be used as a POJO.");
- return null;
+ if (clazz.isInterface() || Modifier.isAbstract(clazz.getModifiers())) {
+ LOG.info("Class " + clazz + " is abstract or an interface, having a concrete " +
+ "type can increase performance.");
+ } else {
+ LOG.info("Class " + clazz + " must have a default constructor to be used as a POJO.");
+ return null;
+ }
}
// everything is checked, we return the pojo
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1458_91f9bfc7.diff |
bugs-dot-jar_data_FLINK-1311_94c8e3fa | ---
BugID: FLINK-1311
Summary: Auxiliary nodes in iterations are not correctly identified as "dynamic" or
"static"
Description: The static/dynamic path tagger starts on the original roots of the step
functions, ignoring possible auxiliary nodes that we need to attach to the root
(such as NoOps, when the root is a union).
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/PactCompiler.java b/flink-compiler/src/main/java/org/apache/flink/compiler/PactCompiler.java
index bec264d..a63cfd1 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/PactCompiler.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/PactCompiler.java
@@ -837,10 +837,7 @@ public class PactCompiler {
// go over the contained data flow and mark the dynamic path nodes
StaticDynamicPathIdentifier identifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
- rootOfStepFunction.accept(identifier);
- if(terminationCriterion != null){
- terminationCriterion.accept(identifier);
- }
+ iterNode.acceptForStepFunction(identifier);
}
else if (n instanceof WorksetIterationNode) {
final WorksetIterationNode iterNode = (WorksetIterationNode) n;
@@ -919,8 +916,7 @@ public class PactCompiler {
// go over the contained data flow and mark the dynamic path nodes
StaticDynamicPathIdentifier pathIdentifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
- nextWorksetNode.accept(pathIdentifier);
- iterNode.getSolutionSetDelta().accept(pathIdentifier);
+ iterNode.acceptForStepFunction(pathIdentifier);
}
}
};
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1311_94c8e3fa.diff |
bugs-dot-jar_data_FLINK-2074_6bc6dbec | ---
BugID: FLINK-2074
Summary: Sliding Window Keeps Emitting Elements After Source Stops Producing
Description: This happens when the source produces some elements, stops for a while,
and then produces some more elements before stopping again. After this, the window
keeps emitting the last emitted element indefinitely.
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingGroupedPreReducer.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingGroupedPreReducer.java
index 0872c6e..09fadf9 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingGroupedPreReducer.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingGroupedPreReducer.java
@@ -143,6 +143,7 @@ public abstract class SlidingGroupedPreReducer<T> extends SlidingPreReducer<T> {
@Override
protected void resetCurrent() {
currentReducedMap = null;
+ elementsSinceLastPreAggregate = 0;
}
@Override
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingTimePreReducer.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingTimePreReducer.java
index 7652d81..d84505c 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingTimePreReducer.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/windowing/windowbuffer/SlidingTimePreReducer.java
@@ -89,6 +89,7 @@ public class SlidingTimePreReducer<T> extends SlidingPreReducer<T> {
if (toRemove > 0 && lastPreAggregateSize == null) {
currentReduced = null;
+ elementsSinceLastPreAggregate = 0;
toRemove = 0;
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2074_6bc6dbec.diff |
bugs-dot-jar_data_FLINK-1437_fb7ce0e3 | ---
BugID: FLINK-1437
Summary: Bug in PojoSerializer's copy() method
Description: "The PojoSerializer's {{copy()}} method does not work properly with {{null}}
values. An exception could look like:\n\n{code}\nCaused by: java.io.IOException:
Thread 'SortMerger spilling thread' terminated due to an exception: null\n\tat org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ThreadBase.run(UnilateralSortMerger.java:792)\nCaused
by: java.io.EOFException\n\tat org.apache.flink.runtime.io.disk.RandomAccessInputView.nextSegment(RandomAccessInputView.java:83)\n\tat
org.apache.flink.runtime.memorymanager.AbstractPagedInputView.advance(AbstractPagedInputView.java:159)\n\tat
org.apache.flink.runtime.memorymanager.AbstractPagedInputView.readByte(AbstractPagedInputView.java:270)\n\tat
org.apache.flink.runtime.memorymanager.AbstractPagedInputView.readUnsignedByte(AbstractPagedInputView.java:277)\n\tat
org.apache.flink.types.StringValue.copyString(StringValue.java:839)\n\tat org.apache.flink.api.common.typeutils.base.StringSerializer.copy(StringSerializer.java:83)\n\tat
org.apache.flink.api.java.typeutils.runtime.PojoSerializer.copy(PojoSerializer.java:261)\n\tat
org.apache.flink.runtime.operators.sort.NormalizedKeySorter.writeToOutput(NormalizedKeySorter.java:449)\n\tat
org.apache.flink.runtime.operators.sort.UnilateralSortMerger$SpillingThread.go(UnilateralSortMerger.java:1303)\n\tat
org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ThreadBase.run(UnilateralSortMerger.java:788)\n{code}\n\nI'm
working on a fix for that..."
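
At its core the failure is a field copy that dereferences a possibly-null value. The following stand-alone sketch shows the null-guarded pattern the patch applies; the class and interface names are hypothetical and only mimic the relevant part of the serializer logic.

{code}
import java.lang.reflect.Field;

// Only delegate to the per-field copier when the source value is non-null,
// otherwise write null into the target explicitly (the case that used to fail).
final class NullSafeFieldCopier {

    interface FieldCopier {                       // stand-in for TypeSerializer#copy
        Object copy(Object value);
    }

    static void copyFields(Object from, Object target, Field[] fields, FieldCopier[] copiers)
            throws IllegalAccessException {
        for (int i = 0; i < fields.length; i++) {
            Object value = fields[i].get(from);
            if (value != null) {
                fields[i].set(target, copiers[i].copy(value));
            } else {
                fields[i].set(target, null);
            }
        }
    }
}
{code}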
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
index 99b9f65..1e58b9d 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
@@ -142,8 +142,14 @@ public final class PojoSerializer<T> extends TypeSerializer<T> {
try {
for (int i = 0; i < numFields; i++) {
- Object copy = fieldSerializers[i].copy(fields[i].get(from));
- fields[i].set(target, copy);
+ Object value = fields[i].get(from);
+ if (value != null) {
+ Object copy = fieldSerializers[i].copy(value);
+ fields[i].set(target, copy);
+ }
+ else {
+ fields[i].set(target, null);
+ }
}
}
catch (IllegalAccessException e) {
@@ -156,8 +162,14 @@ public final class PojoSerializer<T> extends TypeSerializer<T> {
public T copy(T from, T reuse) {
try {
for (int i = 0; i < numFields; i++) {
- Object copy = fieldSerializers[i].copy(fields[i].get(from), fields[i].get(reuse));
- fields[i].set(reuse, copy);
+ Object value = fields[i].get(from);
+ if (value != null) {
+ Object copy = fieldSerializers[i].copy(fields[i].get(from), fields[i].get(reuse));
+ fields[i].set(reuse, copy);
+ }
+ else {
+ fields[i].set(reuse, null);
+ }
}
} catch (IllegalAccessException e) {
throw new RuntimeException("Error during POJO copy, this should not happen since we check the fields" +
@@ -257,8 +269,11 @@ public final class PojoSerializer<T> extends TypeSerializer<T> {
// copy the Non-Null/Null tag
target.writeBoolean(source.readBoolean());
for (int i = 0; i < numFields; i++) {
- target.writeBoolean(source.readBoolean());
- fieldSerializers[i].copy(source, target);
+ boolean isNull = source.readBoolean();
+ target.writeBoolean(isNull);
+ if (!isNull) {
+ fieldSerializers[i].copy(source, target);
+ }
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1437_fb7ce0e3.diff |
bugs-dot-jar_data_FLINK-2964_76bebd42 | ---
BugID: FLINK-2964
Summary: MutableHashTable fails when spilling partitions without overflow segments
Description: "When one performs a join operation with many and large records then
the join operation fails with the following exception when it tries to spill a {{HashPartition}}.\n\n{code}\njava.lang.RuntimeException:
Bug in Hybrid Hash Join: Request to spill a partition with less than two buffers.\n\tat
org.apache.flink.runtime.operators.hash.HashPartition.spillPartition(HashPartition.java:302)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.spillPartition(MutableHashTable.java:1108)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.nextSegment(MutableHashTable.java:1277)\n\tat
org.apache.flink.runtime.operators.hash.HashPartition$BuildSideBuffer.nextSegment(HashPartition.java:524)\n\tat
org.apache.flink.runtime.memory.AbstractPagedOutputView.advance(AbstractPagedOutputView.java:140)\n\tat
org.apache.flink.runtime.memory.AbstractPagedOutputView.write(AbstractPagedOutputView.java:201)\n\tat
org.apache.flink.runtime.memory.AbstractPagedOutputView.write(AbstractPagedOutputView.java:178)\n\tat
org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer.serialize(BytePrimitiveArraySerializer.java:74)\n\tat
org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer.serialize(BytePrimitiveArraySerializer.java:30)\n\tat
org.apache.flink.runtime.operators.hash.HashPartition.insertIntoBuildBuffer(HashPartition.java:257)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.insertIntoTable(MutableHashTable.java:856)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.buildInitialTable(MutableHashTable.java:685)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.open(MutableHashTable.java:443)\n\tat
org.apache.flink.runtime.operators.hash.HashTableTest.testSpillingWhenBuildingTableWithoutOverflow(HashTableTest.java:234)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)\n\tat
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)\n\tat
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)\n\tat
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)\n\tat
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:271)\n\tat org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:70)\n\tat
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)\n\tat
org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)\n\tat org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)\n\tat
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)\n\tat org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)\n\tat
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)\n\tat org.junit.runners.ParentRunner.run(ParentRunner.java:309)\n\tat
org.junit.runner.JUnitCore.run(JUnitCore.java:160)\n\tat com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:78)\n\tat
com.intellij.rt.execution.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:212)\n\tat
com.intellij.rt.execution.junit.JUnitStarter.main(JUnitStarter.java:68)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native
Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat
com.intellij.rt.execution.application.AppMain.main(AppMain.java:140)\n{code}\n\nThe
reason is that the {{HashPartition}} does not include the number of memory segments
used by the {{BuildSideBuffer}} when it counts the currently occupied memory
segments."
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
index 32fd74a..97bef4a 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
@@ -207,7 +207,8 @@ public class HashPartition<BT, PT> extends AbstractPagedInputView implements See
*/
public int getNumOccupiedMemorySegments() {
// either the number of memory segments, or one for spilling
- final int numPartitionBuffers = this.partitionBuffers != null ? this.partitionBuffers.length : 1;
+ final int numPartitionBuffers = this.partitionBuffers != null ?
+ this.partitionBuffers.length : this.buildSideWriteBuffer.getNumOccupiedMemorySegments();
return numPartitionBuffers + numOverflowSegments;
}
@@ -541,6 +542,11 @@ public class HashPartition<BT, PT> extends AbstractPagedInputView implements See
int getBlockCount() {
return this.currentBlockNumber + 1;
}
+
+ int getNumOccupiedMemorySegments() {
+ // return the current segment + all filled segments
+ return this.targetList.size() + 1;
+ }
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
this.writer = writer;
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2964_76bebd42.diff |
bugs-dot-jar_data_FLINK-3011_a402002d | ---
BugID: FLINK-3011
Summary: Cannot cancel failing/restarting streaming job from the command line
Description: |-
I cannot cancel a failing/restarting job from the command-line client. The job cannot be rescheduled, so it keeps failing:
The exception I get:
13:58:11,240 INFO org.apache.flink.runtime.jobmanager.JobManager - Status of job 0c895d22c632de5dfe16c42a9ba818d5 (player-id) changed to RESTARTING.
13:58:25,234 INFO org.apache.flink.runtime.jobmanager.JobManager - Trying to cancel job with ID 0c895d22c632de5dfe16c42a9ba818d5.
13:58:25,561 WARN akka.remote.ReliableDeliverySupervisor - Association with remote system [akka.tcp://[email protected]:42012] has failed, address is now gated for [5000] ms. Reason is: [Disassociated].
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
index 9430d80..aae0b7c 100755
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
@@ -746,12 +746,6 @@ public class ExecutionGraph implements Serializable {
public void restart() {
try {
- if (state == JobStatus.FAILED) {
- if (!transitionState(JobStatus.FAILED, JobStatus.RESTARTING)) {
- throw new IllegalStateException("Execution Graph left the state FAILED while trying to restart.");
- }
- }
-
synchronized (progressLock) {
if (state != JobStatus.RESTARTING) {
throw new IllegalStateException("Can only restart job from state restarting.");
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3011_a402002d.diff |
bugs-dot-jar_data_FLINK-1496_0a4c7694 | ---
BugID: FLINK-1496
Summary: Events at unitialized input channels are lost
Description: If a program sends an event backwards to the producer task, it might
happen that some of its input channels have not been initialized yet (UnknownInputChannel).
In that case, the events are lost and will never be received at the producer.
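
The scheme introduced by the patch can be summarized as: remember every event that is sent while some channels are still unknown, and replay those events once the real channel is registered. The sketch below is a simplified stand-alone version; the names are illustrative and do not match the actual BufferReader API.

{code}
import java.util.ArrayList;
import java.util.List;

// Buffer outgoing task events while input channels are still uninitialized and
// replay them to each channel as soon as it becomes known.
class EventBufferingReader<E> {

    interface Channel<E> { void sendEvent(E event); }

    private final List<E> pendingEvents = new ArrayList<>();
    private int numberOfUninitializedChannels;

    void registerUnknownChannel() {
        numberOfUninitializedChannels++;
    }

    void sendToAll(List<Channel<E>> initializedChannels, E event) {
        for (Channel<E> channel : initializedChannels) {
            channel.sendEvent(event);
        }
        if (numberOfUninitializedChannels > 0) {
            pendingEvents.add(event);             // keep it for late-arriving channels
        }
    }

    void onChannelInitialized(Channel<E> newChannel) {
        for (E event : pendingEvents) {
            newChannel.sendEvent(event);          // replay everything it missed
        }
        if (--numberOfUninitializedChannels == 0) {
            pendingEvents.clear();
        }
    }
}
{code}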
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/reader/BufferReader.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/reader/BufferReader.java
index 91784f6..fca27fa 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/reader/BufferReader.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/reader/BufferReader.java
@@ -49,6 +49,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -91,6 +93,10 @@ public final class BufferReader implements BufferReaderBase {
private final AtomicReference<EventListener<BufferReaderBase>> readerListener = new AtomicReference<EventListener<BufferReaderBase>>(null);
+ private final List<TaskEvent> pendingEvents = new ArrayList<TaskEvent>();
+
+ private int numberOfUninitializedChannels;
+
// ------------------------------------------------------------------------
private boolean isIterativeReader;
@@ -149,17 +155,13 @@ public final class BufferReader implements BufferReaderBase {
return networkEnvironment.getConnectionManager();
}
- // TODO This is a work-around for the union reader
- boolean hasInputChannelWithData() {
- return !inputChannelsWithData.isEmpty();
- }
-
/**
* Returns the total number of input channels for this reader.
* <p>
* Note: This number might be smaller the current number of input channels
* of the reader as channels are possibly updated during runtime.
*/
+ @Override
public int getNumberOfInputChannels() {
return totalNumberOfInputChannels;
}
@@ -170,7 +172,11 @@ public final class BufferReader implements BufferReaderBase {
public void setInputChannel(IntermediateResultPartitionID partitionId, InputChannel inputChannel) {
synchronized (requestLock) {
- inputChannels.put(checkNotNull(partitionId), checkNotNull(inputChannel));
+ if (inputChannels.put(checkNotNull(partitionId), checkNotNull(inputChannel)) == null &&
+ inputChannel.getClass() == UnknownInputChannel.class) {
+
+ numberOfUninitializedChannels++;
+ }
}
}
@@ -202,7 +208,16 @@ public final class BufferReader implements BufferReaderBase {
inputChannels.put(partitionId, newChannel);
+
newChannel.requestIntermediateResultPartition(queueToRequest);
+
+ for (TaskEvent event : pendingEvents) {
+ newChannel.sendTaskEvent(event);
+ }
+
+ if (--numberOfUninitializedChannels == 0) {
+ pendingEvents.clear();
+ }
}
}
}
@@ -387,6 +402,10 @@ public final class BufferReader implements BufferReaderBase {
for (InputChannel inputChannel : inputChannels.values()) {
inputChannel.sendTaskEvent(event);
}
+
+ if (numberOfUninitializedChannels > 0) {
+ pendingEvents.add(event);
+ }
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1496_0a4c7694.diff |
bugs-dot-jar_data_FLINK-2754_68912126 | ---
BugID: FLINK-2754
Summary: FixedLengthRecordSorter can not write to output cross MemorySegments.
Description: FixedLengthRecordSorter cannot write output across MemorySegments. It only
appeared to work before because it was only ever called to write a single record.
This should be fixed and covered by additional unit tests.
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/FixedLengthRecordSorter.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/FixedLengthRecordSorter.java
index da96b17..3a44ab5 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/FixedLengthRecordSorter.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/FixedLengthRecordSorter.java
@@ -447,11 +447,13 @@ public final class FixedLengthRecordSorter<T> implements InMemorySorter<T> {
num -= recordsPerSegment;
} else {
// partially filled segment
- for (; num > 0; num--) {
+ for (; num > 0 && offset <= this.lastEntryOffset; num--, offset += this.recordSize) {
record = comparator.readWithKeyDenormalization(record, inView);
serializer.serialize(record, output);
}
}
+
+ offset = 0;
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2754_68912126.diff |
bugs-dot-jar_data_FLINK-1133_02c08456 | ---
BugID: FLINK-1133
Summary: Type extractor cannot determine type of function
Description: "This function fails in the type extractor.\n\n{code}\npublic static
final class DuplicateValue<T> implements MapFunction<Tuple1<T>, Tuple2<T, T>> {\n\t\t\n\t@Override\n\tpublic
Tuple2<T, T> map(Tuple1<T> vertex) {\n\t\treturn new Tuple2<T, T>(vertex.f0, vertex.f0);\n\t}\n}\n{code}"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index d5f3619..55f6b1f 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -421,12 +421,9 @@ public class TypeExtractor {
Type[] tupleElements = ((ParameterizedType) inType).getActualTypeArguments();
// go thru all tuple elements and search for type variables
for(int i = 0; i < tupleElements.length; i++) {
- if(tupleElements[i] instanceof TypeVariable) {
- inType = materializeTypeVariable(returnTypeHierarchy, (TypeVariable<?>) tupleElements[i]);
- info = findCorrespondingInfo(returnTypeVar, inType, ((TupleTypeInfo<?>) inTypeInfo).getTypeAt(i));
- if(info != null) {
- break;
- }
+ info = createTypeInfoFromInput(returnTypeVar, returnTypeHierarchy, tupleElements[i], ((TupleTypeInfo<?>) inTypeInfo).getTypeAt(i));
+ if(info != null) {
+ break;
}
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1133_02c08456.diff |
bugs-dot-jar_data_FLINK-2567_948b6e05 | ---
BugID: FLINK-2567
Summary: 'CsvParser: Quotes cannot be escaped inside quoted fields'
Description: |-
We should allow users to escape the quote character inside a quoted field.
Escaping could be realized through the \ character, as in: {{"This is an \"escaped\" quotation."}}
Mailing list thread: http://apache-flink-mailing-list-archive.1008284.n3.nabble.com/jira-Created-FLINK-2567-CsvParser-Quotes-cannot-be-escaped-inside-quoted-fields-td7654.html
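
The scanning rule added by the patch can be shown in isolation: inside a quoted field, a quote character preceded by a backslash does not terminate the field. The stand-alone sketch below is not the actual CsvInputFormat code and, like the patch, it only changes where the field ends; it does not unescape the content.

{code}
// Scan a quoted field, treating a backslash-escaped quote as part of the field.
final class QuotedFieldScanner {

    static String parseQuotedField(byte[] bytes, int start, byte quote) {
        final byte BACKSLASH = 92;
        int i = start + 1;                        // skip the opening quote
        while (i < bytes.length && (bytes[i] != quote || bytes[i - 1] == BACKSLASH)) {
            i++;
        }
        // content between the quotes; escape characters are left in place
        return new String(bytes, start + 1, i - (start + 1));
    }

    public static void main(String[] args) {
        byte[] line = "\"This is an \\\"escaped\\\" quotation.\"".getBytes();
        System.out.println(parseQuotedField(line, 0, (byte) '"'));
        // prints: This is an \"escaped\" quotation.
    }
}
{code}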
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/GenericCsvInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/GenericCsvInputFormat.java
index b132ca2..8d979bb 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/io/GenericCsvInputFormat.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/io/GenericCsvInputFormat.java
@@ -53,6 +53,8 @@ public abstract class GenericCsvInputFormat<OT> extends DelimitedInputFormat<OT>
private static final byte[] DEFAULT_FIELD_DELIMITER = new byte[] {','};
+ private static final byte BACKSLASH = 92;
+
// --------------------------------------------------------------------------------------------
// Variables for internal operation.
// They are all transient, because we do not want them so be serialized
@@ -443,9 +445,10 @@ public abstract class GenericCsvInputFormat<OT> extends DelimitedInputFormat<OT>
if(quotedStringParsing == true && bytes[i] == quoteCharacter) {
// quoted string parsing enabled and field is quoted
- // search for ending quote character
+ // search for ending quote character, continue when it is escaped
i++;
- while(i < limit && bytes[i] != quoteCharacter) {
+
+ while (i < limit && (bytes[i] != quoteCharacter || bytes[i-1] == BACKSLASH)){
i++;
}
i++;
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/StringParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/StringParser.java
index 27e49f5..47e4494 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/StringParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/StringParser.java
@@ -27,6 +27,7 @@ public class StringParser extends FieldParser<String> {
private boolean quotedStringParsing = false;
private byte quoteCharacter;
+ private static final byte BACKSLASH = 92;
private String result;
@@ -46,8 +47,8 @@ public class StringParser extends FieldParser<String> {
// quoted string parsing enabled and first character is a quote
i++;
- // search for ending quote character
- while(i < limit && bytes[i] != quoteCharacter) {
+ // search for ending quote character, continue when it is escaped
+ while (i < limit && (bytes[i] != quoteCharacter || bytes[i-1] == BACKSLASH)){
i++;
}
diff --git a/flink-core/src/main/java/org/apache/flink/types/parser/StringValueParser.java b/flink-core/src/main/java/org/apache/flink/types/parser/StringValueParser.java
index 086e3e4..bcb1042 100644
--- a/flink-core/src/main/java/org/apache/flink/types/parser/StringValueParser.java
+++ b/flink-core/src/main/java/org/apache/flink/types/parser/StringValueParser.java
@@ -31,6 +31,7 @@ public class StringValueParser extends FieldParser<StringValue> {
private boolean quotedStringParsing = false;
private byte quoteCharacter;
+ private static final byte BACKSLASH = 92;
private StringValue result;
@@ -51,8 +52,8 @@ public class StringValueParser extends FieldParser<StringValue> {
// quoted string parsing enabled and first character is a quote
i++;
- // search for ending quote character
- while(i < limit && bytes[i] != quoteCharacter) {
+ // search for ending quote character, continue when it is escaped
+ while (i < limit && (bytes[i] != quoteCharacter || bytes[i-1] == BACKSLASH)){
i++;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2567_948b6e05.diff |
bugs-dot-jar_data_FLINK-1214_6ecd0f82 | ---
BugID: FLINK-1214
Summary: Prevent partitioning pushdown unless partitions fields match exactly
Description: |-
Consider an operation grouped on fields (A, B), followed by an operation grouped on field (A).
Right now, the optimizer can push down the partitioning on (A), which serves both operations (the first step locally still groups by A and B). This may, however, be a bad idea in cases where the field A has a low cardinality or the value distribution is skewed.
Since we cannot determine that robustly yet, I suggest to disable this optimization for now.
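
To make the program shape concrete, the DataSet-API sketch below aggregates first on (A, B) and then on (A) alone; before this change the optimizer could satisfy both steps with a single hash-partitioning on A. The input tuples and the output path are placeholders.

{code}
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple3;

public class PartitioningPushdownExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // schema: (A, B, value)
        DataSet<Tuple3<Integer, Integer, Long>> data = env.fromElements(
                new Tuple3<>(1, 1, 10L), new Tuple3<>(1, 2, 20L), new Tuple3<>(2, 1, 30L));

        // first aggregation grouped on (A, B) ...
        DataSet<Tuple3<Integer, Integer, Long>> perAB = data.groupBy(0, 1).sum(2);
        // ... followed by an aggregation grouped on (A) only
        DataSet<Tuple3<Integer, Integer, Long>> perA = perAB.groupBy(0).sum(2);

        perA.writeAsCsv("/tmp/perA");   // placeholder output path
        env.execute("partitioning pushdown example");
    }
}
{code}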
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/SingleInputNode.java b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/SingleInputNode.java
index b3d639b..730c1bb 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/SingleInputNode.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/SingleInputNode.java
@@ -220,8 +220,21 @@ public abstract class SingleInputNode extends OptimizerNode {
// add all properties relevant to this node
for (OperatorDescriptorSingle dps : getPossibleProperties()) {
for (RequestedGlobalProperties gp : dps.getPossibleGlobalProperties()) {
+
+ if (gp.getPartitioning().isPartitionedOnKey()) {
+ // make sure that among the same partitioning types, we do not push anything down that has fewer key fields
+
+ for (RequestedGlobalProperties contained : props.getGlobalProperties()) {
+ if (contained.getPartitioning() == gp.getPartitioning() && gp.getPartitionedFields().isValidSubset(contained.getPartitionedFields())) {
+ props.getGlobalProperties().remove(contained);
+ break;
+ }
+ }
+ }
+
props.addGlobalProperties(gp);
}
+
for (RequestedLocalProperties lp : dps.getPossibleLocalProperties()) {
props.addLocalProperties(lp);
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1214_6ecd0f82.diff |
bugs-dot-jar_data_FLINK-2734_8b40bb7a | ---
BugID: FLINK-2734
Summary: ArrayKeySelector returns wrong positions (or fails)
Description: The {{ArrayKeySelector}} is broken and returns wrong values in all cases
except when [0] is the only key position.
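
The root cause is an index applied twice ({{fields[pos]}} with {{pos = fields[i]}}). A stand-alone sketch of the corrected extraction, with hypothetical names, is:

{code}
import java.lang.reflect.Array;
import java.util.Arrays;

// For key positions {1, 2} on an array value the selector must read value[1]
// and value[2]; the broken version read value[fields[fields[i]]] instead.
final class ArrayKeyExtraction {

    static Object[] extractKeys(Object arrayValue, int... fields) {
        Object[] key = new Object[fields.length];
        for (int i = 0; i < fields.length; i++) {
            key[i] = Array.get(arrayValue, fields[i]);   // index with fields[i]
        }
        return key;
    }

    public static void main(String[] args) {
        int[] value = {10, 20, 30, 40};
        System.out.println(Arrays.toString(extractKeys(value, 1, 2)));  // [20, 30]
    }
}
{code}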
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/datastream/KeyedDataStream.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/datastream/KeyedDataStream.java
index 7628815..100e5de 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/datastream/KeyedDataStream.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/datastream/KeyedDataStream.java
@@ -35,7 +35,8 @@ import org.apache.flink.streaming.runtime.partitioner.StreamPartitioner;
* @param <T> The type of the elements in the Keyed Stream
*/
public class KeyedDataStream<T> extends DataStream<T> {
- KeySelector<T, ?> keySelector;
+
+ protected final KeySelector<T, ?> keySelector;
/**
* Creates a new {@link KeyedDataStream} using the given {@link KeySelector}
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
index c50f23e..d91afc9 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/environment/StreamExecutionEnvironment.java
@@ -1228,8 +1228,7 @@ public abstract class StreamExecutionEnvironment {
*/
public static StreamExecutionEnvironment createRemoteEnvironment(String host, int port,
String... jarFiles) {
- RemoteStreamEnvironment env = new RemoteStreamEnvironment(host, port, jarFiles);
- return env;
+ return new RemoteStreamEnvironment(host, port, jarFiles);
}
/**
@@ -1298,8 +1297,7 @@ public abstract class StreamExecutionEnvironment {
if (transformations.size() <= 0) {
throw new IllegalStateException("No operators defined in streaming topology. Cannot execute.");
}
- StreamGraph result = StreamGraphGenerator.generate(this, transformations);
- return result;
+ return StreamGraphGenerator.generate(this, transformations);
}
/**
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
index 89c6142..cd32548 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
@@ -27,41 +27,9 @@ import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.operators.Keys;
import org.apache.flink.api.java.tuple.Tuple;
-import org.apache.flink.api.java.tuple.Tuple1;
-import org.apache.flink.api.java.tuple.Tuple10;
-import org.apache.flink.api.java.tuple.Tuple11;
-import org.apache.flink.api.java.tuple.Tuple12;
-import org.apache.flink.api.java.tuple.Tuple13;
-import org.apache.flink.api.java.tuple.Tuple14;
-import org.apache.flink.api.java.tuple.Tuple15;
-import org.apache.flink.api.java.tuple.Tuple16;
-import org.apache.flink.api.java.tuple.Tuple17;
-import org.apache.flink.api.java.tuple.Tuple18;
-import org.apache.flink.api.java.tuple.Tuple19;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.api.java.tuple.Tuple20;
-import org.apache.flink.api.java.tuple.Tuple21;
-import org.apache.flink.api.java.tuple.Tuple22;
-import org.apache.flink.api.java.tuple.Tuple23;
-import org.apache.flink.api.java.tuple.Tuple24;
-import org.apache.flink.api.java.tuple.Tuple25;
-import org.apache.flink.api.java.tuple.Tuple3;
-import org.apache.flink.api.java.tuple.Tuple4;
-import org.apache.flink.api.java.tuple.Tuple5;
-import org.apache.flink.api.java.tuple.Tuple6;
-import org.apache.flink.api.java.tuple.Tuple7;
-import org.apache.flink.api.java.tuple.Tuple8;
-import org.apache.flink.api.java.tuple.Tuple9;
public class KeySelectorUtil {
- public static Class<?>[] tupleClasses = new Class[] { Tuple1.class, Tuple2.class, Tuple3.class,
- Tuple4.class, Tuple5.class, Tuple6.class, Tuple7.class, Tuple8.class, Tuple9.class,
- Tuple10.class, Tuple11.class, Tuple12.class, Tuple13.class, Tuple14.class,
- Tuple15.class, Tuple16.class, Tuple17.class, Tuple18.class, Tuple19.class,
- Tuple20.class, Tuple21.class, Tuple22.class, Tuple23.class, Tuple24.class,
- Tuple25.class };
-
public static <X> KeySelector<X, ?> getSelectorForKeys(Keys<X> keys, TypeInformation<X> typeInfo, ExecutionConfig executionConfig) {
int[] logicalKeyPositions = keys.computeLogicalKeyPositions();
int keyLength = logicalKeyPositions.length;
@@ -129,7 +97,7 @@ public class KeySelectorUtil {
@Override
public Tuple getKey(IN value) throws Exception {
- key = (Tuple) tupleClasses[keyLength - 1].newInstance();
+ key = Tuple.getTupleClass(keyLength).newInstance();
comparator.extractKeys(value, keyArray, 0);
for (int i = 0; i < keyLength; i++) {
key.setField(keyArray[i], i);
@@ -139,12 +107,11 @@ public class KeySelectorUtil {
}
- public static class ArrayKeySelector<IN> implements KeySelector<IN, Tuple> {
+ public static final class ArrayKeySelector<IN> implements KeySelector<IN, Tuple> {
private static final long serialVersionUID = 1L;
-
- Tuple key;
- int[] fields;
+
+ private final int[] fields;
public ArrayKeySelector(int... fields) {
this.fields = fields;
@@ -152,10 +119,9 @@ public class KeySelectorUtil {
@Override
public Tuple getKey(IN value) throws Exception {
- key = (Tuple) tupleClasses[fields.length - 1].newInstance();
+ Tuple key = Tuple.getTupleClass(fields.length).newInstance();
for (int i = 0; i < fields.length; i++) {
- int pos = fields[i];
- key.setField(Array.get(value, fields[pos]), i);
+ key.setField(Array.get(value, fields[i]), i);
}
return key;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2734_8b40bb7a.diff |
bugs-dot-jar_data_FLINK-2812_e494c279 | ---
BugID: FLINK-2812
Summary: KeySelectorUtil.getSelectorForKeys and TypeExtractor.getKeySelectorTypes
are incompatible
Description: "The following code snippet fails, because {{KeySelectorUtil.getSelectorForKeys}}
returns the base {{Tuple}} type.\n\n```java\nTypeInformation<Tuple2<Integer, Integer>>
typeInfo = TypeExtractor\n.getForObject(Tuple2.of(0, 0));\n\nExecutionConfig config
= new ExecutionConfig();\n\nKeySelector<Tuple2<Integer, Integer>, ?> keySelector
= KeySelectorUtil.getSelectorForKeys(\nnew Keys.ExpressionKeys<>(new int[]{0}, typeInfo),
typeInfo, config);\n\n// fails with InvalidTypesException\nTypeExtractor.getKeySelectorTypes(keySelector,
typeInfo); \n```\n\nHowever, if I manually define the key selector as follows, the
snippet works fine because the key type is an Integer.\n\n```java\nKeySelector<Tuple2<Integer,
Integer>, Integer> keySelector =\n\nnew KeySelector<Tuple2<Integer, Integer>, Integer>()
{\n\t@Override\n\tpublic Integer getKey(Tuple2<Integer, Integer> value) throws Exception
{\n\t\treturn value.f0;\n\t}\n};\n```\n\nThe error message looks like this:\norg.apache.flink.api.common.functions.InvalidTypesException:
Usage of class Tuple as a type is not allowed. Use a concrete subclass (e.g. Tuple1,
Tuple2, etc.) instead.\n\tat org.apache.flink.api.java.typeutils.TypeExtractor.createTypeInfoWithTypeHierarchy(TypeExtractor.java:401)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.privateCreateTypeInfo(TypeExtractor.java:379)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.getUnaryOperatorReturnType(TypeExtractor.java:279)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.getKeySelectorTypes(TypeExtractor.java:229)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.getKeySelectorTypes(TypeExtractor.java:223)\n"
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
index d8839a0..9c76d95 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/util/keys/KeySelectorUtil.java
@@ -28,6 +28,8 @@ import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.operators.Keys;
import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
+import org.apache.flink.api.java.typeutils.TupleTypeInfo;
/**
* Utility class that contains helper methods to manipulating {@link KeySelector} for streaming.
@@ -47,12 +49,14 @@ public final class KeySelectorUtil {
// use ascending order here, the code paths for that are usually a slight bit faster
boolean[] orders = new boolean[numKeyFields];
+ TypeInformation[] typeInfos = new TypeInformation[numKeyFields];
for (int i = 0; i < numKeyFields; i++) {
orders[i] = true;
+ typeInfos[i] = compositeType.getTypeAt(logicalKeyPositions[i]);
}
-
+
TypeComparator<X> comparator = compositeType.createComparator(logicalKeyPositions, orders, 0, executionConfig);
- return new ComparableKeySelector<X>(comparator, numKeyFields);
+ return new ComparableKeySelector<>(comparator, numKeyFields, new TupleTypeInfo<>(typeInfos));
}
@@ -70,7 +74,7 @@ public final class KeySelectorUtil {
TypeComparator<X> comparator = ((CompositeType<X>) typeInfo).createComparator(
logicalKeyPositions, new boolean[1], 0, executionConfig);
- return new OneKeySelector<X, K>(comparator);
+ return new OneKeySelector<>(comparator);
}
/**
@@ -111,21 +115,23 @@ public final class KeySelectorUtil {
*
* @param <IN> The type from which the key is extracted.
*/
- public static final class ComparableKeySelector<IN> implements KeySelector<IN, Tuple> {
+ public static final class ComparableKeySelector<IN> implements KeySelector<IN, Tuple>, ResultTypeQueryable<Tuple> {
private static final long serialVersionUID = 1L;
private final TypeComparator<IN> comparator;
private final int keyLength;
+ private final TupleTypeInfo tupleTypeInfo;
/** Reusable array to hold the key objects. Since this is initially empty (all positions
* are null), it does not have any serialization problems */
@SuppressWarnings("NonSerializableFieldInSerializableClass")
private final Object[] keyArray;
- public ComparableKeySelector(TypeComparator<IN> comparator, int keyLength) {
+ public ComparableKeySelector(TypeComparator<IN> comparator, int keyLength, TupleTypeInfo tupleTypeInfo) {
this.comparator = comparator;
this.keyLength = keyLength;
+ this.tupleTypeInfo = tupleTypeInfo;
keyArray = new Object[keyLength];
}
@@ -139,6 +145,10 @@ public final class KeySelectorUtil {
return key;
}
+ @Override
+ public TypeInformation<Tuple> getProducedType() {
+ return tupleTypeInfo;
+ }
}
// ------------------------------------------------------------------------
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2812_e494c279.diff |
bugs-dot-jar_data_FLINK-3760_494212b3 | ---
BugID: FLINK-3760
Summary: 'Fix StateDescriptor.readObject '
Description: The readObject method of StateDescriptor uses {{ObjectInputStream.read()}}.
For very large serialized default values this will not necessarily read all data
in one go. We need a loop that reads it in several steps.
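
This is the general InputStream contract rather than anything Flink-specific: {{read(byte[])}} may return fewer bytes than requested, so callers must loop or use {{DataInput#readFully}} (which ObjectInputStream also provides). A plain-Java sketch:

{code}
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadFullyExample {

    // manual loop, equivalent to what readFully does internally
    static void readFully(InputStream in, byte[] buffer) throws IOException {
        int pos = 0;
        while (pos < buffer.length) {
            int read = in.read(buffer, pos, buffer.length - pos);
            if (read == -1) {
                throw new IOException("Stream ended after " + pos + " of " + buffer.length + " bytes");
            }
            pos += read;
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] data = new byte[1 << 20];                       // stand-in for a large serialized default value
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));

        byte[] buffer = new byte[data.length];
        in.readFully(buffer);                                  // blocks until all bytes are read
        // a single in.read(buffer) is allowed to stop short of data.length
    }
}
{code}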
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java b/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
index 10ac5ba..243ebcd 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/state/StateDescriptor.java
@@ -61,12 +61,12 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
/** The type information describing the value type. Only used to lazily create the serializer
* and dropped during serialization */
private transient TypeInformation<T> typeInfo;
-
+
// ------------------------------------------------------------------------
-
+
/**
* Create a new {@code StateDescriptor} with the given name and the given type serializer.
- *
+ *
* @param name The name of the {@code StateDescriptor}.
* @param serializer The type serializer for the values in the state.
* @param defaultValue The default value that will be set when requesting state without setting
@@ -94,7 +94,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
/**
* Create a new {@code StateDescriptor} with the given name and the given type information.
- *
+ *
* <p>If this constructor fails (because it is not possible to describe the type via a class),
* consider using the {@link #StateDescriptor(String, TypeInformation, Object)} constructor.
*
@@ -106,7 +106,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
protected StateDescriptor(String name, Class<T> type, T defaultValue) {
this.name = requireNonNull(name, "name must not be null");
requireNonNull(type, "type class must not be null");
-
+
try {
this.typeInfo = TypeExtractor.createTypeInfo(type);
} catch (Exception e) {
@@ -117,7 +117,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
}
// ------------------------------------------------------------------------
-
+
/**
* Returns the name of this {@code StateDescriptor}.
*/
@@ -152,21 +152,21 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
throw new IllegalStateException("Serializer not yet initialized.");
}
}
-
+
/**
* Creates a new {@link State} on the given {@link StateBackend}.
*
* @param stateBackend The {@code StateBackend} on which to create the {@link State}.
*/
public abstract S bind(StateBackend stateBackend) throws Exception;
-
+
// ------------------------------------------------------------------------
/**
* Checks whether the serializer has been initialized. Serializer initialization is lazy,
* to allow parametrization of serializers with an {@link ExecutionConfig} via
* {@link #initializeSerializerUnlessSet(ExecutionConfig)}.
- *
+ *
* @return True if the serializers have been initialized, false otherwise.
*/
public boolean isSerializerInitialized() {
@@ -175,7 +175,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
/**
* Initializes the serializer, unless it has been initialized before.
- *
+ *
* @param executionConfig The execution config to use when creating the serializer.
*/
public void initializeSerializerUnlessSet(ExecutionConfig executionConfig) {
@@ -188,7 +188,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
}
}
}
-
+
/**
* This method should be called by subclasses prior to serialization. Because the TypeInformation is
* not always serializable, it is 'transient' and dropped during serialization. Hence, the descriptor
@@ -204,7 +204,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
}
}
}
-
+
// ------------------------------------------------------------------------
// Standard Utils
// ------------------------------------------------------------------------
@@ -230,7 +230,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
@Override
public String toString() {
- return getClass().getSimpleName() +
+ return getClass().getSimpleName() +
"{name=" + name +
", defaultValue=" + defaultValue +
", serializer=" + serializer +
@@ -257,7 +257,7 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
out.writeBoolean(true);
byte[] serializedDefaultValue;
- try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(baos))
{
TypeSerializer<T> duplicateSerializer = serializer.duplicate();
@@ -284,12 +284,10 @@ public abstract class StateDescriptor<S extends State, T> implements Serializabl
boolean hasDefaultValue = in.readBoolean();
if (hasDefaultValue) {
int size = in.readInt();
+
byte[] buffer = new byte[size];
- int bytesRead = in.read(buffer);
- if (bytesRead != size) {
- throw new RuntimeException("Read size does not match expected size.");
- }
+ in.readFully(buffer);
try (ByteArrayInputStream bais = new ByteArrayInputStream(buffer);
DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(bais))
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3760_494212b3.diff |
bugs-dot-jar_data_FLINK-1640_8f321c72 | ---
BugID: FLINK-1640
Summary: FileOutputFormat writes to wrong path if path ends with '/'
Description: |-
The FileOutputFormat duplicates the last directory of a path if the path ends with a slash '/'.
For example, if the output path is specified as {{/home/myuser/outputPath/}}, the output is written to {{/home/myuser/outputPath/outputPath/}}.
This bug was introduced by commit 8fc04e4da8a36866e10564205c3f900894f4f6e0
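
For reference, the normalization rule introduced by the patch can be sketched in isolation (a hypothetical helper, not the actual Path class): repeated separators are collapsed and a trailing slash is dropped, so the last path component is no longer duplicated.

{code}
// Collapse repeated separators and drop a trailing slash (unless the path is "/"),
// so that the name of "/home/myuser/outputPath/" is "outputPath" and not "".
final class PathNormalization {

    static String normalize(String path) {
        path = path.trim().replace("\\", "/").replaceAll("/+", "/");
        if (!path.equals("/") && path.endsWith("/")) {
            path = path.substring(0, path.length() - 1);
        }
        return path;
    }

    static String name(String normalizedPath) {
        return normalizedPath.substring(normalizedPath.lastIndexOf('/') + 1);
    }

    public static void main(String[] args) {
        String p = normalize("/home/myuser/outputPath/");
        System.out.println(p);        // /home/myuser/outputPath
        System.out.println(name(p));  // outputPath
    }
}
{code}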
diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
index a104d86..30a2a65 100644
--- a/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
+++ b/flink-core/src/main/java/org/apache/flink/core/fs/Path.java
@@ -37,6 +37,8 @@ import org.apache.flink.util.StringUtils;
/**
* Names a file or directory in a {@link FileSystem}. Path strings use slash as
* the directory separator. A path string is absolute if it begins with a slash.
+ *
+ * Tailing slashes are removed from the path.
*/
public class Path implements IOReadableWritable, Serializable {
@@ -71,7 +73,7 @@ public class Path implements IOReadableWritable, Serializable {
* Constructs a path object from a given URI.
*
* @param uri
- * the URI to contruct the path object from
+ * the URI to construct the path object from
*/
public Path(URI uri) {
this.uri = uri;
@@ -143,20 +145,24 @@ public class Path implements IOReadableWritable, Serializable {
}
/**
- * Checks if the provided path string is either null or has zero length and throws
+ * Checks if the provided path string is either null or has zero length and throws
* a {@link IllegalArgumentException} if any of the two conditions apply.
- *
+ * In addition, leading and tailing whitespaces are removed.
+ *
* @param path
* the path string to be checked
+ * @return The checked and trimmed path.
*/
- private void checkPathArg(String path) {
+ private String checkAndTrimPathArg(String path) {
// disallow construction of a Path from an empty string
if (path == null) {
throw new IllegalArgumentException("Can not create a Path from a null string");
}
+ path = path.trim();
if (path.length() == 0) {
throw new IllegalArgumentException("Can not create a Path from an empty string");
}
+ return path;
}
/**
@@ -167,7 +173,7 @@ public class Path implements IOReadableWritable, Serializable {
* the string to construct a path from
*/
public Path(String pathString) {
- checkPathArg(pathString);
+ pathString = checkAndTrimPathArg(pathString);
// We can't use 'new URI(String)' directly, since it assumes things are
// escaped, which we don't require of Paths.
@@ -217,7 +223,7 @@ public class Path implements IOReadableWritable, Serializable {
* the path string
*/
public Path(String scheme, String authority, String path) {
- checkPathArg(path);
+ path = checkAndTrimPathArg(path);
initialize(scheme, authority, path);
}
@@ -247,9 +253,18 @@ public class Path implements IOReadableWritable, Serializable {
* @return the normalized path string
*/
private String normalizePath(String path) {
- // remove double slashes & backslashes
- path = path.replace("//", "/");
+
+ // remove leading and tailing whitespaces
+ path = path.trim();
+
+ // remove consecutive slashes & backslashes
path = path.replace("\\", "/");
+ path = path.replaceAll("/+", "/");
+
+ // remove tailing separator
+ if(!path.equals(SEPARATOR) && path.endsWith(SEPARATOR)) {
+ path = path.substring(0, path.length() - SEPARATOR.length());
+ }
return path;
}
@@ -306,23 +321,19 @@ public class Path implements IOReadableWritable, Serializable {
}
/**
- * Returns the final component of this path.
+ * Returns the final component of this path, i.e., everything that follows the last separator.
*
* @return the final component of the path
*/
public String getName() {
final String path = uri.getPath();
- if (path.endsWith(SEPARATOR)) {
- final int slash = path.lastIndexOf(SEPARATOR, path.length() - SEPARATOR.length() - 1);
- return path.substring(slash + 1, path.length() - SEPARATOR.length());
- } else {
- final int slash = path.lastIndexOf(SEPARATOR);
- return path.substring(slash + 1);
- }
+ final int slash = path.lastIndexOf(SEPARATOR);
+ return path.substring(slash + 1);
}
/**
- * Returns the parent of a path or <code>null</code> if at root.
+ * Returns the parent of a path, i.e., everything that precedes the last separator
+ * or <code>null</code> if at root.
*
* @return the parent of a path or <code>null</code> if at root.
*/
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1640_8f321c72.diff |
bugs-dot-jar_data_FLINK-3189_a5b05566 | ---
BugID: FLINK-3189
Summary: Error while parsing job arguments passed by CLI
Description: "Flink CLI treats job arguments provided in format \"-<char>\" as its
own parameters, which results in errors in execution.\n\nExample 1:\ncall: >bin/flink
info myJarFile.jar -f flink -i <filepath> -m 1\nerror: Unrecognized option: -f\n\nExample
2:\nJob myJarFile.jar is uploaded to web submission client, flink parameter box
is empty\nprogram arguments box: -f flink -i <filepath> -m 1\nerror: \nAn unexpected
error occurred:\nUnrecognized option: -f\norg.apache.flink.client.cli.CliArgsException:
Unrecognized option: -f\n\tat org.apache.flink.client.cli.CliFrontendParser.parseInfoCommand(CliFrontendParser.java:296)\n\tat
org.apache.flink.client.CliFrontend.info(CliFrontend.java:376)\n\tat org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:983)\n\tat
org.apache.flink.client.web.JobSubmissionServlet.doGet(JobSubmissionServlet.java:171)\n\tat
javax.servlet.http.HttpServlet.service(HttpServlet.java:734)\n\tat javax.servlet.http.HttpServlet.service(HttpServlet.java:847)\n\tat
org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:532)\n\tat org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:453)\n\tat
org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:227)\n\tat
org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:965)\n\tat
org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:388)\n\tat
org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:187)\n\tat
org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:901)\n\tat
org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:117)\n\tat
org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:47)\n\tat org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:113)\n\tat
org.eclipse.jetty.server.Server.handle(Server.java:348)\n\tat org.eclipse.jetty.server.HttpConnection.handleRequest(HttpConnection.java:596)\n\tat
org.eclipse.jetty.server.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:1048)\n\tat
org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:549)\n\tat org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:211)\n\tat
org.eclipse.jetty.server.HttpConnection.handle(HttpConnection.java:425)\n\tat org.eclipse.jetty.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:489)\n\tat
org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:436)\n\tat
java.lang.Thread.run(Thread.java:745)\n\nExecution of \n>bin/flink run myJarFile.jar
-f flink -i <filepath> -m 1 \nworks perfectly fine"
diff --git a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
index 4e081fd..07d409e 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
@@ -339,7 +339,7 @@ public class CliFrontendParser {
public static InfoOptions parseInfoCommand(String[] args) throws CliArgsException {
try {
PosixParser parser = new PosixParser();
- CommandLine line = parser.parse(INFO_OPTIONS, args, false);
+ CommandLine line = parser.parse(INFO_OPTIONS, args, true);
return new InfoOptions(line);
}
catch (ParseException e) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3189_a5b05566.diff |
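
A hedged sketch of the behaviour the one-line fix relies on, written directly against Apache Commons CLI 1.x (no Flink classes; the "-m" option and argument values are examples only). With stopAtNonOption=false an unknown token such as "-f" raises "Unrecognized option"; with true, parsing stops at the first non-option token and the remainder is handed back as plain program arguments:

{code}
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;

public class StopAtNonOptionSketch {
    public static void main(String[] args) throws ParseException {
        Options known = new Options();
        known.addOption("m", true, "example option known to the CLI");

        String[] line = {"myJarFile.jar", "-f", "flink", "-i", "input", "-m", "1"};

        // lenient parsing: everything from the first unrecognized token on ends up in getArgs()
        CommandLine lenient = new PosixParser().parse(known, line, true);
        System.out.println(java.util.Arrays.toString(lenient.getArgs()));

        // strict parsing reproduces the reported error:
        // new PosixParser().parse(known, line, false);   // throws UnrecognizedOptionException
    }
}
{code}
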
bugs-dot-jar_data_FLINK-2447_5546a1ef | ---
BugID: FLINK-2447
Summary: TypeExtractor returns wrong type info when a Tuple has two fields of the
same POJO type
Description: "Consider the following code:\n\nDataSet<FooBarPojo> d1 = env.fromElements(new
FooBarPojo());\n\t\tDataSet<Tuple2<FooBarPojo, FooBarPojo>> d2 = d1.map(new MapFunction<FooBarPojo,
Tuple2<FooBarPojo, FooBarPojo>>() {\n\t\t\t@Override\n\t\t\tpublic Tuple2<FooBarPojo,
FooBarPojo> map(FooBarPojo value) throws Exception {\n\t\t\t\treturn null;\n\t\t\t}\n\t\t});\n\nwhere
FooBarPojo is the following type:\npublic class FooBarPojo {\n\tpublic int foo,
bar;\n\tpublic FooBarPojo() {}\n}\n\nThis should print a tuple type with two identical
fields:\nJava Tuple2<PojoType<FooBarPojo, fields = [bar: Integer, foo: Integer]>,
PojoType<FooBarPojo, fields = [bar: Integer, foo: Integer]>>\n\nBut it prints the
following instead:\nJava Tuple2<PojoType<FooBarPojo, fields = [bar: Integer, foo:
Integer]>, GenericType<FooBarPojo>>\n\nNote, that this problem causes some co-groups
in Gelly to crash with \"org.apache.flink.api.common.InvalidProgramException: The
pair of co-group keys are not compatible with each other\" when the vertex ID type
is a POJO, because the second field of the Edge type gets to be a generic type,
but the POJO gets recognized in the Vertex type, and getNumberOfKeyFields returns
different numbers for the POJO and the generic type.\n\nThe source of the problem
is the mechanism in TypeExtractor that would detect recursive types (see the \"alreadySeen\"
field in TypeExtractor), as it mistakes the second appearance of FooBarPojo for
a recursive field.\n\nSpecifically, the following happens: createTypeInfoWithTypeHierarchy
starts to process the Tuple2<FooBarPojo, FooBarPojo> type, and in line 434 it calls
itself for the first field, which proceeds into the privateGetForClass case which
correctly detects that it is a POJO, and correctly returns a PojoTypeInfo; but in
the meantime in line 1191, privateGetForClass adds PojoTypeInfo to \"alreadySeen\".
Then the outer createTypeInfoWithTypeHierarchy approaches the second field, goes
into privateGetForClass, which mistakenly returns a GenericTypeInfo, as it thinks
in line 1187, that a recursive type is being processed.\n\n(Note, that if we comment
out the recursive type detection (the lines that do their thing with the alreadySeen
field), then the output is correct.)"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 41644f9..1ae8d3d 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -26,9 +26,7 @@ import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.util.ArrayList;
-import java.util.HashSet;
import java.util.List;
-import java.util.Set;
import org.apache.avro.specific.SpecificRecordBase;
import org.apache.flink.api.common.functions.CoGroupFunction;
@@ -66,15 +64,33 @@ import com.google.common.base.Preconditions;
* functions.
*/
public class TypeExtractor {
+
+ /*
+ * NOTE: Most methods of the TypeExtractor work with a so-called "typeHierarchy".
+ * The type hierarchy describes all types (Classes, ParameterizedTypes, TypeVariables etc. ) and intermediate
+ * types from a given type of a function or type (e.g. MyMapper, Tuple2) until a current type
+ * (depends on the method, e.g. MyPojoFieldType).
+ *
+ * Thus, it fully qualifies types until tuple/POJO field level.
+ *
+ * A typical typeHierarchy could look like:
+ *
+ * UDF: MyMapFunction.class
+ * top-level UDF: MyMapFunctionBase.class
+ * RichMapFunction: RichMapFunction.class
+ * MapFunction: MapFunction.class
+ * Function's OUT: Tuple1<MyPojo>
+ * user-defined POJO: MyPojo.class
+ * user-defined top-level POJO: MyPojoBase.class
+ * POJO field: Tuple1<String>
+ * Field type: String.class
+ *
+ */
private static final Logger LOG = LoggerFactory.getLogger(TypeExtractor.class);
- // We need this to detect recursive types and not get caught
- // in an endless recursion
- private Set<Class<?>> alreadySeen;
-
protected TypeExtractor() {
- alreadySeen = new HashSet<Class<?>>();
+ // only create instances for special use cases
}
// --------------------------------------------------------------------------------------------
@@ -416,10 +432,12 @@ public class TypeExtractor {
TypeInformation<?>[] tupleSubTypes = new TypeInformation<?>[subtypes.length];
for (int i = 0; i < subtypes.length; i++) {
+ ArrayList<Type> subTypeHierarchy = new ArrayList<Type>(typeHierarchy);
+ subTypeHierarchy.add(subtypes[i]);
// sub type could not be determined with materializing
// try to derive the type info of the TypeVariable from the immediate base child input as a last attempt
if (subtypes[i] instanceof TypeVariable<?>) {
- tupleSubTypes[i] = createTypeInfoFromInputs((TypeVariable<?>) subtypes[i], typeHierarchy, in1Type, in2Type);
+ tupleSubTypes[i] = createTypeInfoFromInputs((TypeVariable<?>) subtypes[i], subTypeHierarchy, in1Type, in2Type);
// variable could not be determined
if (tupleSubTypes[i] == null) {
@@ -430,7 +448,7 @@ public class TypeExtractor {
+ "all variables in the return type can be deduced from the input type(s).");
}
} else {
- tupleSubTypes[i] = createTypeInfoWithTypeHierarchy(new ArrayList<Type>(typeHierarchy), subtypes[i], in1Type, in2Type);
+ tupleSubTypes[i] = createTypeInfoWithTypeHierarchy(subTypeHierarchy, subtypes[i], in1Type, in2Type);
}
}
@@ -912,6 +930,19 @@ public class TypeExtractor {
// --------------------------------------------------------------------------------------------
// Utility methods
// --------------------------------------------------------------------------------------------
+
+ /**
+ * @return number of items with equal type or same raw type
+ */
+ private static int countTypeInHierarchy(ArrayList<Type> typeHierarchy, Type type) {
+ int count = 0;
+ for (Type t : typeHierarchy) {
+ if (t == type || (isClassType(type) && t == typeToClass(type))) {
+ count++;
+ }
+ }
+ return count;
+ }
/**
* @param curT : start type
@@ -1183,12 +1214,10 @@ public class TypeExtractor {
return (TypeInformation<OUT>) new AvroTypeInfo(clazz);
}
- if (alreadySeen.contains(clazz)) {
+ if (countTypeInHierarchy(typeHierarchy, clazz) > 1) {
return new GenericTypeInfo<OUT>(clazz);
}
- alreadySeen.add(clazz);
-
if (Modifier.isInterface(clazz.getModifiers())) {
// Interface has no members and is therefore not handled as POJO
return new GenericTypeInfo<OUT>(clazz);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2447_5546a1ef.diff |
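
A plain-Java sketch of the distinction the patch makes (no Flink classes; the hierarchy is faked with a List<Class<?>>): a global "alreadySeen" set survives across sibling tuple fields and wrongly flags the second FooBarPojo as recursive, while counting occurrences inside the per-field type hierarchy does not:

{code}
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RecursionDetectionSketch {

    static int countInHierarchy(List<Class<?>> hierarchy, Class<?> type) {
        int count = 0;
        for (Class<?> t : hierarchy) {
            if (t == type) {
                count++;
            }
        }
        return count;
    }

    public static void main(String[] args) {
        class FooBarPojo { int foo, bar; }

        // Old behaviour: one shared set for the whole extraction run.
        Set<Class<?>> alreadySeen = new HashSet<>();
        System.out.println("field 1 treated as generic? " + !alreadySeen.add(FooBarPojo.class)); // false
        System.out.println("field 2 treated as generic? " + !alreadySeen.add(FooBarPojo.class)); // true (the bug)

        // New behaviour: each field carries its own hierarchy path (Tuple2 -> field type).
        for (int field = 1; field <= 2; field++) {
            List<Class<?>> hierarchy = new ArrayList<>();
            hierarchy.add(Object.class);       // stands in for the enclosing Tuple2
            hierarchy.add(FooBarPojo.class);   // the field currently being analyzed
            boolean generic = countInHierarchy(hierarchy, FooBarPojo.class) > 1;
            System.out.println("field " + field + " treated as generic? " + generic); // false both times
        }
    }
}
{code}
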
bugs-dot-jar_data_FLINK-1978_0078c44e | ---
BugID: FLINK-1978
Summary: POJO serialization NPE
Description: "NullPointer on serialization of a Date field:\n\nCaused by: java.lang.RuntimeException:
Error obtaining the sorted input: Thread 'SortMerger Reading Thread' terminated
due to an exception: null\n\tat org.apache.flink.runtime.operators.sort.UnilateralSortMerger.getIterator(UnilateralSortMerger.java:607)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.getInput(RegularPactTask.java:1132)\n\tat
org.apache.flink.runtime.operators.CoGroupDriver.prepare(CoGroupDriver.java:98)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:464)\n\t...
3 more\nCaused by: java.io.IOException: Thread 'SortMerger Reading Thread' terminated
due to an exception: null\n\tat org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ThreadBase.run(UnilateralSortMerger.java:784)\nCaused
by: java.lang.NullPointerException\n\tat org.apache.flink.api.common.typeutils.base.DateSerializer.deserialize(DateSerializer.java:72)\n\tat
org.apache.flink.api.common.typeutils.base.DateSerializer.deserialize(DateSerializer.java:1)\n\tat
org.apache.flink.api.java.typeutils.runtime.PojoSerializer.deserialize(PojoSerializer.java:487)\n\tat
org.apache.flink.api.java.typeutils.runtime.TupleSerializer.deserialize(TupleSerializer.java:136)\n\tat
org.apache.flink.api.java.typeutils.runtime.TupleSerializer.deserialize(TupleSerializer.java:30)\n\tat
org.apache.flink.runtime.plugable.ReusingDeserializationDelegate.read(ReusingDeserializationDelegate.java:57)\n\tat
org.apache.flink.runtime.io.network.api.serialization.SpillingAdaptiveSpanningRecordDeserializer.getNextRecord(SpillingAdaptiveSpanningRecordDeserializer.java:111)\n\tat
org.apache.flink.runtime.io.network.api.reader.AbstractRecordReader.getNextRecord(AbstractRecordReader.java:64)\n\tat
org.apache.flink.runtime.io.network.api.reader.MutableRecordReader.next(MutableRecordReader.java:34)\n\tat
org.apache.flink.runtime.operators.util.ReaderIterator.next(ReaderIterator.java:59)\n\tat
org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ReadingThread.go(UnilateralSortMerger.java:958)\n\tat
org.apache.flink.runtime.operators.sort.UnilateralSortMerger$ThreadBase.run(UnilateralSortMerger.java:781)"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
index b81ab67..c61ad8d 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializer.java
@@ -293,7 +293,14 @@ public final class PojoSerializer<T> extends TypeSerializer<T> {
for (int i = 0; i < numFields; i++) {
Object value = fields[i].get(from);
if (value != null) {
- Object copy = fieldSerializers[i].copy(fields[i].get(from), fields[i].get(reuse));
+ Object reuseValue = fields[i].get(reuse);
+ Object copy;
+ if(reuseValue != null) {
+ copy = fieldSerializers[i].copy(value, reuseValue);
+ }
+ else {
+ copy = fieldSerializers[i].copy(value);
+ }
fields[i].set(reuse, copy);
}
else {
@@ -484,7 +491,15 @@ public final class PojoSerializer<T> extends TypeSerializer<T> {
if (isNull) {
fields[i].set(reuse, null);
} else {
- Object field = fieldSerializers[i].deserialize(fields[i].get(reuse), source);
+ Object field;
+
+ Object reuseField = fields[i].get(reuse);
+ if(reuseField != null) {
+ field = fieldSerializers[i].deserialize(reuseField, source);
+ }
+ else {
+ field = fieldSerializers[i].deserialize(source);
+ }
fields[i].set(reuse, field);
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1978_0078c44e.diff |
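
A minimal sketch of the reuse pattern the patch introduces, using java.util.Date and hypothetical helper names instead of Flink's serializer interfaces: the reuse-based copy is only taken when the reuse object actually has a non-null value for the field.

{code}
import java.util.Date;

public class ReuseAwareCopySketch {

    // Stand-ins for a field serializer's two copy variants.
    static Date copy(Date from)             { return new Date(from.getTime()); }
    static Date copy(Date from, Date reuse) { reuse.setTime(from.getTime()); return reuse; }

    static Date copyField(Date fromValue, Date reuseValue) {
        if (fromValue == null) {
            return null;
        }
        // Before the fix the reuse variant was always called, which NPEs when the
        // reuse object still carries a null Date field.
        return (reuseValue != null) ? copy(fromValue, reuseValue) : copy(fromValue);
    }

    public static void main(String[] args) {
        System.out.println(copyField(new Date(0L), null));        // works after the fix
        System.out.println(copyField(new Date(0L), new Date()));  // reuse path unchanged
    }
}
{code}
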
bugs-dot-jar_data_FLINK-2294_fef9f115 | ---
BugID: FLINK-2294
Summary: Keyed State does not work with DOP=1
Description: |-
When changing the DOP from 3 to 1 in StatefulOperatorTest.apiTest() the test fails. The reason seems to be that the element is not properly set when chaining is happening.
Also, requiring this:
{code}
headContext.setNextInput(nextRecord);
streamOperator.processElement(nextRecord);
{code}
to be called seems rather fragile. Why not set the element in {{processElement()}}? This would also make for cleaner encapsulation, since now all outside code must assume that operators have a {{StreamingRuntimeContext}} on which they set the next element.
The state/keyed state machinery seems dangerously undertested.
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
index 2d2f29b..73f0a89 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
@@ -274,6 +274,7 @@ public class OutputHandler<OUT> {
@Override
public void collect(T record) {
try {
+ operator.getRuntimeContext().setNextInput(record);
operator.processElement(serializer.copy(record));
} catch (Exception e) {
if (LOG.isErrorEnabled()) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2294_fef9f115.diff |
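
A simplified sketch of the encapsulation point raised in the description: binding the element to the operator context inside the collect path itself means no caller can forget it. The interfaces here are stand-ins, not Flink's real StreamingRuntimeContext or operator API.

{code}
public class ChainingCollectorSketch<T> {

    interface SimpleOperator<T> {
        void setNextInput(T record);            // stand-in for setting the context element
        void processElement(T record) throws Exception;
    }

    private final SimpleOperator<T> operator;

    ChainingCollectorSketch(SimpleOperator<T> operator) {
        this.operator = operator;
    }

    public void collect(T record) {
        try {
            operator.setNextInput(record);      // done here, not by every caller
            operator.processElement(record);
        } catch (Exception e) {
            throw new RuntimeException("Could not forward element to next operator", e);
        }
    }

    public static void main(String[] args) {
        SimpleOperator<String> op = new SimpleOperator<String>() {
            private String current;
            public void setNextInput(String record) { current = record; }
            public void processElement(String record) { System.out.println("context=" + current + ", element=" + record); }
        };
        new ChainingCollectorSketch<>(op).collect("hello");
    }
}
{code}
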
bugs-dot-jar_data_FLINK-1471_d033fa8f | ---
BugID: FLINK-1471
Summary: Allow KeySelectors to implement ResultTypeQueryable
Description: See https://github.com/apache/flink/pull/354
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 124055c..c99a80f 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -680,10 +680,20 @@ public class TypeExtractor {
}
}
- private static void validateInputType(Class<?> baseClass, Class<?> clazz, int inputParamPos, TypeInformation<?> inType) {
+ private static void validateInputType(Class<?> baseClass, Class<?> clazz, int inputParamPos, TypeInformation<?> inTypeInfo) {
ArrayList<Type> typeHierarchy = new ArrayList<Type>();
+
+ // try to get generic parameter
+ Type inType;
+ try {
+ inType = getParameterType(baseClass, typeHierarchy, clazz, inputParamPos);
+ }
+ catch (IllegalArgumentException e) {
+ return; // skip input validation e.g. for raw types
+ }
+
try {
- validateInfo(typeHierarchy, getParameterType(baseClass, typeHierarchy, clazz, inputParamPos), inType);
+ validateInfo(typeHierarchy, inType, inTypeInfo);
}
catch(InvalidTypesException e) {
throw new InvalidTypesException("Input mismatch: " + e.getMessage());
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1471_d033fa8f.diff |
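
A sketch of the use case this change enables: a KeySelector that also implements ResultTypeQueryable, so the key type is declared instead of being reflectively extracted. Package names follow the Flink version of this patch and may differ in later releases.

{code}
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;

public class QueryableKeySelector<T> implements KeySelector<T, String>, ResultTypeQueryable<String> {

    @Override
    public String getKey(T value) throws Exception {
        return String.valueOf(value);   // illustrative key extraction
    }

    @Override
    public TypeInformation<String> getProducedType() {
        return BasicTypeInfo.STRING_TYPE_INFO;
    }
}
{code}
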
bugs-dot-jar_data_FLINK-1930_4dbf030a | ---
BugID: FLINK-1930
Summary: NullPointerException in vertex-centric iteration
Description: "Hello to my Squirrels,\n\nI came across this exception when having a
vertex-centric iteration output followed by a group by. \nI'm not sure if what is
causing it, since I saw this error in a rather large pipeline, but I managed to
reproduce it with [this code example | https://github.com/vasia/flink/commit/1b7bbca1a6130fbcfe98b4b9b43967eb4c61f309]
and a sufficiently large dataset, e.g. [this one | http://snap.stanford.edu/data/com-DBLP.html]
(I'm running this locally).\nIt seems like a null Buffer in RecordWriter.\n\nThe
exception message is the following:\n\nException in thread \"main\" org.apache.flink.runtime.client.JobExecutionException:
Job execution failed.\nat org.apache.flink.runtime.jobmanager.JobManager$anonfun$receiveWithLogMessages$1.applyOrElse(JobManager.scala:319)\nat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\nat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\nat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\nat
org.apache.flink.runtime.ActorLogMessages$anon$1.apply(ActorLogMessages.scala:37)\nat
org.apache.flink.runtime.ActorLogMessages$anon$1.apply(ActorLogMessages.scala:30)\nat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\nat org.apache.flink.runtime.ActorLogMessages$anon$1.applyOrElse(ActorLogMessages.scala:30)\nat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\nat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:94)\nat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\nat akka.actor.ActorCell.invoke(ActorCell.scala:487)\nat
akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\nat akka.dispatch.Mailbox.run(Mailbox.scala:221)\nat
akka.dispatch.Mailbox.exec(Mailbox.scala:231)\nat scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\nat
scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\nat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\nat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: java.lang.NullPointerException\nat org.apache.flink.runtime.io.network.api.serialization.SpanningRecordSerializer.setNextBuffer(SpanningRecordSerializer.java:93)\nat
org.apache.flink.runtime.io.network.api.writer.RecordWriter.emit(RecordWriter.java:92)\nat
org.apache.flink.runtime.operators.shipping.OutputCollector.collect(OutputCollector.java:65)\nat
org.apache.flink.runtime.iterative.task.IterationHeadPactTask.streamSolutionSetToFinalOutput(IterationHeadPactTask.java:405)\nat
org.apache.flink.runtime.iterative.task.IterationHeadPactTask.run(IterationHeadPactTask.java:365)\nat
org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:360)\nat
org.apache.flink.runtime.execution.RuntimeEnvironment.run(RuntimeEnvironment.java:221)\nat
java.lang.Thread.run(Thread.java:745)"
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPool.java b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPool.java
index 1da2b8b..4cb1521 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPool.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/io/network/buffer/LocalBufferPool.java
@@ -141,7 +141,7 @@ class LocalBufferPool implements BufferPool {
while (availableMemorySegments.isEmpty()) {
if (isDestroyed) {
- return null;
+ throw new IllegalStateException("Buffer pool is destroyed.");
}
if (numberOfRequestedMemorySegments < currentPoolSize) {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1930_4dbf030a.diff |
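
A plain-Java sketch of the fail-fast idea behind the one-line change: returning null from a resource request only postpones the failure to a confusing NullPointerException at the call site, so the destroyed pool reports itself immediately. All names and the fake allocation are illustrative.

{code}
import java.util.ArrayDeque;
import java.util.Queue;

public class FailFastPoolSketch {

    private final Queue<byte[]> availableSegments = new ArrayDeque<>();
    private boolean destroyed;

    synchronized byte[] requestSegment() {
        while (availableSegments.isEmpty()) {
            if (destroyed) {
                // before the fix: "return null" -> NPE later in the record serializer
                throw new IllegalStateException("Buffer pool is destroyed.");
            }
            availableSegments.add(new byte[32 * 1024]);   // stand-in for real allocation
        }
        return availableSegments.poll();
    }

    public static void main(String[] args) {
        FailFastPoolSketch pool = new FailFastPoolSketch();
        System.out.println(pool.requestSegment().length);
        pool.destroyed = true;
        pool.requestSegment();   // now fails right here with a clear message
    }
}
{code}
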
bugs-dot-jar_data_FLINK-3762_dc78a747 | ---
BugID: FLINK-3762
Summary: " Kryo StackOverflowError due to disabled Kryo Reference tracking"
Description: "As discussed on the dev list,\n\nIn {{KryoSerializer.java}}\n\nKryo
Reference tracking is disabled by default:\n\n{code}\n kryo.setReferences(false);\n{code}\n\nThis
can causes {{StackOverflowError}} Exceptions when serializing many objects that
may contain recursive objects:\n\n{code}\njava.lang.StackOverflowError\n\tat com.esotericsoftware.kryo.serializers.ObjectField.write(ObjectField.java:48)\n\tat
com.esotericsoftware.kryo.serializers.FieldSerializer.write(FieldSerializer.java:495)\n\tat
com.esotericsoftware.kryo.Kryo.writeObject(Kryo.java:523)\n\tat com.esotericsoftware.kryo.serializers.ObjectField.write(ObjectField.java:61)\n\tat
com.esotericsoftware.kryo.serializers.FieldSerializer.write(FieldSerializer.java:495)\n\tat
com.esotericsoftware.kryo.Kryo.writeObject(Kryo.java:523)\n\tat com.esotericsoftware.kryo.serializers.ObjectField.write(ObjectField.java:61)\n\tat
com.esotericsoftware.kryo.serializers.FieldSerializer.write(FieldSerializer.java:495)\n\tat
com.esotericsoftware.kryo.Kryo.writeObject(Kryo.java:523)\n\tat com.esotericsoftware.kryo.serializers.ObjectField.write(ObjectField.java:61)\n\tat
com.esotericsoftware.kryo.serializers.FieldSerializer.write(FieldSerializer.java:495)\n{code}\n\nBy
enabling reference tracking, we can fix this problem.\n\n[1]https://gist.github.com/andrewpalumbo/40c7422a5187a24cd03d7d81feb2a419\n "
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
index d5c2f67..e74e251 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/kryo/KryoSerializer.java
@@ -327,9 +327,8 @@ public class KryoSerializer<T> extends TypeSerializer<T> {
if (this.kryo == null) {
this.kryo = getKryoInstance();
- // disable reference tracking. reference tracking is costly, usually unnecessary, and
- // inconsistent with Flink's own serialization (which does not do reference tracking)
- kryo.setReferences(false);
+ // Enable reference tracking.
+ kryo.setReferences(true);
// Throwable and all subclasses should be serialized via java serialization
kryo.addDefaultSerializer(Throwable.class, new JavaSerializer());
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3762_dc78a747.diff |
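
A hedged sketch against the Kryo 2.x API bundled with Flink at the time: a node that points back to itself round-trips cleanly once reference tracking is enabled, and blows the stack when it is disabled. The Node class is an example type, not something from Flink.

{code}
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

public class KryoReferenceSketch {

    public static class Node {
        public String name;
        public Node next;   // may point back to this node
    }

    public static void main(String[] args) {
        Node node = new Node();
        node.name = "self";
        node.next = node;               // recursive reference

        Kryo kryo = new Kryo();
        kryo.setReferences(true);       // the fix; false reproduces the StackOverflowError

        Output output = new Output(1024, -1);
        kryo.writeObject(output, node);

        Node copy = kryo.readObject(new Input(output.toBytes()), Node.class);
        System.out.println(copy.next == copy);   // true: the cycle is preserved
    }
}
{code}
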
bugs-dot-jar_data_FLINK-2437_a41bc8cc | ---
BugID: FLINK-2437
Summary: TypeExtractor.analyzePojo has some problems around the default constructor
detection
Description: |+
If a class does have a default constructor, but the user forgot to make it public, then TypeExtractor.analyzePojo still thinks everything is OK, so it creates a PojoTypeInfo. Then PojoSerializer.createInstance blows up.
  Furthermore, a "return null" seems to be missing from the then-branch of the if after catching the NoSuchMethodException, which would also cause a headache for PojoSerializer.
An additional minor issue is that the word "class" is printed twice in several places, because class.toString also prepends it to the class name.
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 1ae8d3d..2e45107 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -18,6 +18,7 @@
package org.apache.flink.api.java.typeutils;
+import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.Method;
@@ -1298,10 +1299,10 @@ public class TypeExtractor {
return true;
} else {
if(!hasGetter) {
- LOG.debug("Class "+clazz+" does not contain a getter for field "+f.getName() );
+ LOG.debug(clazz+" does not contain a getter for field "+f.getName() );
}
if(!hasSetter) {
- LOG.debug("Class "+clazz+" does not contain a setter for field "+f.getName() );
+ LOG.debug(clazz+" does not contain a setter for field "+f.getName() );
}
return false;
}
@@ -1323,7 +1324,7 @@ public class TypeExtractor {
List<Field> fields = getAllDeclaredFields(clazz);
if(fields.size() == 0) {
- LOG.info("No fields detected for class " + clazz + ". Cannot be used as a PojoType. Will be handled as GenericType");
+ LOG.info("No fields detected for " + clazz + ". Cannot be used as a PojoType. Will be handled as GenericType");
return new GenericTypeInfo<OUT>(clazz);
}
@@ -1331,7 +1332,7 @@ public class TypeExtractor {
for (Field field : fields) {
Type fieldType = field.getGenericType();
if(!isValidPojoField(field, clazz, typeHierarchy)) {
- LOG.info("Class " + clazz + " is not a valid POJO type");
+ LOG.info(clazz + " is not a valid POJO type");
return null;
}
try {
@@ -1357,24 +1358,29 @@ public class TypeExtractor {
List<Method> methods = getAllDeclaredMethods(clazz);
for (Method method : methods) {
if (method.getName().equals("readObject") || method.getName().equals("writeObject")) {
- LOG.info("Class "+clazz+" contains custom serialization methods we do not call.");
+ LOG.info(clazz+" contains custom serialization methods we do not call.");
return null;
}
}
// Try retrieving the default constructor, if it does not have one
// we cannot use this because the serializer uses it.
+ Constructor defaultConstructor = null;
try {
- clazz.getDeclaredConstructor();
+ defaultConstructor = clazz.getDeclaredConstructor();
} catch (NoSuchMethodException e) {
if (clazz.isInterface() || Modifier.isAbstract(clazz.getModifiers())) {
- LOG.info("Class " + clazz + " is abstract or an interface, having a concrete " +
+ LOG.info(clazz + " is abstract or an interface, having a concrete " +
"type can increase performance.");
} else {
- LOG.info("Class " + clazz + " must have a default constructor to be used as a POJO.");
+ LOG.info(clazz + " must have a default constructor to be used as a POJO.");
return null;
}
}
+ if(defaultConstructor != null && !Modifier.isPublic(defaultConstructor.getModifiers())) {
+ LOG.info("The default constructor of " + clazz + " should be Public to be used as a POJO.");
+ return null;
+ }
// everything is checked, we return the pojo
return pojoType;
@@ -1394,7 +1400,7 @@ public class TypeExtractor {
continue; // we have no use for transient or static fields
}
if(hasFieldWithSameName(field.getName(), result)) {
- throw new RuntimeException("The field "+field+" is already contained in the hierarchy of the class "+clazz+"."
+ throw new RuntimeException("The field "+field+" is already contained in the hierarchy of the "+clazz+"."
+ "Please use unique field names through your classes hierarchy");
}
result.add(field);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2437_a41bc8cc.diff |
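
A plain-reflection sketch of the added check: a declared no-arg constructor only helps the serializer if it is also public. The example class is illustrative.

{code}
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;

public class DefaultConstructorCheckSketch {

    public static class HiddenCtorPojo {
        public int value;
        HiddenCtorPojo() {}              // present, but only package-private
    }

    static boolean usableAsPojo(Class<?> clazz) {
        try {
            Constructor<?> ctor = clazz.getDeclaredConstructor();
            return Modifier.isPublic(ctor.getModifiers());
        } catch (NoSuchMethodException e) {
            return false;                // no default constructor at all
        }
    }

    public static void main(String[] args) {
        System.out.println(usableAsPojo(HiddenCtorPojo.class));   // false
        System.out.println(usableAsPojo(String.class));           // true
    }
}
{code}
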
bugs-dot-jar_data_FLINK-2121_03340919 | ---
BugID: FLINK-2121
Summary: FileInputFormat.addFilesInDir miscalculates total size
Description: |-
  In FileInputFormat.addFilesInDir, the length variable should start from 0, because the return value is always added to the caller's length (instead of being assigned). So with the current version, the length before the call is counted twice in the result.
mvn verify caught this for me now. The reason why this hasn't been seen yet, is because testGetStatisticsMultipleNestedFiles catches this only if it gets the listings of the outer directory in a certain order. Concretely, if the inner directory is seen before the other file in the outer directory, then length is 0 at that point, so the bug doesn't show. But if the other file is seen first, then its size is added twice to the total result.
diff --git a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
index 37739f5..a8f334b 100644
--- a/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
+++ b/flink-core/src/main/java/org/apache/flink/api/common/io/FileInputFormat.java
@@ -330,7 +330,7 @@ public abstract class FileInputFormat<OT> implements InputFormat<OT, FileInputSp
// enumerate all files
if (file.isDir()) {
- totalLength += addFilesInDir(file.getPath(), files, totalLength, false);
+ totalLength += addFilesInDir(file.getPath(), files, false);
} else {
files.add(file);
testForUnsplittable(file);
@@ -390,7 +390,7 @@ public abstract class FileInputFormat<OT> implements InputFormat<OT, FileInputSp
final FileStatus pathFile = fs.getFileStatus(path);
if (pathFile.isDir()) {
- totalLength += addFilesInDir(path, files, totalLength, true);
+ totalLength += addFilesInDir(path, files, true);
} else {
testForUnsplittable(pathFile);
@@ -497,14 +497,16 @@ public abstract class FileInputFormat<OT> implements InputFormat<OT, FileInputSp
* Enumerate all files in the directory and recursive if enumerateNestedFiles is true.
* @return the total length of accepted files.
*/
- private long addFilesInDir(Path path, List<FileStatus> files, long length, boolean logExcludedFiles)
+ private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles)
throws IOException {
final FileSystem fs = path.getFileSystem();
+ long length = 0;
+
for(FileStatus dir: fs.listStatus(path)) {
if (dir.isDir()) {
if (acceptFile(dir) && enumerateNestedFiles) {
- length += addFilesInDir(dir.getPath(), files, length, logExcludedFiles);
+ length += addFilesInDir(dir.getPath(), files, logExcludedFiles);
} else {
if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug("Directory "+dir.getPath().toString()+" did not pass the file-filter and is excluded.");
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2121_03340919.diff |
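
An arithmetic sketch of the accumulation bug using a toy directory tree (no file system involved; the numbers are made up): passing the running total into the recursion and then adding the return value counts earlier files twice.

{code}
import java.util.Arrays;
import java.util.List;

public class LengthAccumulationSketch {

    // outer dir: one 100-byte file plus a nested dir containing a 10-byte file
    static final List<Long> OUTER_FILES = Arrays.asList(100L);
    static final List<Long> NESTED_FILES = Arrays.asList(10L);

    static long buggyAddFilesInDir(long lengthSoFar) {
        long length = lengthSoFar;            // bug: starts from the caller's total
        for (long f : NESTED_FILES) {
            length += f;
        }
        return length;
    }

    static long fixedAddFilesInDir() {
        long length = 0;                      // fix: purely local total
        for (long f : NESTED_FILES) {
            length += f;
        }
        return length;
    }

    public static void main(String[] args) {
        long total = OUTER_FILES.get(0);
        System.out.println(total + buggyAddFilesInDir(total));   // 210: the 100-byte file counted twice
        System.out.println(total + fixedAddFilesInDir());        // 110: correct
    }
}
{code}
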
bugs-dot-jar_data_FLINK-3256_44061882 | ---
BugID: FLINK-3256
Summary: Invalid execution graph cleanup for jobs with colocation groups
Description: |-
Currently, upon restarting an execution graph, we clean-up the colocation constraints for each group present in an ExecutionJobVertex respectively.
This can lead to invalid reconfiguration upon a restart or any other activity that relies on state cleanup of the execution graph. For example, upon restarting a DataStream job with iterations the following steps are executed:
1) IterationSource colgroup constraints are reset
2) IterationSource execution vertices reset and create new colocation constraints
3) IterationSink colgroup constraints are reset
4) IterationSink execution vertices reset and create different colocation constraints.
  This can be trivially fixed by resetting colocation groups independently from ExecutionJobVertices, thus updating them once per reconfiguration.
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
index 9085483..a03f0bf 100755
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
@@ -51,6 +51,7 @@ import org.apache.flink.runtime.jobgraph.JobStatus;
import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.ScheduleMode;
import org.apache.flink.runtime.jobmanager.RecoveryMode;
+import org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup;
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler;
import org.apache.flink.runtime.messages.ExecutionGraphMessages;
import org.apache.flink.runtime.taskmanager.TaskExecutionState;
@@ -76,6 +77,8 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Collection;
+import java.util.HashSet;
import java.util.NoSuchElementException;
import java.util.UUID;
import java.util.concurrent.Callable;
@@ -147,7 +150,7 @@ public class ExecutionGraph implements Serializable {
/** All vertices, in the order in which they were created **/
private final List<ExecutionJobVertex> verticesInCreationOrder;
-
+
/** All intermediate results that are part of this graph */
private final ConcurrentHashMap<IntermediateDataSetID, IntermediateResult> intermediateResults;
@@ -719,7 +722,7 @@ public class ExecutionGraph implements Serializable {
res.getId(), res, previousDataSet));
}
}
-
+
this.verticesInCreationOrder.add(ejv);
}
}
@@ -849,7 +852,16 @@ public class ExecutionGraph implements Serializable {
this.currentExecutions.clear();
+ Collection<CoLocationGroup> colGroups = new HashSet<>();
+
for (ExecutionJobVertex jv : this.verticesInCreationOrder) {
+
+ CoLocationGroup cgroup = jv.getCoLocationGroup();
+ if(cgroup != null && !colGroups.contains(cgroup)){
+ cgroup.resetConstraints();
+ colGroups.add(cgroup);
+ }
+
jv.resetForNewExecution();
}
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
index 93ae7c1..bc368ab 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionJobVertex.java
@@ -362,9 +362,6 @@ public class ExecutionJobVertex implements Serializable {
if (slotSharingGroup != null) {
slotSharingGroup.clearTaskAssignment();
}
- if (coLocationGroup != null) {
- coLocationGroup.resetConstraints();
- }
// reset vertices one by one. if one reset fails, the "vertices in final state"
// fields will be consistent to handle triggered cancel calls
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3256_44061882.diff |
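
A plain-Java sketch of the dedup logic the patch moves to the graph level: shared colocation groups are reset once per restart, not once per vertex that references them. Group and Vertex are simplified stand-ins for the Flink classes.

{code}
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;

public class ResetOncePerGroupSketch {

    static class Group {
        int resets;
        void resetConstraints() { resets++; }
    }

    static class Vertex {
        final Group coLocationGroup;          // may be shared between vertices, may be null
        Vertex(Group g) { this.coLocationGroup = g; }
        void resetForNewExecution() { /* would create new constraints against the group */ }
    }

    public static void main(String[] args) {
        Group shared = new Group();
        List<Vertex> vertices = Arrays.asList(new Vertex(shared), new Vertex(shared), new Vertex(null));

        Collection<Group> seen = new HashSet<>();
        for (Vertex v : vertices) {
            Group g = v.coLocationGroup;
            if (g != null && seen.add(g)) {
                g.resetConstraints();          // once per group, not once per vertex
            }
            v.resetForNewExecution();
        }
        System.out.println(shared.resets);     // 1
    }
}
{code}
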
bugs-dot-jar_data_FLINK-1145_22c370d9 | ---
BugID: FLINK-1145
Summary: POJO Type extractor bug with type variables
Description: "The following program incorrectly states that there are duplicate getters/setters.\n\n{code}\n\tpublic
static class Vertex<K, V> {\n\t\t\n\t\tprivate K key1;\n\t\tprivate K key2;\n\t\tprivate
V value;\n\t\t\n\t\tpublic Vertex() {}\n\t\t\n\t\tpublic Vertex(K key, V value)
{\n\t\t\tthis.key1 = key;\n\t\t\tthis.key2 = key;\n\t\t\tthis.value = value;\n\t\t}\n\t\t\n\t\tpublic
Vertex(K key1, K key2, V value) {\n\t\t\tthis.key1 = key1;\n\t\t\tthis.key2 = key2;\n\t\t\tthis.value
= value;\n\t\t}\n\n\t\tpublic void setKey1(K key1) {\n\t\t\tthis.key1 = key1;\n\t\t}\n\t\t\n\t\tpublic
void setKey2(K key2) {\n\t\t\tthis.key2 = key2;\n\t\t}\n\t\t\n\t\tpublic K getKey1()
{\n\t\t\treturn key1;\n\t\t}\n\t\t\n\t\tpublic K getKey2() {\n\t\t\treturn key2;\n\t\t}\n\t\t\n\t\tpublic
void setValue(V value) {\n\t\t\tthis.value = value;\n\t\t}\n\t\t\n\t\tpublic V getValue()
{\n\t\t\treturn value;\n\t\t}\n\t}\n\t\n\tpublic static void main(String[] args)
throws Exception {\n\t\tExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();\n\t\t\n\t\tDataSet<Vertex<Long,
Double>> set = env.fromElements(new Vertex<Long, Double>(0L, 3.0), new Vertex<Long,
Double>(1L, 1.0));\n\t\t\n\t\tset.print();\n\t\t\n\t\tenv.execute();\n\t}\n{code}\n\nThe
exception is\n{code}\nException in thread \"main\" java.lang.IllegalStateException:
Detected more than one getters\n\tat org.apache.flink.api.java.typeutils.TypeExtractor.isValidPojoField(TypeExtractor.java:981)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.analyzePojo(TypeExtractor.java:1025)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.privateGetForClass(TypeExtractor.java:937)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.privateGetForClass(TypeExtractor.java:863)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.privateGetForObject(TypeExtractor.java:1146)\n\tat
org.apache.flink.api.java.typeutils.TypeExtractor.getForObject(TypeExtractor.java:1116)\n\tat
org.apache.flink.api.java.ExecutionEnvironment.fromElements(ExecutionEnvironment.java:466)\n\tat
test.Test.main(Test.java:74)\n\n{code}"
diff --git a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
index 7836e74..6a56e46 100644
--- a/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
+++ b/flink-java/src/main/java/org/apache/flink/api/java/typeutils/TypeExtractor.java
@@ -969,16 +969,15 @@ public class TypeExtractor {
}
for(Method m : clazz.getMethods()) {
// check for getter
-
if( // The name should be "get<FieldName>" or "<fieldName>" (for scala).
(m.getName().toLowerCase().equals("get"+fieldNameLow) || m.getName().toLowerCase().equals(fieldNameLow)) &&
// no arguments for the getter
m.getParameterTypes().length == 0 &&
// return type is same as field type (or the generic variant of it)
- m.getReturnType().equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric) )
+ (m.getReturnType().equals( fieldType ) || (fieldTypeGeneric != null && m.getGenericReturnType().equals(fieldTypeGeneric)) )
) {
if(hasGetter) {
- throw new IllegalStateException("Detected more than one getters");
+ throw new IllegalStateException("Detected more than one getter");
}
hasGetter = true;
}
@@ -990,7 +989,7 @@ public class TypeExtractor {
m.getReturnType().equals(Void.TYPE)
) {
if(hasSetter) {
- throw new IllegalStateException("Detected more than one getters");
+ throw new IllegalStateException("Detected more than one setter");
}
hasSetter = true;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1145_22c370d9.diff |
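
A plain-Java sketch of the root cause: && binds tighter than ||, so without the added parentheses the "generic return type matches" clause bypassed the name and zero-argument checks and unrelated methods were counted as getters for the field. The booleans model getKey2 being checked as a getter for field key1, where both methods return the type variable K.

{code}
public class PrecedenceSketch {
    public static void main(String[] args) {
        boolean nameMatches = false;          // getKey2 checked against field key1
        boolean noArguments = true;
        boolean returnTypeMatches = false;
        boolean genericReturnMatches = true;  // both getters return the type variable K

        boolean buggy = nameMatches && noArguments && returnTypeMatches || genericReturnMatches;
        boolean fixed = nameMatches && noArguments && (returnTypeMatches || genericReturnMatches);

        System.out.println(buggy);   // true  -> two "getters" for key1 -> exception
        System.out.println(fixed);   // false -> getKey2 is not a getter for key1
    }
}
{code}
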
bugs-dot-jar_data_FLINK-2713_63d9800e | ---
BugID: FLINK-2713
Summary: Custom StateCheckpointers should be included in the snapshots
Description: |-
  Currently the restoreInitialState call fails when the user uses a custom StateCheckpointer to create the snapshot, because the state is restored before the StateCheckpointer is set on the StreamOperatorState (the restoreInitialState() call precedes the open() call).
To avoid this issue, the custom StateCheckpointer instance should be stored within the snapshot and should be set in the StreamOperatorState before calling restoreState(..).
To reduce the overhead induced by this we can do 2 optimizations:
- We only include custom StateCheckpointers (the default java serializer one is always available)
- We only serialize the checkpointer once and store the byte array in the snapshot
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/PartitionedStreamOperatorState.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/PartitionedStreamOperatorState.java
index 115a97c..408a0f0 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/PartitionedStreamOperatorState.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/PartitionedStreamOperatorState.java
@@ -55,6 +55,8 @@ public class PartitionedStreamOperatorState<IN, S, C extends Serializable> exten
private IN currentInput;
private ClassLoader cl;
+ private boolean restored = true;
+ private StateHandle<Serializable> checkpoint = null;
public PartitionedStreamOperatorState(StateCheckpointer<S, C> checkpointer,
StateHandleProvider<C> provider, KeySelector<IN, Serializable> keySelector, ClassLoader cl) {
@@ -76,6 +78,10 @@ public class PartitionedStreamOperatorState<IN, S, C extends Serializable> exten
if (currentInput == null) {
throw new IllegalStateException("Need a valid input for accessing the state.");
} else {
+ if (!restored) {
+ // If the state is not restored yet, restore now
+ restoreWithCheckpointer();
+ }
Serializable key;
try {
key = keySelector.getKey(currentInput);
@@ -100,6 +106,10 @@ public class PartitionedStreamOperatorState<IN, S, C extends Serializable> exten
if (currentInput == null) {
throw new IllegalStateException("Need a valid input for updating a state.");
} else {
+ if (!restored) {
+ // If the state is not restored yet, restore now
+ restoreWithCheckpointer();
+ }
Serializable key;
try {
key = keySelector.getKey(currentInput);
@@ -131,18 +141,38 @@ public class PartitionedStreamOperatorState<IN, S, C extends Serializable> exten
@Override
public StateHandle<Serializable> snapshotState(long checkpointId, long checkpointTimestamp) throws Exception {
- return stateStore.snapshotStates(checkpointId, checkpointTimestamp);
+ // If the state is restored we take a snapshot, otherwise return the last checkpoint
+ return restored ? stateStore.snapshotStates(checkpointId, checkpointTimestamp) : provider
+ .createStateHandle(checkpoint.getState(cl));
}
-
+
@Override
- public void restoreState(StateHandle<Serializable> snapshots, ClassLoader userCodeClassLoader) throws Exception {
- stateStore.restoreStates(snapshots, userCodeClassLoader);
+ public void restoreState(StateHandle<Serializable> snapshot, ClassLoader userCodeClassLoader) throws Exception {
+ // We store the snapshot for lazy restore
+ checkpoint = snapshot;
+ restored = false;
+ }
+
+ private void restoreWithCheckpointer() throws IOException {
+ try {
+ stateStore.restoreStates(checkpoint, cl);
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ restored = true;
+ checkpoint = null;
}
@Override
public Map<Serializable, S> getPartitionedState() throws Exception {
return stateStore.getPartitionedState();
}
+
+ @Override
+ public void setCheckpointer(StateCheckpointer<S, C> checkpointer) {
+ super.setCheckpointer(checkpointer);
+ stateStore.setCheckPointer(checkpointer);
+ }
@Override
public String toString() {
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/StreamOperatorState.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/StreamOperatorState.java
index 29a19b5..c33b94e 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/StreamOperatorState.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/api/state/StreamOperatorState.java
@@ -44,7 +44,10 @@ public class StreamOperatorState<S, C extends Serializable> implements OperatorS
private S state;
protected StateCheckpointer<S, C> checkpointer;
- private final StateHandleProvider<Serializable> provider;
+ protected final StateHandleProvider<Serializable> provider;
+
+ private boolean restored = true;
+ private Serializable checkpoint = null;
@SuppressWarnings("unchecked")
public StreamOperatorState(StateCheckpointer<S, C> checkpointer, StateHandleProvider<C> provider) {
@@ -59,6 +62,10 @@ public class StreamOperatorState<S, C extends Serializable> implements OperatorS
@Override
public S value() throws IOException {
+ if (!restored) {
+ // If the state is not restore it yet, restore at this point
+ restoreWithCheckpointer();
+ }
return state;
}
@@ -67,6 +74,11 @@ public class StreamOperatorState<S, C extends Serializable> implements OperatorS
if (state == null) {
throw new RuntimeException("Cannot set state to null.");
}
+ if (!restored) {
+ // If the value is updated before the restore it is overwritten
+ restored = true;
+ checkpoint = false;
+ }
this.state = state;
}
@@ -90,14 +102,22 @@ public class StreamOperatorState<S, C extends Serializable> implements OperatorS
public StateHandle<Serializable> snapshotState(long checkpointId, long checkpointTimestamp)
throws Exception {
- return provider.createStateHandle(checkpointer.snapshotState(value(), checkpointId,
- checkpointTimestamp));
-
+ // If the state is restored we take a snapshot, otherwise return the last checkpoint
+ return provider.createStateHandle(restored ? checkpointer.snapshotState(value(), checkpointId,
+ checkpointTimestamp) : checkpoint);
}
- @SuppressWarnings("unchecked")
public void restoreState(StateHandle<Serializable> snapshot, ClassLoader userCodeClassLoader) throws Exception {
- update(checkpointer.restoreState((C) snapshot.getState(userCodeClassLoader)));
+ // We set the checkpoint for lazy restore
+ checkpoint = snapshot.getState(userCodeClassLoader);
+ restored = false;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void restoreWithCheckpointer() throws IOException {
+ update(checkpointer.restoreState((C) checkpoint));
+ restored = true;
+ checkpoint = null;
}
public Map<Serializable, S> getPartitionedState() throws Exception {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2713_63d9800e.diff |
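
A simplified sketch of the lazy-restore idea (plain Java, not Flink's real state interfaces): the raw checkpoint is parked until the user-provided checkpointer is available and only decoded on first access.

{code}
public class LazyRestoreSketch<S, C> {

    interface Checkpointer<S, C> {
        C snapshot(S state);
        S restore(C checkpointedState);
    }

    private Checkpointer<S, C> checkpointer;
    private S state;
    private C pendingCheckpoint;     // set by restoreState(), consumed lazily

    void setCheckpointer(Checkpointer<S, C> checkpointer) { this.checkpointer = checkpointer; }

    void restoreState(C checkpoint) { this.pendingCheckpoint = checkpoint; }

    S value() {
        if (pendingCheckpoint != null) {
            state = checkpointer.restore(pendingCheckpoint);   // the checkpointer is set by now
            pendingCheckpoint = null;
        }
        return state;
    }

    public static void main(String[] args) {
        LazyRestoreSketch<Integer, String> s = new LazyRestoreSketch<>();
        s.restoreState("42");                          // before open(): no checkpointer yet
        s.setCheckpointer(new Checkpointer<Integer, String>() {
            public String snapshot(Integer st) { return String.valueOf(st); }
            public Integer restore(String c)   { return Integer.parseInt(c); }
        });
        System.out.println(s.value());                 // 42, decoded with the custom checkpointer
    }
}
{code}
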
bugs-dot-jar_data_FLINK-3251_117ba95f | ---
BugID: FLINK-3251
Summary: Checkpoint stats show ghost numbers
Description: "[~StephanEwen] reported an issue with the display of checkpoint stats.
A pipeline with a stateful source and stateless intermediate operator shows stats
for the stateless intermediate operator. The numbers are most likely the same as
for the source operator."
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
index 5881f4f..fba3f22 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/stats/SimpleCheckpointStatsTracker.java
@@ -299,31 +299,32 @@ public class SimpleCheckpointStatsTracker implements CheckpointStatsTracker {
long[][] subTaskStats = this.subTaskStats.get(operatorId);
if (subTaskStats == null) {
- throw new IllegalArgumentException("Unknown operator ID.");
+ return Option.empty();
}
+ else {
+ long maxDuration = Long.MIN_VALUE;
+ long stateSize = 0;
- long maxDuration = Long.MIN_VALUE;
- long stateSize = 0;
+ for (long[] subTaskStat : subTaskStats) {
+ if (subTaskStat[0] > maxDuration) {
+ maxDuration = subTaskStat[0];
+ }
- for (long[] subTaskStat : subTaskStats) {
- if (subTaskStat[0] > maxDuration) {
- maxDuration = subTaskStat[0];
+ stateSize += subTaskStat[1];
}
- stateSize += subTaskStat[1];
- }
-
- stats = new OperatorCheckpointStats(
- latestCompletedCheckpoint.getCheckpointID(),
- latestCompletedCheckpoint.getTimestamp(),
- maxDuration,
- stateSize,
- subTaskStats);
+ stats = new OperatorCheckpointStats(
+ latestCompletedCheckpoint.getCheckpointID(),
+ latestCompletedCheckpoint.getTimestamp(),
+ maxDuration,
+ stateSize,
+ subTaskStats);
- // Remember this and don't recompute if requested again
- operatorStatsCache.put(operatorId, stats);
+ // Remember this and don't recompute if requested again
+ operatorStatsCache.put(operatorId, stats);
- return Option.apply(stats);
+ return Option.apply(stats);
+ }
}
else {
return Option.empty();
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3251_117ba95f.diff |
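
A sketch of the lookup contract after the fix, written with java.util.Optional as a stand-in for the scala.Option the tracker actually returns: asking for stats of an operator that never reported any (for example a stateless one) now yields "empty" instead of an exception.

{code}
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class OptionalStatsLookupSketch {

    private final Map<String, long[]> statsPerOperator = new HashMap<>();

    Optional<Long> maxDurationFor(String operatorId) {
        long[] subTaskDurations = statsPerOperator.get(operatorId);
        if (subTaskDurations == null) {
            return Optional.empty();          // before the fix: "Unknown operator ID." exception
        }
        long max = Long.MIN_VALUE;
        for (long d : subTaskDurations) {
            max = Math.max(max, d);
        }
        return Optional.of(max);
    }

    public static void main(String[] args) {
        OptionalStatsLookupSketch tracker = new OptionalStatsLookupSketch();
        tracker.statsPerOperator.put("stateful-source", new long[]{12, 7, 30});
        System.out.println(tracker.maxDurationFor("stateful-source"));   // Optional[30]
        System.out.println(tracker.maxDurationFor("stateless-map"));     // Optional.empty
    }
}
{code}
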
bugs-dot-jar_data_FLINK-3740_f2f5bd5b | ---
BugID: FLINK-3740
Summary: Session Window State is Not Checkpointed
Description: |-
The merging window state in the {{WindowOperator}} is not checkpointed. This means that programs containing session windows will fail upon restore after a failure.
I propose adding a simulated snapshot/restore cycle to the tests in {{WindowOperatorTest}} to catch these problems in the future.
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperator.java
index 1e4e453..84ee0b9 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/EvictingWindowOperator.java
@@ -82,15 +82,11 @@ public class EvictingWindowOperator<K, IN, OUT, W extends Window> extends Window
Collection<W> elementWindows = windowAssigner.assignWindows(element.getValue(),
element.getTimestamp());
- K key = (K) getStateBackend().getCurrentKey();
+ final K key = (K) getStateBackend().getCurrentKey();
if (windowAssigner instanceof MergingWindowAssigner) {
- MergingWindowSet<W> mergingWindows = mergingWindowsByKey.get(getStateBackend().getCurrentKey());
- if (mergingWindows == null) {
- mergingWindows = new MergingWindowSet<>((MergingWindowAssigner<? super IN, W>) windowAssigner);
- mergingWindowsByKey.put(key, mergingWindows);
- }
+ MergingWindowSet<W> mergingWindows = getMergingWindowSet();
for (W window : elementWindows) {
// If there is a merge, it can only result in a window that contains our new
@@ -107,6 +103,7 @@ public class EvictingWindowOperator<K, IN, OUT, W extends Window> extends Window
public void merge(W mergeResult,
Collection<W> mergedWindows, W stateWindowResult,
Collection<W> mergedStateWindows) throws Exception {
+ context.key = key;
context.window = mergeResult;
// store for later use
@@ -141,7 +138,7 @@ public class EvictingWindowOperator<K, IN, OUT, W extends Window> extends Window
TriggerResult combinedTriggerResult = TriggerResult.merge(triggerResult,
mergeTriggerResult.f0);
- processTriggerResult(combinedTriggerResult, key, actualWindow);
+ processTriggerResult(combinedTriggerResult, actualWindow);
}
} else {
@@ -157,14 +154,14 @@ public class EvictingWindowOperator<K, IN, OUT, W extends Window> extends Window
context.window = window;
TriggerResult triggerResult = context.onElement(element);
- processTriggerResult(triggerResult, key, window);
+ processTriggerResult(triggerResult, window);
}
}
}
@Override
@SuppressWarnings("unchecked,rawtypes")
- protected void processTriggerResult(TriggerResult triggerResult, K key, W window) throws Exception {
+ protected void processTriggerResult(TriggerResult triggerResult, W window) throws Exception {
if (!triggerResult.isFire() && !triggerResult.isPurge()) {
// do nothing
return;
@@ -175,7 +172,7 @@ public class EvictingWindowOperator<K, IN, OUT, W extends Window> extends Window
MergingWindowSet<W> mergingWindows = null;
if (windowAssigner instanceof MergingWindowAssigner) {
- mergingWindows = mergingWindowsByKey.get(key);
+ mergingWindows = getMergingWindowSet();
W stateWindow = mergingWindows.getStateWindow(window);
windowState = getPartitionedState(stateWindow, windowSerializer, windowStateDescriptor);
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
index 7ef1af4..49a2017 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/MergingWindowSet.java
@@ -17,6 +17,8 @@
*/
package org.apache.flink.streaming.runtime.operators.windowing;
+import org.apache.flink.api.common.state.ListState;
+import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.windowing.assigners.MergingWindowAssigner;
import org.apache.flink.streaming.api.windowing.windows.Window;
import org.slf4j.Logger;
@@ -73,6 +75,24 @@ public class MergingWindowSet<W extends Window> {
}
/**
+ * Restores a {@link MergingWindowSet} from the given state.
+ */
+ public MergingWindowSet(MergingWindowAssigner<?, W> windowAssigner, ListState<Tuple2<W, W>> state) throws Exception {
+ this.windowAssigner = windowAssigner;
+ windows = new HashMap<>();
+
+ for (Tuple2<W, W> window: state.get()) {
+ windows.put(window.f0, window.f1);
+ }
+ }
+
+ public void persist(ListState<Tuple2<W, W>> state) throws Exception {
+ for (Map.Entry<W, W> window: windows.entrySet()) {
+ state.add(new Tuple2<>(window.getKey(), window.getValue()));
+ }
+ }
+
+ /**
* Returns the state window for the given in-flight {@code Window}. The state window is the
* {@code Window} in which we keep the actual state of a given in-flight window. Windows
* might expand but we keep to original state window for keeping the elements of the window
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
index c106e70..919cee7 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
@@ -22,6 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.state.AppendingState;
+import org.apache.flink.api.common.state.ListState;
+import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.MergingState;
import org.apache.flink.api.common.state.State;
import org.apache.flink.api.common.state.StateDescriptor;
@@ -29,10 +31,13 @@ import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
+import org.apache.flink.api.common.typeutils.base.VoidSerializer;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple1;
+import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.InputTypeConfigurable;
import org.apache.flink.api.java.typeutils.TypeExtractor;
+import org.apache.flink.api.java.typeutils.runtime.TupleSerializer;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.runtime.state.AbstractStateBackend;
import org.apache.flink.runtime.state.StateHandle;
@@ -224,6 +229,25 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
@Override
public final void close() throws Exception {
super.close();
+ timestampedCollector = null;
+ watermarkTimers = null;
+ watermarkTimersQueue = null;
+ processingTimeTimers = null;
+ processingTimeTimersQueue = null;
+ context = null;
+ mergingWindowsByKey = null;
+ }
+
+ @Override
+ public void dispose() {
+ super.dispose();
+ timestampedCollector = null;
+ watermarkTimers = null;
+ watermarkTimersQueue = null;
+ processingTimeTimers = null;
+ processingTimeTimersQueue = null;
+ context = null;
+ mergingWindowsByKey = null;
}
@Override
@@ -231,15 +255,10 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
public void processElement(StreamRecord<IN> element) throws Exception {
Collection<W> elementWindows = windowAssigner.assignWindows(element.getValue(), element.getTimestamp());
- K key = (K) getStateBackend().getCurrentKey();
+ final K key = (K) getStateBackend().getCurrentKey();
if (windowAssigner instanceof MergingWindowAssigner) {
- MergingWindowSet<W> mergingWindows = mergingWindowsByKey.get(getStateBackend().getCurrentKey());
- if (mergingWindows == null) {
- mergingWindows = new MergingWindowSet<>((MergingWindowAssigner<? super IN, W>) windowAssigner);
- mergingWindowsByKey.put(key, mergingWindows);
- }
-
+ MergingWindowSet<W> mergingWindows = getMergingWindowSet();
for (W window: elementWindows) {
// If there is a merge, it can only result in a window that contains our new
@@ -255,6 +274,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
public void merge(W mergeResult,
Collection<W> mergedWindows, W stateWindowResult,
Collection<W> mergedStateWindows) throws Exception {
+ context.key = key;
context.window = mergeResult;
// store for later use
@@ -286,7 +306,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
TriggerResult combinedTriggerResult = TriggerResult.merge(triggerResult, mergeTriggerResult.f0);
- processTriggerResult(combinedTriggerResult, key, actualWindow);
+ processTriggerResult(combinedTriggerResult, actualWindow);
}
} else {
@@ -301,13 +321,40 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
context.window = window;
TriggerResult triggerResult = context.onElement(element);
- processTriggerResult(triggerResult, key, window);
+ processTriggerResult(triggerResult, window);
}
}
}
+ /**
+ * Retrieves the {@link MergingWindowSet} for the currently active key. The caller must
+ * ensure that the correct key is set in the state backend.
+ */
+ @SuppressWarnings("unchecked")
+ protected MergingWindowSet<W> getMergingWindowSet() throws Exception {
+ MergingWindowSet<W> mergingWindows = mergingWindowsByKey.get((K) getStateBackend().getCurrentKey());
+ if (mergingWindows == null) {
+ // try to retrieve from state
+
+ TupleSerializer<Tuple2<W, W>> tupleSerializer = new TupleSerializer<>((Class) Tuple2.class, new TypeSerializer[] {windowSerializer, windowSerializer} );
+ ListStateDescriptor<Tuple2<W, W>> mergeStateDescriptor = new ListStateDescriptor<>("merging-window-set", tupleSerializer);
+ ListState<Tuple2<W, W>> mergeState = getStateBackend().getPartitionedState(null, VoidSerializer.INSTANCE, mergeStateDescriptor);
+
+ mergingWindows = new MergingWindowSet<>((MergingWindowAssigner<? super IN, W>) windowAssigner, mergeState);
+ mergeState.clear();
+
+ mergingWindowsByKey.put((K) getStateBackend().getCurrentKey(), mergingWindows);
+ }
+ return mergingWindows;
+ }
+
+
+ /**
+ * Process {@link TriggerResult} for the currently active key and the given window. The caller
+ * must ensure that the correct key is set in the state backend and the context object.
+ */
@SuppressWarnings("unchecked")
- protected void processTriggerResult(TriggerResult triggerResult, K key, W window) throws Exception {
+ protected void processTriggerResult(TriggerResult triggerResult, W window) throws Exception {
if (!triggerResult.isFire() && !triggerResult.isPurge()) {
// do nothing
return;
@@ -318,7 +365,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
MergingWindowSet<W> mergingWindows = null;
if (windowAssigner instanceof MergingWindowAssigner) {
- mergingWindows = mergingWindowsByKey.get(key);
+ mergingWindows = getMergingWindowSet();
W stateWindow = mergingWindows.getStateWindow(window);
windowState = getPartitionedState(stateWindow, windowSerializer, windowStateDescriptor);
@@ -366,7 +413,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
context.window = timer.window;
setKeyContext(timer.key);
TriggerResult triggerResult = context.onEventTime(timer.timestamp);
- processTriggerResult(triggerResult, context.key, context.window);
+ processTriggerResult(triggerResult, context.window);
} else {
fire = false;
}
@@ -389,7 +436,7 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
context.window = timer.window;
setKeyContext(timer.key);
TriggerResult triggerResult = context.onProcessingTime(timer.timestamp);
- processTriggerResult(triggerResult, context.key, context.window);
+ processTriggerResult(triggerResult, context.window);
} else {
fire = false;
}
@@ -604,7 +651,20 @@ public class WindowOperator<K, IN, ACC, OUT, W extends Window>
// ------------------------------------------------------------------------
@Override
+ @SuppressWarnings("unchecked")
public StreamTaskState snapshotOperatorState(long checkpointId, long timestamp) throws Exception {
+
+ if (mergingWindowsByKey != null) {
+ TupleSerializer<Tuple2<W, W>> tupleSerializer = new TupleSerializer<>((Class) Tuple2.class, new TypeSerializer[] {windowSerializer, windowSerializer} );
+ ListStateDescriptor<Tuple2<W, W>> mergeStateDescriptor = new ListStateDescriptor<>("merging-window-set", tupleSerializer);
+ for (Map.Entry<K, MergingWindowSet<W>> key: mergingWindowsByKey.entrySet()) {
+ setKeyContext(key.getKey());
+ ListState<Tuple2<W, W>> mergeState = getStateBackend().getPartitionedState(null, VoidSerializer.INSTANCE, mergeStateDescriptor);
+ mergeState.clear();
+ key.getValue().persist(mergeState);
+ }
+ }
+
StreamTaskState taskState = super.snapshotOperatorState(checkpointId, timestamp);
AbstractStateBackend.CheckpointStateOutputView out =
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3740_f2f5bd5b.diff |
bugs-dot-jar_data_FLINK-2082_0cfa43d7 | ---
BugID: FLINK-2082
Summary: Chained stream tasks share the same RuntimeContext
Description: "Chained stream operators currently share the same runtimecontext, this
will certainly lead to problems in the future. \n\nWe should create separate runtime
contexts for each operator in the chain."
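The patch below gives each chained operator its own StreamingRuntimeContext created from that operator's StreamConfig. As a rough, hedged illustration of the idea only (hypothetical stand-in classes, not the actual Flink types):

{code}
// Simplified model: RuntimeContextModel/OperatorModel stand in for
// StreamingRuntimeContext and chained stream operators.
import java.util.ArrayList;
import java.util.List;

public class ChainedContextsSketch {

    /** Stand-in for StreamingRuntimeContext: carries per-operator information. */
    static final class RuntimeContextModel {
        final String operatorName;
        RuntimeContextModel(String operatorName) { this.operatorName = operatorName; }
    }

    /** Stand-in for a chained stream operator. */
    static final class OperatorModel {
        final String name;
        RuntimeContextModel context;
        OperatorModel(String name) { this.name = name; }
        void setup(RuntimeContextModel ctx) { this.context = ctx; }
    }

    public static void main(String[] args) {
        List<OperatorModel> chain = List.of(new OperatorModel("Map"), new OperatorModel("Filter"));

        // Before the fix: one context shared by the whole chain, so per-operator
        // information such as the operator name collides.
        RuntimeContextModel shared = new RuntimeContextModel("WholeChain");
        chain.forEach(op -> op.setup(shared));
        System.out.println(chain.get(0).context.operatorName + " / "
                + chain.get(1).context.operatorName);            // WholeChain / WholeChain

        // After the fix: one context per operator, tracked by the task
        // (mirrors vertex.contexts.add(chainedContext) in the patch).
        List<RuntimeContextModel> contexts = new ArrayList<>();
        for (OperatorModel op : chain) {
            RuntimeContextModel own = new RuntimeContextModel(op.name);
            contexts.add(own);
            op.setup(own);
        }
        System.out.println(chain.get(0).context.operatorName + " / "
                + chain.get(1).context.operatorName);            // Map / Filter
    }
}
{code}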
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
index c953a94..38f1231 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/OutputHandler.java
@@ -150,7 +150,11 @@ public class OutputHandler<OUT> {
// operator which will be returned and set it up using the wrapper
OneInputStreamOperator chainableOperator =
chainedTaskConfig.getStreamOperator(vertex.getUserCodeClassLoader());
- chainableOperator.setup(wrapper, vertex.context);
+
+ StreamingRuntimeContext chainedContext = vertex.createRuntimeContext(chainedTaskConfig);
+ vertex.contexts.add(chainedContext);
+
+ chainableOperator.setup(wrapper, chainedContext);
chainedOperators.add(chainableOperator);
return new OperatorCollector<X>(chainableOperator);
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
index d678922..1a3d44f 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamTask.java
@@ -62,7 +62,7 @@ public abstract class StreamTask<OUT, O extends StreamOperator<OUT>> extends Abs
protected volatile boolean isRunning = false;
- protected StreamingRuntimeContext context;
+ protected List<StreamingRuntimeContext> contexts;
protected ClassLoader userClassLoader;
@@ -73,21 +73,26 @@ public abstract class StreamTask<OUT, O extends StreamOperator<OUT>> extends Abs
public StreamTask() {
streamOperator = null;
superstepListener = new SuperstepEventListener();
+ contexts = new ArrayList<StreamingRuntimeContext>();
}
@Override
public void registerInputOutput() {
this.userClassLoader = getUserCodeClassLoader();
this.configuration = new StreamConfig(getTaskConfiguration());
- this.context = createRuntimeContext(getEnvironment().getTaskName());
this.stateHandleProvider = getStateHandleProvider();
outputHandler = new OutputHandler<OUT>(this);
streamOperator = configuration.getStreamOperator(userClassLoader);
+
if (streamOperator != null) {
+ //Create context of the head operator
+ StreamingRuntimeContext headContext = createRuntimeContext(configuration);
+ this.contexts.add(headContext);
+
// IterationHead and IterationTail don't have an Operator...
- streamOperator.setup(outputHandler.getOutput(), this.context);
+ streamOperator.setup(outputHandler.getOutput(), headContext);
}
hasChainedOperators = !outputHandler.getChainedOperators().isEmpty();
@@ -97,10 +102,10 @@ public abstract class StreamTask<OUT, O extends StreamOperator<OUT>> extends Abs
return getEnvironment().getTaskName();
}
- public StreamingRuntimeContext createRuntimeContext(String taskName) {
+ public StreamingRuntimeContext createRuntimeContext(StreamConfig conf) {
Environment env = getEnvironment();
- return new StreamingRuntimeContext(taskName, env, getUserCodeClassLoader(),
- getExecutionConfig());
+ return new StreamingRuntimeContext(conf.getStreamOperator(userClassLoader).getClass()
+ .getSimpleName(), env, getUserCodeClassLoader(), getExecutionConfig());
}
private StateHandleProvider<Serializable> getStateHandleProvider() {
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2082_0cfa43d7.diff |
bugs-dot-jar_data_FLINK-2707_3e233a38 | ---
BugID: FLINK-2707
Summary: Set state checkpointer before default state for PartitionedStreamOperatorState
Description: |-
Currently the default state is set before the passed StateCheckpointer instance for operator states.
Because of this, the default value is serialized with Java serialization and then deserialized on the opstate.value() call using the StateCheckpointer, which most likely causes a failure.
This can be trivially fixed by swapping the order of the two calls.
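The ordering problem can be seen in a minimal, hedged sketch (hypothetical stand-in types, not the real StreamOperatorState/StateCheckpointer): the default value is snapshotted with whatever checkpointer is installed when setDefaultState() runs, so installing the custom checkpointer afterwards makes the later value() call decode bytes written in the wrong format.

{code}
// Simplified model only; the real classes live in flink-streaming-core.
import java.nio.charset.StandardCharsets;

public class CheckpointerOrderSketch {

    /** Stand-in for StateCheckpointer: turns a state value into bytes and back. */
    interface Checkpointer<S> {
        byte[] snapshot(S state);
        S restore(byte[] bytes);
    }

    /** Models the built-in "Java serialization" path with a simple marker prefix. */
    static final Checkpointer<String> JAVA_LIKE = new Checkpointer<String>() {
        public byte[] snapshot(String s) { return ("JAVA:" + s).getBytes(StandardCharsets.UTF_8); }
        public String restore(byte[] b) {
            String s = new String(b, StandardCharsets.UTF_8);
            if (!s.startsWith("JAVA:")) throw new IllegalStateException("not Java-serialized: " + s);
            return s.substring(5);
        }
    };

    /** A user-provided checkpointer with its own, incompatible format. */
    static final Checkpointer<String> CUSTOM = new Checkpointer<String>() {
        public byte[] snapshot(String s) { return ("CUSTOM:" + s).getBytes(StandardCharsets.UTF_8); }
        public String restore(byte[] b) {
            String s = new String(b, StandardCharsets.UTF_8);
            if (!s.startsWith("CUSTOM:")) throw new IllegalStateException("unexpected format: " + s);
            return s.substring(7);
        }
    };

    /** Stand-in for the operator state: the default is snapshotted with the *current* checkpointer. */
    static final class StateModel {
        private Checkpointer<String> checkpointer = JAVA_LIKE;
        private byte[] storedDefault;
        void setCheckpointer(Checkpointer<String> c) { this.checkpointer = c; }
        void setDefaultState(String def) { this.storedDefault = checkpointer.snapshot(def); }
        String value() { return checkpointer.restore(storedDefault); }
    }

    public static void main(String[] args) {
        // Buggy order: default written via the Java-like path, read via the custom one.
        StateModel buggy = new StateModel();
        buggy.setDefaultState("hello");
        buggy.setCheckpointer(CUSTOM);
        try {
            buggy.value();
        } catch (IllegalStateException e) {
            System.out.println("buggy order fails: " + e.getMessage());
        }

        // Fixed order (what the patch does): install the checkpointer first, then the default.
        StateModel fixed = new StateModel();
        fixed.setCheckpointer(CUSTOM);
        fixed.setDefaultState("hello");
        System.out.println("fixed order reads back: " + fixed.value());
    }
}
{code}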
diff --git a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamingRuntimeContext.java b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamingRuntimeContext.java
index 2ca2862..b82888e 100644
--- a/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamingRuntimeContext.java
+++ b/flink-staging/flink-streaming/flink-streaming-core/src/main/java/org/apache/flink/streaming/runtime/tasks/StreamingRuntimeContext.java
@@ -104,8 +104,8 @@ public class StreamingRuntimeContext extends RuntimeUDFContext {
throw new RuntimeException("Cannot set default state to null.");
}
StreamOperatorState<S, C> state = (StreamOperatorState<S, C>) getState(name, partitioned);
- state.setDefaultState(defaultState);
state.setCheckpointer(checkpointer);
+ state.setDefaultState(defaultState);
return (OperatorState<S>) state;
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2707_3e233a38.diff |
bugs-dot-jar_data_FLINK-2763_af477563 | ---
BugID: FLINK-2763
Summary: 'Bug in Hybrid Hash Join: Request to spill a partition with less than two
buffers.'
Description: "The following exception is thrown when running the example triangle
listing with an unmodified master build (4cadc3d6).\n\n{noformat}\n./bin/flink run
~/flink-examples/flink-java-examples/target/flink-java-examples-0.10-SNAPSHOT-EnumTrianglesOpt.jar
~/rmat/undirected/s19_e8.ssv output\n{noformat}\n\nThe only changes to {{flink-conf.yaml}}
are {{taskmanager.numberOfTaskSlots: 8}} and {{parallelism.default: 8}}.\n\nI have
confirmed with input files [s19_e8.ssv|https://drive.google.com/file/d/0B6TrSsnHj2HxR2lnMHR4amdyTnM/view?usp=sharing]
(40 MB) and [s20_e8.ssv|https://drive.google.com/file/d/0B6TrSsnHj2HxNi1HbmptU29MTm8/view?usp=sharing]
(83 MB). On a second machine only the larger file caused the exception.\n\n{noformat}\norg.apache.flink.client.program.ProgramInvocationException:
The program execution failed: Job execution failed.\n\tat org.apache.flink.client.program.Client.runBlocking(Client.java:407)\n\tat
org.apache.flink.client.program.Client.runBlocking(Client.java:386)\n\tat org.apache.flink.client.program.Client.runBlocking(Client.java:353)\n\tat
org.apache.flink.client.program.ContextEnvironment.execute(ContextEnvironment.java:64)\n\tat
org.apache.flink.examples.java.graph.EnumTrianglesOpt.main(EnumTrianglesOpt.java:125)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat
java.lang.reflect.Method.invoke(Method.java:497)\n\tat org.apache.flink.client.program.PackagedProgram.callMainMethod(PackagedProgram.java:434)\n\tat
org.apache.flink.client.program.PackagedProgram.invokeInteractiveModeForExecution(PackagedProgram.java:350)\n\tat
org.apache.flink.client.program.Client.runBlocking(Client.java:290)\n\tat org.apache.flink.client.CliFrontend.executeProgramBlocking(CliFrontend.java:675)\n\tat
org.apache.flink.client.CliFrontend.run(CliFrontend.java:324)\n\tat org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:977)\n\tat
org.apache.flink.client.CliFrontend.main(CliFrontend.java:1027)\nCaused by: org.apache.flink.runtime.client.JobExecutionException:
Job execution failed.\n\tat org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1.applyOrElse(JobManager.scala:425)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LeaderSessionMessageFilter$$anonfun$receive$1.applyOrElse(LeaderSessionMessageFilter.scala:36)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:33)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:28)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.applyOrElse(LogMessages.scala:28)\n\tat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\n\tat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:107)\n\tat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\n\tat akka.actor.ActorCell.invoke(ActorCell.scala:487)\n\tat
akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\n\tat akka.dispatch.Mailbox.run(Mailbox.scala:221)\n\tat
akka.dispatch.Mailbox.exec(Mailbox.scala:231)\n\tat scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat
scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: java.lang.RuntimeException: Bug in Hybrid Hash Join: Request to spill a partition
with less than two buffers.\n\tat org.apache.flink.runtime.operators.hash.HashPartition.spillPartition(HashPartition.java:288)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.spillPartition(MutableHashTable.java:1108)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.insertBucketEntry(MutableHashTable.java:934)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.insertIntoTable(MutableHashTable.java:859)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.buildTableFromSpilledPartition(MutableHashTable.java:819)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.prepareNextPartition(MutableHashTable.java:517)\n\tat
org.apache.flink.runtime.operators.hash.MutableHashTable.nextRecord(MutableHashTable.java:556)\n\tat
org.apache.flink.runtime.operators.hash.NonReusingBuildFirstHashMatchIterator.callWithNextKey(NonReusingBuildFirstHashMatchIterator.java:104)\n\tat
org.apache.flink.runtime.operators.JoinDriver.run(JoinDriver.java:208)\n\tat org.apache.flink.runtime.operators.RegularPactTask.run(RegularPactTask.java:489)\n\tat
org.apache.flink.runtime.operators.RegularPactTask.invoke(RegularPactTask.java:354)\n\tat
org.apache.flink.runtime.taskmanager.Task.run(Task.java:579)\n\tat java.lang.Thread.run(Thread.java:745)\n{noformat}"
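For context on the fix below: when the hash table picks an in-memory partition to spill, it should count every memory segment the partition occupies (build-side buffers plus overflow-bucket segments), and the same count should back the "at least two buffers" precondition. A minimal, hedged sketch of that selection logic with hypothetical stand-in classes (not the real HashPartition/MutableHashTable):

{code}
// Simplified model only: real partitions track actual MemorySegments.
import java.util.List;

public class SpillSelectionSketch {

    /** Stand-in for HashPartition with just the counters relevant to spilling. */
    static final class PartitionModel {
        final String name;
        final boolean inMemory;
        final int buildSideBuffers;   // pages buffered for the build side
        final int overflowSegments;   // extra pages used by overflow buckets

        PartitionModel(String name, boolean inMemory, int buildSideBuffers, int overflowSegments) {
            this.name = name;
            this.inMemory = inMemory;
            this.buildSideBuffers = buildSideBuffers;
            this.overflowSegments = overflowSegments;
        }

        /** Mirrors the new getNumOccupiedMemorySegments(): everything the partition holds counts. */
        int occupiedSegments() { return buildSideBuffers + overflowSegments; }
    }

    /** Pick the in-memory partition that frees the most memory when spilled. */
    static PartitionModel pickPartitionToSpill(List<PartitionModel> partitions) {
        PartitionModel best = null;
        for (PartitionModel p : partitions) {
            if (p.inMemory && (best == null || p.occupiedSegments() > best.occupiedSegments())) {
                best = p;
            }
        }
        if (best == null || best.occupiedSegments() < 2) {
            // Spilling needs at least two occupied buffers; the check uses the same count
            // as the selection, so the two can no longer disagree.
            throw new IllegalStateException("no partition with at least two occupied buffers");
        }
        return best;
    }

    public static void main(String[] args) {
        List<PartitionModel> parts = List.of(
                new PartitionModel("p0", true, 1, 3),    // few build buffers, many overflow segments
                new PartitionModel("p1", true, 2, 0),
                new PartitionModel("p2", false, 8, 0));  // already spilled, not a candidate
        System.out.println("spill " + pickPartitionToSpill(parts).name);  // p0 (4 occupied segments)
    }
}
{code}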
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
index 7baaee7..32fd74a 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java
@@ -198,6 +198,19 @@ public class HashPartition<BT, PT> extends AbstractPagedInputView implements See
public final boolean isInMemory() {
return this.buildSideChannel == null;
}
+
+ /**
+ * Gets the number of memory segments used by this partition, which includes build side
+ * memory buffers and overflow memory segments.
+ *
+ * @return The number of occupied memory segments.
+ */
+ public int getNumOccupiedMemorySegments() {
+ // either the number of memory segments, or one for spilling
+ final int numPartitionBuffers = this.partitionBuffers != null ? this.partitionBuffers.length : 1;
+ return numPartitionBuffers + numOverflowSegments;
+ }
+
public int getBuildSideBlockCount() {
return this.partitionBuffers == null ? this.buildSideWriteBuffer.getBlockCount() : this.partitionBuffers.length;
@@ -284,7 +297,7 @@ public class HashPartition<BT, PT> extends AbstractPagedInputView implements See
throw new RuntimeException("Bug in Hybrid Hash Join: " +
"Request to spill a partition that has already been spilled.");
}
- if (getBuildSideBlockCount() + this.numOverflowSegments < 2) {
+ if (getNumOccupiedMemorySegments() < 2) {
throw new RuntimeException("Bug in Hybrid Hash Join: " +
"Request to spill a partition with less than two buffers.");
}
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
index 2ad01aa..efaceea 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java
@@ -1093,8 +1093,8 @@ public class MutableHashTable<BT, PT> implements MemorySegmentSource {
for (int i = 0; i < partitions.size(); i++) {
HashPartition<BT, PT> p = partitions.get(i);
- if (p.isInMemory() && p.getBuildSideBlockCount() > largestNumBlocks) {
- largestNumBlocks = p.getBuildSideBlockCount();
+ if (p.isInMemory() && p.getNumOccupiedMemorySegments() > largestNumBlocks) {
+ largestNumBlocks = p.getNumOccupiedMemorySegments();
largestPartNum = i;
}
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2763_af477563.diff |
bugs-dot-jar_data_FLINK-2109_d594d024 | ---
BugID: FLINK-2109
Summary: CancelTaskException leads to FAILED task state
Description: |-
The {{CancelTaskException}} is thrown to trigger canceling of the executing task. It is intended to cause a cancelled status, rather than a failed status.
Currently, it leads to a {{FAILED}} state instead of the expected {{CANCELED}} state.
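A minimal, hedged sketch of the intended behaviour (hypothetical stand-in types; the real logic sits in org.apache.flink.runtime.taskmanager.Task and uses atomic state transitions): a CancelTaskException thrown by the invokable should end in CANCELED, while any other throwable ends in FAILED with the throwable recorded as the failure cause.

{code}
// Simplified model only.
public class CancelVsFailSketch {

    enum ExecutionState { RUNNING, CANCELED, FAILED }

    /** Marker meaning "this throwable requests cancellation" (models CancelTaskException). */
    static class CancelRequestedException extends RuntimeException {
        CancelRequestedException(String msg) { super(msg); }
    }

    static final class TaskModel {
        ExecutionState state = ExecutionState.RUNNING;
        Throwable failureCause;

        /** Called from the task's catch-all when the invokable threw. */
        void handleInvokableError(Throwable t) {
            if (t instanceof CancelRequestedException) {
                // Cancellation triggered from inside the task: terminal state CANCELED, no failure cause.
                state = ExecutionState.CANCELED;
            } else {
                // Genuine failure: record the root cause and go to FAILED.
                failureCause = t;
                state = ExecutionState.FAILED;
            }
        }
    }

    public static void main(String[] args) {
        TaskModel canceled = new TaskModel();
        canceled.handleInvokableError(new CancelRequestedException("result partition released"));
        System.out.println(canceled.state);                                          // CANCELED

        TaskModel failed = new TaskModel();
        failed.handleInvokableError(new RuntimeException("boom"));
        System.out.println(failed.state + " / " + failed.failureCause.getMessage()); // FAILED / boom
    }
}
{code}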
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
index 40198dc..6250837 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
@@ -537,7 +537,7 @@ public class Task implements Runnable {
// actual task core work
// ----------------------------------------------------------------
- // we must make strictly sure that the invokable is accessible to teh cancel() call
+ // we must make strictly sure that the invokable is accessible to the cancel() call
// by the time we switched to running.
this.invokable = invokable;
@@ -597,22 +597,25 @@ public class Task implements Runnable {
// to failExternally()
while (true) {
ExecutionState current = this.executionState;
+
if (current == ExecutionState.RUNNING || current == ExecutionState.DEPLOYING) {
- if (STATE_UPDATER.compareAndSet(this, current, ExecutionState.FAILED)) {
- // proper failure of the task. record the exception as the root cause
- failureCause = t;
- notifyObservers(ExecutionState.FAILED, t);
-
- // in case of an exception during execution, we still call "cancel()" on the task
- if (invokable != null && this.invokable != null && invokableHasBeenCanceled.compareAndSet(false, true)) {
- try {
- invokable.cancel();
- }
- catch (Throwable t2) {
- LOG.error("Error while canceling task " + taskNameWithSubtask, t2);
- }
+ if (t instanceof CancelTaskException) {
+ if (STATE_UPDATER.compareAndSet(this, current, ExecutionState.CANCELED)) {
+ cancelInvokable();
+
+ notifyObservers(ExecutionState.CANCELED, null);
+ break;
+ }
+ }
+ else {
+ if (STATE_UPDATER.compareAndSet(this, current, ExecutionState.FAILED)) {
+ // proper failure of the task. record the exception as the root cause
+ failureCause = t;
+ cancelInvokable();
+
+ notifyObservers(ExecutionState.FAILED, t);
+ break;
}
- break;
}
}
else if (current == ExecutionState.CANCELING) {
@@ -746,7 +749,7 @@ public class Task implements Runnable {
}
/**
- * Marks task execution failed for an external reason (a reason other than th task code itself
+ * Marks task execution failed for an external reason (a reason other than the task code itself
* throwing an exception). If the task is already in a terminal state
* (such as FINISHED, CANCELED, FAILED), or if the task is already canceling this does nothing.
* Otherwise it sets the state to FAILED, and, if the invokable code is running,
@@ -962,6 +965,18 @@ public class Task implements Runnable {
// Utilities
// ------------------------------------------------------------------------
+ private void cancelInvokable() {
+ // in case of an exception during execution, we still call "cancel()" on the task
+ if (invokable != null && this.invokable != null && invokableHasBeenCanceled.compareAndSet(false, true)) {
+ try {
+ invokable.cancel();
+ }
+ catch (Throwable t) {
+ LOG.error("Error while canceling task " + taskNameWithSubtask, t);
+ }
+ }
+ }
+
@Override
public String toString() {
return getTaskNameWithSubtasks() + " [" + executionState + ']';
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2109_d594d024.diff |
bugs-dot-jar_data_FLINK-3011_5a86a0a1 | ---
BugID: FLINK-3011
Summary: Cannot cancel failing/restarting streaming job from the command line
Description: |-
I cannot cancel a failing/restarting job from the command line client. The job cannot be rescheduled, so it keeps failing:
The exception I get:
13:58:11,240 INFO org.apache.flink.runtime.jobmanager.JobManager - Status of job 0c895d22c632de5dfe16c42a9ba818d5 (player-id) changed to RESTARTING.
13:58:25,234 INFO org.apache.flink.runtime.jobmanager.JobManager - Trying to cancel job with ID 0c895d22c632de5dfe16c42a9ba818d5.
13:58:25,561 WARN akka.remote.ReliableDeliverySupervisor - Association with remote system [akka.tcp://[email protected]:42012] has failed, address is now gated for [5000] ms. Reason is: [Disassociated].
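A minimal, hedged sketch of the cancel/restart interplay addressed by the patch below (hypothetical stand-in types; the real ExecutionGraph uses atomic transitionState() calls and a progress lock): cancel() must also cover the FAILING and RESTARTING states, and a pending restart must abort if the job was canceled in the meantime.

{code}
// Simplified model only.
public class CancelWhileRestartingSketch {

    enum JobStatus { RUNNING, FAILING, RESTARTING, CANCELLING, CANCELED }

    static final class GraphModel {
        JobStatus status = JobStatus.RUNNING;

        void cancel() {
            switch (status) {
                case RUNNING:
                case FAILING:
                    // Tasks are still winding down: cancel them and wait in CANCELLING.
                    status = JobStatus.CANCELLING;
                    break;
                case RESTARTING:
                    // All tasks already reached a terminal state: finish the cancel directly.
                    status = JobStatus.CANCELED;
                    break;
                default:
                    // terminal or already cancelling: nothing to do
                    break;
            }
        }

        void restart() {
            if (status == JobStatus.CANCELED) {
                // The job was canceled while waiting for the restart delay: abort the restart.
                return;
            }
            if (status != JobStatus.RESTARTING) {
                throw new IllegalStateException("can only restart from RESTARTING, was " + status);
            }
            status = JobStatus.RUNNING;
        }
    }

    public static void main(String[] args) {
        GraphModel g = new GraphModel();
        g.status = JobStatus.RESTARTING;  // the job failed and is waiting to be restarted
        g.cancel();                       // user cancels from the command line
        g.restart();                      // the delayed restart fires, but must not resurrect the job
        System.out.println(g.status);     // CANCELED
    }
}
{code}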
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
index aae0b7c..1e5d02c 100755
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/ExecutionGraph.java
@@ -711,6 +711,26 @@ public class ExecutionGraph implements Serializable {
return;
}
}
+ // Executions are being canceled. Go into cancelling and wait for
+ // all vertices to be in their final state.
+ else if (current == JobStatus.FAILING) {
+ if (transitionState(current, JobStatus.CANCELLING)) {
+ return;
+ }
+ }
+ // All vertices have been cancelled and it's safe to directly go
+ // into the canceled state.
+ else if (current == JobStatus.RESTARTING) {
+ synchronized (progressLock) {
+ if (transitionState(current, JobStatus.CANCELED)) {
+ postRunCleanup();
+ progressLock.notifyAll();
+
+ LOG.info("Canceled during restart.");
+ return;
+ }
+ }
+ }
else {
// no need to treat other states
return;
@@ -747,9 +767,16 @@ public class ExecutionGraph implements Serializable {
public void restart() {
try {
synchronized (progressLock) {
- if (state != JobStatus.RESTARTING) {
+ JobStatus current = state;
+
+ if (current == JobStatus.CANCELED) {
+ LOG.info("Canceled job during restart. Aborting restart.");
+ return;
+ }
+ else if (current != JobStatus.RESTARTING) {
throw new IllegalStateException("Can only restart job from state restarting.");
}
+
if (scheduler == null) {
throw new IllegalStateException("The execution graph has not been scheduled before - scheduler is null.");
}
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3011_5a86a0a1.diff |
bugs-dot-jar_data_FLINK-1167_259f10c0 | ---
BugID: FLINK-1167
Summary: CompilerException caused by NullPointerException
Description: "Run into it during working on my code. Seems not caused by my plan,
or anyway the compiler should have a NullPointer isssue:\n\norg.apache.flink.compiler.CompilerException:
An error occurred while translating the optimized plan to a nephele JobGraph: Error
translating node 'Union \"Union\" : UNION [[ GlobalProperties [partitioning=HASH_PARTITIONED,
on fields [0]] ]] [[ LocalProperties [ordering=null, grouped=null, unique=null]
]]': null\n\tat org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.postVisit(NepheleJobGraphGenerator.java:543)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.postVisit(NepheleJobGraphGenerator.java:95)\n\tat
org.apache.flink.compiler.plan.DualInputPlanNode.accept(DualInputPlanNode.java:170)\n\tat
org.apache.flink.compiler.plan.SingleInputPlanNode.accept(SingleInputPlanNode.java:196)\n\tat
org.apache.flink.compiler.plan.SingleInputPlanNode.accept(SingleInputPlanNode.java:196)\n\tat
org.apache.flink.compiler.plan.OptimizedPlan.accept(OptimizedPlan.java:165)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.compileJobGraph(NepheleJobGraphGenerator.java:163)\n\tat
org.apache.flink.client.program.Client.getJobGraph(Client.java:218)\n\tat org.apache.flink.client.program.Client.run(Client.java:290)\n\tat
org.apache.flink.client.program.Client.run(Client.java:285)\n\tat org.apache.flink.client.program.Client.run(Client.java:230)\n\tat
org.apache.flink.client.CliFrontend.executeProgram(CliFrontend.java:347)\n\tat org.apache.flink.client.CliFrontend.run(CliFrontend.java:334)\n\tat
org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:1001)\n\tat
org.apache.flink.client.CliFrontend.main(CliFrontend.java:1025)\nCaused by: org.apache.flink.compiler.CompilerException:
Error translating node 'Union \"Union\" : UNION [[ GlobalProperties [partitioning=HASH_PARTITIONED,
on fields [0]] ]] [[ LocalProperties [ordering=null, grouped=null, unique=null]
]]': null\n\tat org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.preVisit(NepheleJobGraphGenerator.java:338)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.preVisit(NepheleJobGraphGenerator.java:95)\n\tat
org.apache.flink.compiler.plan.DualInputPlanNode.accept(DualInputPlanNode.java:162)\n\tat
org.apache.flink.compiler.plan.WorksetIterationPlanNode.acceptForStepFunction(WorksetIterationPlanNode.java:196)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.postVisit(NepheleJobGraphGenerator.java:398)\n\t...
14 more\nCaused by: java.lang.NullPointerException\n\tat org.apache.flink.runtime.operators.util.TaskConfig.setDriver(TaskConfig.java:307)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.createDualInputVertex(NepheleJobGraphGenerator.java:793)\n\tat
org.apache.flink.compiler.plantranslate.NepheleJobGraphGenerator.preVisit(NepheleJobGraphGenerator.java:286)\n\t...
18 more\n"
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/BulkIterationNode.java b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/BulkIterationNode.java
index a5f8026..d3f0fbb 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/BulkIterationNode.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/BulkIterationNode.java
@@ -132,7 +132,7 @@ public class BulkIterationNode extends SingleInputNode implements IterationNode
// check if the root of the step function has the same DOP as the iteration
// or if the steo function has any operator at all
if (nextPartialSolution.getDegreeOfParallelism() != getDegreeOfParallelism() ||
- nextPartialSolution == partialSolution)
+ nextPartialSolution == partialSolution || nextPartialSolution instanceof BinaryUnionNode)
{
// add a no-op to the root to express the re-partitioning
NoOpNode noop = new NoOpNode();
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/WorksetIterationNode.java b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/WorksetIterationNode.java
index 7638cca..b6ae34e 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/dag/WorksetIterationNode.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/dag/WorksetIterationNode.java
@@ -160,7 +160,7 @@ public class WorksetIterationNode extends TwoInputNode implements IterationNode
// there needs to be at least one node in the workset path, so
// if the next workset is equal to the workset, we need to inject a no-op node
- if (nextWorkset == worksetNode) {
+ if (nextWorkset == worksetNode || nextWorkset instanceof BinaryUnionNode) {
NoOpNode noop = new NoOpNode();
noop.setDegreeOfParallelism(getDegreeOfParallelism());
diff --git a/flink-compiler/src/main/java/org/apache/flink/compiler/plandump/PlanJSONDumpGenerator.java b/flink-compiler/src/main/java/org/apache/flink/compiler/plandump/PlanJSONDumpGenerator.java
index 00e2bc2..60500b8 100644
--- a/flink-compiler/src/main/java/org/apache/flink/compiler/plandump/PlanJSONDumpGenerator.java
+++ b/flink-compiler/src/main/java/org/apache/flink/compiler/plandump/PlanJSONDumpGenerator.java
@@ -16,7 +16,6 @@
* limitations under the License.
*/
-
package org.apache.flink.compiler.plandump;
import java.io.File;
@@ -26,7 +25,6 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -47,7 +45,6 @@ import org.apache.flink.compiler.dataproperties.GlobalProperties;
import org.apache.flink.compiler.dataproperties.LocalProperties;
import org.apache.flink.compiler.plan.BulkIterationPlanNode;
import org.apache.flink.compiler.plan.Channel;
-import org.apache.flink.compiler.plan.NAryUnionPlanNode;
import org.apache.flink.compiler.plan.OptimizedPlan;
import org.apache.flink.compiler.plan.PlanNode;
import org.apache.flink.compiler.plan.SingleInputPlanNode;
@@ -265,121 +262,104 @@ public class PlanJSONDumpGenerator {
if (inConns != null && inConns.hasNext()) {
// start predecessor list
writer.print(",\n\t\t\"predecessors\": [");
- int connNum = 0;
int inputNum = 0;
while (inConns.hasNext()) {
- final DumpableConnection<?> conn = inConns.next();
-
- final Collection<DumpableConnection<?>> inConnsForInput;
- if (conn.getSource() instanceof NAryUnionPlanNode) {
- inConnsForInput = new ArrayList<DumpableConnection<?>>();
+ final DumpableConnection<?> inConn = inConns.next();
+ final DumpableNode<?> source = inConn.getSource();
+ writer.print(inputNum == 0 ? "\n" : ",\n");
+ if (inputNum == 0) {
+ child1name += child1name.length() > 0 ? ", " : "";
+ child1name += source.getOptimizerNode().getPactContract().getName();
+ } else if (inputNum == 1) {
+ child2name += child2name.length() > 0 ? ", " : "";
+ child2name = source.getOptimizerNode().getPactContract().getName();
+ }
+
+ // output predecessor id
+ writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
+
+ // output connection side
+ if (inConns.hasNext() || inputNum > 0) {
+ writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
+ }
+ // output shipping strategy and channel type
+ final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
+ final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() :
+ ((PactConnection) inConn).getShipStrategy();
- for (DumpableConnection<?> inputOfUnion : conn.getSource().getDumpableInputs()) {
- inConnsForInput.add(inputOfUnion);
+ String shipStrategy = null;
+ if (shipType != null) {
+ switch (shipType) {
+ case NONE:
+ // nothing
+ break;
+ case FORWARD:
+ shipStrategy = "Forward";
+ break;
+ case BROADCAST:
+ shipStrategy = "Broadcast";
+ break;
+ case PARTITION_HASH:
+ shipStrategy = "Hash Partition";
+ break;
+ case PARTITION_RANGE:
+ shipStrategy = "Range Partition";
+ break;
+ case PARTITION_RANDOM:
+ shipStrategy = "Redistribute";
+ break;
+ case PARTITION_FORCED_REBALANCE:
+ shipStrategy = "Rebalance";
+ break;
+ default:
+ throw new CompilerException("Unknown ship strategy '" + inConn.getShipStrategy().name()
+ + "' in JSON generator.");
}
}
- else {
- inConnsForInput = Collections.<DumpableConnection<?>>singleton(conn);
+
+ if (channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
+ shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ?
+ channel.getShipStrategyKeys().toString() :
+ Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
+ }
+
+ if (shipStrategy != null) {
+ writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
}
- for (DumpableConnection<?> inConn : inConnsForInput) {
- final DumpableNode<?> source = inConn.getSource();
- writer.print(connNum == 0 ? "\n" : ",\n");
- if (connNum == 0) {
- child1name += child1name.length() > 0 ? ", " : "";
- child1name += source.getOptimizerNode().getPactContract().getName();
- } else if (connNum == 1) {
- child2name += child2name.length() > 0 ? ", " : "";
- child2name = source.getOptimizerNode().getPactContract().getName();
- }
-
- // output predecessor id
- writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
-
- // output connection side
- if (inConns.hasNext() || inputNum > 0) {
- writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
- }
- // output shipping strategy and channel type
- final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
- final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() :
- ((PactConnection) inConn).getShipStrategy();
-
- String shipStrategy = null;
- if (shipType != null) {
- switch (shipType) {
- case NONE:
- // nothing
- break;
- case FORWARD:
- shipStrategy = "Forward";
- break;
- case BROADCAST:
- shipStrategy = "Broadcast";
- break;
- case PARTITION_HASH:
- shipStrategy = "Hash Partition";
- break;
- case PARTITION_RANGE:
- shipStrategy = "Range Partition";
- break;
- case PARTITION_RANDOM:
- shipStrategy = "Redistribute";
- break;
- case PARTITION_FORCED_REBALANCE:
- shipStrategy = "Rebalance";
- break;
- default:
- throw new CompilerException("Unknown ship strategy '" + conn.getShipStrategy().name()
- + "' in JSON generator.");
- }
+ if (channel != null) {
+ String localStrategy = null;
+ switch (channel.getLocalStrategy()) {
+ case NONE:
+ break;
+ case SORT:
+ localStrategy = "Sort";
+ break;
+ case COMBININGSORT:
+ localStrategy = "Sort (combining)";
+ break;
+ default:
+ throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
}
- if (channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
- shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ?
- channel.getShipStrategyKeys().toString() :
- Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
- }
-
- if (shipStrategy != null) {
- writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
+ if (channel != null && channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
+ localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ?
+ channel.getLocalStrategyKeys().toString() :
+ Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
}
- if (channel != null) {
- String localStrategy = null;
- switch (channel.getLocalStrategy()) {
- case NONE:
- break;
- case SORT:
- localStrategy = "Sort";
- break;
- case COMBININGSORT:
- localStrategy = "Sort (combining)";
- break;
- default:
- throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
- }
-
- if (channel != null && channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
- localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ?
- channel.getLocalStrategyKeys().toString() :
- Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
- }
-
- if (localStrategy != null) {
- writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
- }
-
- if (channel != null && channel.getTempMode() != TempMode.NONE) {
- String tempMode = channel.getTempMode().toString();
- writer.print(", \"temp_mode\": \"" + tempMode + "\"");
- }
+ if (localStrategy != null) {
+ writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
}
- writer.print('}');
- connNum++;
+ if (channel != null && channel.getTempMode() != TempMode.NONE) {
+ String tempMode = channel.getTempMode().toString();
+ writer.print(", \"temp_mode\": \"" + tempMode + "\"");
+ }
}
+
+ writer.print('}');
inputNum++;
}
// finish predecessors
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1167_259f10c0.diff |
bugs-dot-jar_data_FLINK-3260_6968a57a | ---
BugID: FLINK-3260
Summary: ExecutionGraph gets stuck in state FAILING
Description: "It is a bit of a rare case, but the following can currently happen:\n\n
\ 1. Jobs runs for a while, some tasks are already finished.\n 2. Job fails, goes
to state failing and restarting. Non-finished tasks fail or are canceled.\n 3.
For the finished tasks, ask-futures from certain messages (for example for releasing
intermediate result partitions) can fail (timeout) and cause the execution to go
from FINISHED to FAILED\n 4. This triggers the execution graph to go to FAILING
without ever going further into RESTARTING again\n 5. The job is stuck\n\nIt initially
looks like this is mainly an issue for batch jobs (jobs where tasks do finish, rather
than run infinitely).\n\nThe log that shows how this manifests:\n{code}\n--------------------------------------------------------------------------------\n17:19:19,782
INFO akka.event.slf4j.Slf4jLogger - Slf4jLogger
started\n17:19:19,844 INFO Remoting -
Starting remoting\n17:19:20,065 INFO Remoting -
Remoting started; listening on addresses :[akka.tcp://[email protected]:56722]\n17:19:20,090
INFO org.apache.flink.runtime.blob.BlobServer - Created BLOB
server storage directory /tmp/blobStore-6766f51a-1c51-4a03-acfb-08c2c29c11f0\n17:19:20,096
INFO org.apache.flink.runtime.blob.BlobServer - Started BLOB
server at 0.0.0.0:43327 - max concurrent requests: 50 - max backlog: 1000\n17:19:20,113
INFO org.apache.flink.runtime.jobmanager.MemoryArchivist - Started memory
archivist akka://flink/user/archive\n17:19:20,115 INFO org.apache.flink.runtime.checkpoint.SavepointStoreFactory
\ - No savepoint state backend configured. Using job manager savepoint state
backend.\n17:19:20,118 INFO org.apache.flink.runtime.jobmanager.JobManager -
Starting JobManager at akka.tcp://[email protected]:56722/user/jobmanager.\n17:19:20,123
INFO org.apache.flink.runtime.jobmanager.JobManager - JobManager
akka.tcp://[email protected]:56722/user/jobmanager was granted leadership with leader
session ID None.\n17:19:25,605 INFO org.apache.flink.runtime.instance.InstanceManager
\ - Registered TaskManager at testing-worker-linux-docker-e6d6931f-3200-linux-4
(akka.tcp://[email protected]:43702/user/taskmanager) as f213232054587f296a12140d56f63ed1.
Current number of registered hosts is 1. Current number of alive task slots is 2.\n17:19:26,758
INFO org.apache.flink.runtime.instance.InstanceManager - Registered
TaskManager at testing-worker-linux-docker-e6d6931f-3200-linux-4 (akka.tcp://[email protected]:43956/user/taskmanager)
as f9e78baa14fb38c69517fb1bcf4f419c. Current number of registered hosts is 2. Current
number of alive task slots is 4.\n17:19:27,064 INFO org.apache.flink.api.java.ExecutionEnvironment
\ - The job has 0 registered types and 0 default Kryo serializers\n17:19:27,071
INFO org.apache.flink.client.program.Client - Starting client
actor system\n17:19:27,072 INFO org.apache.flink.runtime.client.JobClient -
Starting JobClient actor system\n17:19:27,110 INFO akka.event.slf4j.Slf4jLogger
\ - Slf4jLogger started\n17:19:27,121 INFO Remoting
\ - Starting remoting\n17:19:27,143
INFO org.apache.flink.runtime.client.JobClient - Started JobClient
actor system at 127.0.0.1:51198\n17:19:27,145 INFO Remoting -
Remoting started; listening on addresses :[akka.tcp://[email protected]:51198]\n17:19:27,325
INFO org.apache.flink.runtime.client.JobClientActor - Disconnect
from JobManager null.\n17:19:27,362 INFO org.apache.flink.runtime.client.JobClientActor
\ - Received job Flink Java Job at Mon Jan 18 17:19:27 UTC 2016 (fa05fd25993a8742da09cc5023c1e38d).\n17:19:27,362
INFO org.apache.flink.runtime.client.JobClientActor - Could not
submit job Flink Java Job at Mon Jan 18 17:19:27 UTC 2016 (fa05fd25993a8742da09cc5023c1e38d),
because there is no connection to a JobManager.\n17:19:27,379 INFO org.apache.flink.runtime.client.JobClientActor
\ - Connect to JobManager Actor[akka.tcp://[email protected]:56722/user/jobmanager#-1489998809].\n17:19:27,379
INFO org.apache.flink.runtime.client.JobClientActor - Connected
to new JobManager akka.tcp://[email protected]:56722/user/jobmanager.\n17:19:27,379
INFO org.apache.flink.runtime.client.JobClientActor - Sending message
to JobManager akka.tcp://[email protected]:56722/user/jobmanager to submit job Flink
Java Job at Mon Jan 18 17:19:27 UTC 2016 (fa05fd25993a8742da09cc5023c1e38d) and
wait for progress\n17:19:27,380 INFO org.apache.flink.runtime.client.JobClientActor
\ - Upload jar files to job manager akka.tcp://[email protected]:56722/user/jobmanager.\n17:19:27,380
INFO org.apache.flink.runtime.client.JobClientActor - Submit job
to the job manager akka.tcp://[email protected]:56722/user/jobmanager.\n17:19:27,453
INFO org.apache.flink.runtime.jobmanager.JobManager - Submitting
job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27 UTC
2016).\n17:19:27,591 INFO org.apache.flink.runtime.jobmanager.JobManager -
Scheduling job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27
UTC 2016).\n17:19:27,592 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (1/4) (c79bf4381462c690f5999f2d1949ab50)
switched from CREATED to SCHEDULED\n17:19:27,596 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (1/4) (c79bf4381462c690f5999f2d1949ab50)
switched from SCHEDULED to DEPLOYING\n17:19:27,597 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Deploying DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (1/4) (attempt #0) to
testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:27,606 INFO org.apache.flink.runtime.client.JobClientActor
\ - Job was successfully submitted to the JobManager akka.tcp://[email protected]:56722/user/jobmanager.\n17:19:27,630
INFO org.apache.flink.runtime.jobmanager.JobManager - Status of
job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27 UTC
2016) changed to RUNNING.\n17:19:27,637 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (2/4) (e73af91028cb76f7d3cd887cb6d66755)
switched from CREATED to SCHEDULED\n17:19:27,654 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tJob execution switched to status RUNNING.\n17:19:27,655
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(1/4) switched to SCHEDULED
\n17:19:27,656 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(1/4) switched to DEPLOYING
\n17:19:27,666 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (2/4) (e73af91028cb76f7d3cd887cb6d66755)
switched from SCHEDULED to DEPLOYING\n17:19:27,667 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Deploying DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (2/4) (attempt #0) to
testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:27,667 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(2/4) switched to SCHEDULED
\n17:19:27,669 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(2/4) switched to DEPLOYING
\n17:19:27,681 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (3/4) (807daf978da9dc347dca930822c78f8f)
switched from CREATED to SCHEDULED\n17:19:27,682 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (3/4) (807daf978da9dc347dca930822c78f8f)
switched from SCHEDULED to DEPLOYING\n17:19:27,682 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Deploying DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (3/4) (attempt #0) to
testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:27,682 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (4/4) (ba45c37065b67fc8f5005a50d0e88fff)
switched from CREATED to SCHEDULED\n17:19:27,682 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (4/4) (ba45c37065b67fc8f5005a50d0e88fff)
switched from SCHEDULED to DEPLOYING\n17:19:27,685 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Deploying DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (4/4) (attempt #0) to
testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:27,686 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(3/4) switched to SCHEDULED
\n17:19:27,687 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(3/4) switched to DEPLOYING
\n17:19:27,687 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(4/4) switched to SCHEDULED
\n17:19:27,692 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(4/4) switched to DEPLOYING
\n17:19:27,833 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (4/4) (ba45c37065b67fc8f5005a50d0e88fff)
switched from DEPLOYING to RUNNING\n17:19:27,839 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(4/4) switched to RUNNING
\n17:19:27,840 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (2/4) (e73af91028cb76f7d3cd887cb6d66755)
switched from DEPLOYING to RUNNING\n17:19:27,852 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(2/4) switched to RUNNING
\n17:19:27,896 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (1/4) (c79bf4381462c690f5999f2d1949ab50)
switched from DEPLOYING to RUNNING\n17:19:27,898 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (3/4) (807daf978da9dc347dca930822c78f8f)
switched from DEPLOYING to RUNNING\n17:19:27,901 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(1/4) switched to RUNNING
\n17:19:27,905 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:27\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(3/4) switched to RUNNING
\n17:19:28,114 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(3/4) (7997918330ecf2610b3298a8c8ef2852) switched from CREATED to SCHEDULED\n17:19:28,126
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/4) (6421c8f88b191ea844619a40a523773b) switched from CREATED to SCHEDULED\n17:19:28,134
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/4) (6421c8f88b191ea844619a40a523773b) switched from SCHEDULED to DEPLOYING\n17:19:28,134
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/4) (attempt #0) to testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:28,126
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (d0d011dc0a0823bcec5a57a369b334ed) switched from CREATED to SCHEDULED\n17:19:28,139
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (d0d011dc0a0823bcec5a57a369b334ed) switched from SCHEDULED to DEPLOYING\n17:19:28,139
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (attempt #0) to testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:28,117
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(4/4) (c928d19f73d700e80cdfad650689febb) switched from CREATED to SCHEDULED\n17:19:28,134
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(3/4) (7997918330ecf2610b3298a8c8ef2852) switched from SCHEDULED to DEPLOYING\n17:19:28,140
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(3/4) (attempt #0) to testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:28,140
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - CHAIN Partition
-> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(4/4) (c928d19f73d700e80cdfad650689febb) switched from SCHEDULED to DEPLOYING\n17:19:28,141
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(4/4) (attempt #0) to testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:28,147
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(3/4)
switched to SCHEDULED \n17:19:28,153 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/4)
switched to SCHEDULED \n17:19:28,153 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/4)
switched to DEPLOYING \n17:19:28,153 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(2/4)
switched to SCHEDULED \n17:19:28,153 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(2/4)
switched to DEPLOYING \n17:19:28,156 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(3/4)
switched to DEPLOYING \n17:19:28,158 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(4/4)
switched to SCHEDULED \n17:19:28,165 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(4/4)
switched to DEPLOYING \n17:19:28,238 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (2/4) (e73af91028cb76f7d3cd887cb6d66755)
switched from RUNNING to FINISHED\n17:19:28,242 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(2/4) switched to FINISHED
\n17:19:28,308 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (3/4) (807daf978da9dc347dca930822c78f8f)
switched from RUNNING to FINISHED\n17:19:28,315 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (1/4) (c79bf4381462c690f5999f2d1949ab50)
switched from RUNNING to FINISHED\n17:19:28,317 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(3/4) switched to FINISHED
\n17:19:28,318 INFO org.apache.flink.runtime.client.JobClientActor -
01/18/2016 17:19:28\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(1/4) switched to FINISHED
\n17:19:28,328 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/4) (6421c8f88b191ea844619a40a523773b) switched from DEPLOYING to RUNNING\n17:19:28,336
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/4)
switched to RUNNING \n17:19:28,338 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(3/4) (7997918330ecf2610b3298a8c8ef2852) switched from DEPLOYING to RUNNING\n17:19:28,341
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(3/4)
switched to RUNNING \n17:19:28,459 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - DataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat)) (4/4) (ba45c37065b67fc8f5005a50d0e88fff)
switched from RUNNING to FINISHED\n17:19:28,463 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:28\tDataSource (at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73)
(org.apache.flink.api.java.io.ParallelIteratorInputFormat))(4/4) switched to FINISHED
\n17:19:28,520 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(4/4) (c928d19f73d700e80cdfad650689febb) switched from DEPLOYING to RUNNING\n17:19:28,529
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(4/4)
switched to RUNNING \n17:19:28,540 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (d0d011dc0a0823bcec5a57a369b334ed) switched from DEPLOYING to RUNNING\n17:19:28,545
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:28\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(2/4)
switched to RUNNING \n17:19:32,384 INFO org.apache.flink.runtime.instance.InstanceManager
\ - Registered TaskManager at testing-worker-linux-docker-e6d6931f-3200-linux-4
(akka.tcp://[email protected]:60852/user/taskmanager) as 5848d44035a164a0302da6c8701ff748.
Current number of registered hosts is 3. Current number of alive task slots is 6.\n17:19:32,598
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Reduce (Reduce
at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (d0f8f69f9047c3154b860850955de20f) switched from CREATED to SCHEDULED\n17:19:32,598
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Reduce (Reduce
at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (d0f8f69f9047c3154b860850955de20f) switched from SCHEDULED to DEPLOYING\n17:19:32,598
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying
Reduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (attempt #0) to testing-worker-linux-docker-e6d6931f-3200-linux-4\n17:19:32,605
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:32\tReduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/1)
switched to SCHEDULED \n17:19:32,605 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:32\tReduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/1)
switched to DEPLOYING \n17:19:32,611 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(4/4) (c928d19f73d700e80cdfad650689febb) switched from RUNNING to FINISHED\n17:19:32,614
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:32\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(4/4)
switched to FINISHED \n17:19:32,717 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/4) (6421c8f88b191ea844619a40a523773b) switched from RUNNING to FINISHED\n17:19:32,719
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:32\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/4)
switched to FINISHED \n17:19:32,724 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Reduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (d0f8f69f9047c3154b860850955de20f) switched from DEPLOYING to RUNNING\n17:19:32,726
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:32\tReduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/1)
switched to RUNNING \n17:19:32,843 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (d0d011dc0a0823bcec5a57a369b334ed) switched from RUNNING to FINISHED\n17:19:32,845
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:32\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(2/4)
switched to FINISHED \n17:19:33,092 WARN akka.remote.ReliableDeliverySupervisor
\ - Association with remote system [akka.tcp://[email protected]:43702]
has failed, address is now gated for [5000] ms. Reason is: [Disassociated].\n17:19:39,111
WARN Remoting - Tried to associate
with unreachable remote address [akka.tcp://[email protected]:43702]. Address is
now gated for 5000 ms, all messages to this address will be delivered to dead letters.
Reason: Connection refused: /172.17.0.253:43702\n17:19:39,113 INFO org.apache.flink.runtime.jobmanager.JobManager
\ - Task manager akka.tcp://[email protected]:43702/user/taskmanager
terminated.\n17:19:39,114 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(3/4) (7997918330ecf2610b3298a8c8ef2852) switched from RUNNING to FAILED\n17:19:39,120
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:39\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(3/4)
switched to FAILED \njava.lang.Exception: The slot in which the task was executed
has been released. Probably loss of TaskManager f213232054587f296a12140d56f63ed1
@ testing-worker-linux-docker-e6d6931f-3200-linux-4 - 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager\n\tat
org.apache.flink.runtime.instance.SimpleSlot.releaseSlot(SimpleSlot.java:151)\n\tat
org.apache.flink.runtime.instance.SlotSharingGroupAssignment.releaseSharedSlot(SlotSharingGroupAssignment.java:547)\n\tat
org.apache.flink.runtime.instance.SharedSlot.releaseSlot(SharedSlot.java:119)\n\tat
org.apache.flink.runtime.instance.Instance.markDead(Instance.java:156)\n\tat org.apache.flink.runtime.instance.InstanceManager.unregisterTaskManager(InstanceManager.java:215)\n\tat
org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1.applyOrElse(JobManager.scala:792)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LeaderSessionMessageFilter$$anonfun$receive$1.applyOrElse(LeaderSessionMessageFilter.scala:44)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:33)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:28)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.applyOrElse(LogMessages.scala:28)\n\tat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\n\tat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:100)\n\tat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\n\tat akka.actor.dungeon.DeathWatch$class.receivedTerminated(DeathWatch.scala:46)\n\tat
akka.actor.ActorCell.receivedTerminated(ActorCell.scala:369)\n\tat akka.actor.ActorCell.autoReceiveMessage(ActorCell.scala:501)\n\tat
akka.actor.ActorCell.invoke(ActorCell.scala:486)\n\tat akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\n\tat
akka.dispatch.Mailbox.run(Mailbox.scala:221)\n\tat akka.dispatch.Mailbox.exec(Mailbox.scala:231)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\n\n17:19:39,129
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Reduce (Reduce
at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (d0f8f69f9047c3154b860850955de20f) switched from RUNNING to CANCELING\n17:19:39,132
INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - DataSink (collect())
(1/1) (895e1ea552281a665ae390c966cdb3b7) switched from CREATED to CANCELED\n17:19:39,149
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:39\tJob execution switched to status FAILING.\njava.lang.Exception: The slot
in which the task was executed has been released. Probably loss of TaskManager f213232054587f296a12140d56f63ed1
@ testing-worker-linux-docker-e6d6931f-3200-linux-4 - 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager\n\tat
org.apache.flink.runtime.instance.SimpleSlot.releaseSlot(SimpleSlot.java:151)\n\tat
org.apache.flink.runtime.instance.SlotSharingGroupAssignment.releaseSharedSlot(SlotSharingGroupAssignment.java:547)\n\tat
org.apache.flink.runtime.instance.SharedSlot.releaseSlot(SharedSlot.java:119)\n\tat
org.apache.flink.runtime.instance.Instance.markDead(Instance.java:156)\n\tat org.apache.flink.runtime.instance.InstanceManager.unregisterTaskManager(InstanceManager.java:215)\n\tat
org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1.applyOrElse(JobManager.scala:792)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LeaderSessionMessageFilter$$anonfun$receive$1.applyOrElse(LeaderSessionMessageFilter.scala:44)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:33)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:28)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.applyOrElse(LogMessages.scala:28)\n\tat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\n\tat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:100)\n\tat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\n\tat akka.actor.dungeon.DeathWatch$class.receivedTerminated(DeathWatch.scala:46)\n\tat
akka.actor.ActorCell.receivedTerminated(ActorCell.scala:369)\n\tat akka.actor.ActorCell.autoReceiveMessage(ActorCell.scala:501)\n\tat
akka.actor.ActorCell.invoke(ActorCell.scala:486)\n\tat akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\n\tat
akka.dispatch.Mailbox.run(Mailbox.scala:221)\n\tat akka.dispatch.Mailbox.exec(Mailbox.scala:231)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\n17:19:39,173
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:39\tReduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/1)
switched to CANCELING \n17:19:39,173 INFO org.apache.flink.runtime.client.JobClientActor
\ - 01/18/2016 17:19:39\tDataSink (collect())(1/1) switched to CANCELED
\n17:19:39,174 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph -
Reduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(1/1) (d0f8f69f9047c3154b860850955de20f) switched from CANCELING to FAILED\n17:19:39,177
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:39\tReduce (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(1/1)
switched to FAILED \njava.lang.Exception: The slot in which the task was executed
has been released. Probably loss of TaskManager f213232054587f296a12140d56f63ed1
@ testing-worker-linux-docker-e6d6931f-3200-linux-4 - 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager\n\tat
org.apache.flink.runtime.instance.SimpleSlot.releaseSlot(SimpleSlot.java:151)\n\tat
org.apache.flink.runtime.instance.SlotSharingGroupAssignment.releaseSharedSlot(SlotSharingGroupAssignment.java:547)\n\tat
org.apache.flink.runtime.instance.SharedSlot.releaseSlot(SharedSlot.java:119)\n\tat
org.apache.flink.runtime.instance.Instance.markDead(Instance.java:156)\n\tat org.apache.flink.runtime.instance.InstanceManager.unregisterTaskManager(InstanceManager.java:215)\n\tat
org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1.applyOrElse(JobManager.scala:792)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LeaderSessionMessageFilter$$anonfun$receive$1.applyOrElse(LeaderSessionMessageFilter.scala:44)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:33)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:28)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.applyOrElse(LogMessages.scala:28)\n\tat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\n\tat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:100)\n\tat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\n\tat akka.actor.dungeon.DeathWatch$class.receivedTerminated(DeathWatch.scala:46)\n\tat
akka.actor.ActorCell.receivedTerminated(ActorCell.scala:369)\n\tat akka.actor.ActorCell.autoReceiveMessage(ActorCell.scala:501)\n\tat
akka.actor.ActorCell.invoke(ActorCell.scala:486)\n\tat akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\n\tat
akka.dispatch.Mailbox.run(Mailbox.scala:221)\n\tat akka.dispatch.Mailbox.exec(Mailbox.scala:231)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\n\n17:19:39,179
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:39\tJob execution switched to status RESTARTING.\n17:19:39,179 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - Delaying retry of job execution for 10000 ms ...\n17:19:39,179 INFO org.apache.flink.runtime.instance.InstanceManager
\ - Unregistered task manager akka.tcp://[email protected]:43702/user/taskmanager.
Number of registered task managers 2. Number of available slots 4.\n17:19:39,179
INFO org.apache.flink.runtime.jobmanager.JobManager - Status of
job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27 UTC
2016) changed to FAILING.\njava.lang.Exception: The slot in which the task was executed
has been released. Probably loss of TaskManager f213232054587f296a12140d56f63ed1
@ testing-worker-linux-docker-e6d6931f-3200-linux-4 - 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager\n\tat
org.apache.flink.runtime.instance.SimpleSlot.releaseSlot(SimpleSlot.java:151)\n\tat
org.apache.flink.runtime.instance.SlotSharingGroupAssignment.releaseSharedSlot(SlotSharingGroupAssignment.java:547)\n\tat
org.apache.flink.runtime.instance.SharedSlot.releaseSlot(SharedSlot.java:119)\n\tat
org.apache.flink.runtime.instance.Instance.markDead(Instance.java:156)\n\tat org.apache.flink.runtime.instance.InstanceManager.unregisterTaskManager(InstanceManager.java:215)\n\tat
org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1.applyOrElse(JobManager.scala:792)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LeaderSessionMessageFilter$$anonfun$receive$1.applyOrElse(LeaderSessionMessageFilter.scala:44)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)\n\tat
scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)\n\tat
org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:33)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.apply(LogMessages.scala:28)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat org.apache.flink.runtime.LogMessages$$anon$1.applyOrElse(LogMessages.scala:28)\n\tat
akka.actor.Actor$class.aroundReceive(Actor.scala:465)\n\tat org.apache.flink.runtime.jobmanager.JobManager.aroundReceive(JobManager.scala:100)\n\tat
akka.actor.ActorCell.receiveMessage(ActorCell.scala:516)\n\tat akka.actor.dungeon.DeathWatch$class.receivedTerminated(DeathWatch.scala:46)\n\tat
akka.actor.ActorCell.receivedTerminated(ActorCell.scala:369)\n\tat akka.actor.ActorCell.autoReceiveMessage(ActorCell.scala:501)\n\tat
akka.actor.ActorCell.invoke(ActorCell.scala:486)\n\tat akka.dispatch.Mailbox.processMailbox(Mailbox.scala:254)\n\tat
akka.dispatch.Mailbox.run(Mailbox.scala:221)\n\tat akka.dispatch.Mailbox.exec(Mailbox.scala:231)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\n17:19:39,180
INFO org.apache.flink.runtime.jobmanager.JobManager - Status of
job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27 UTC
2016) changed to RESTARTING.\n17:19:42,766 INFO org.apache.flink.runtime.executiongraph.ExecutionGraph
\ - CHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
(2/4) (d0d011dc0a0823bcec5a57a369b334ed) switched from FINISHED to FAILED\n17:19:42,773
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:42\tCHAIN Partition -> Map (Map at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))
-> Combine (Reduce at testTaskManagerFailure(TaskManagerProcessFailureBatchRecoveryITCase.java:73))(2/4)
switched to FAILED \njava.lang.IllegalStateException: Update task on instance f213232054587f296a12140d56f63ed1
@ testing-worker-linux-docker-e6d6931f-3200-linux-4 - 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager
failed due to:\n\tat org.apache.flink.runtime.executiongraph.Execution$5.onFailure(Execution.java:915)\n\tat
akka.dispatch.OnFailure.internal(Future.scala:228)\n\tat akka.dispatch.OnFailure.internal(Future.scala:227)\n\tat
akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)\n\tat akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat scala.runtime.AbstractPartialFunction.applyOrElse(AbstractPartialFunction.scala:25)\n\tat
scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:136)\n\tat scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:134)\n\tat
scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)\n\tat scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: akka.pattern.AskTimeoutException: Ask timed out on [Actor[akka.tcp://[email protected]:43702/user/taskmanager#-1712955384]]
after [10000 ms]\n\tat akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:333)\n\tat
akka.actor.Scheduler$$anon$7.run(Scheduler.scala:117)\n\tat scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)\n\tat
scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:691)\n\tat
akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(Scheduler.scala:467)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.executeBucket$1(Scheduler.scala:419)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.nextTick(Scheduler.scala:423)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.run(Scheduler.scala:375)\n\tat java.lang.Thread.run(Thread.java:745)\n\n17:19:42,774
INFO org.apache.flink.runtime.jobmanager.JobManager - Status of
job fa05fd25993a8742da09cc5023c1e38d (Flink Java Job at Mon Jan 18 17:19:27 UTC
2016) changed to FAILING.\njava.lang.IllegalStateException: Update task on instance
f213232054587f296a12140d56f63ed1 @ testing-worker-linux-docker-e6d6931f-3200-linux-4
- 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager failed due
to:\n\tat org.apache.flink.runtime.executiongraph.Execution$5.onFailure(Execution.java:915)\n\tat
akka.dispatch.OnFailure.internal(Future.scala:228)\n\tat akka.dispatch.OnFailure.internal(Future.scala:227)\n\tat
akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)\n\tat akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat scala.runtime.AbstractPartialFunction.applyOrElse(AbstractPartialFunction.scala:25)\n\tat
scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:136)\n\tat scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:134)\n\tat
scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)\n\tat scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: akka.pattern.AskTimeoutException: Ask timed out on [Actor[akka.tcp://[email protected]:43702/user/taskmanager#-1712955384]]
after [10000 ms]\n\tat akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:333)\n\tat
akka.actor.Scheduler$$anon$7.run(Scheduler.scala:117)\n\tat scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)\n\tat
scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:691)\n\tat
akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(Scheduler.scala:467)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.executeBucket$1(Scheduler.scala:419)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.nextTick(Scheduler.scala:423)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.run(Scheduler.scala:375)\n\tat java.lang.Thread.run(Thread.java:745)\n17:19:42,780
INFO org.apache.flink.runtime.client.JobClientActor - 01/18/2016
17:19:42\tJob execution switched to status FAILING.\njava.lang.IllegalStateException:
Update task on instance f213232054587f296a12140d56f63ed1 @ testing-worker-linux-docker-e6d6931f-3200-linux-4
- 2 slots - URL: akka.tcp://[email protected]:43702/user/taskmanager failed due
to:\n\tat org.apache.flink.runtime.executiongraph.Execution$5.onFailure(Execution.java:915)\n\tat
akka.dispatch.OnFailure.internal(Future.scala:228)\n\tat akka.dispatch.OnFailure.internal(Future.scala:227)\n\tat
akka.dispatch.japi$CallbackBridge.apply(Future.scala:174)\n\tat akka.dispatch.japi$CallbackBridge.apply(Future.scala:171)\n\tat
scala.PartialFunction$class.applyOrElse(PartialFunction.scala:118)\n\tat scala.runtime.AbstractPartialFunction.applyOrElse(AbstractPartialFunction.scala:25)\n\tat
scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:136)\n\tat scala.concurrent.Future$$anonfun$onFailure$1.apply(Future.scala:134)\n\tat
scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)\n\tat scala.concurrent.impl.ExecutionContextImpl$$anon$3.exec(ExecutionContextImpl.scala:107)\n\tat
scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)\n\tat scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)\n\tat
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)\n\tat scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)\nCaused
by: akka.pattern.AskTimeoutException: Ask timed out on [Actor[akka.tcp://[email protected]:43702/user/taskmanager#-1712955384]]
after [10000 ms]\n\tat akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:333)\n\tat
akka.actor.Scheduler$$anon$7.run(Scheduler.scala:117)\n\tat scala.concurrent.Future$InternalCallbackExecutor$.scala$concurrent$Future$InternalCallbackExecutor$$unbatchedExecute(Future.scala:694)\n\tat
scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:691)\n\tat
akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(Scheduler.scala:467)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.executeBucket$1(Scheduler.scala:419)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.nextTick(Scheduler.scala:423)\n\tat
akka.actor.LightArrayRevolverScheduler$$anon$8.run(Scheduler.scala:375)\n\tat java.lang.Thread.run(Thread.java:745)\n17:19:49,152
WARN Remoting - Tried to associate
with unreachable remote address [akka.tcp://[email protected]:43702]. Address is
now gated for 5000 ms, all messages to this address will be delivered to dead letters.
Reason: Connection refused: /172.17.0.253:43702\n17:19:59,172 WARN Remoting -
Tried to associate with unreachable remote address [akka.tcp://[email protected]:43702].
Address is now gated for 5000 ms, all messages to this address will be delivered
to dead letters. Reason: Connection refused: /172.17.0.253:43702\n17:20:09,191 WARN
\ Remoting - Tried to associate
with unreachable remote address [akka.tcp://[email protected]:43702]. Address is
now gated for 5000 ms, all messages to this address will be delivered to dead letters.
Reason: Connection refused: /172.17.0.253:43702\n17:24:32,423 INFO org.apache.flink.runtime.jobmanager.JobManager
\ - Stopping JobManager akka.tcp://[email protected]:56722/user/jobmanager.\n17:24:32,440
ERROR org.apache.flink.test.recovery.TaskManagerProcessFailureBatchRecoveryITCase
\ - \n--------------------------------------------------------------------------------\nTest
testTaskManagerProcessFailure[0](org.apache.flink.test.recovery.TaskManagerProcessFailureBatchRecoveryITCase)
failed with:\njava.lang.AssertionError: The program did not finish in time\n\tat
org.junit.Assert.fail(Assert.java:88)\n\tat org.junit.Assert.assertTrue(Assert.java:41)\n\tat
org.junit.Assert.assertFalse(Assert.java:64)\n\tat org.apache.flink.test.recovery.AbstractTaskManagerProcessFailureRecoveryTest.testTaskManagerProcessFailure(AbstractTaskManagerProcessFailureRecoveryTest.java:212)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat
java.lang.reflect.Method.invoke(Method.java:606)\n\tat org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)\n\tat
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)\n\tat
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)\n\tat
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)\n\tat
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)\n\tat org.junit.rules.RunRules.evaluate(RunRules.java:20)\n\tat
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:271)\n\tat org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:70)\n\tat
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)\n\tat
org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)\n\tat org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)\n\tat
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)\n\tat org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)\n\tat
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)\n\tat org.junit.runners.ParentRunner.run(ParentRunner.java:309)\n\tat
org.junit.runners.Suite.runChild(Suite.java:127)\n\tat org.junit.runners.Suite.runChild(Suite.java:26)\n\tat
org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)\n\tat org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)\n\tat
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)\n\tat org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)\n\tat
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)\n\tat org.junit.runners.ParentRunner.run(ParentRunner.java:309)\n\tat
org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:283)\n\tat
org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:173)\n\tat
org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)\n\tat
org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:128)\n{code}"
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
index eb2e68c..db037bb 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/Execution.java
@@ -435,7 +435,7 @@ public class Execution implements Serializable {
return;
}
else if (current == CREATED || current == SCHEDULED) {
- // from here, we can directly switch to cancelled, because the no task has been deployed
+ // from here, we can directly switch to cancelled, because no task has been deployed
if (transitionState(current, CANCELED)) {
// we skip the canceling state. set the timestamp, for a consistent appearance
@@ -754,11 +754,10 @@ public class Execution implements Serializable {
return false;
}
- if (current == CANCELED) {
- // we are already aborting or are already aborted
+ if (current == CANCELED || current == FINISHED) {
+ // we are already aborting or are already aborted or we are already finished
if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Ignoring transition of vertex %s to %s while being %s",
- getVertexWithAttempt(), FAILED, CANCELED));
+ LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
@@ -928,6 +927,11 @@ public class Execution implements Serializable {
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) {
+ // sanity check
+ if (currentState.isTerminal()) {
+ throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + ".");
+ }
+
if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
markTimestamp(targetState);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3260_6968a57a.diff |
bugs-dot-jar_data_FLINK-2968_59685903 | ---
BugID: FLINK-2968
Summary: Windowed fold operation fails because the initial value was not serialized
Description: "The windowed fold operation currently fails because the initial value
was not serialized. The reason for this is that the fold operation is realized as
a {{WindowFunction}} within an {{AbstractUdfStreamOperator}} and does not get the
output type information forwarded (which is necessary for the serialization). \n\nThe
solution is to let the {{AbstractUdfStreamOperator}} forward the output type information
to the {{WindowFunction}} if it implements the {{OutputTypeConfigurable}} interface."
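A minimal sketch of the kind of program that runs into this, assuming the DataStream API of the Flink version current at the time (class name and data are illustrative): the fold's initial value, here the empty string, is serialized with the operator's output type serializer, which the user function only receives once the operator forwards the output type information.
{code}
// Sketch under the assumptions stated above; not taken from the Flink test suite.
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.functions.FoldFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;

public class WindowedFoldExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Integer>> input =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3));

        input.keyBy(0)
             .timeWindow(Time.of(5, TimeUnit.SECONDS))
             // the initial value "" must be serialized, which is what triggers the issue
             .fold("", new FoldFunction<Tuple2<String, Integer>, String>() {
                 @Override
                 public String fold(String accumulator, Tuple2<String, Integer> value) {
                     return accumulator + value.f1;
                 }
             })
             .print();

        env.execute("windowed fold example");
    }
}
{code}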
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperator.java
index 17bd08d..32be2ba 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/operators/AbstractUdfStreamOperator.java
@@ -20,8 +20,10 @@ package org.apache.flink.streaming.api.operators;
import java.io.Serializable;
+import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.functions.Function;
import org.apache.flink.api.common.functions.util.FunctionUtils;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.StateHandle;
import org.apache.flink.streaming.api.checkpoint.CheckpointNotifier;
@@ -44,7 +46,7 @@ import static java.util.Objects.requireNonNull;
* @param <F>
* The type of the user function
*/
-public abstract class AbstractUdfStreamOperator<OUT, F extends Function> extends AbstractStreamOperator<OUT> {
+public abstract class AbstractUdfStreamOperator<OUT, F extends Function> extends AbstractStreamOperator<OUT> implements OutputTypeConfigurable<OUT> {
private static final long serialVersionUID = 1L;
@@ -176,6 +178,20 @@ public abstract class AbstractUdfStreamOperator<OUT, F extends Function> extends
}
// ------------------------------------------------------------------------
+ // Output type configuration
+ // ------------------------------------------------------------------------
+
+ @Override
+ public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
+ if (userFunction instanceof OutputTypeConfigurable) {
+ OutputTypeConfigurable<OUT> outputTypeConfigurable = (OutputTypeConfigurable<OUT>) userFunction;
+
+ outputTypeConfigurable.setOutputType(outTypeInfo, executionConfig);
+ }
+ }
+
+
+ // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/NonKeyedWindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/NonKeyedWindowOperator.java
index d12a930..cf90cf2 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/NonKeyedWindowOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/NonKeyedWindowOperator.java
@@ -31,7 +31,6 @@ import org.apache.flink.streaming.api.functions.windowing.AllWindowFunction;
import org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
-import org.apache.flink.streaming.api.operators.OutputTypeConfigurable;
import org.apache.flink.streaming.api.operators.TimestampedCollector;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner;
@@ -70,7 +69,7 @@ import static java.util.Objects.requireNonNull;
*/
public class NonKeyedWindowOperator<IN, OUT, W extends Window>
extends AbstractUdfStreamOperator<OUT, AllWindowFunction<IN, OUT, W>>
- implements OneInputStreamOperator<IN, OUT>, Triggerable, InputTypeConfigurable, OutputTypeConfigurable<OUT> {
+ implements OneInputStreamOperator<IN, OUT>, Triggerable, InputTypeConfigurable {
private static final long serialVersionUID = 1L;
@@ -510,15 +509,6 @@ public class NonKeyedWindowOperator<IN, OUT, W extends Window>
return this;
}
- @Override
- public final void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
- if (userFunction instanceof OutputTypeConfigurable) {
- @SuppressWarnings("unchecked")
- OutputTypeConfigurable<OUT> typeConfigurable = (OutputTypeConfigurable<OUT>) userFunction;
- typeConfigurable.setOutputType(outTypeInfo, executionConfig);
- }
- }
-
// ------------------------------------------------------------------------
// Checkpointing
// ------------------------------------------------------------------------
diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
index c39679f..6764186 100644
--- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
+++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/runtime/operators/windowing/WindowOperator.java
@@ -32,7 +32,6 @@ import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
-import org.apache.flink.streaming.api.operators.OutputTypeConfigurable;
import org.apache.flink.streaming.api.operators.TimestampedCollector;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner;
@@ -88,7 +87,7 @@ import static java.util.Objects.requireNonNull;
*/
public class WindowOperator<K, IN, OUT, W extends Window>
extends AbstractUdfStreamOperator<OUT, WindowFunction<IN, OUT, K, W>>
- implements OneInputStreamOperator<IN, OUT>, Triggerable, InputTypeConfigurable, OutputTypeConfigurable<OUT> {
+ implements OneInputStreamOperator<IN, OUT>, Triggerable, InputTypeConfigurable {
private static final long serialVersionUID = 1L;
@@ -579,15 +578,6 @@ public class WindowOperator<K, IN, OUT, W extends Window>
return this;
}
- @Override
- public final void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
- if (userFunction instanceof OutputTypeConfigurable) {
- @SuppressWarnings("unchecked")
- OutputTypeConfigurable<OUT> typeConfigurable = (OutputTypeConfigurable<OUT>) userFunction;
- typeConfigurable.setOutputType(outTypeInfo, executionConfig);
- }
- }
-
// ------------------------------------------------------------------------
// Checkpointing
// ------------------------------------------------------------------------
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-2968_59685903.diff |
bugs-dot-jar_data_FLINK-3107_937963e3 | ---
BugID: FLINK-3107
Summary: ZooKeeperCheckpointIDCounter.start() can block JobManager actor
Description: |-
In HA mode, the job manager enables checkpoints during submission of streaming programs.
This leads to a call to ZooKeeperCheckpointIDCounter.start(), which communicates with ZooKeeper and can therefore block the job manager actor.
A solution is to start the counter later, instead of in the CheckpointCoordinator constructor.
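One way to realize "start the counter later" is sketched below; the names are purely illustrative and are not Flink's actual classes. The constructor stays free of remote calls, and the blocking start() only happens once an ID is actually requested, outside the actor's construction path.
{code}
// Illustrative sketch only; "IdCounter" and "LazyStartingCoordinator" are hypothetical
// names. The point is that the blocking start() call moves out of the constructor.
interface IdCounter {
    void start() throws Exception;        // may talk to ZooKeeper and block
    long getAndIncrement() throws Exception;
}

class LazyStartingCoordinator {
    private final IdCounter counter;
    private boolean counterStarted;

    LazyStartingCoordinator(IdCounter counter) {
        this.counter = counter;           // no remote communication here
    }

    synchronized long nextCheckpointId() throws Exception {
        if (!counterStarted) {
            counter.start();              // deferred until actually needed
            counterStarted = true;
        }
        return counter.getAndIncrement();
    }
}
{code}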
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SavepointCoordinator.java b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SavepointCoordinator.java
index 6ce6502..ea4b8ae 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SavepointCoordinator.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/SavepointCoordinator.java
@@ -230,6 +230,7 @@ public class SavepointCoordinator extends CheckpointCoordinator {
// Reset the checkpoint ID counter
long nextCheckpointId = checkpoint.getCheckpointID();
+ checkpointIdCounter.start();
checkpointIdCounter.setCount(nextCheckpointId + 1);
LOG.info("Reset the checkpoint ID to {}", nextCheckpointId);
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-3107_937963e3.diff |
bugs-dot-jar_data_FLINK-1705_5308ac83 | ---
BugID: FLINK-1705
Summary: InstanceConnectionInfo returns wrong hostname when no DNS entry exists
Description: 'If there is no DNS entry for an address (like 10.4.122.43), then the
{{InstanceConnectionInfo}} returns the first octet ({{10}}) as the hostname.
'
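To make the failure mode concrete, here is a small, self-contained illustration; the helper name is hypothetical and not Flink code. When name resolution fails, the canonical host name falls back to the textual IP address, and deriving a short hostname by cutting at the first dot turns 10.4.122.43 into 10. The patch below therefore keeps the full IP string as the hostname whenever the reported FQDN is just the IP address.
{code}
// Hypothetical helper, for illustration only.
public class HostnameDerivation {

    // Mimics deriving a short hostname from an FQDN by cutting at the first dot.
    static String hostnameFromFqdn(String fqdn) {
        int dot = fqdn.indexOf('.');
        return dot > 0 ? fqdn.substring(0, dot) : fqdn;
    }

    public static void main(String[] args) {
        System.out.println(hostnameFromFqdn("worker3.subgroup.company.net")); // worker3
        System.out.println(hostnameFromFqdn("10.4.122.43"));                  // 10, the reported bug
    }
}
{code}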
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/InstanceConnectionInfo.java b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/InstanceConnectionInfo.java
index a1eec4d..ee79c23 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/instance/InstanceConnectionInfo.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/instance/InstanceConnectionInfo.java
@@ -31,7 +31,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * This class encapsulates all connection information necessary to connect to the instance's task manager.
+ * This class encapsulates the connection information of a TaskManager.
+ * It describes the host where the TaskManager operates and its server port
+ * for data exchange. This class also contains utilities to work with the
+ * TaskManager's host name, which is used to localize work assignments.
*/
public class InstanceConnectionInfo implements IOReadableWritable, Comparable<InstanceConnectionInfo>, java.io.Serializable {
@@ -56,15 +59,9 @@ public class InstanceConnectionInfo implements IOReadableWritable, Comparable<In
private String fqdnHostName;
/**
- * The hostname
+ * The hostname, derived from the fully qualified host name.
*/
private String hostName;
-
- /**
- * This flag indicates if the FQDN hostname cound not be resolved and is represented
- * as an IP address (string).
- */
- private boolean fqdnHostNameIsIP = false;
/**
@@ -90,14 +87,24 @@ public class InstanceConnectionInfo implements IOReadableWritable, Comparable<In
// get FQDN hostname on this TaskManager.
try {
this.fqdnHostName = this.inetAddress.getCanonicalHostName();
- } catch (Throwable t) {
- LOG.warn("Unable to determine hostname for TaskManager. The performance might be degraded since HDFS input split assignment is not possible");
- if(LOG.isDebugEnabled()) {
- LOG.debug("getCanonicalHostName() Exception", t);
- }
- // could not determine host name, so take IP textual representation
- this.fqdnHostName = inetAddress.getHostAddress();
- this.fqdnHostNameIsIP = true;
+ }
+ catch (Throwable t) {
+ LOG.warn("Unable to determine the canonical hostname. Input split assignment (such as " +
+ "for HDFS files) may be non-local when the canonical hostname is missing.");
+ LOG.debug("getCanonicalHostName() Exception:", t);
+ this.fqdnHostName = this.inetAddress.getHostAddress();
+ }
+
+ if (this.fqdnHostName.equals(this.inetAddress.getHostAddress())) {
+ // this happens when the name lookup fails, either due to an exception,
+ // or because no hostname can be found for the address
+ // take IP textual representation
+ this.hostName = this.fqdnHostName;
+ LOG.warn("No hostname could be resolved for the IP address {}, using IP address as host name. "
+ + "Local input split assignment (such as for HDFS files) may be impacted.");
+ }
+ else {
+ this.hostName = NetUtils.getHostnameFromFQDN(this.fqdnHostName);
}
}
@@ -126,27 +133,37 @@ public class InstanceConnectionInfo implements IOReadableWritable, Comparable<In
}
/**
- * Returns the host name of the instance. If the host name could not be determined, the return value will be a
- * textual representation of the instance's IP address.
+ * Returns the fully-qualified domain name the TaskManager. If the name could not be
+ * determined, the return value will be a textual representation of the TaskManager's IP address.
*
- * @return the host name of the instance
+ * @return The fully-qualified domain name of the TaskManager.
*/
public String getFQDNHostname() {
return this.fqdnHostName;
}
-
+
+ /**
+ * Gets the hostname of the TaskManager. The hostname derives from the fully qualified
+ * domain name (FQDN, see {@link #getFQDNHostname()}):
+ * <ul>
+ * <li>If the FQDN is the textual IP address, then the hostname is also the IP address</li>
+ * <li>If the FQDN has only one segment (such as "localhost", or "host17"), then this is
+ * used as the hostname.</li>
+ * <li>If the FQDN has multiple segments (such as "worker3.subgroup.company.net"), then the first
+ * segment (here "worker3") will be used as the hostname.</li>
+ * </ul>
+ *
+ * @return The hostname of the TaskManager.
+ */
public String getHostname() {
- if(hostName == null) {
- String fqdn = getFQDNHostname();
- if(this.fqdnHostNameIsIP) { // fqdn to hostname translation is pointless if FQDN is an ip address.
- hostName = fqdn;
- } else {
- hostName = NetUtils.getHostnameFromFQDN(fqdn);
- }
- }
return hostName;
}
+ /**
+ * Gets the IP address where the TaskManager operates.
+ *
+ * @return The IP address.
+ */
public String getInetAdress() {
return this.inetAddress.toString();
}
@@ -166,7 +183,6 @@ public class InstanceConnectionInfo implements IOReadableWritable, Comparable<In
this.fqdnHostName = StringUtils.readNullableString(in);
this.hostName = StringUtils.readNullableString(in);
- this.fqdnHostNameIsIP = in.readBoolean();
try {
this.inetAddress = InetAddress.getByAddress(address);
@@ -185,7 +201,6 @@ public class InstanceConnectionInfo implements IOReadableWritable, Comparable<In
StringUtils.writeNullableString(fqdnHostName, out);
StringUtils.writeNullableString(hostName, out);
- out.writeBoolean(fqdnHostNameIsIP);
}
// --------------------------------------------------------------------------------------------
| bugs-dot-jar/flink_extracted_diff/developer-patch_bugs-dot-jar_FLINK-1705_5308ac83.diff |
bugs-dot-jar_data_OAK-3079_33c18762 | ---
BugID: OAK-3079
Summary: LastRevRecoveryAgent can update _lastRev of children but not the root
Description: |-
As mentioned in [OAK-2131|https://issues.apache.org/jira/browse/OAK-2131?focusedCommentId=14616391&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-14616391] there can be a situation wherein the LastRevRecoveryAgent updates some nodes in the tree but not the root. This seems to happen due to OAK-2131's change in Commit.applyToCache (where the paths to update are collected via tracker.track): in that code, paths which are non-root and for which no content has changed (and mind you, a content change includes adding _deleted, which happens by default for nodes with children) are not 'tracked', i.e. for those the _lastRev is not updated by subsequent backgroundUpdate operations, leaving them 'old/out-of-date'. This seems correct as per the description/intention of OAK-2131, where the last revision can be determined via the commitRoot of the parent. But it has the effect that the LastRevRecoveryAgent then finds those intermediate nodes to be updated, whereas the root has already been updated (which is at first glance non-intuitive).
I'll attach a test case to reproduce this.
Perhaps this is a bug, perhaps it's ok. [~mreutegg] wdyt?
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
index fee60dc..dc3074f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
@@ -19,12 +19,10 @@
package org.apache.jackrabbit.oak.plugins.document;
-import static com.google.common.collect.ImmutableList.of;
-import static com.google.common.collect.Iterables.filter;
-import static com.google.common.collect.Iterables.mergeSorted;
+import static com.google.common.collect.Maps.filterKeys;
import static java.util.Collections.singletonList;
import static org.apache.jackrabbit.oak.plugins.document.Collection.JOURNAL;
-import static org.apache.jackrabbit.oak.plugins.document.UnsavedModifications.Snapshot.IGNORE;
+import static org.apache.jackrabbit.oak.plugins.document.util.Utils.PROPERTY_OR_DELETED;
import java.util.Iterator;
import java.util.List;
@@ -35,6 +33,7 @@ import javax.annotation.CheckForNull;
import com.google.common.base.Predicate;
import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import org.apache.jackrabbit.oak.commons.PathUtils;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
@@ -152,13 +151,17 @@ public class LastRevRecoveryAgent {
}
Revision currentLastRev = doc.getLastRev().get(clusterId);
- if (currentLastRev != null) {
- knownLastRevs.put(doc.getPath(), currentLastRev);
- }
+
// 1. determine last committed modification on document
Revision lastModifiedRev = determineLastModification(doc, clusterId);
Revision lastRevForParents = Utils.max(lastModifiedRev, currentLastRev);
+ // remember the higher of the two revisions. this is the
+ // most recent revision currently obtained from either a
+ // _lastRev entry or an explicit modification on the document
+ if (lastRevForParents != null) {
+ knownLastRevs.put(doc.getPath(), lastRevForParents);
+ }
//If both currentLastRev and lostLastRev are null it means
//that no change is done by suspect cluster on this document
@@ -306,21 +309,17 @@ public class LastRevRecoveryAgent {
private Revision determineLastModification(NodeDocument doc, int clusterId) {
ClusterPredicate cp = new ClusterPredicate(clusterId);
- // Merge sort the revs for which changes have been made
- // to this doc
-
- // localMap always keeps the most recent valid commit entry
- // per cluster node so looking into that should be sufficient
- Iterable<Revision> revs = mergeSorted(of(
- filter(doc.getLocalCommitRoot().keySet(), cp),
- filter(doc.getLocalRevisions().keySet(), cp)),
- StableRevisionComparator.REVERSE
- );
-
Revision lastModified = null;
- // Look for latest valid revision
- for (Revision rev : revs) {
- lastModified = Utils.max(lastModified, doc.getCommitRevision(rev));
+ for (String property : Sets.filter(doc.keySet(), PROPERTY_OR_DELETED)) {
+ Map<Revision, String> valueMap = doc.getLocalMap(property);
+ // collect committed changes of this cluster node
+ for (Map.Entry<Revision, String> entry : filterKeys(valueMap, cp).entrySet()) {
+ Revision rev = entry.getKey();
+ if (doc.isCommitted(rev)) {
+ lastModified = Utils.max(lastModified, doc.getCommitRevision(rev));
+ break;
+ }
+ }
}
return lastModified;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3079_33c18762.diff |
bugs-dot-jar_data_OAK-225_e33328e0 | ---
BugID: OAK-225
Summary: Sling I18N queries not supported by Oak
Description: |-
The Sling I18N component issues XPath queries like the following:
{code:none}
//element(*,mix:language)[fn:lower-case(@jcr:language)='en']//element(*,sling:Message)[@sling:message]/(@sling:key|@sling:message)
{code}
Such queries currently fail with the following exception:
{code:none}
javax.jcr.query.InvalidQueryException: java.text.ParseException: Query: //element(*,mix:language)[fn:lower-(*)case(@jcr:language)='en']//element(*,sling:Message)[@sling:message]/(@sling:key|@sling:message); expected: (
at org.apache.jackrabbit.oak.jcr.query.QueryManagerImpl.executeQuery(QueryManagerImpl.java:115)
at org.apache.jackrabbit.oak.jcr.query.QueryImpl.execute(QueryImpl.java:85)
at org.apache.sling.jcr.resource.JcrResourceUtil.query(JcrResourceUtil.java:52)
at org.apache.sling.jcr.resource.internal.helper.jcr.JcrResourceProvider.queryResources(JcrResourceProvider.java:262)
... 54 more
Caused by: java.text.ParseException: Query: //element(*,mix:language)[fn:lower-(*)case(@jcr:language)='en']//element(*,sling:Message)[@sling:message]/(@sling:key|@sling:message); expected: (
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.getSyntaxError(XPathToSQL2Converter.java:704)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.read(XPathToSQL2Converter.java:410)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.parseExpression(XPathToSQL2Converter.java:336)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.parseCondition(XPathToSQL2Converter.java:279)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.parseAnd(XPathToSQL2Converter.java:252)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.parseConstraint(XPathToSQL2Converter.java:244)
at org.apache.jackrabbit.oak.query.XPathToSQL2Converter.convert(XPathToSQL2Converter.java:153)
at org.apache.jackrabbit.oak.query.QueryEngineImpl.parseQuery(QueryEngineImpl.java:86)
at org.apache.jackrabbit.oak.query.QueryEngineImpl.executeQuery(QueryEngineImpl.java:99)
at org.apache.jackrabbit.oak.query.QueryEngineImpl.executeQuery(QueryEngineImpl.java:39)
at org.apache.jackrabbit.oak.jcr.query.QueryManagerImpl.executeQuery(QueryManagerImpl.java:110)
{code}
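A hedged sketch of the mapping the patch below adds to the XPath-to-SQL-2 converter; the helper name and signature here are illustrative, not the converter's real API: fn:lower-case(@p) becomes LOWER(...) and fn:upper-case(@p) becomes UPPER(...).
{code}
final class XPathFunctionMapping {
    private XPathFunctionMapping() {}

    static String convert(String xpathFunction, String convertedArgument) {
        if ("fn:lower-case".equals(xpathFunction)) {
            return "LOWER(" + convertedArgument + ")";   // e.g. LOWER([jcr:language])
        }
        if ("fn:upper-case".equals(xpathFunction)) {
            return "UPPER(" + convertedArgument + ")";
        }
        throw new IllegalArgumentException("unsupported function: " + xpathFunction);
    }
}
{code}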
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/XPathToSQL2Converter.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/XPathToSQL2Converter.java
index 6a5f5a9..8103898 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/XPathToSQL2Converter.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/XPathToSQL2Converter.java
@@ -381,10 +381,20 @@ public class XPathToSQL2Converter {
Cast c = new Cast(expr, "date");
read(")");
return c;
+ } else if ("fn:lower-case".equals(functionName)) {
+ Function f = new Function("lower");
+ f.params.add(parseExpression());
+ read(")");
+ return f;
+ } else if ("fn:upper-case".equals(functionName)) {
+ Function f = new Function("upper");
+ f.params.add(parseExpression());
+ read(")");
+ return f;
// } else if ("jcr:deref".equals(functionName)) {
// TODO support jcr:deref?
} else {
- throw getSyntaxError("jcr:like | jcr:contains | jcr:score | jcr:deref");
+ throw getSyntaxError("jcr:like | jcr:contains | jcr:score | jcr:deref | fn:lower-case | fn:upper-case");
}
}
@@ -537,7 +547,9 @@ public class XPathToSQL2Converter {
case CHAR_NAME:
while (true) {
type = types[i];
- if (type != CHAR_NAME && type != CHAR_VALUE) {
+ // the '-' can be part of a name,
+ // for example in "fn:lower-case"
+ if (type != CHAR_NAME && type != CHAR_VALUE && chars[i] != '-') {
c = chars[i];
break;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-225_e33328e0.diff |
bugs-dot-jar_data_OAK-276_1bf5c550 | ---
BugID: OAK-276
Summary: potential clash of commit id's after restart
Description: "the commit id's in the current implementation are counter-based, i.e.
every commit (on HEAD or on a branch) gets its id by incrementing counter.\n\nonly
the current HEAD id is recorded/persisted. on startup the counter is initialized
with the current HEAD id. \n\nassume the following sequence:\n\n- ...startup...\n-
counter == HEAD == 99\n- commit on HEAD -> new HEAD rev: ++counter == 100\n- create
branch -> new branch rev: ++counter == 101\n- ...restart...\n- counter == HEAD ==
100\n- commit on HEAD -> new HEAD rev: ++counter == 101 => clashes with older branch
rev! \n\nsince a commit is never overwritten the above scenario results in a private
branch revision marked as HEAD, i.e. the revision history is corrupted."
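A minimal sketch of the fix idea for the sequence above, with hypothetical types (not Oak's persistence API): after a restart, the counter is seeded from the highest id ever written rather than only the persisted HEAD, so branch revisions created before the restart can never be reused.
{code}
import java.util.concurrent.atomic.AtomicLong;

class RevisionCounterSketch {
    private final AtomicLong counter = new AtomicLong();

    void initialize(long persistedHeadId, long highestStoredCommitId) {
        // take the maximum so ids handed out before the restart stay unique
        counter.set(Math.max(persistedHeadId, highestStoredCommitId));
    }

    long nextRevisionId() {
        return counter.incrementAndGet();
    }
}
{code}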
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/H2Persistence.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/H2Persistence.java
index d6cb3b5..76030db 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/H2Persistence.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/H2Persistence.java
@@ -70,6 +70,7 @@ public class H2Persistence implements GCPersistence {
try {
Statement stmt = con.createStatement();
stmt.execute("create table if not exists REVS(ID binary primary key, DATA binary, TIME timestamp)");
+ stmt.execute("create table if not exists NODES(ID binary primary key, DATA binary, TIME timestamp)");
stmt.execute("create table if not exists HEAD(ID binary) as select null");
stmt.execute("create sequence if not exists DATASTORE_ID");
/*
@@ -86,7 +87,16 @@ public class H2Persistence implements GCPersistence {
cp.dispose();
}
- public Id readHead() throws Exception {
+ public Id[] readIds() throws Exception {
+ Id lastCommitId = null;
+ Id headId = readHead();
+ if (headId != null) {
+ lastCommitId = readLastCommitId();
+ }
+ return new Id[] { headId, lastCommitId };
+ }
+
+ private Id readHead() throws Exception {
Connection con = cp.getConnection();
try {
PreparedStatement stmt = con.prepareStatement("select * from HEAD");
@@ -102,6 +112,22 @@ public class H2Persistence implements GCPersistence {
}
}
+ private Id readLastCommitId() throws Exception {
+ Connection con = cp.getConnection();
+ try {
+ PreparedStatement stmt = con.prepareStatement("select MAX(ID) from REVS");
+ ResultSet rs = stmt.executeQuery();
+ byte[] rawId = null;
+ if (rs.next()) {
+ rawId = rs.getBytes(1);
+ }
+ stmt.close();
+ return rawId == null ? null : new Id(rawId);
+ } finally {
+ con.close();
+ }
+ }
+
public void writeHead(Id id) throws Exception {
Connection con = cp.getConnection();
try {
@@ -118,7 +144,7 @@ public class H2Persistence implements GCPersistence {
Id id = node.getId();
Connection con = cp.getConnection();
try {
- PreparedStatement stmt = con.prepareStatement("select DATA from REVS where ID = ?");
+ PreparedStatement stmt = con.prepareStatement("select DATA from NODES where ID = ?");
try {
stmt.setBytes(1, id.getBytes());
ResultSet rs = stmt.executeQuery();
@@ -148,7 +174,7 @@ public class H2Persistence implements GCPersistence {
try {
PreparedStatement stmt = con
.prepareStatement(
- "insert into REVS (ID, DATA, TIME) select ?, ?, ? where not exists (select 1 from REVS where ID = ?)");
+ "insert into NODES (ID, DATA, TIME) select ?, ?, ? where not exists (select 1 from NODES where ID = ?)");
try {
stmt.setBytes(1, rawId);
stmt.setBytes(2, bytes);
@@ -212,7 +238,7 @@ public class H2Persistence implements GCPersistence {
public ChildNodeEntriesMap readCNEMap(Id id) throws NotFoundException, Exception {
Connection con = cp.getConnection();
try {
- PreparedStatement stmt = con.prepareStatement("select DATA from REVS where ID = ?");
+ PreparedStatement stmt = con.prepareStatement("select DATA from NODES where ID = ?");
try {
stmt.setBytes(1, id.getBytes());
ResultSet rs = stmt.executeQuery();
@@ -241,7 +267,7 @@ public class H2Persistence implements GCPersistence {
try {
PreparedStatement stmt = con
.prepareStatement(
- "insert into REVS (ID, DATA, TIME) select ?, ?, ? where not exists (select 1 from REVS where ID = ?)");
+ "insert into NODES (ID, DATA, TIME) select ?, ?, ? where not exists (select 1 from NODES where ID = ?)");
try {
stmt.setBytes(1, rawId);
stmt.setBytes(2, bytes);
@@ -264,7 +290,7 @@ public class H2Persistence implements GCPersistence {
@Override
public boolean markCommit(Id id) throws Exception {
- return touch(id, gcStart);
+ return touch("REVS", id, gcStart);
}
@Override
@@ -292,22 +318,23 @@ public class H2Persistence implements GCPersistence {
@Override
public boolean markNode(Id id) throws Exception {
- return touch(id, gcStart);
+ return touch("NODES", id, gcStart);
}
@Override
public boolean markCNEMap(Id id) throws Exception {
- return touch(id, gcStart);
+ return touch("NODES", id, gcStart);
}
- private boolean touch(Id id, long timeMillis) throws Exception {
+ private boolean touch(String table, Id id, long timeMillis) throws Exception {
Timestamp ts = new Timestamp(timeMillis);
Connection con = cp.getConnection();
try {
- PreparedStatement stmt = con
- .prepareStatement(
- "update REVS set TIME = ? where ID = ? and TIME < ?");
+ PreparedStatement stmt = con.prepareStatement(
+ String.format("update %s set TIME = ? where ID = ? and TIME < ?",
+ table));
+
try {
stmt.setTimestamp(1, ts);
stmt.setBytes(2, id.getBytes());
@@ -324,20 +351,29 @@ public class H2Persistence implements GCPersistence {
@Override
public int sweep() throws Exception {
Timestamp ts = new Timestamp(gcStart);
+ int swept = 0;
Connection con = cp.getConnection();
try {
- PreparedStatement stmt = con
- .prepareStatement(
- "delete REVS where TIME < ?");
+ PreparedStatement stmt = con.prepareStatement("delete REVS where TIME < ?");
+ try {
+ stmt.setTimestamp(1, ts);
+ swept += stmt.executeUpdate();
+ } finally {
+ stmt.close();
+ }
+
+ stmt = con.prepareStatement("delete NODES where TIME < ?");
+
try {
stmt.setTimestamp(1, ts);
- return stmt.executeUpdate();
+ swept += stmt.executeUpdate();
} finally {
stmt.close();
}
} finally {
con.close();
}
+ return swept;
}
}
\ No newline at end of file
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/InMemPersistence.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/InMemPersistence.java
index bba95fc..f8ddafe 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/InMemPersistence.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/InMemPersistence.java
@@ -42,7 +42,6 @@ public class InMemPersistence implements GCPersistence {
private final Map<Id, byte[]> objects = Collections.synchronizedMap(new HashMap<Id, byte[]>());
private final Map<Id, byte[]> marked = Collections.synchronizedMap(new HashMap<Id, byte[]>());
- private Id head;
private long gcStart;
// TODO: make this configurable
@@ -53,12 +52,13 @@ public class InMemPersistence implements GCPersistence {
// nothing to initialize
}
- public Id readHead() {
- return head;
+ @Override
+ public Id[] readIds() throws Exception {
+ return new Id[2];
}
public void writeHead(Id id) {
- head = id;
+
}
public void readNode(StoredNode node) throws NotFoundException, Exception {
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/Persistence.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/Persistence.java
index dbb4253..45ca883 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/Persistence.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/persistence/Persistence.java
@@ -36,7 +36,19 @@ public interface Persistence extends Closeable {
public void initialize(File homeDir) throws Exception;
- Id readHead() throws Exception;
+ /**
+ * Return an array of ids, where the first is the head id (as stored
+ * with {@link #writeHead(Id)}) and the second is the highest commit
+ * id found or {@code null}.
+ * <p/>
+ * This method is not guaranteed to deliver "live" results, after
+ * something is written to the storage, so it should better be used
+ * once after initialization.
+ *
+ * @return array of ids
+ * @throws Exception if an error occurs
+ */
+ Id[] readIds() throws Exception;
void writeHead(Id id) throws Exception;
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
index 1d71ff8..42b50a5 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
@@ -134,7 +134,8 @@ public class DefaultRevisionStore extends AbstractRevisionStore implements
cache = Collections.synchronizedMap(SimpleLRUCache.<Id, Object> newInstance(initialCacheSize));
// make sure we've got a HEAD commit
- head = pm.readHead();
+ Id[] ids = pm.readIds();
+ head = ids[0];
if (head == null || head.getBytes().length == 0) {
// assume virgin repository
byte[] rawHead = Id.fromLong(commitCounter.incrementAndGet())
@@ -148,7 +149,11 @@ public class DefaultRevisionStore extends AbstractRevisionStore implements
pm.writeCommit(head, initialCommit);
pm.writeHead(head);
} else {
- commitCounter.set(Long.parseLong(head.toString(), 16));
+ Id lastCommitId = head;
+ if (ids[1] != null && ids[1].compareTo(lastCommitId) > 0) {
+ lastCommitId = ids[1];
+ }
+ commitCounter.set(Long.parseLong(lastCommitId.toString(), 16));
}
if (gcpm != null) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-276_1bf5c550.diff |
bugs-dot-jar_data_OAK-1093_d7f0f180 | ---
BugID: OAK-1093
Summary: IllegalArgumentException on Row.getValues()
Description: "Calling {{row.getValues()}} is throwing an {{IllegalArgumentException}}
when called on the {{QueryResult}} of the query {{SELECT properties FROM \\[nt:base\\]
WHERE \\[sling:resourceType\\]=\"cq/personalization/components/contextstores/surferinfo\"}}\n\n{quote}\njava.lang.IllegalArgumentException\n\tat
com.google.common.base.Preconditions.checkArgument(Preconditions.java:76)\n\tat
org.apache.jackrabbit.oak.plugins.value.ValueImpl.checkSingleValued(ValueImpl.java:85)\n\tat
org.apache.jackrabbit.oak.plugins.value.ValueImpl.<init>(ValueImpl.java:72)\n\tat
org.apache.jackrabbit.oak.plugins.value.ValueFactoryImpl.createValue(ValueFactoryImpl.java:95)\n\tat
org.apache.jackrabbit.oak.jcr.query.QueryResultImpl.createValue(QueryResultImpl.java:266)\n\tat
org.apache.jackrabbit.oak.jcr.query.RowImpl.getValues(RowImpl.java:99)\n\tat com.day.cq.analytics.sitecatalyst.impl.FrameworkComponentImpl.getListProperty(FrameworkComponentImpl.java:128)\n\tat
com.day.cq.analytics.sitecatalyst.impl.FrameworkComponentImpl.<init>(FrameworkComponentImpl.java:91)\n{quote}"
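A hedged sketch of the idea in the patch below: when a selected column is multi-valued, its string values are joined into one space-separated string so a single JCR Value can be returned from Row.getValues(). The helper name here is illustrative only.
{code}
import java.util.List;

final class MultiValueFlattener {
    private MultiValueFlattener() {}

    static String flatten(List<String> values) {
        // e.g. ["a", "b"] -> "a b"
        return String.join(" ", values);
    }
}
{code}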
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/query/RowImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/query/RowImpl.java
index fd09cac..e337f39 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/query/RowImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/query/RowImpl.java
@@ -25,6 +25,10 @@ import javax.jcr.query.Row;
import org.apache.jackrabbit.oak.api.PropertyValue;
import org.apache.jackrabbit.oak.api.ResultRow;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.spi.query.PropertyValues;
+
+import com.google.common.base.Joiner;
/**
* The implementation of the corresponding JCR interface.
@@ -96,9 +100,20 @@ public class RowImpl implements Row {
int len = values.length;
Value[] v2 = new Value[values.length];
for (int i = 0; i < len; i++) {
- v2[i] = result.createValue(values[i]);
+ if(values[i].isArray()){
+ v2[i] = result.createValue(mvpToString(values[i]));
+ }else{
+ v2[i] = result.createValue(values[i]);
+ }
}
return v2;
}
+ private static PropertyValue mvpToString(PropertyValue pv) {
+ String v = Joiner.on(' ')
+ .appendTo(new StringBuilder(), pv.getValue(Type.STRINGS))
+ .toString();
+ return PropertyValues.newString(v);
+ }
+
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1093_d7f0f180.diff |
bugs-dot-jar_data_OAK-621_00b4b8a0 | ---
BugID: OAK-621
Summary: Moving or deleting tree instances with status NEW doesn't change its status
to DISCONNECTED
Description: "Further fall out from OAK-606:\n\n{code}\n Tree t = tree.addChild(\"new\");\n\n
\ root.move(\"/x\", \"/y/x\");\n assertEquals(Status.DISCONNECTED,
t.getStatus());\n{code}\n\nThe assertion fails. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/core/TreeImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/core/TreeImpl.java
index 3cbd5b8..ac0353a 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/core/TreeImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/core/TreeImpl.java
@@ -58,6 +58,11 @@ public class TreeImpl implements Tree {
private final RootImpl root;
/**
+ * The {@code NodeBuilder} for the underlying node state
+ */
+ private final NodeBuilder nodeBuilder;
+
+ /**
* Parent of this tree. Null for the root.
*/
private TreeImpl parent;
@@ -67,32 +72,26 @@ public class TreeImpl implements Tree {
*/
private String name;
- /**
- * Lazily initialised {@code NodeBuilder} for the underlying node state
- */
- NodeBuilder nodeBuilder;
+ private TreeImpl(RootImpl root) {
+ this.root = checkNotNull(root);
+ this.name = "";
+ this.nodeBuilder = root.createRootBuilder();
+ }
private TreeImpl(RootImpl root, TreeImpl parent, String name) {
this.root = checkNotNull(root);
- this.parent = parent;
+ this.parent = checkNotNull(parent);
this.name = checkNotNull(name);
+ this.nodeBuilder = parent.getNodeBuilder().child(name);
}
@Nonnull
static TreeImpl createRoot(final RootImpl root) {
- return new TreeImpl(root, null, "") {
+ return new TreeImpl(root) {
@Override
protected NodeState getBaseState() {
return root.getBaseState();
}
-
- @Override
- protected synchronized NodeBuilder getNodeBuilder() {
- if (nodeBuilder == null) {
- nodeBuilder = root.createRootBuilder();
- }
- return nodeBuilder;
- }
};
}
@@ -189,7 +188,7 @@ public class TreeImpl implements Tree {
@Override
public Iterable<? extends PropertyState> getProperties() {
root.checkLive();
- return Iterables.filter(getNodeBuilder().getProperties(),
+ return Iterables.filter(nodeBuilder.getProperties(),
new Predicate<PropertyState>() {
@Override
public boolean apply(PropertyState propertyState) {
@@ -219,7 +218,7 @@ public class TreeImpl implements Tree {
if (!parent.nodeBuilder.isConnected()) {
return true;
}
- return !getNodeBuilder().isConnected();
+ return !nodeBuilder.isConnected();
}
@Override
@@ -230,7 +229,7 @@ public class TreeImpl implements Tree {
return Status.DISCONNECTED;
}
- NodeBuilder builder = getNodeBuilder();
+ NodeBuilder builder = nodeBuilder;
if (builder.isNew()) {
return Status.NEW;
} else if (builder.isModified()) {
@@ -250,7 +249,7 @@ public class TreeImpl implements Tree {
public long getChildrenCount() {
// TODO: make sure cnt respects access control
root.checkLive();
- return getNodeBuilder().getChildNodeCount();
+ return nodeBuilder.getChildNodeCount();
}
@Override
@@ -260,7 +259,7 @@ public class TreeImpl implements Tree {
if (hasOrderableChildren()) {
childNames = getOrderedChildNames();
} else {
- childNames = getNodeBuilder().getChildNodeNames();
+ childNames = nodeBuilder.getChildNodeNames();
}
return Iterables.filter(Iterables.transform(
childNames,
@@ -282,9 +281,9 @@ public class TreeImpl implements Tree {
public Tree addChild(String name) {
root.checkLive();
if (!hasChild(name)) {
- getNodeBuilder().child(name);
+ nodeBuilder.child(name);
if (hasOrderableChildren()) {
- getNodeBuilder().setProperty(
+ nodeBuilder.setProperty(
MemoryPropertyBuilder.copy(Type.STRING, internalGetProperty(OAK_CHILD_ORDER))
.addValue(name)
.getPropertyState());
@@ -292,8 +291,10 @@ public class TreeImpl implements Tree {
root.updated();
}
- TreeImpl child = getChild(name);
- assert child != null;
+ TreeImpl child = new TreeImpl(root, this, name);
+
+ // Make sure to allocate the node builder for new nodes in order to correctly
+ // track removes and moves. See OAK-621
return child;
}
@@ -305,7 +306,7 @@ public class TreeImpl implements Tree {
}
if (!isRoot() && parent.hasChild(name)) {
- NodeBuilder builder = parent.getNodeBuilder();
+ NodeBuilder builder = parent.nodeBuilder;
builder.removeNode(name);
if (parent.hasOrderableChildren()) {
builder.setProperty(
@@ -360,7 +361,7 @@ public class TreeImpl implements Tree {
tail = Iterables.skip(filtered, idx);
}
// concatenate head, this name and tail
- parent.getNodeBuilder().setProperty(MultiStringPropertyState.stringProperty(OAK_CHILD_ORDER, Iterables.concat(head, Collections.singleton(getName()), tail))
+ parent.nodeBuilder.setProperty(MultiStringPropertyState.stringProperty(OAK_CHILD_ORDER, Iterables.concat(head, Collections.singleton(getName()), tail))
);
root.updated();
return true;
@@ -369,7 +370,7 @@ public class TreeImpl implements Tree {
@Override
public void setProperty(PropertyState property) {
root.checkLive();
- NodeBuilder builder = getNodeBuilder();
+ NodeBuilder builder = nodeBuilder;
builder.setProperty(property);
root.updated();
}
@@ -377,7 +378,7 @@ public class TreeImpl implements Tree {
@Override
public <T> void setProperty(String name, T value) {
root.checkLive();
- NodeBuilder builder = getNodeBuilder();
+ NodeBuilder builder = nodeBuilder;
builder.setProperty(name, value);
root.updated();
}
@@ -385,7 +386,7 @@ public class TreeImpl implements Tree {
@Override
public <T> void setProperty(String name, T value, Type<T> type) {
root.checkLive();
- NodeBuilder builder = getNodeBuilder();
+ NodeBuilder builder = nodeBuilder;
builder.setProperty(name, value, type);
root.updated();
}
@@ -393,7 +394,7 @@ public class TreeImpl implements Tree {
@Override
public void removeProperty(String name) {
root.checkLive();
- NodeBuilder builder = getNodeBuilder();
+ NodeBuilder builder = nodeBuilder;
builder.removeProperty(name);
root.updated();
}
@@ -418,16 +419,13 @@ public class TreeImpl implements Tree {
: parentBaseState.getChildNode(name);
}
+ //-----------------------------------------------------------< internal >---
+
@Nonnull
- protected synchronized NodeBuilder getNodeBuilder() {
- if (nodeBuilder == null) {
- nodeBuilder = parent.getNodeBuilder().child(name);
- }
+ NodeBuilder getNodeBuilder() {
return nodeBuilder;
}
- //-----------------------------------------------------------< internal >---
-
/**
* Move this tree to the parent at {@code destParent} with the new name
* {@code destName}.
@@ -446,7 +444,7 @@ public class TreeImpl implements Tree {
@Nonnull
NodeState getNodeState() {
- return getNodeBuilder().getNodeState();
+ return nodeBuilder.getNodeState();
}
/**
@@ -480,29 +478,29 @@ public class TreeImpl implements Tree {
}
Set<String> names = Sets.newLinkedHashSet();
for (String name : getOrderedChildNames()) {
- if (getNodeBuilder().hasChildNode(name)) {
+ if (nodeBuilder.hasChildNode(name)) {
names.add(name);
}
}
- for (String name : getNodeBuilder().getChildNodeNames()) {
+ for (String name : nodeBuilder.getChildNodeNames()) {
names.add(name);
}
PropertyBuilder<String> builder = MemoryPropertyBuilder.array(
Type.STRING, OAK_CHILD_ORDER);
builder.setValues(names);
- getNodeBuilder().setProperty(builder.getPropertyState());
+ nodeBuilder.setProperty(builder.getPropertyState());
}
//------------------------------------------------------------< private >---
private TreeImpl internalGetChild(String childName) {
- return getNodeBuilder().hasChildNode(childName)
+ return nodeBuilder.hasChildNode(childName)
? new TreeImpl(root, this, childName)
: null;
}
private PropertyState internalGetProperty(String propertyName) {
- return getNodeBuilder().getProperty(propertyName);
+ return nodeBuilder.getProperty(propertyName);
}
private void buildPath(StringBuilder sb) {
@@ -574,10 +572,10 @@ public class TreeImpl implements Tree {
* of the children as returned by {@link NodeBuilder#getChildNodeNames()}.
*/
public void ensureChildOrderProperty() {
- PropertyState childOrder = getNodeBuilder().getProperty(OAK_CHILD_ORDER);
+ PropertyState childOrder = nodeBuilder.getProperty(OAK_CHILD_ORDER);
if (childOrder == null) {
- getNodeBuilder().setProperty(
- MultiStringPropertyState.stringProperty(OAK_CHILD_ORDER, getNodeBuilder().getChildNodeNames()));
+ nodeBuilder.setProperty(
+ MultiStringPropertyState.stringProperty(OAK_CHILD_ORDER, nodeBuilder.getChildNodeNames()));
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-621_00b4b8a0.diff |
bugs-dot-jar_data_OAK-1075_79467350 | ---
BugID: OAK-1075
Summary: XPath query failures for mvps
Description: Adding some cases related to mvps that are not currently covered by the
existing (jackrabbit) tests.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/ComparisonImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/ComparisonImpl.java
index 75e48bc..26a3a43 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/ComparisonImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/ComparisonImpl.java
@@ -118,7 +118,7 @@ public class ComparisonImpl extends ConstraintImpl {
case EQUAL:
return PropertyValues.match(p1, p2);
case NOT_EQUAL:
- return !PropertyValues.match(p1, p2);
+ return PropertyValues.notMatch(p1, p2);
case GREATER_OR_EQUAL:
return p1.compareTo(p2) >= 0;
case GREATER_THAN:
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
index 6eb282e..b31e740 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
@@ -252,16 +252,17 @@ public class FilterImpl implements Filter {
switch (op) {
case EQUAL:
if (x.first != null && x.last == x.first && x.firstIncluding && x.lastIncluding) {
- // there is already an equality condition on this property
- // we will keep this, as it could be a multi-valued property
- // (unlike in databases, "x = 1 and x = 2" can match a node
- // if x is a multi-valued property with value "{1, 2}")
- return;
+ // we keep the old equality condition if there is one;
+ // we can not use setAlwaysFalse, as this would not be correct
+ // for multi-valued properties:
+ // unlike in databases, "x = 1 and x = 2" can match a node
+ // if x is a multi-valued property with value {1, 2}
+ } else {
+ // all other conditions (range conditions) are replaced with this one
+ // (we can not use setAlwaysFalse for the same reason as above)
+ x.first = x.last = v;
+ x.firstIncluding = x.lastIncluding = true;
}
- x.first = maxValue(oldFirst, v);
- x.firstIncluding = x.first == oldFirst ? x.firstIncluding : true;
- x.last = minValue(oldLast, v);
- x.lastIncluding = x.last == oldLast ? x.lastIncluding : true;
break;
case NOT_EQUAL:
if (v != null) {
@@ -269,25 +270,40 @@ public class FilterImpl implements Filter {
}
break;
case GREATER_THAN:
- x.first = maxValue(oldFirst, v);
- x.firstIncluding = false;
+ // we don't narrow the range because of multi-valued properties
+ if (x.first == null) {
+ x.first = maxValue(oldFirst, v);
+ x.firstIncluding = false;
+ }
break;
case GREATER_OR_EQUAL:
- x.first = maxValue(oldFirst, v);
- x.firstIncluding = x.first == oldFirst ? x.firstIncluding : true;
+ // we don't narrow the range because of multi-valued properties
+ if (x.first == null) {
+ x.first = maxValue(oldFirst, v);
+ x.firstIncluding = x.first == oldFirst ? x.firstIncluding : true;
+ }
break;
case LESS_THAN:
- x.last = minValue(oldLast, v);
- x.lastIncluding = false;
+ // we don't narrow the range because of multi-valued properties
+ if (x.last == null) {
+ x.last = minValue(oldLast, v);
+ x.lastIncluding = false;
+ }
break;
case LESS_OR_EQUAL:
- x.last = minValue(oldLast, v);
- x.lastIncluding = x.last == oldLast ? x.lastIncluding : true;
+ // we don't narrow the range because of multi-valued properties
+ if (x.last == null) {
+ x.last = minValue(oldLast, v);
+ x.lastIncluding = x.last == oldLast ? x.lastIncluding : true;
+ }
break;
case LIKE:
- // LIKE is handled in the fulltext index
- x.isLike = true;
- x.first = v;
+ // we don't narrow the range because of multi-valued properties
+ if (x.first == null) {
+ // LIKE is handled in the fulltext index
+ x.isLike = true;
+ x.first = v;
+ }
break;
case IN:
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/PropertyValues.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/PropertyValues.java
index 67e9cdb..5320a4f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/PropertyValues.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/PropertyValues.java
@@ -167,7 +167,7 @@ public final class PropertyValues {
}
if (!p1.isArray() && p2.isArray()) {
return contains(p2.getValue(Type.BINARIES),
- p2.getValue(Type.BINARY));
+ p1.getValue(Type.BINARY));
}
break;
default:
@@ -185,6 +185,53 @@ public final class PropertyValues {
}
+ public static boolean notMatch(PropertyValue p1, PropertyValue p2) {
+ if (p1.getType().tag() != p2.getType().tag()) {
+ return true;
+ }
+
+ switch (p1.getType().tag()) {
+ case PropertyType.BINARY:
+ if (p1.isArray() && !p2.isArray()) {
+ if (p1.count() > 1) {
+ // a value can not possibly match multiple distinct values
+ return true;
+ }
+ return !contains(p1.getValue(Type.BINARIES),
+ p2.getValue(Type.BINARY));
+ }
+ if (!p1.isArray() && p2.isArray()) {
+ if (p2.count() > 1) {
+ // a value can not possibly match multiple distinct values
+ return true;
+ }
+ return !contains(p2.getValue(Type.BINARIES),
+ p1.getValue(Type.BINARY));
+ }
+ break;
+ default:
+ if (p1.isArray() && !p2.isArray()) {
+ if (p1.count() > 1) {
+ // a value can not possibly match multiple distinct values
+ return true;
+ }
+ return !contains(p1.getValue(Type.STRINGS),
+ p2.getValue(Type.STRING));
+ }
+ if (!p1.isArray() && p2.isArray()) {
+ if (p2.count() > 1) {
+ // a value can not possibly match multiple distinct values
+ return true;
+ }
+ return !contains(p2.getValue(Type.STRINGS),
+ p1.getValue(Type.STRING));
+ }
+ }
+ // both arrays or both single values
+ return p1.compareTo(p2) != 0;
+
+ }
+
// --
/**
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1075_79467350.diff |
bugs-dot-jar_data_OAK-1829_ca36450e | ---
BugID: OAK-1829
Summary: IllegalStateException when using "lowerCase"/"lower" on a array property
Description: "if query contain lowerCase on array property then QueryResult.getRows()
throwing IllegalStateException.\n\nQuery which causing issue\n\n select [selector_1].*
from [nt:unstructured] AS [selector_1] where (([selector_1].[lcc:className] = 'com.adobe.icc.dbforms.obj.ConditionalDataModule'))
AND (LOWER([selector_1].[dataDictionaryRefs]) = 'employeedd')\n\nIf we remove LOWER
function then it is working \n\n select [selector_1].* from [nt:unstructured] AS
[selector_1] where (([selector_1].[lcc:className] = 'com.adobe.icc.dbforms.obj.ConditionalDataModule'))
AND ([selector_1].[dataDictionaryRefs] = 'EmployeeDD')"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/LowerCaseImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/LowerCaseImpl.java
index 3f36512..1b13e1d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/LowerCaseImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/LowerCaseImpl.java
@@ -27,7 +27,11 @@ import org.apache.jackrabbit.oak.api.PropertyValue;
import org.apache.jackrabbit.oak.query.index.FilterImpl;
import org.apache.jackrabbit.oak.spi.query.PropertyValues;
+import com.google.common.base.Function;
+
+import static com.google.common.collect.Iterables.transform;
import static org.apache.jackrabbit.oak.api.Type.STRING;
+import static org.apache.jackrabbit.oak.api.Type.STRINGS;
/**
* The function "lower(..)".
@@ -70,11 +74,20 @@ public class LowerCaseImpl extends DynamicOperandImpl {
if (p == null) {
return null;
}
- // TODO what is the expected result of LOWER(x) for an array property?
- // currently throws an exception
- String value = p.getValue(STRING);
// TODO toLowerCase(): document the Turkish locale problem
- return PropertyValues.newString(value.toLowerCase());
+ if (p.getType().isArray()) {
+ Iterable<String> lowerCase = transform(p.getValue(STRINGS),
+ new Function<String, String>() {
+ @Override
+ public String apply(String input) {
+ return input.toLowerCase();
+ }
+ });
+ return PropertyValues.newString(lowerCase);
+ } else {
+ String value = p.getValue(STRING);
+ return PropertyValues.newString(value.toLowerCase());
+ }
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1829_ca36450e.diff |
bugs-dot-jar_data_OAK-1269_b8fe2ded | ---
BugID: OAK-1269
Summary: NodeType index doesn't respect the declaringNodeTypes setting
Description: |-
Following the OAK-1150 discussion, I've noticed that the node type index doesn't respect the declaringNodeTypes setting.
Setting a restriction on the node type index definition breaks the index - there are 0 query hits.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndex.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndex.java
index 8ffa57b..f4f8dfb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndex.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndex.java
@@ -48,7 +48,7 @@ class NodeTypeIndex implements QueryIndex, JcrConstants {
return Double.POSITIVE_INFINITY;
}
NodeTypeIndexLookup lookup = new NodeTypeIndexLookup(root);
- if (lookup.isIndexed(filter.getPath())) {
+ if (lookup.isIndexed(filter.getPath(), filter)) {
return lookup.getCost(filter);
} else {
return Double.POSITIVE_INFINITY;
@@ -58,7 +58,7 @@ class NodeTypeIndex implements QueryIndex, JcrConstants {
@Override
public Cursor query(Filter filter, NodeState root) {
NodeTypeIndexLookup lookup = new NodeTypeIndexLookup(root);
- if (!hasNodeTypeRestriction(filter) || !lookup.isIndexed(filter.getPath())) {
+ if (!hasNodeTypeRestriction(filter) || !lookup.isIndexed(filter.getPath(), filter)) {
throw new IllegalStateException(
"NodeType index is used even when no index is available for filter " + filter);
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndexLookup.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndexLookup.java
index 9a9316b..e4a61bb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndexLookup.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/nodetype/NodeTypeIndexLookup.java
@@ -45,10 +45,10 @@ class NodeTypeIndexLookup implements JcrConstants {
* @return <code>true</code> if a node type index exists; <code>false</code>
* otherwise.
*/
- public boolean isIndexed(String path) {
+ public boolean isIndexed(String path, Filter f) {
PropertyIndexLookup lookup = new PropertyIndexLookup(root);
- if (lookup.isIndexed(JCR_PRIMARYTYPE, path, null)
- && lookup.isIndexed(JCR_MIXINTYPES, path, null)) {
+ if (lookup.isIndexed(JCR_PRIMARYTYPE, path, f)
+ && lookup.isIndexed(JCR_MIXINTYPES, path, f)) {
return true;
}
@@ -62,13 +62,13 @@ class NodeTypeIndexLookup implements JcrConstants {
NodeState child = root.getChildNode(path.substring(0, slash));
return new NodeTypeIndexLookup(child).isIndexed(
- path.substring(slash));
+ path.substring(slash), f);
}
public double getCost(Filter filter) {
PropertyIndexLookup lookup = new PropertyIndexLookup(root);
- return lookup.getCost(null, JCR_PRIMARYTYPE, newName(filter.getPrimaryTypes()))
- + lookup.getCost(null, JCR_MIXINTYPES, newName(filter.getMixinTypes()));
+ return lookup.getCost(filter, JCR_PRIMARYTYPE, newName(filter.getPrimaryTypes()))
+ + lookup.getCost(filter, JCR_MIXINTYPES, newName(filter.getMixinTypes()));
}
/**
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1269_b8fe2ded.diff |
bugs-dot-jar_data_OAK-1216_e403e003 | ---
BugID: OAK-1216
Summary: Path parsing must support SNS indexes, irrespective of SNS support
Description: |-
{code}
Session.getNode("/foo/bar[2]");
{code}
throws {{javax.jcr.RepositoryException: Invalid name or path: /foo/bar\[2]}}
This should be an ItemNotFoundException (if the item does not exist), irrespective if the repository supports SNS or not.
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionContext.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionContext.java
index dff9421..86ca4bd 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionContext.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionContext.java
@@ -328,7 +328,13 @@ public class SessionContext implements NamePathMapper {
if (oakPath != null) {
return oakPath;
} else {
- throw new RepositoryException("Invalid name or path: " + jcrPath);
+ // check if the path is an SNS path with an index > 1 and throw a PathNotFoundException instead (see OAK-1216)
+ if (getOakPathKeepIndex(jcrPath) != null) {
+ throw new PathNotFoundException(jcrPath);
+ } else {
+ throw new RepositoryException("Invalid name or path: " + jcrPath);
+ }
+
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1216_e403e003.diff |
bugs-dot-jar_data_OAK-1429_279bb3ce | ---
BugID: OAK-1429
Summary: Slow event listeners do not scale as expected
Description: "{{org.apache.jackrabbit.oak.jcr.LargeOperationIT#slowListener}} does
not scale to {{O n log n}} on the document node store. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 49c9d01..48f3bfb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -362,9 +362,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
*/
@CheckForNull
public String getCommitRootPath(Revision revision) {
- // check local map first
- Map<Revision, String> local = getLocalCommitRoot();
- String depth = local.get(revision);
+ String depth = getCommitRootDepth(revision);
if (depth != null) {
if (depth.equals("0")) {
return "/";
@@ -373,13 +371,6 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
return PathUtils.getAncestorPath(p,
PathUtils.getDepth(p) - Integer.parseInt(depth));
}
- // check previous
- for (NodeDocument prev : getPreviousDocs(COMMIT_ROOT, revision)) {
- String path = prev.getCommitRootPath(revision);
- if (path != null) {
- return path;
- }
- }
return null;
}
@@ -968,6 +959,31 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
}
/**
+ * Returns the commit root depth for the given revision. This method also
+ * takes previous documents into account.
+ *
+ * @param revision get the commit root depth for this revision.
+ * @return the depth or <code>null</code> if there is no commit root entry
+ * for the given revision on this document or previous documents.
+ */
+ @CheckForNull
+ private String getCommitRootDepth(@Nonnull Revision revision) {
+ // check local map first
+ Map<Revision, String> local = getLocalCommitRoot();
+ String depth = local.get(revision);
+ if (depth == null) {
+ // check previous
+ for (NodeDocument prev : getPreviousDocs(COMMIT_ROOT, revision)) {
+ depth = prev.getCommitRootDepth(revision);
+ if (depth != null) {
+ break;
+ }
+ }
+ }
+ return depth;
+ }
+
+ /**
* Checks that revision x is newer than another revision.
*
* @param x the revision to check
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1429_279bb3ce.diff |
bugs-dot-jar_data_OAK-1094_2e20589f | ---
BugID: OAK-1094
Summary: CacheLIRS implementation incomplete
Description: The current CacheLIRS implementation is not complete and e.g. does not
provide a write through ConcurrentMap view on {{asMap()}}. For OAK-1088 it would
be good to have this implementation as it allows conditional and atomic updates
of cache entries.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
index 52747ac..3356131 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
@@ -922,8 +922,12 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
V old = get(key, hash);
long start = System.nanoTime();
try {
- ListenableFuture<V> future = loader.reload(key, old);
- value = future.get();
+ if (old == null) {
+ value = loader.load(key);
+ } else {
+ ListenableFuture<V> future = loader.reload(key, old);
+ value = future.get();
+ }
loadSuccessCount++;
} catch (Exception e) {
loadExceptionCount++;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1094_2e20589f.diff |
bugs-dot-jar_data_OAK-1807_077efee5 | ---
BugID: OAK-1807
Summary: ConstraintViolationException seen with multiple Oak/Mongo with ConcurrentCreateNodesTest
Description: |-
While running ConcurrentCreateNodesTest with 5 instances writing to same Mongo instance following exception is seen
{noformat}
Exception in thread "Background job org.apache.jackrabbit.oak.benchmark.ConcurrentCreateNodesTest$Writer@3f56e5ed" java.lang.RuntimeException: javax.jcr.nodetype.ConstraintViolationException: OakConstraint0001: /: The primary type rep:root does not exist
at org.apache.jackrabbit.oak.benchmark.ConcurrentCreateNodesTest$Writer.run(ConcurrentCreateNodesTest.java:111)
at org.apache.jackrabbit.oak.benchmark.AbstractTest$1.run(AbstractTest.java:481)
Caused by: javax.jcr.nodetype.ConstraintViolationException: OakConstraint0001: /: The primary type rep:root does not exist
at org.apache.jackrabbit.oak.api.CommitFailedException.asRepositoryException(CommitFailedException.java:225)
at org.apache.jackrabbit.oak.api.CommitFailedException.asRepositoryException(CommitFailedException.java:212)
at org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.newRepositoryException(SessionDelegate.java:679)
at org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.save(SessionDelegate.java:553)
at org.apache.jackrabbit.oak.jcr.session.SessionImpl$8.perform(SessionImpl.java:417)
at org.apache.jackrabbit.oak.jcr.session.SessionImpl$8.perform(SessionImpl.java:414)
at org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.perform(SessionDelegate.java:308)
at org.apache.jackrabbit.oak.jcr.session.SessionImpl.perform(SessionImpl.java:127)
at org.apache.jackrabbit.oak.jcr.session.SessionImpl.save(SessionImpl.java:414)
at org.apache.jackrabbit.oak.benchmark.ConcurrentCreateNodesTest$Writer.run(ConcurrentCreateNodesTest.java:100)
... 1 more
Caused by: org.apache.jackrabbit.oak.api.CommitFailedException: OakConstraint0001: /: The primary type rep:root does not exist
at org.apache.jackrabbit.oak.plugins.nodetype.TypeEditor.constraintViolation(TypeEditor.java:150)
at org.apache.jackrabbit.oak.plugins.nodetype.TypeEditor.getEffectiveType(TypeEditor.java:286)
at org.apache.jackrabbit.oak.plugins.nodetype.TypeEditor.<init>(TypeEditor.java:101)
at org.apache.jackrabbit.oak.plugins.nodetype.TypeEditorProvider.getRootEditor(TypeEditorProvider.java:85)
at org.apache.jackrabbit.oak.spi.commit.CompositeEditorProvider.getRootEditor(CompositeEditorProvider.java:80)
at org.apache.jackrabbit.oak.spi.commit.EditorHook.processCommit(EditorHook.java:53)
at org.apache.jackrabbit.oak.spi.commit.CompositeHook.processCommit(CompositeHook.java:60)
at org.apache.jackrabbit.oak.spi.commit.CompositeHook.processCommit(CompositeHook.java:60)
at org.apache.jackrabbit.oak.spi.state.AbstractNodeStoreBranch$InMemory.merge(AbstractNodeStoreBranch.java:498)
at org.apache.jackrabbit.oak.spi.state.AbstractNodeStoreBranch.merge(AbstractNodeStoreBranch.java:300)
at org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBranch.merge(DocumentNodeStoreBranch.java:129)
at org.apache.jackrabbit.oak.plugins.document.DocumentRootBuilder.merge(DocumentRootBuilder.java:159)
at org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore.merge(DocumentNodeStore.java:1275)
at org.apache.jackrabbit.oak.core.MutableRoot.commit(MutableRoot.java:247)
at org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.commit(SessionDelegate.java:405)
at org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.save(SessionDelegate.java:551)
... 7 more
{noformat}
This has been reported by [~rogoz]
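A hedged sketch of the race the patch below closes, using a hypothetical class rather than Oak's Revision: a thread can read the clock before entering the lock and still arrive after a thread that read a later time, so the timestamp has to be re-checked against the last issued one inside the synchronized block to stay monotonic.
{code}
final class MonotonicRevisionClock {
    private long lastTimestamp;
    private int counter;

    synchronized long[] nextRevision(long currentTimeMillis) {
        long timestamp = currentTimeMillis;
        if (timestamp < lastTimestamp) {
            timestamp = lastTimestamp; // re-check after acquiring the lock
        }
        if (timestamp == lastTimestamp) {
            counter++;
        } else {
            lastTimestamp = timestamp;
            counter = 0;
        }
        return new long[] { timestamp, counter };
    }
}
{code}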
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
index 79043e9..a7806cd 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
@@ -156,6 +156,12 @@ public class Revision {
long timestamp = getCurrentTimestamp();
int c;
synchronized (Revision.class) {
+ // need to check again, because threads
+ // could arrive inside the synchronized block
+ // out of order
+ if (timestamp < lastRevisionTimestamp) {
+ timestamp = lastRevisionTimestamp;
+ }
if (timestamp == lastRevisionTimestamp) {
c = ++lastRevisionCount;
} else {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1807_077efee5.diff |
bugs-dot-jar_data_OAK-1467_dde7de85 | ---
BugID: OAK-1467
Summary: Commit.rollback() may remove changes from other commit
Description: Commit.rollback() removes documents it previously created. With concurrent
commits it may happen that this method removes documents some other commit modified
in the meantime.
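A minimal sketch of the safer rollback strategy in the patch below, with hypothetical store and update types: instead of deleting documents the commit created, apply a reverse update that removes only this commit's own entries, so concurrent changes from other commits survive the rollback.
{code}
import java.util.HashMap;
import java.util.Map;

interface Store {
    void createOrUpdate(String id, Map<String, Object> changes);
}

final class RollbackSketch {
    private RollbackSketch() {}

    static void rollbackNewDocument(Store store, String docId, String revisionKey) {
        Map<String, Object> reverse = new HashMap<>();
        reverse.put(revisionKey, null);       // unset only what this commit wrote
        store.createOrUpdate(docId, reverse); // never remove the whole document
    }
}
{code}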
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
index 75510da..93b4bd0 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
@@ -408,7 +408,9 @@ public class Commit {
store.createOrUpdate(NODES, reverse);
}
for (UpdateOp op : newDocuments) {
- store.remove(NODES, op.id);
+ UpdateOp reverse = op.getReverseOperation();
+ NodeDocument.unsetLastRev(reverse, revision.getClusterId());
+ store.createOrUpdate(NODES, reverse);
}
UpdateOp removeCollision = new UpdateOp(commitRoot.getId(), false);
NodeDocument.removeCollision(removeCollision, revision);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1467_dde7de85.diff |
bugs-dot-jar_data_OAK-2528_239de7b8 | ---
BugID: OAK-2528
Summary: Entries in _commitRoot not purged
Description: Entries in _commitRoot are not purged or moved to previous documents
if there are no changes with those revisions. Usually there is always a change associated
with a _commitRoot, but in some cases it may happen that the only update on the
document is for non-revisioned data like the _children flag.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
index 3ca3e08..f6bbb19 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
@@ -21,7 +21,6 @@ package org.apache.jackrabbit.oak.plugins.document;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@@ -65,6 +64,7 @@ import static org.apache.jackrabbit.oak.plugins.document.util.Utils.isRevisionNe
class SplitOperations {
private static final Logger LOG = LoggerFactory.getLogger(SplitOperations.class);
+ private static final int GARBAGE_LIMIT = Integer.getInteger("oak.documentMK.garbage.limit", 10000);
private static final DocumentStore STORE = new MemoryDocumentStore();
private final NodeDocument doc;
@@ -75,7 +75,9 @@ class SplitOperations {
private Revision low;
private int numValues;
private Map<String, NavigableMap<Revision, String>> committedChanges;
+ private Set<Revision> changes;
private Map<String, Set<Revision>> garbage;
+ private int garbageCount = 0;
private Set<Revision> mostRecentRevs;
private Set<Revision> splitRevs;
private List<UpdateOp> splitOps;
@@ -119,7 +121,10 @@ class SplitOperations {
mostRecentRevs = Sets.newHashSet();
splitRevs = Sets.newHashSet();
garbage = Maps.newHashMap();
- committedChanges = getCommittedLocalChanges();
+ changes = Sets.newHashSet();
+ committedChanges = Maps.newHashMap();
+
+ collectLocalChanges(committedChanges, changes);
// revisions of the most recent committed changes on this document
// these are kept in the main document. _revisions and _commitRoot
@@ -215,9 +220,15 @@ class SplitOperations {
NavigableMap<Revision, String> commitRoot =
new TreeMap<Revision, String>(context.getRevisionComparator());
for (Map.Entry<Revision, String> entry : doc.getLocalCommitRoot().entrySet()) {
- if (splitRevs.contains(entry.getKey())) {
- commitRoot.put(entry.getKey(), entry.getValue());
+ Revision r = entry.getKey();
+ if (splitRevs.contains(r)) {
+ commitRoot.put(r, entry.getValue());
numValues++;
+ } else if (r.getClusterId() == context.getClusterId()
+ && !changes.contains(r)) {
+ // OAK-2528: _commitRoot entry without associated
+ // change -> consider as garbage
+ addGarbage(r, COMMIT_ROOT);
}
}
committedChanges.put(COMMIT_ROOT, commitRoot);
@@ -350,15 +361,15 @@ class SplitOperations {
}
/**
- * Returns a map of all local property changes committed by the current
+ * Collects all local property changes committed by the current
* cluster node.
*
- * @return local changes committed by the current cluster node.
+ * @param committedLocally local changes committed by the current cluster node.
+ * @param changes all revisions of local changes (committed and uncommitted).
*/
- @Nonnull
- private Map<String, NavigableMap<Revision, String>> getCommittedLocalChanges() {
- Map<String, NavigableMap<Revision, String>> committedLocally
- = new HashMap<String, NavigableMap<Revision, String>>();
+ private void collectLocalChanges(
+ Map<String, NavigableMap<Revision, String>> committedLocally,
+ Set<Revision> changes) {
for (String property : filter(doc.keySet(), PROPERTY_OR_DELETED)) {
NavigableMap<Revision, String> splitMap
= new TreeMap<Revision, String>(context.getRevisionComparator());
@@ -370,6 +381,7 @@ class SplitOperations {
if (rev.getClusterId() != context.getClusterId()) {
continue;
}
+ changes.add(rev);
if (doc.isCommitted(rev)) {
splitMap.put(rev, entry.getValue());
} else if (isGarbage(rev)) {
@@ -377,7 +389,6 @@ class SplitOperations {
}
}
}
- return committedLocally;
}
private boolean isGarbage(Revision rev) {
@@ -392,12 +403,17 @@ class SplitOperations {
}
private void addGarbage(Revision rev, String property) {
+ if (garbageCount > GARBAGE_LIMIT) {
+ return;
+ }
Set<Revision> revisions = garbage.get(property);
if (revisions == null) {
revisions = Sets.newHashSet();
garbage.put(property, revisions);
}
- revisions.add(rev);
+ if (revisions.add(rev)) {
+ garbageCount++;
+ }
}
private void disconnectStalePrevDocs() {
@@ -444,8 +460,10 @@ class SplitOperations {
for (Map.Entry<String, Set<Revision>> entry : garbage.entrySet()) {
for (Revision r : entry.getValue()) {
main.removeMapEntry(entry.getKey(), r);
- NodeDocument.removeCommitRoot(main, r);
- NodeDocument.removeRevision(main, r);
+ if (PROPERTY_OR_DELETED.apply(entry.getKey())) {
+ NodeDocument.removeCommitRoot(main, r);
+ NodeDocument.removeRevision(main, r);
+ }
}
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2528_239de7b8.diff |
bugs-dot-jar_data_OAK-1668_63070cf9 | ---
BugID: OAK-1668
Summary: Lucene should not serve queries for what it doesn't index
Description: |
  If a query is executed and the Lucene index is chosen to serve it, the index
  will try to serve all of the query's restrictions, even the ones that
  are not indexed.
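A hedged sketch of the guard the patch below adds, with hypothetical arguments rather than the index's real definition format: a property restriction is only pushed down to Lucene if the property name is not excluded and its type is among the indexed property types.
{code}
import java.util.Set;

final class RestrictionFilterSketch {
    private RestrictionFilterSketch() {}

    static boolean canServe(String propertyName, int propertyTypeTag,
                            Set<String> excludedNames, Set<Integer> indexedTypeTags) {
        if (propertyName.contains("/")) {
            return false; // child-level restrictions are not indexed
        }
        if (excludedNames.contains(propertyName)) {
            return false;
        }
        return indexedTypeTags.contains(propertyTypeTag);
    }
}
{code}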
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
index 6d8e111..c589260 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
@@ -28,6 +28,8 @@ import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.INDEX_DEFIN
import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.TYPE_PROPERTY_NAME;
import static org.apache.jackrabbit.oak.plugins.index.lucene.FieldNames.PATH;
import static org.apache.jackrabbit.oak.plugins.index.lucene.FieldNames.PATH_SELECTOR;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.EXCLUDE_PROPERTY_NAMES;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INCLUDE_PROPERTY_TYPES;
import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_DATA_CHILD_NAME;
import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.PERSISTENCE_FILE;
import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.PERSISTENCE_NAME;
@@ -56,6 +58,8 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
+import javax.jcr.PropertyType;
+
import org.apache.jackrabbit.oak.api.PropertyValue;
import org.apache.jackrabbit.oak.plugins.index.aggregate.NodeAggregator;
import org.apache.jackrabbit.oak.plugins.index.lucene.util.MoreLikeThisHelper;
@@ -67,11 +71,11 @@ import org.apache.jackrabbit.oak.query.fulltext.FullTextOr;
import org.apache.jackrabbit.oak.query.fulltext.FullTextTerm;
import org.apache.jackrabbit.oak.query.fulltext.FullTextVisitor;
import org.apache.jackrabbit.oak.spi.query.Cursor;
-import org.apache.jackrabbit.oak.spi.query.Filter;
-import org.apache.jackrabbit.oak.spi.query.PropertyValues;
import org.apache.jackrabbit.oak.spi.query.Cursors.PathCursor;
+import org.apache.jackrabbit.oak.spi.query.Filter;
import org.apache.jackrabbit.oak.spi.query.Filter.PropertyRestriction;
import org.apache.jackrabbit.oak.spi.query.IndexRow;
+import org.apache.jackrabbit.oak.spi.query.PropertyValues;
import org.apache.jackrabbit.oak.spi.query.QueryIndex;
import org.apache.jackrabbit.oak.spi.query.QueryIndex.FulltextQueryIndex;
import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
@@ -329,7 +333,7 @@ public class LuceneIndex implements FulltextQueryIndex {
// we only restrict non-full-text conditions if there is
// no relative property in the full-text constraint
boolean nonFullTextConstraints = parent.isEmpty();
- String plan = getQuery(filter, null, nonFullTextConstraints, analyzer) + " ft:(" + ft + ")";
+ String plan = getQuery(filter, null, nonFullTextConstraints, analyzer, getIndexDef(root)) + " ft:(" + ft + ")";
if (!parent.isEmpty()) {
plan += " parent:" + parent;
}
@@ -363,7 +367,7 @@ public class LuceneIndex implements FulltextQueryIndex {
IndexSearcher searcher = new IndexSearcher(reader);
List<LuceneResultRow> rows = new ArrayList<LuceneResultRow>();
Query query = getQuery(filter, reader,
- nonFullTextConstraints, analyzer);
+ nonFullTextConstraints, analyzer, getIndexDef(root));
// TODO OAK-828
HashSet<String> seenPaths = new HashSet<String>();
@@ -428,10 +432,11 @@ public class LuceneIndex implements FulltextQueryIndex {
* path, node type, and so on) should be added to the Lucene
* query
* @param analyzer the Lucene analyzer used for building the fulltext query
+ * @param indexDefinition nodestate that contains the index definition
* @return the Lucene query
*/
private static Query getQuery(Filter filter, IndexReader reader,
- boolean nonFullTextConstraints, Analyzer analyzer) {
+ boolean nonFullTextConstraints, Analyzer analyzer, NodeState indexDefinition) {
List<Query> qs = new ArrayList<Query>();
FullTextExpression ft = filter.getFullTextConstraint();
if (ft == null) {
@@ -461,9 +466,9 @@ public class LuceneIndex implements FulltextQueryIndex {
throw new RuntimeException(e);
}
}
- }
- else if (nonFullTextConstraints) {
- addNonFullTextConstraints(qs, filter, reader, analyzer);
+ } else if (nonFullTextConstraints) {
+ addNonFullTextConstraints(qs, filter, reader, analyzer,
+ indexDefinition);
}
if (qs.size() == 0) {
return new MatchAllDocsQuery();
@@ -479,7 +484,7 @@ public class LuceneIndex implements FulltextQueryIndex {
}
private static void addNonFullTextConstraints(List<Query> qs,
- Filter filter, IndexReader reader, Analyzer analyzer) {
+ Filter filter, IndexReader reader, Analyzer analyzer, NodeState indexDefinition) {
if (!filter.matchesAllTypes()) {
addNodeTypeConstraints(qs, filter);
}
@@ -526,11 +531,12 @@ public class LuceneIndex implements FulltextQueryIndex {
continue;
}
- String name = pr.propertyName;
- if (name.contains("/")) {
- // lucene cannot handle child-level property restrictions
+ // check excluded properties and types
+ if (isExcludedProperty(pr, indexDefinition)) {
continue;
}
+
+ String name = pr.propertyName;
if ("rep:excerpt".equals(name)) {
continue;
}
@@ -617,6 +623,44 @@ public class LuceneIndex implements FulltextQueryIndex {
return token;
}
+ private static boolean isExcludedProperty(PropertyRestriction pr,
+ NodeState definition) {
+ String name = pr.propertyName;
+ if (name.contains("/")) {
+ // lucene cannot handle child-level property restrictions
+ return true;
+ }
+
+ // check name
+ for (String e : definition.getStrings(EXCLUDE_PROPERTY_NAMES)) {
+ if (e.equalsIgnoreCase(name)) {
+ return true;
+ }
+ }
+
+ // check type
+ Integer type = null;
+ if (pr.first != null) {
+ type = pr.first.getType().tag();
+ } else if (pr.last != null) {
+ type = pr.last.getType().tag();
+ } else if (pr.list != null && !pr.list.isEmpty()) {
+ type = pr.list.get(0).getType().tag();
+ }
+ if (type != null) {
+ boolean isIn = false;
+ for (String e : definition.getStrings(INCLUDE_PROPERTY_TYPES)) {
+ if (PropertyType.valueFromName(e) == type) {
+ isIn = true;
+ }
+ }
+ if (!isIn) {
+ return true;
+ }
+ }
+ return false;
+ }
+
private static void addReferenceConstraint(String uuid, List<Query> qs,
IndexReader reader) {
if (reader == null) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1668_63070cf9.diff |
bugs-dot-jar_data_OAK-2238_a28098fd | ---
BugID: OAK-2238
Summary: Session.getItem violates JCR Spec
Description: |-
  Session.getItem(path) is supposed to return a node at the given path first and fall back to a property only if no node exists. The Oak implementation checks in the opposite order (property first).
  See the attached patch for a possible fix.
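A minimal, self-contained illustration of the precedence the spec requires (node first, property second). The sketch below models the parent with plain JDK collections; the class and method names are made up for illustration and are not the actual Oak delegate API.
{code}
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class GetItemPrecedence {

    // Resolves a name against a parent that has both child nodes and properties.
    // Per JCR, an existing child node must win over a property of the same name.
    static String resolve(Set<String> childNodes, Map<String, String> properties, String name) {
        if (childNodes.contains(name)) {
            return "node:" + name;              // node takes precedence
        } else if (properties.containsKey(name)) {
            return "property:" + name;          // fall back to the property
        }
        return null;                            // neither exists
    }

    public static void main(String[] args) {
        Set<String> nodes = new HashSet<String>(Arrays.asList("foo"));
        Map<String, String> props = new HashMap<String, String>();
        props.put("foo", "bar");
        // With both a child node and a property named "foo", the node must be returned.
        System.out.println(resolve(nodes, props, "foo"));   // prints node:foo
    }
}
{code}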
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/SessionDelegate.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/SessionDelegate.java
index 5fd1988..2cab01a 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/SessionDelegate.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/SessionDelegate.java
@@ -426,12 +426,12 @@ public class SessionDelegate {
return getRootNode();
} else {
Tree parent = root.getTree(PathUtils.getParentPath(path));
- if (parent.hasProperty(name)) {
- return new PropertyDelegate(this, parent, name);
- }
+
Tree child = parent.getChild(name);
if (child.exists()) {
return new NodeDelegate(this, child);
+ } else if (parent.hasProperty(name)) {
+ return new PropertyDelegate(this, parent, name);
} else {
return null;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2238_a28098fd.diff |
bugs-dot-jar_data_OAK-2260_0ac7ff20 | ---
BugID: OAK-2260
Summary: TarMK Cold Standby can corrupt bulk segments
Description: 'There''s a race condition in the segment transfer code that may introduce
  corrupted binary segments on the secondary instance. During the head sync phase
  the master may send the head segment twice, which makes the client receive and
  store the second copy as if it were a different segment.
  '
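A minimal sketch of the client-side guard the patch below adds: keep polling until the received segment id matches the one that was requested, so a duplicated head segment cannot be mistaken for the reply. The queue-of-strings model and the timeout value are illustrative stand-ins, not the actual standby client classes.
{code}
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class SegmentPollSketch {

    static final long TIMEOUT_MS = 5000;

    // Waits for the segment with the given id, discarding other segments
    // (e.g. a head segment sent twice) instead of mistaking them for the reply.
    static String waitForSegment(BlockingQueue<String> responses, String expectedId)
            throws InterruptedException {
        for (;;) {
            String segmentId = responses.poll(TIMEOUT_MS, TimeUnit.MILLISECONDS);
            if (segmentId == null) {
                return null;                     // timed out
            }
            if (segmentId.equals(expectedId)) {
                return segmentId;                // this is the segment we asked for
            }
            // otherwise: an unrelated (duplicate) segment arrived, keep waiting
        }
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> responses = new LinkedBlockingQueue<String>();
        responses.add("head-segment");           // duplicate head segment sent by the master
        responses.add("requested-segment");
        System.out.println(waitForSegment(responses, "requested-segment"));
    }
}
{code}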
diff --git a/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java b/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java
index ba7e775..633652c 100644
--- a/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java
+++ b/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java
@@ -133,7 +133,7 @@ public class SegmentLoaderHandler extends ChannelInboundHandlerAdapter
@Override
public Segment readSegment(final String id) {
ctx.writeAndFlush(newGetSegmentReq(this.clientID, id));
- return getSegment();
+ return getSegment(id);
}
@Override
@@ -145,15 +145,18 @@ public class SegmentLoaderHandler extends ChannelInboundHandlerAdapter
// implementation of RemoteSegmentLoader
- public Segment getSegment() {
+ public Segment getSegment(final String id) {
boolean interrupted = false;
try {
for (;;) {
try {
- // log.debug("polling segment");
Segment s = segment.poll(timeoutMs, TimeUnit.MILLISECONDS);
- // log.debug("returning segment " + s.getSegmentId());
- return s;
+ if (s == null) {
+ return null;
+ }
+ if (s.getSegmentId().toString().equals(id)) {
+ return s;
+ }
} catch (InterruptedException ignore) {
interrupted = true;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2260_0ac7ff20.diff |
bugs-dot-jar_data_OAK-1655_c91bfa54 | ---
BugID: OAK-1655
Summary: DataStoreBlobStore does not take into maxLastModifiedTime when fetching all
chunks
Description: |-
Currently the {{DataStoreBlobStore}} has a pending TODO
{code}
@Override
public Iterator<String> getAllChunkIds(long maxLastModifiedTime) throws Exception {
//TODO Ignores the maxLastModifiedTime currently.
return Iterators.transform(delegate.getAllIdentifiers(), new Function<DataIdentifier, String>() {
@Nullable
@Override
public String apply(@Nullable DataIdentifier input) {
return input.toString();
}
});
}
{code}
    Due to this it currently returns all blob ids. This causes an issue when a new binary gets created while a blob GC is running, as such binaries might be considered orphaned and deleted.
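A minimal, self-contained sketch of the intended contract: only chunks last modified before maxLastModifiedTime are reported, and a non-positive value disables the check. The Record class here is a made-up stand-in for DataRecord, not the Jackrabbit API.
{code}
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ChunkIdFilterSketch {

    // Stand-in for a data record: an identifier plus a last-modified timestamp.
    static class Record {
        final String id;
        final long lastModified;
        Record(String id, long lastModified) {
            this.id = id;
            this.lastModified = lastModified;
        }
    }

    // Returns only ids modified before maxLastModifiedTime; <= 0 disables the check.
    static List<String> chunkIds(List<Record> records, long maxLastModifiedTime) {
        List<String> ids = new ArrayList<String>();
        for (Record r : records) {
            if (maxLastModifiedTime <= 0 || r.lastModified < maxLastModifiedTime) {
                ids.add(r.id);
            }
        }
        return ids;
    }

    public static void main(String[] args) {
        List<Record> records = Arrays.asList(
                new Record("old-chunk", 1000L),
                new Record("new-chunk", 9000L));  // created while GC is running
        // Only the old chunk is reported as a GC candidate; the new one is protected.
        System.out.println(chunkIds(records, 5000L)); // [old-chunk]
    }
}
{code}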
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
index 3ad49ad..0880a19 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
@@ -267,7 +267,8 @@ public class DataStoreBlobStore implements DataStore, BlobStore, GarbageCollecta
public boolean apply(DataIdentifier input) {
try {
DataRecord dr = delegate.getRecord(input);
- if(dr != null && dr.getLastModified() < maxLastModifiedTime){
+ if(dr != null && (maxLastModifiedTime <=0
+ || dr.getLastModified() < maxLastModifiedTime)){
return true;
}
} catch (DataStoreException e) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1655_c91bfa54.diff |
bugs-dot-jar_data_OAK-543_3ce758b7 | ---
BugID: OAK-543
Summary: PutTokenImpl not thread safe
Description: "{{PutTokenImpl}} uses prefix increment on a static member to generate
presumably unique identifiers. Prefix increment is not atomic though which might
result in non unique ids being generated. "
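A small, self-contained demonstration of why the prefix increment can hand out duplicate ids: two threads bumping a plain static int routinely lose updates, while AtomicInteger.incrementAndGet() never does. The iteration counts are arbitrary.
{code}
import java.util.concurrent.atomic.AtomicInteger;

public class CounterRaceDemo {

    static int plainCounter = 0;
    static final AtomicInteger atomicCounter = new AtomicInteger();

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
            for (int i = 0; i < 100_000; i++) {
                ++plainCounter;                  // read-modify-write, not atomic
                atomicCounter.incrementAndGet(); // atomic
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // The plain counter typically ends up below 200000 on a multi-core machine
        // (lost updates, i.e. duplicate ids); the atomic counter is always 200000.
        System.out.println("plain:  " + plainCounter);
        System.out.println("atomic: " + atomicCounter.get());
    }
}
{code}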
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
index 4e299c3..533ccc8 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/store/DefaultRevisionStore.java
@@ -223,12 +223,12 @@ public class DefaultRevisionStore extends AbstractRevisionStore implements
*/
static class PutTokenImpl extends PutToken {
- private static int idCounter;
+ private static final AtomicInteger ID_COUNTER = new AtomicInteger();
private int id;
private StoredNode lastModifiedNode;
public PutTokenImpl() {
- this.id = ++idCounter;
+ this.id = ID_COUNTER.incrementAndGet();
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-543_3ce758b7.diff |
bugs-dot-jar_data_OAK-1173_61c877d8 | ---
BugID: OAK-1173
Summary: NPE if checking for a non-existing node in version storage
Description: "NPE If a tree given to CompiledPermissionImpl.getTreePermission() does
not have a primary type, e.g. for a \"hidden\" oak node:\n\n{noformat}\n\t at com.google.common.base.Preconditions.checkNotNull(Preconditions.java:191)\n\t
\ at org.apache.jackrabbit.oak.security.authorization.permission.CompiledPermissionImpl.getTreePermission(CompiledPermissionImpl.java:160)\n\t
\ at org.apache.jackrabbit.oak.security.authorization.permission.CompiledPermissionImpl$TreePermissionImpl.getChildPermission(CompiledPermissionImpl.java:443)\n\t
\ at org.apache.jackrabbit.oak.core.SecureNodeBuilder.getTreePermission(SecureNodeBuilder.java:352)\n\t
\ at org.apache.jackrabbit.oak.core.SecureNodeBuilder.exists(SecureNodeBuilder.java:129)\n\t
\ at org.apache.jackrabbit.oak.core.SecureNodeBuilder.hasChildNode(SecureNodeBuilder.java:271)\n\t
\ at org.apache.jackrabbit.oak.core.AbstractTree.getChildrenCount(AbstractTree.java:248)\n{noformat}\n\nThe
tree passed here to get the children count is: {{/jcr:system/jcr:versionStorage}}
and the child node not having a primary type is {{:index}}\n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/CompiledPermissionImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/CompiledPermissionImpl.java
index dc1b9c2..c9d0fa0 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/CompiledPermissionImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/CompiledPermissionImpl.java
@@ -157,7 +157,10 @@ final class CompiledPermissionImpl implements CompiledPermissions, PermissionCon
// TODO: OAK-753 decide on where to filter out hidden items.
return TreePermission.ALL;
case TreeTypeProvider.TYPE_VERSION:
- String ntName = checkNotNull(TreeUtil.getPrimaryTypeName(tree));
+ String ntName = TreeUtil.getPrimaryTypeName(tree);
+ if (ntName == null) {
+ return TreePermission.EMPTY;
+ }
if (VersionConstants.VERSION_STORE_NT_NAMES.contains(ntName) || VersionConstants.NT_ACTIVITY.equals(ntName)) {
return new TreePermissionImpl(tree, TreeTypeProvider.TYPE_VERSION, parentPermission);
} else {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1173_61c877d8.diff |
bugs-dot-jar_data_OAK-3021_494da6de | ---
BugID: OAK-3021
Summary: UserValidator and AccessControlValidator must not process hidden nodes
Description: This is similar to OAK-3019 but for {{UserValidator}} and {{AccessControlValidator}}.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlValidator.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlValidator.java
index 3aa11f9..0721f2b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlValidator.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlValidator.java
@@ -42,6 +42,7 @@ import org.apache.jackrabbit.oak.plugins.tree.impl.AbstractTree;
import org.apache.jackrabbit.oak.plugins.tree.impl.TreeConstants;
import org.apache.jackrabbit.oak.spi.commit.DefaultValidator;
import org.apache.jackrabbit.oak.spi.commit.Validator;
+import org.apache.jackrabbit.oak.spi.commit.VisibleValidator;
import org.apache.jackrabbit.oak.spi.security.authorization.accesscontrol.AccessControlConstants;
import org.apache.jackrabbit.oak.spi.security.authorization.restriction.Restriction;
import org.apache.jackrabbit.oak.spi.security.authorization.restriction.RestrictionProvider;
@@ -122,7 +123,7 @@ class AccessControlValidator extends DefaultValidator implements AccessControlCo
Tree treeAfter = checkNotNull(parentAfter.getChild(name));
checkValidTree(parentAfter, treeAfter, after);
- return new AccessControlValidator(this, treeAfter);
+ return newValidator(this, treeAfter);
}
@Override
@@ -130,7 +131,7 @@ class AccessControlValidator extends DefaultValidator implements AccessControlCo
Tree treeAfter = checkNotNull(parentAfter.getChild(name));
checkValidTree(parentAfter, treeAfter, after);
- return new AccessControlValidator(this, treeAfter);
+ return newValidator(this, treeAfter);
}
@Override
@@ -141,6 +142,14 @@ class AccessControlValidator extends DefaultValidator implements AccessControlCo
//------------------------------------------------------------< private >---
+ private static Validator newValidator(AccessControlValidator parent,
+ Tree parentAfter) {
+ return new VisibleValidator(
+ new AccessControlValidator(parent, parentAfter),
+ true,
+ true);
+ }
+
private void checkValidTree(Tree parentAfter, Tree treeAfter, NodeState nodeAfter) throws CommitFailedException {
if (isPolicy(treeAfter)) {
checkValidPolicy(parentAfter, treeAfter, nodeAfter);
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserValidator.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserValidator.java
index 975f5eb..7d6a2e9 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserValidator.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/user/UserValidator.java
@@ -139,12 +139,13 @@ class UserValidator extends DefaultValidator implements UserConstants {
Tree tree = checkNotNull(parentAfter.getChild(name));
validateAuthorizable(tree, UserUtil.getType(tree));
- return new VisibleValidator(new UserValidator(null, tree, provider), true, true);
+ return newValidator(null, tree, provider);
}
@Override
public Validator childNodeChanged(String name, NodeState before, NodeState after) throws CommitFailedException {
- return new UserValidator(parentBefore.getChild(name), parentAfter.getChild(name), provider);
+ return newValidator(parentBefore.getChild(name),
+ parentAfter.getChild(name), provider);
}
@Override
@@ -158,12 +159,21 @@ class UserValidator extends DefaultValidator implements UserConstants {
}
return null;
} else {
- return new VisibleValidator(new UserValidator(tree, null, provider), true, true);
+ return newValidator(tree, null, provider);
}
}
//------------------------------------------------------------< private >---
+ private static Validator newValidator(Tree parentBefore,
+ Tree parentAfter,
+ UserValidatorProvider provider) {
+ return new VisibleValidator(
+ new UserValidator(parentBefore, parentAfter, provider),
+ true,
+ true);
+ }
+
private boolean isAdminUser(@Nonnull Tree userTree) {
if (userTree.exists() && isUser(userTree)) {
String id = UserUtil.getAuthorizableId(userTree);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3021_494da6de.diff |
bugs-dot-jar_data_OAK-606_f0fbacab | ---
BugID: OAK-606
Summary: Node becomes invalid after Session#move()
Description: |-
  Moving or renaming an existing (saved) node renders that node instance
  invalid, and any subsequent access on that instance will throw IllegalStateException.
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemDelegate.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemDelegate.java
index 501cebb..426621f 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemDelegate.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemDelegate.java
@@ -83,8 +83,7 @@ public abstract class ItemDelegate {
* @return {@code true} iff stale
*/
public boolean isStale() {
- Status status = getLocationOrNull().getStatus();
- return status == Status.DISCONNECTED || status == null;
+ return !getLocationInternal().exists();
}
/**
@@ -116,7 +115,7 @@ public abstract class ItemDelegate {
*/
@Nonnull
public TreeLocation getLocation() throws InvalidItemStateException {
- TreeLocation location = getLocationOrNull();
+ TreeLocation location = getLocationInternal();
if (!location.exists()) {
throw new InvalidItemStateException("Item is stale");
}
@@ -134,12 +133,12 @@ public abstract class ItemDelegate {
/**
* The underlying {@link org.apache.jackrabbit.oak.api.TreeLocation} of this item.
* The location is only re-resolved when the revision of this item does not match
- * the revision of the session.
+ * the revision of the session or when the location does not exist (anymore).
* @return tree location of the underlying item.
*/
@Nonnull
- private synchronized TreeLocation getLocationOrNull() {
- if (location.exists() && sessionDelegate.getRevision() != revision) {
+ private synchronized TreeLocation getLocationInternal() {
+ if (sessionDelegate.getRevision() != revision || !location.exists()) {
location = sessionDelegate.getLocation(location.getPath());
revision = sessionDelegate.getRevision();
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-606_f0fbacab.diff |
bugs-dot-jar_data_OAK-3082_29e5b734 | ---
BugID: OAK-3082
Summary: Redundent entries in effective policies per principal-set
Description: 'when retrieving the effective policies for a given set of principals
the resulting array of policies contains redundant entries if a given policy contains
multiple ACEs for the given set of principals. '
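A minimal sketch of the de-duplication idea: remember which access-controlled paths already contributed a policy and skip further ACE matches for the same path. Paths are modelled as plain strings here rather than the actual query result rows.
{code}
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class EffectivePolicyDedup {

    // Collapses multiple ACE matches under the same access-controlled path
    // into a single effective policy entry per path.
    static List<String> effectivePolicyPaths(List<String> aceMatchPaths) {
        Set<String> seen = new HashSet<String>();
        List<String> policies = new ArrayList<String>();
        for (String path : aceMatchPaths) {
            if (seen.add(path)) {      // add() returns false for duplicates
                policies.add(path);
            }
        }
        return policies;
    }

    public static void main(String[] args) {
        // Two ACEs under /content match the principal set -> only one policy expected.
        List<String> matches = Arrays.asList("/content", "/content", "/apps");
        System.out.println(effectivePolicyPaths(matches)); // [/content, /apps]
    }
}
{code}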
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlManagerImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlManagerImpl.java
index 806e992..0bad785 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlManagerImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/AccessControlManagerImpl.java
@@ -22,6 +22,7 @@ import java.security.Principal;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -47,6 +48,8 @@ import javax.jcr.security.Privilege;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.primitives.Ints;
import org.apache.jackrabbit.JcrConstants;
import org.apache.jackrabbit.api.security.JackrabbitAccessControlList;
import org.apache.jackrabbit.api.security.JackrabbitAccessControlPolicy;
@@ -361,7 +364,30 @@ public class AccessControlManagerImpl extends AbstractAccessControlManager imple
Root r = getLatestRoot();
Result aceResult = searchAces(principals, r);
- List<AccessControlPolicy> effective = new ArrayList<AccessControlPolicy>();
+ Set<JackrabbitAccessControlList> effective = Sets.newTreeSet(new Comparator<JackrabbitAccessControlList>() {
+ @Override
+ public int compare(JackrabbitAccessControlList list1, JackrabbitAccessControlList list2) {
+ if (list1.equals(list2)) {
+ return 0;
+ } else {
+ String p1 = list1.getPath();
+ String p2 = list2.getPath();
+
+ if (p1 == null) {
+ return -1;
+ } else if (p2 == null) {
+ return 1;
+ } else {
+ int depth1 = PathUtils.getDepth(p1);
+ int depth2 = PathUtils.getDepth(p2);
+ return (depth1 == depth2) ? p1.compareTo(p2) : Ints.compare(depth1, depth2);
+ }
+
+ }
+ }
+ });
+
+ Set<String> paths = Sets.newHashSet();
for (ResultRow row : aceResult.getRows()) {
String acePath = row.getPath();
String aclName = Text.getName(Text.getRelativeParent(acePath, 1));
@@ -373,9 +399,13 @@ public class AccessControlManagerImpl extends AbstractAccessControlManager imple
}
String path = (REP_REPO_POLICY.equals(aclName)) ? null : accessControlledTree.getPath();
- AccessControlPolicy policy = createACL(path, accessControlledTree, true);
+ if (paths.contains(path)) {
+ continue;
+ }
+ JackrabbitAccessControlList policy = createACL(path, accessControlledTree, true);
if (policy != null) {
effective.add(policy);
+ paths.add(path);
}
}
return effective.toArray(new AccessControlPolicy[effective.size()]);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3082_29e5b734.diff |
bugs-dot-jar_data_OAK-740_35a7f014 | ---
BugID: OAK-740
Summary: Malformed solr delete query
Description: "Following OAK-734 the solr query tests are failing because of a parsing
error on the wildcard delete query.\n\nThe exact query is 'path_exact:/test*', which
apparently upsets the lucene parser somehow.\n\nFull trace:\n\n{code}\nSEVERE: org.apache.solr.common.SolrException:
org.apache.lucene.queryparser.classic.ParseException: Cannot parse 'path_exact:/test*':
Lexical error at line 1, column 18. Encountered: <EOF> after : \"/test*\"\n\tat
org.apache.solr.update.DirectUpdateHandler2.getQuery(DirectUpdateHandler2.java:328)\n\tat
org.apache.solr.update.DirectUpdateHandler2.deleteByQuery(DirectUpdateHandler2.java:340)\n\tat
org.apache.solr.update.processor.RunUpdateProcessor.processDelete(RunUpdateProcessorFactory.java:72)\n\tat
org.apache.solr.update.processor.UpdateRequestProcessor.processDelete(UpdateRequestProcessor.java:55)\n\tat
org.apache.solr.update.processor.DistributedUpdateProcessor.doLocalDelete(DistributedUpdateProcessor.java:437)\n\tat
org.apache.solr.update.processor.DistributedUpdateProcessor.doDeleteByQuery(DistributedUpdateProcessor.java:835)\n\tat
org.apache.solr.update.processor.DistributedUpdateProcessor.processDelete(DistributedUpdateProcessor.java:657)\n\tat
org.apache.solr.update.processor.LogUpdateProcessor.processDelete(LogUpdateProcessorFactory.java:121)\n\tat
org.apache.solr.handler.loader.XMLLoader.processDelete(XMLLoader.java:330)\n\tat
org.apache.solr.handler.loader.XMLLoader.processUpdate(XMLLoader.java:261)\n\tat
org.apache.solr.handler.loader.XMLLoader.load(XMLLoader.java:157)\n\tat org.apache.solr.handler.UpdateRequestHandler$1.load(UpdateRequestHandler.java:92)\n\tat
org.apache.solr.handler.ContentStreamHandlerBase.handleRequestBody(ContentStreamHandlerBase.java:74)\n\tat
org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:129)\n\tat
org.apache.solr.core.SolrCore.execute(SolrCore.java:1699)\n\tat org.apache.solr.client.solrj.embedded.EmbeddedSolrServer.request(EmbeddedSolrServer.java:150)\n\tat
org.apache.solr.client.solrj.request.AbstractUpdateRequest.process(AbstractUpdateRequest.java:117)\n\tat
org.apache.solr.client.solrj.SolrServer.deleteByQuery(SolrServer.java:285)\n\tat
org.apache.solr.client.solrj.SolrServer.deleteByQuery(SolrServer.java:271)\n\tat
org.apache.jackrabbit.oak.plugins.index.solr.index.SolrIndexUpdate.deleteSubtreeWriter(SolrIndexUpdate.java:161)\n\tat
org.apache.jackrabbit.oak.plugins.index.solr.index.SolrIndexUpdate.apply(SolrIndexUpdate.java:98)\n\tat
org.apache.jackrabbit.oak.plugins.index.solr.index.SolrIndexDiff.leave(SolrIndexDiff.java:202)\n\tat
org.apache.jackrabbit.oak.spi.commit.CompositeEditor.leave(CompositeEditor.java:74)\n\tat
org.apache.jackrabbit.oak.plugins.index.IndexHookManagerDiff.leave(IndexHookManagerDiff.java:117)\n\tat
org.apache.jackrabbit.oak.spi.commit.EditorHook$EditorDiff.process(EditorHook.java:115)\n\tat
org.apache.jackrabbit.oak.spi.commit.EditorHook.process(EditorHook.java:80)\n\tat
org.apache.jackrabbit.oak.spi.commit.EditorHook.processCommit(EditorHook.java:54)\n\tat
org.apache.jackrabbit.oak.kernel.KernelNodeStoreBranch.merge(KernelNodeStoreBranch.java:144)\n\tat
org.apache.jackrabbit.oak.core.RootImpl$2.run(RootImpl.java:266)\n\tat org.apache.jackrabbit.oak.core.RootImpl$2.run(RootImpl.java:1)\n\tat
java.security.AccessController.doPrivileged(Native Method)\n\tat javax.security.auth.Subject.doAs(Subject.java:337)\n\tat
org.apache.jackrabbit.oak.core.RootImpl.commit(RootImpl.java:261)\n\tat org.apache.jackrabbit.oak.query.AbstractQueryTest.test(AbstractQueryTest.java:236)\n\tat
org.apache.jackrabbit.oak.plugins.index.solr.query.SolrIndexQueryTest.sql2(SolrIndexQueryTest.java:79)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n\tat
java.lang.reflect.Method.invoke(Method.java:597)\n\tat org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)\n\tat
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)\n\tat
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)\n\tat
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)\n\tat
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)\n\tat
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)\n\tat
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:76)\n\tat
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:50)\n\tat
org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)\n\tat org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)\n\tat
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)\n\tat org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)\n\tat
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)\n\tat org.junit.runners.ParentRunner.run(ParentRunner.java:236)\n\tat
org.eclipse.jdt.internal.junit4.runner.JUnit4TestReference.run(JUnit4TestReference.java:50)\n\tat
org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390)\n\tat
org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197)\nCaused
by: org.apache.lucene.queryparser.classic.ParseException: Cannot parse 'path_exact:/test*':
Lexical error at line 1, column 18. Encountered: <EOF> after : \"/test*\"\n\tat
org.apache.lucene.queryparser.classic.QueryParserBase.parse(QueryParserBase.java:130)\n\tat
org.apache.solr.search.LuceneQParser.parse(LuceneQParserPlugin.java:72)\n\tat org.apache.solr.search.QParser.getQuery(QParser.java:143)\n\tat
org.apache.solr.update.DirectUpdateHandler2.getQuery(DirectUpdateHandler2.java:310)\n\t...
58 more\nCaused by: org.apache.lucene.queryparser.classic.TokenMgrError: Lexical
error at line 1, column 18. Encountered: <EOF> after : \"/test*\"\n\tat org.apache.lucene.queryparser.classic.QueryParserTokenManager.getNextToken(QueryParserTokenManager.java:1048)\n\tat
org.apache.lucene.queryparser.classic.QueryParser.jj_ntk(QueryParser.java:638)\n\tat
org.apache.lucene.queryparser.classic.QueryParser.Clause(QueryParser.java:246)\n\tat
org.apache.lucene.queryparser.classic.QueryParser.Query(QueryParser.java:181)\n\tat
org.apache.lucene.queryparser.classic.QueryParser.TopLevelQuery(QueryParser.java:170)\n\tat
org.apache.lucene.queryparser.classic.QueryParserBase.parse(QueryParserBase.java:120)\n\t...
61 more\n{code}"
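A minimal sketch of the escaping applied in the fix below: forward slashes in the path are escaped so the classic Lucene query parser accepts the wildcard delete-by-query expression. Only the field name 'path_exact' is taken from the trace; everything else is illustrative.
{code}
public class SolrDeleteQuerySketch {

    // Builds a delete-by-query expression for all documents under the given path.
    static String deleteQuery(String pathField, String path) {
        if (!path.startsWith("/")) {
            path = "/" + path;
        }
        // Escape '/' so the classic query parser does not choke on "path_exact:/test*".
        String escaped = path.replace("/", "\\/");
        return pathField + ":" + escaped + "*";
    }

    public static void main(String[] args) {
        System.out.println(deleteQuery("path_exact", "/test")); // path_exact:\/test*
    }
}
{code}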
diff --git a/oak-solr-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/solr/index/SolrIndexUpdate.java b/oak-solr-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/solr/index/SolrIndexUpdate.java
index 3cdc05c..45cc00d 100644
--- a/oak-solr-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/solr/index/SolrIndexUpdate.java
+++ b/oak-solr-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/solr/index/SolrIndexUpdate.java
@@ -152,6 +152,7 @@ public class SolrIndexUpdate implements Closeable {
if (!path.startsWith("/")) {
path = "/" + path;
}
+ path = path.replace("/", "\\/");
solrServer.deleteByQuery(new StringBuilder(configuration.getPathField())
.append(':').append(path).append("*").toString());
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-740_35a7f014.diff |
bugs-dot-jar_data_OAK-4166_374e3f3d | ---
BugID: OAK-4166
Summary: Simple versionable nodes are invalid after migration
Description: 'OAK-3836 introduces support for migrating {{mix:simpleVersionable}}
  nodes from JCR2 to {{mix:versionable}} nodes in Oak. It changes the mixin type;
  however, it doesn''t add the required properties: {{jcr:versionHistory}}, {{jcr:baseVersion}}
  and {{jcr:predecessors}}. As a result, versioning-related methods invoked on such
  nodes don''t work correctly.'
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
index 7866301..9fb3b37 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
@@ -117,6 +117,7 @@ import org.apache.jackrabbit.oak.upgrade.security.GroupEditorProvider;
import org.apache.jackrabbit.oak.upgrade.security.RestrictionEditorProvider;
import org.apache.jackrabbit.oak.upgrade.version.VersionCopyConfiguration;
import org.apache.jackrabbit.oak.upgrade.version.VersionableEditor;
+import org.apache.jackrabbit.oak.upgrade.version.VersionablePropertiesEditor;
import org.apache.jackrabbit.spi.Name;
import org.apache.jackrabbit.spi.QNodeDefinition;
import org.apache.jackrabbit.spi.QNodeTypeDefinition;
@@ -470,6 +471,10 @@ public class RepositoryUpgrade {
new SameNameSiblingsEditor.Provider()
)));
+ // this editor works on the VersionableEditor output, so it can't be
+ // a part of the same EditorHook
+ hooks.add(new EditorHook(new VersionablePropertiesEditor.Provider()));
+
// security-related hooks
for (SecurityConfiguration sc : security.getConfigurations()) {
hooks.addAll(sc.getCommitHooks(workspaceName));
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/version/VersionablePropertiesEditor.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/version/VersionablePropertiesEditor.java
new file mode 100644
index 0000000..a9c61e8
--- /dev/null
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/version/VersionablePropertiesEditor.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.upgrade.version;
+
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.plugins.nodetype.TypePredicate;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.DefaultEditor;
+import org.apache.jackrabbit.oak.spi.commit.Editor;
+import org.apache.jackrabbit.oak.spi.commit.EditorProvider;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.apache.jackrabbit.JcrConstants.JCR_BASEVERSION;
+import static org.apache.jackrabbit.JcrConstants.JCR_FROZENMIXINTYPES;
+import static org.apache.jackrabbit.JcrConstants.JCR_ISCHECKEDOUT;
+import static org.apache.jackrabbit.JcrConstants.JCR_MIXINTYPES;
+import static org.apache.jackrabbit.JcrConstants.JCR_PREDECESSORS;
+import static org.apache.jackrabbit.JcrConstants.JCR_ROOTVERSION;
+import static org.apache.jackrabbit.JcrConstants.JCR_SUCCESSORS;
+import static org.apache.jackrabbit.JcrConstants.JCR_UUID;
+import static org.apache.jackrabbit.JcrConstants.JCR_VERSIONHISTORY;
+import static org.apache.jackrabbit.JcrConstants.MIX_VERSIONABLE;
+import static org.apache.jackrabbit.JcrConstants.NT_FROZENNODE;
+import static org.apache.jackrabbit.JcrConstants.NT_VERSION;
+import static org.apache.jackrabbit.oak.api.Type.NAMES;
+import static org.apache.jackrabbit.oak.api.Type.REFERENCE;
+import static org.apache.jackrabbit.oak.api.Type.REFERENCES;
+import static org.apache.jackrabbit.oak.plugins.memory.MultiGenericPropertyState.nameProperty;
+import static org.apache.jackrabbit.oak.upgrade.version.VersionHistoryUtil.getVersionHistoryNodeState;
+
+/**
+ * The VersionablePropertiesEditor adds missing versionable properties.
+ */
+public final class VersionablePropertiesEditor extends DefaultEditor {
+
+ private static final String MIX_SIMPLE_VERSIONABLE = "mix:simpleVersionable";
+
+ private static final Logger log = LoggerFactory.getLogger(VersionablePropertiesEditor.class);
+
+ private final NodeBuilder rootBuilder;
+
+ private final NodeBuilder builder;
+
+ private final TypePredicate isVersionable;
+
+ private final TypePredicate isSimpleVersionable;
+
+ private final TypePredicate isNtVersion;
+
+ private final TypePredicate isFrozenNode;
+
+ private VersionablePropertiesEditor(NodeBuilder rootBuilder) {
+ this.builder = rootBuilder;
+ this.rootBuilder = rootBuilder;
+ this.isVersionable = new TypePredicate(rootBuilder.getNodeState(), MIX_VERSIONABLE);
+ this.isSimpleVersionable = new TypePredicate(rootBuilder.getNodeState(), MIX_SIMPLE_VERSIONABLE);
+ this.isNtVersion = new TypePredicate(rootBuilder.getNodeState(), NT_VERSION);
+ this.isFrozenNode = new TypePredicate(rootBuilder.getNodeState(), NT_FROZENNODE);
+ }
+
+ private VersionablePropertiesEditor(VersionablePropertiesEditor parent, NodeBuilder builder) {
+ this.builder = builder;
+ this.rootBuilder = parent.rootBuilder;
+ this.isVersionable = parent.isVersionable;
+ this.isSimpleVersionable = parent.isSimpleVersionable;
+ this.isNtVersion = parent.isNtVersion;
+ this.isFrozenNode = parent.isFrozenNode;
+ }
+
+ public static class Provider implements EditorProvider {
+ @Override
+ public Editor getRootEditor(NodeState before, NodeState after, NodeBuilder builder, CommitInfo info)
+ throws CommitFailedException {
+ return new VersionablePropertiesEditor(builder);
+ }
+
+ @Override
+ public String toString() {
+ return "VersionablePropertiesEditor";
+ }
+ }
+
+ @Override
+ public Editor childNodeAdded(String name, NodeState after) throws CommitFailedException {
+ NodeBuilder nodeBuilder = builder.getChildNode(name);
+ if (isVersionable.apply(after)) {
+ fixProperties(nodeBuilder);
+ } else if (isFrozenNode.apply(after)) {
+ updateFrozenMixins(nodeBuilder);
+ }
+ return new VersionablePropertiesEditor(this, nodeBuilder);
+ }
+
+ @Override
+ public Editor childNodeChanged(String name, NodeState before, NodeState after) throws CommitFailedException {
+ return childNodeAdded(name, after);
+ }
+
+ private static boolean updateFrozenMixins(NodeBuilder builder) {
+ if (builder.hasProperty(JCR_FROZENMIXINTYPES)) {
+ final Set<String> mixins = newHashSet(builder.getProperty(JCR_FROZENMIXINTYPES).getValue(NAMES));
+ if (mixins.remove(MIX_SIMPLE_VERSIONABLE)) {
+ mixins.add(MIX_VERSIONABLE);
+ builder.setProperty(nameProperty(JCR_FROZENMIXINTYPES, mixins));
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private void fixProperties(NodeBuilder node) {
+ NodeState versionHistory = getVersionHistoryNodeState(rootBuilder.getNodeState(), node.getString(JCR_UUID));
+ if (!versionHistory.exists()) {
+ log.warn("No version history for {}", node);
+ return;
+ }
+
+ Set<String> updated = new HashSet<>();
+ if (!node.hasProperty(JCR_VERSIONHISTORY)) {
+ node.setProperty(JCR_VERSIONHISTORY, versionHistory.getString(JCR_UUID), REFERENCE);
+ updated.add(JCR_VERSIONHISTORY);
+ }
+
+ String baseVersion = null;
+ if (!node.hasProperty(JCR_BASEVERSION)) {
+ baseVersion = getLastVersion(versionHistory);
+ node.setProperty(JCR_BASEVERSION, baseVersion, REFERENCE);
+ updated.add(JCR_BASEVERSION);
+ }
+
+ if (!node.hasProperty(JCR_PREDECESSORS)) {
+ baseVersion = baseVersion == null ? getLastVersion(versionHistory) : baseVersion;
+
+ List<String> predecessors = new ArrayList<>();
+ if (node.getBoolean(JCR_ISCHECKEDOUT)) {
+ predecessors.add(baseVersion);
+ }
+ node.setProperty(JCR_PREDECESSORS, predecessors, REFERENCES);
+ updated.add(JCR_PREDECESSORS);
+ }
+
+ if (!updated.isEmpty()) {
+ log.info("Updated versionable properties {} for {}", updated, node);
+ }
+ }
+
+ private String getLastVersion(NodeState versionHistory) {
+ NodeState lastVersion = versionHistory.getChildNode(JCR_ROOTVERSION);
+ for (ChildNodeEntry child : versionHistory.getChildNodeEntries()) {
+ NodeState v = child.getNodeState();
+ if (!isNtVersion.apply(v)) {
+ continue;
+ }
+ if (v.getProperty(JCR_SUCCESSORS).count() == 0) { // no successors
+ lastVersion = v;
+ }
+ }
+ return lastVersion.getString(JCR_UUID);
+ }
+}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4166_374e3f3d.diff |
bugs-dot-jar_data_OAK-1655_01a8b283 | ---
BugID: OAK-1655
Summary: DataStoreBlobStore does not take into maxLastModifiedTime when fetching all
chunks
Description: |-
Currently the {{DataStoreBlobStore}} has a pending TODO
{code}
@Override
public Iterator<String> getAllChunkIds(long maxLastModifiedTime) throws Exception {
//TODO Ignores the maxLastModifiedTime currently.
return Iterators.transform(delegate.getAllIdentifiers(), new Function<DataIdentifier, String>() {
@Nullable
@Override
public String apply(@Nullable DataIdentifier input) {
return input.toString();
}
});
}
{code}
    Due to this it currently returns all blob ids. This causes an issue when a new binary gets created while a blob GC is running, as such binaries might be considered orphaned and deleted.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
index b055278..3ad49ad 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java
@@ -32,6 +32,7 @@ import javax.annotation.Nullable;
import javax.jcr.RepositoryException;
import com.google.common.base.Function;
+import com.google.common.base.Predicate;
import com.google.common.collect.Iterators;
import com.google.common.io.ByteStreams;
import com.google.common.io.Closeables;
@@ -47,6 +48,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.collect.Iterators.filter;
+import static com.google.common.collect.Iterators.transform;
/**
* BlobStore wrapper for DataStore. Wraps Jackrabbit 2 DataStore and expose them as BlobStores
@@ -258,12 +261,23 @@ public class DataStoreBlobStore implements DataStore, BlobStore, GarbageCollecta
}
@Override
- public Iterator<String> getAllChunkIds(long maxLastModifiedTime) throws Exception {
- //TODO Ignores the maxLastModifiedTime currently.
- return Iterators.transform(delegate.getAllIdentifiers(), new Function<DataIdentifier, String>() {
- @Nullable
+ public Iterator<String> getAllChunkIds(final long maxLastModifiedTime) throws Exception {
+ return transform(filter(delegate.getAllIdentifiers(), new Predicate<DataIdentifier>() {
@Override
- public String apply(@Nullable DataIdentifier input) {
+ public boolean apply(DataIdentifier input) {
+ try {
+ DataRecord dr = delegate.getRecord(input);
+ if(dr != null && dr.getLastModified() < maxLastModifiedTime){
+ return true;
+ }
+ } catch (DataStoreException e) {
+ log.warn("Error occurred while fetching DataRecord for identifier {}",input, e);
+ }
+ return false;
+ }
+ }),new Function<DataIdentifier, String>() {
+ @Override
+ public String apply(DataIdentifier input) {
return input.toString();
}
});
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1655_01a8b283.diff |
bugs-dot-jar_data_OAK-2642_36fe017c | ---
BugID: OAK-2642
Summary: DocumentNodeStore.dispose() may leave repository in an inconsistent state
Description: "The repository may become inconsistent when a commit happens while the
DocumentNodeStore is disposed. \n\nThe node store writes back pending _lastRevs
and then unset the active flag in the clusterNodes collection. It is possible a
commit gets through even after the _lastRevs had been updated and the active flag
is cleared. This means the missing _lastRev updates will not be recovered on a restart
or by another cluster node."
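A minimal, self-contained sketch of the guard the patch introduces: commits take a read lock and then verify the store is still open, and dispose flips the flag before flushing pending state, so no commit can slip in after the final write-back. The flag/lock layout illustrates the idea only and does not mirror the actual DocumentNodeStore fields.
{code}
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DisposeGuardSketch {

    private final AtomicBoolean disposed = new AtomicBoolean(false);
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    void commit(Runnable work) {
        lock.readLock().lock();
        try {
            if (disposed.get()) {
                // reject late commits instead of silently losing their updates
                throw new IllegalStateException("This store is disposed");
            }
            work.run();
        } finally {
            lock.readLock().unlock();
        }
    }

    void dispose(Runnable finalBackgroundWrite) {
        if (disposed.getAndSet(true)) {
            return;                              // dispose only once
        }
        lock.writeLock().lock();                 // wait for in-flight commits to drain
        try {
            finalBackgroundWrite.run();          // flush pending state as the last step
        } finally {
            lock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        DisposeGuardSketch store = new DisposeGuardSketch();
        store.commit(() -> System.out.println("commit accepted"));
        store.dispose(() -> System.out.println("final background write"));
        try {
            store.commit(() -> System.out.println("never runs"));
        } catch (IllegalStateException expected) {
            System.out.println("late commit rejected: " + expected.getMessage());
        }
    }
}
{code}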
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 043f344..2de0d39 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -459,40 +459,50 @@ public final class DocumentNodeStore
}
public void dispose() {
- runBackgroundOperations();
- if (!isDisposed.getAndSet(true)) {
- synchronized (isDisposed) {
- isDisposed.notifyAll();
- }
+ if (isDisposed.getAndSet(true)) {
+ // only dispose once
+ return;
+ }
+ // notify background threads waiting on isDisposed
+ synchronized (isDisposed) {
+ isDisposed.notifyAll();
+ }
+ try {
+ backgroundThread.join();
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ // do a final round of background operations after
+ // the background thread stopped
+ internalRunBackgroundOperations();
+
+ if (leaseUpdateThread != null) {
try {
- backgroundThread.join();
+ leaseUpdateThread.join();
} catch (InterruptedException e) {
// ignore
}
- if (leaseUpdateThread != null) {
- try {
- leaseUpdateThread.join();
- } catch (InterruptedException e) {
- // ignore
- }
- }
- if (clusterNodeInfo != null) {
- clusterNodeInfo.dispose();
- }
- store.dispose();
- LOG.info("Disposed DocumentNodeStore with clusterNodeId: {}", clusterId);
-
- if (blobStore instanceof Closeable) {
- try {
- ((Closeable) blobStore).close();
- } catch (IOException ex) {
- LOG.debug("Error closing blob store " + blobStore, ex);
- }
+ }
+
+ // now mark this cluster node as inactive by
+ // disposing the clusterNodeInfo
+ if (clusterNodeInfo != null) {
+ clusterNodeInfo.dispose();
+ }
+ store.dispose();
+
+ if (blobStore instanceof Closeable) {
+ try {
+ ((Closeable) blobStore).close();
+ } catch (IOException ex) {
+ LOG.debug("Error closing blob store " + blobStore, ex);
}
}
if (persistentCache != null) {
persistentCache.close();
}
+ LOG.info("Disposed DocumentNodeStore with clusterNodeId: {}", clusterId);
}
Revision setHeadRevision(@Nonnull Revision newHead) {
@@ -544,6 +554,7 @@ public final class DocumentNodeStore
base = headRevision;
}
backgroundOperationLock.readLock().lock();
+ checkOpen();
boolean success = false;
Commit c;
try {
@@ -573,6 +584,7 @@ public final class DocumentNodeStore
base = headRevision;
}
backgroundOperationLock.readLock().lock();
+ checkOpen();
boolean success = false;
MergeCommit c;
try {
@@ -1502,48 +1514,49 @@ public final class DocumentNodeStore
//----------------------< background operations >---------------------------
- public synchronized void runBackgroundOperations() {
+ public void runBackgroundOperations() {
if (isDisposed.get()) {
return;
}
- if (simpleRevisionCounter != null) {
- // only when using timestamp
- return;
- }
try {
- long start = clock.getTime();
- long time = start;
- // clean orphaned branches and collisions
- cleanOrphanedBranches();
- cleanCollisions();
- long cleanTime = clock.getTime() - time;
- time = clock.getTime();
- // split documents (does not create new revisions)
- backgroundSplit();
- long splitTime = clock.getTime() - time;
- time = clock.getTime();
- // write back pending updates to _lastRev
- backgroundWrite();
- long writeTime = clock.getTime() - time;
- time = clock.getTime();
- // pull in changes from other cluster nodes
- BackgroundReadStats readStats = backgroundRead(true);
- long readTime = clock.getTime() - time;
- String msg = "Background operations stats (clean:{}, split:{}, write:{}, read:{} {})";
- if (clock.getTime() - start > TimeUnit.SECONDS.toMillis(10)) {
- // log as info if it took more than 10 seconds
- LOG.info(msg, cleanTime, splitTime, writeTime, readTime, readStats);
- } else {
- LOG.debug(msg, cleanTime, splitTime, writeTime, readTime, readStats);
- }
+ internalRunBackgroundOperations();
} catch (RuntimeException e) {
if (isDisposed.get()) {
+ LOG.warn("Background operation failed: " + e.toString(), e);
return;
}
throw e;
}
}
+ private synchronized void internalRunBackgroundOperations() {
+ long start = clock.getTime();
+ long time = start;
+ // clean orphaned branches and collisions
+ cleanOrphanedBranches();
+ cleanCollisions();
+ long cleanTime = clock.getTime() - time;
+ time = clock.getTime();
+ // split documents (does not create new revisions)
+ backgroundSplit();
+ long splitTime = clock.getTime() - time;
+ time = clock.getTime();
+ // write back pending updates to _lastRev
+ backgroundWrite();
+ long writeTime = clock.getTime() - time;
+ time = clock.getTime();
+ // pull in changes from other cluster nodes
+ BackgroundReadStats readStats = backgroundRead(true);
+ long readTime = clock.getTime() - time;
+ String msg = "Background operations stats (clean:{}, split:{}, write:{}, read:{} {})";
+ if (clock.getTime() - start > TimeUnit.SECONDS.toMillis(10)) {
+ // log as info if it took more than 10 seconds
+ LOG.info(msg, cleanTime, splitTime, writeTime, readTime, readStats);
+ } else {
+ LOG.debug(msg, cleanTime, splitTime, writeTime, readTime, readStats);
+ }
+ }
+
/**
* Renews the cluster lease if necessary.
*
@@ -1771,6 +1784,19 @@ public final class DocumentNodeStore
//-----------------------------< internal >---------------------------------
+ /**
+ * Checks if this store is still open and throws an
+ * {@link IllegalStateException} if it is already disposed (or a dispose
+ * is in progress).
+ *
+ * @throws IllegalStateException if this store is disposed.
+ */
+ private void checkOpen() throws IllegalStateException {
+ if (isDisposed.get()) {
+ throw new IllegalStateException("This DocumentNodeStore is disposed");
+ }
+ }
+
private boolean dispatch(@Nonnull String jsonDiff,
@Nonnull DocumentNodeState node,
@Nonnull DocumentNodeState base,
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2642_36fe017c.diff |
bugs-dot-jar_data_OAK-1270_70564c7c | ---
BugID: OAK-1270
Summary: 'Revisit full-text queries in case of multiple tokens '
Description: |-
  There's still an issue with tokenizing search terms when, for example, the searched fulltext term splits into two actual terms because of the analyzer.
  Taking 'hello-world*', this breaks into the two tokens 'hello' and 'world*', which does not work when treated as a PhraseQuery, so the plan is to change this into a MultiPhraseQuery built from the simple tokens provided plus all existing index terms that match the wildcard pattern.
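A hedged sketch against the Lucene 4.x MultiPhraseQuery API used in the patch below: the plain token occupies one phrase position, while the wildcard position is filled with every index term that matches. The matching terms are hard-coded here (the patch extracts them from the index via a TermsEnum), and the field name is made up.
{code}
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;

public class MultiPhraseSketch {

    public static void main(String[] args) {
        String field = "text";   // illustrative field name

        // "hello-world*" is analyzed into the tokens "hello" and "world*".
        MultiPhraseQuery query = new MultiPhraseQuery();

        // Plain token: a single term at this phrase position.
        query.add(new Term(field, "hello"));

        // Wildcard token: all index terms matching "world*" share the next position.
        query.add(new Term[] {
                new Term(field, "world"),
                new Term(field, "worldwide")
        });

        System.out.println(query); // e.g. text:"hello (world worldwide)"
    }
}
{code}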
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
index 8889094..0e24834 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
@@ -73,10 +73,13 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
@@ -87,6 +90,9 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -416,7 +422,7 @@ public class LuceneIndex implements FulltextQueryIndex {
// when using the LowCostLuceneIndexProvider
// which is used for testing
} else {
- qs.add(getFullTextQuery(ft, analyzer));
+ qs.add(getFullTextQuery(ft, analyzer, reader));
}
if (nonFullTextConstraints) {
addNonFullTextConstraints(qs, filter, reader);
@@ -582,7 +588,7 @@ public class LuceneIndex implements FulltextQueryIndex {
qs.add(bq);
}
- static Query getFullTextQuery(FullTextExpression ft, final Analyzer analyzer) {
+ static Query getFullTextQuery(FullTextExpression ft, final Analyzer analyzer, final IndexReader reader) {
// a reference to the query, so it can be set in the visitor
// (a "non-local return")
final AtomicReference<Query> result = new AtomicReference<Query>();
@@ -592,7 +598,7 @@ public class LuceneIndex implements FulltextQueryIndex {
public boolean visit(FullTextOr or) {
BooleanQuery q = new BooleanQuery();
for (FullTextExpression e : or.list) {
- Query x = getFullTextQuery(e, analyzer);
+ Query x = getFullTextQuery(e, analyzer, reader);
q.add(x, SHOULD);
}
result.set(q);
@@ -603,7 +609,7 @@ public class LuceneIndex implements FulltextQueryIndex {
public boolean visit(FullTextAnd and) {
BooleanQuery q = new BooleanQuery();
for (FullTextExpression e : and.list) {
- Query x = getFullTextQuery(e, analyzer);
+ Query x = getFullTextQuery(e, analyzer, reader);
// Lucene can't deal with "must(must_not(x))"
if (x instanceof BooleanQuery) {
BooleanQuery bq = (BooleanQuery) x;
@@ -625,7 +631,7 @@ public class LuceneIndex implements FulltextQueryIndex {
// do not add constraints on child nodes properties
p = "*";
}
- Query q = tokenToQuery(term.getText(), analyzer);
+ Query q = tokenToQuery(term.getText(), analyzer, reader);
if (q == null) {
return false;
}
@@ -646,7 +652,7 @@ public class LuceneIndex implements FulltextQueryIndex {
return result.get();
}
- static Query tokenToQuery(String text, Analyzer analyzer) {
+ static Query tokenToQuery(String text, Analyzer analyzer, IndexReader reader) {
if (analyzer == null) {
return null;
}
@@ -657,29 +663,76 @@ public class LuceneIndex implements FulltextQueryIndex {
// TODO what should be returned in the case there are no tokens?
return new BooleanQuery();
}
-
if (tokens.size() == 1) {
- text = tokens.iterator().next();
- boolean hasFulltextToken = false;
- for (char c : fulltextTokens) {
- if (text.indexOf(c) != -1) {
- hasFulltextToken = true;
- break;
+ String token = tokens.iterator().next();
+ if (hasFulltextToken(token)) {
+ return new WildcardQuery(newFulltextTerm(token));
+ } else {
+ return new TermQuery(newFulltextTerm(token));
+ }
+ } else {
+ if (hasFulltextToken(tokens)) {
+ MultiPhraseQuery mpq = new MultiPhraseQuery();
+ for(String token: tokens){
+ if (hasFulltextToken(token)) {
+ Term[] terms = extractMatchingTokens(reader, token);
+ if (terms != null && terms.length > 0) {
+ mpq.add(terms);
+ }
+ } else {
+ mpq.add(newFulltextTerm(token));
+ }
}
+ return mpq;
+ } else {
+ PhraseQuery pq = new PhraseQuery();
+ for (String t : tokens) {
+ pq.add(newFulltextTerm(t));
+ }
+ return pq;
}
+ }
+ }
- if (hasFulltextToken) {
- return new WildcardQuery(newFulltextTerm(text));
- } else {
- return new TermQuery(newFulltextTerm(text));
+ private static Term[] extractMatchingTokens(IndexReader reader, String token) {
+ if (reader == null) {
+ // getPlan call
+ return null;
+ }
+
+ try {
+ List<Term> terms = new ArrayList<Term>();
+ Terms t = MultiFields.getTerms(reader, FieldNames.FULLTEXT);
+ Automaton a = WildcardQuery.toAutomaton(newFulltextTerm(token));
+ CompiledAutomaton ca = new CompiledAutomaton(a);
+ TermsEnum te = ca.getTermsEnum(t);
+ BytesRef text;
+ while ((text = te.next()) != null) {
+ terms.add(newFulltextTerm(text.utf8ToString()));
}
- } else {
- PhraseQuery pq = new PhraseQuery();
- for (String t : tokens) {
- pq.add(newFulltextTerm(t));
+ return terms.toArray(new Term[terms.size()]);
+ } catch (IOException e) {
+ LOG.error("Building fulltext query failed", e.getMessage());
+ return null;
+ }
+ }
+
+ private static boolean hasFulltextToken(List<String> tokens) {
+ for (String token : tokens) {
+ if (hasFulltextToken(token)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private static boolean hasFulltextToken(String token) {
+ for (char c : fulltextTokens) {
+ if (token.indexOf(c) != -1) {
+ return true;
}
- return pq;
}
+ return false;
}
private static char[] fulltextTokens = new char[] { '*', '?' };
@@ -727,6 +780,7 @@ public class LuceneIndex implements FulltextQueryIndex {
poz = end;
if (hasFulltextToken) {
token.append(term);
+ hasFulltextToken = false;
} else {
if (token.length() > 0) {
tokens.add(token.toString());
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1270_70564c7c.diff |
bugs-dot-jar_data_OAK-3411_978c77ff | ---
BugID: OAK-3411
Summary: Inconsistent read on DocumentNodeStore startup
Description: |-
This is a regression introduced with OAK-2929. On DocumentNodeStore startup the RevisionComparator of the local instance is initialized with the current _lastRev entries from the other cluster nodes. The external _lastRev entries are 'seenAt' the same revision, which means for those revisions the RevisionComparator will use the clusterId to compare them. This is also described in OAK-3388.
  OAK-2929 changed the order in which revisions are checked for conflicts from StableRevisionComparator to RevisionComparator. This makes the conflict check susceptible to the revision comparison behaviour described in OAK-3388: commits may be rejected with a conflict even when there isn't really one.
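A hedged sketch of the Guava call the fix relies on: two inputs that are already sorted by the same comparator are merged lazily with Iterables.mergeSorted, keeping the order of the underlying sorted maps instead of re-sorting them into a new TreeSet with a different comparator. Plain integers stand in for revisions here.
{code}
import java.util.Arrays;
import java.util.List;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Ordering;

public class MergeSortedSketch {

    public static void main(String[] args) {
        // Two inputs, each already sorted by the same comparator
        // (standing in for the local _revisions and _commitRoot key sets).
        List<Integer> revisions = Arrays.asList(1, 4, 7);
        List<Integer> commitRoots = Arrays.asList(2, 4, 9);

        Iterable<Integer> merged = Iterables.mergeSorted(
                ImmutableList.of(revisions, commitRoots),
                Ordering.<Integer>natural());

        // One lazily merged, sorted pass over both inputs: [1, 2, 4, 4, 7, 9]
        System.out.println(ImmutableList.copyOf(merged));
    }
}
{code}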
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 2317b57..1004e06 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -37,6 +37,7 @@ import javax.annotation.Nullable;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.AbstractIterator;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.Queues;
import org.apache.jackrabbit.oak.cache.CacheValue;
@@ -57,7 +58,6 @@ import com.google.common.collect.Sets;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
-import static java.util.Collections.reverseOrder;
import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES;
import static org.apache.jackrabbit.oak.plugins.document.StableRevisionComparator.REVERSE;
import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key;
@@ -737,11 +737,11 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
Revision newestRev = null;
// check local commits first
- Comparator<Revision> comp = reverseOrder(context.getRevisionComparator());
- SortedSet<Revision> revisions = Sets.newTreeSet(comp);
- revisions.addAll(getLocalRevisions().keySet());
- revisions.addAll(getLocalCommitRoot().keySet());
- Iterator<Revision> it = filter(revisions, predicate).iterator();
+ SortedMap<Revision, String> revisions = getLocalRevisions();
+ SortedMap<Revision, String> commitRoots = getLocalCommitRoot();
+ Iterator<Revision> it = filter(Iterables.mergeSorted(
+ ImmutableList.of(revisions.keySet(), commitRoots.keySet()),
+ revisions.comparator()), predicate).iterator();
if (it.hasNext()) {
newestRev = it.next();
} else {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3411_978c77ff.diff |
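The fix replaces copying both local maps into a reverse-ordered TreeSet with a lazy Guava merge of the two already-sorted key sets, ordered by the maps' own (stable) comparator rather than the RevisionComparator that caused the false conflicts. A sketch of that merge pattern under assumed, simplified types (String keys instead of Revision, made-up values; not Oak code):
{code}
import java.util.Comparator;
import java.util.Iterator;
import java.util.SortedMap;
import java.util.TreeMap;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class MergeSortedKeysExample {

    public static void main(String[] args) {
        // Two maps sharing the same ordering, standing in for the local
        // "revisions" and "commitRoot" maps of a NodeDocument.
        Comparator<String> newestFirst = Comparator.reverseOrder();
        SortedMap<String, String> revisions = new TreeMap<>(newestFirst);
        SortedMap<String, String> commitRoots = new TreeMap<>(newestFirst);
        revisions.put("r3", "c");
        revisions.put("r1", "c");
        commitRoots.put("r2", "/");

        // Lazily merge the two already-sorted key sets instead of copying
        // everything into a fresh TreeSet; the patch passes revisions.comparator(),
        // which is the same comparator object used here.
        Iterator<String> it = Iterables.mergeSorted(
                ImmutableList.of(revisions.keySet(), commitRoots.keySet()),
                newestFirst).iterator();

        while (it.hasNext()) {
            System.out.print(it.next() + " "); // r3 r2 r1
        }
    }
}
{code}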
bugs-dot-jar_data_OAK-1035_b2ca8baa | ---
BugID: OAK-1035
Summary: 'Property Index: cost calculation is wrong (zero) when searching for many
values'
Description: "Currently, for queries of the form \n\n{code}\nselect * from [nt:unstructured]
where type = 'xyz'\n{code}\n\nthe node type index is used in some cases, even if
there is an index on the property \"type\". The reason is that the cost for the
node type index is 0 due to a bug. The node type index internally uses the property
index on the property \"jcr:primaryType\", and asks for the cost using all possible
children node types of \"nt:unstructured\". The returned cost is 0 because of this
bug. The cost estimation is an extrapolation of the number of entries for the first
3 values. It is currently coded as:\n\n{code}\ncount = count / size / i;\n{code}\n\nwhen
in fact it should be written as:\n\n{code}\ncount = count * size / i;\n{code}\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
index 433bac3..97a3408 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
@@ -164,7 +164,9 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
int i = 0;
for (String p : values) {
if (count > max && i > 3) {
- count = count / size / i;
+ // the total count is extrapolated from the the number
+ // of values counted so far to the total number of values
+ count = count * size / i;
break;
}
NodeState s = index.getChildNode(p);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1035_b2ca8baa.diff |
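The one-character change matters because the loop samples only the first few indexed values and then scales up: dividing by the value count collapsed the estimate to nearly zero, while multiplying extrapolates it to the full set. A toy illustration of the corrected formula (made-up counts, not the ContentMirrorStoreStrategy code):
{code}
public class CostExtrapolation {

    // Extrapolate the total entry count from the values sampled so far,
    // mirroring the corrected "count * size / i" formula.
    static long estimate(long[] countsPerValue, long max) {
        long count = 0;
        int size = countsPerValue.length;
        int i = 0;
        for (long c : countsPerValue) {
            if (count > max && i > 3) {
                count = count * size / i;
                break;
            }
            count += c;
            i++;
        }
        return count;
    }

    public static void main(String[] args) {
        long[] counts = new long[10];      // 10 indexed values...
        java.util.Arrays.fill(counts, 50); // ...with ~50 entries each
        System.out.println(estimate(counts, 100));
        // prints 500; the old "count / size / i" bug would have yielded 5
    }
}
{code}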
bugs-dot-jar_data_OAK-3104_38f5ef13 | ---
BugID: OAK-3104
Summary: Version garbage collector doesn't collect a rolled back document if it was
never deleted
Description: |-
If a commit gets rolled back it can leave (in case the document was never deleted explicitly) a document in a state like:
{noformat}
{
"_id" : "7:/etc/workflow/packages/2014/10/12/rep:ours",
"_deleted" : {
},
"_commitRoot" : {
},
"jcr:primaryType" : {
},
"_modified" : NumberLong(1413126245),
"_children" : true,
"_modCount" : NumberLong(2)
}
{noformat}
If the path is fairly busy, the document can get created naturally later and then follow the usual cycle. But, at times, such documents are ephemeral in nature and never re-used. In those cases, such documents can remain silently without getting collected.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
index eec0609..5bd72a3 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
@@ -448,10 +448,14 @@ public class Commit {
DocumentStore store = nodeStore.getDocumentStore();
for (UpdateOp op : changed) {
UpdateOp reverse = op.getReverseOperation();
+ if (op.isNew()) {
+ NodeDocument.setDeletedOnce(reverse);
+ }
store.findAndUpdate(NODES, reverse);
}
for (UpdateOp op : newDocuments) {
UpdateOp reverse = op.getReverseOperation();
+ NodeDocument.setDeletedOnce(reverse);
store.findAndUpdate(NODES, reverse);
}
UpdateOp removeCollision = new UpdateOp(commitRoot.getId(), false);
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 8424663..c5dd0d3 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -1372,10 +1372,13 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
if(deleted) {
//DELETED_ONCE would be set upon every delete.
//possibly we can avoid that
- checkNotNull(op).set(DELETED_ONCE, Boolean.TRUE);
+ setDeletedOnce(op);
}
- checkNotNull(op).setMapEntry(DELETED, checkNotNull(revision),
- String.valueOf(deleted));
+ checkNotNull(op).setMapEntry(DELETED, checkNotNull(revision), String.valueOf(deleted));
+ }
+
+ public static void setDeletedOnce(@Nonnull UpdateOp op) {
+ checkNotNull(op).set(DELETED_ONCE, Boolean.TRUE);
}
public static void removeDeleted(@Nonnull UpdateOp op,
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3104_38f5ef13.diff |
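The fix hinges on _deletedOnce being the marker the version garbage collector uses to find candidates: a document created by a commit that is then rolled back never gets that flag, so it lingers. The patch therefore sets it on the reverse operation whenever the original operation created the document. A schematic stand-in (plain maps instead of UpdateOp, purely illustrative):
{code}
import java.util.HashMap;
import java.util.Map;

public class RollbackMarksDeletedOnce {

    // Build the "reverse" update applied during rollback; for documents the
    // failed commit created, also record the garbage-collection hint.
    static Map<String, Object> reverseOperation(boolean createdByThisCommit) {
        Map<String, Object> reverse = new HashMap<>();
        reverse.put("_revisions", "remove <commit revision>"); // undo the commit entry
        if (createdByThisCommit) {
            reverse.put("_deletedOnce", Boolean.TRUE);         // the added GC marker
        }
        return reverse;
    }

    public static void main(String[] args) {
        System.out.println(reverseOperation(true));
        System.out.println(reverseOperation(false));
    }
}
{code}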
bugs-dot-jar_data_OAK-2426_920f32d0 | ---
BugID: OAK-2426
Summary: "[LucenePropertyIndex] full-text search on first level relative node returns
no result"
Description: |
  The following query does not return any results even with a proper index defined [1].
{noformat}//element(*, test:Page)[ " +
"jcr:contains(jcr:content, 'summer') ]
{noformat}
[1]
{code}
{
"jcr:primaryType": "oak:QueryIndexDefinition",
"compatVersion": 2,
"name": "pageIndex",
"type": "lucene",
"async": "async",
"reindex": true,
"aggregates": {
"jcr:primaryType": "nt:unstructured",
"test:Page": {
"jcr:primaryType": "nt:unstructured",
"include0": {
"jcr:primaryType": "nt:unstructured",
"relativeNode": true,
"path": "jcr:content"
}
}
},
"indexRules": {
"jcr:primaryType": "nt:unstructured",
"test:Page": {
"jcr:primaryType": "nt:unstructured",
"properties": {
"jcr:primaryType": "nt:unstructured",
"jcr:lastModified": {
"jcr:primaryType": "nt:unstructured",
"ordered": true,
"propertyIndex": true,
"name": "jcr:content/jcr:lastModified",
"type": "Date"
}
}
}
}
}
{code}
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/Aggregate.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/Aggregate.java
index 1143b81..8c94e08 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/Aggregate.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/Aggregate.java
@@ -242,7 +242,7 @@ class Aggregate {
public void collectResults(NodeInclude rootInclude, String rootIncludePath, String nodePath,
NodeState nodeState, ResultCollector results) throws CommitFailedException {
//For supporting jcr:contains(jcr:content, 'foo')
- if (rootInclude != this && rootInclude.relativeNode){
+ if (rootInclude.relativeNode){
results.onResult(new NodeIncludeResult(nodePath, rootIncludePath, nodeState));
}
@@ -511,7 +511,10 @@ class Aggregate {
public void collectResults(ResultCollector results)
throws CommitFailedException {
checkArgument(status == Status.MATCH_FOUND);
- String rootIncludePath = aggregateStack.isEmpty() ? null : aggregateStack.get(0);
+
+ //If result being collected as part of reaggregation then take path
+ //from the stack otherwise its the current path
+ String rootIncludePath = aggregateStack.isEmpty() ? currentPath : aggregateStack.get(0);
currentInclude.collectResults(rootState.rootInclude, rootIncludePath,
currentPath, matchedNodeState, results);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2426_920f32d0.diff |
bugs-dot-jar_data_OAK-3028_89317b28 | ---
BugID: OAK-3028
Summary: Hierarchy conflict detection broken
Description: Hierarchy conflict detection is broken in 1.0.14. It may happen that
a child document is created even though the parent is considered deleted.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
index a764669..7f09df6 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
@@ -383,7 +383,7 @@ public class Commit {
// to set isNew to false. If we get here the
// commitRoot document already exists and
// only needs an update
- UpdateOp commit = commitRoot.shallowCopy(commitRoot.getId());
+ UpdateOp commit = commitRoot.copy();
commit.setNew(false);
// only set revision on commit root when there is
// no collision for this commit revision
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 3455f4b..65f79f3 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -1170,13 +1170,11 @@ public final class DocumentNodeStore
@CheckForNull
NodeDocument updateCommitRoot(UpdateOp commit) throws DocumentStoreException {
// use batch commit when there are only revision and modified updates
- // and collision checks
boolean batch = true;
for (Map.Entry<Key, Operation> op : commit.getChanges().entrySet()) {
String name = op.getKey().getName();
if (NodeDocument.isRevisionsEntry(name)
- || NodeDocument.MODIFIED_IN_SECS.equals(name)
- || NodeDocument.COLLISIONS.equals(name)) {
+ || NodeDocument.MODIFIED_IN_SECS.equals(name)) {
continue;
}
batch = false;
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
index f72a7ae..0196878 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
@@ -50,23 +50,32 @@ public final class UpdateOp {
* @param isNew whether this is a new document
*/
UpdateOp(String id, boolean isNew) {
- this(id, isNew, false, new HashMap<Key, Operation>());
+ this(id, isNew, false, new HashMap<Key, Operation>(), null);
}
- private UpdateOp(String id, boolean isNew, boolean isDelete,
- Map<Key, Operation> changes) {
- this.id = id;
+ private UpdateOp(@Nonnull String id, boolean isNew, boolean isDelete,
+ @Nonnull Map<Key, Operation> changes,
+ @Nullable Map<Key, Condition> conditions) {
+ this.id = checkNotNull(id);
this.isNew = isNew;
this.isDelete = isDelete;
- this.changes = changes;
+ this.changes = checkNotNull(changes);
+ this.conditions = conditions;
}
static UpdateOp combine(String id, Iterable<UpdateOp> ops) {
Map<Key, Operation> changes = Maps.newHashMap();
+ Map<Key, Condition> conditions = Maps.newHashMap();
for (UpdateOp op : ops) {
changes.putAll(op.getChanges());
+ if (op.conditions != null) {
+ conditions.putAll(op.conditions);
+ }
+ }
+ if (conditions.isEmpty()) {
+ conditions = null;
}
- return new UpdateOp(id, false, false, changes);
+ return new UpdateOp(id, false, false, changes, conditions);
}
/**
@@ -76,7 +85,7 @@ public final class UpdateOp {
* @param id the primary key.
*/
public UpdateOp shallowCopy(String id) {
- return new UpdateOp(id, isNew, isDelete, changes);
+ return new UpdateOp(id, isNew, isDelete, changes, conditions);
}
/**
@@ -86,8 +95,12 @@ public final class UpdateOp {
* @return a copy of this operation.
*/
public UpdateOp copy() {
+ Map<Key, Condition> conditionMap = null;
+ if (conditions != null) {
+ conditionMap = new HashMap<Key, Condition>(conditions);
+ }
return new UpdateOp(id, isNew, isDelete,
- new HashMap<Key, Operation>(changes));
+ new HashMap<Key, Operation>(changes), conditionMap);
}
public String getId() {
@@ -254,7 +267,11 @@ public final class UpdateOp {
@Override
public String toString() {
- return "key: " + id + " " + (isNew ? "new" : "update") + " " + changes;
+ String s = "key: " + id + " " + (isNew ? "new" : "update") + " " + changes;
+ if (conditions != null) {
+ s += " conditions " + conditions;
+ }
+ return s;
}
private Map<Key, Condition> getOrCreateConditions() {
@@ -462,5 +479,4 @@ public final class UpdateOp {
return false;
}
}
-
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3028_89317b28.diff |
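The key change is commitRoot.shallowCopy(id) becoming commitRoot.copy(): the shallow variant reuses the same backing changes map, so anything later added to the "copy" also mutates the original operation, which is a plausible way for commit bookkeeping to leak. A reduced demonstration of the aliasing itself, with a stub class standing in for UpdateOp:
{code}
import java.util.HashMap;
import java.util.Map;

public class ShallowVsDeepCopy {

    static class Op {
        final Map<String, String> changes;
        Op(Map<String, String> changes) { this.changes = changes; }
        Op shallowCopy() { return new Op(changes); }                // shares the backing map
        Op copy()        { return new Op(new HashMap<>(changes)); } // independent map
    }

    public static void main(String[] args) {
        Op original = new Op(new HashMap<>());
        original.changes.put("_modified", "123");

        Op shallow = original.shallowCopy();
        shallow.changes.put("_revisions.r1", "c");
        // the "copy" leaked into the original:
        System.out.println(original.changes.containsKey("_revisions.r1")); // true

        Op deep = original.copy();
        deep.changes.put("_collisions.r2", "true");
        System.out.println(original.changes.containsKey("_collisions.r2")); // false
    }
}
{code}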
bugs-dot-jar_data_OAK-135_438e31a7 | ---
BugID: OAK-135
Summary: Better support for RangeIterators
Description: 'Currently all RangeIterators returned from the JCR API don''t implement
the {{getSize()}} method but rather return {{-1}}. We should return the size of
the iterator if and where feasible. '
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
index 90b9c74..fbdc48d 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
@@ -16,8 +16,6 @@
*/
package org.apache.jackrabbit.oak.jcr;
-import static org.apache.jackrabbit.oak.util.Iterators.filter;
-
import java.io.InputStream;
import java.math.BigDecimal;
import java.util.ArrayList;
@@ -70,6 +68,8 @@ import org.apache.jackrabbit.value.ValueHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.jackrabbit.oak.util.Iterators.filter;
+
/**
* {@code NodeImpl}...
*/
@@ -441,7 +441,8 @@ public class NodeImpl extends ItemImpl implements Node {
checkStatus();
Iterator<NodeDelegate> children = dlg.getChildren();
- return new NodeIteratorAdapter(nodeIterator(children));
+ long size = dlg.getChildCount();
+ return new NodeIteratorAdapter(nodeIterator(children), size);
}
@Override
@@ -505,7 +506,8 @@ public class NodeImpl extends ItemImpl implements Node {
checkStatus();
Iterator<PropertyDelegate> properties = dlg.getProperties();
- return new PropertyIteratorAdapter(propertyIterator(properties));
+ long size = dlg.getPropertyCount();
+ return new PropertyIteratorAdapter(propertyIterator(properties), size);
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-135_438e31a7.diff |
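The change simply hands the already-known child and property counts to the iterator adapters so getSize() can answer instead of returning -1. The same pattern in a self-contained form (a toy adapter, not Jackrabbit's NodeIteratorAdapter):
{code}
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class SizedIteratorExample {

    static class SizedIterator<T> implements Iterator<T> {
        private final Iterator<T> delegate;
        private final long size; // -1 when unknown, as RangeIterator allows

        SizedIterator(Iterator<T> delegate, long size) {
            this.delegate = delegate;
            this.size = size;
        }

        long getSize() { return size; }

        @Override public boolean hasNext() { return delegate.hasNext(); }
        @Override public T next()          { return delegate.next(); }
    }

    public static void main(String[] args) {
        List<String> children = Arrays.asList("a", "b", "c");
        SizedIterator<String> it = new SizedIterator<>(children.iterator(), children.size());
        System.out.println(it.getSize()); // 3 instead of -1
    }
}
{code}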
bugs-dot-jar_data_OAK-2433_7fca85bf | ---
BugID: OAK-2433
Summary: IllegalStateException for ValueMap on _revisions
Description: An IllegalStateException may be thrown by the MergeSortedIterator when
_revisions on the root document are read with the ValueMap implementation. It only
happens when the local _revisions map has entries that are lower than the most recent
split document.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
index 7b1bec6..259474d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
@@ -18,9 +18,12 @@ package org.apache.jackrabbit.oak.plugins.document;
import java.util.AbstractMap;
import java.util.AbstractSet;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
@@ -32,6 +35,7 @@ import org.apache.jackrabbit.oak.plugins.document.util.MergeSortedIterators;
import com.google.common.base.Objects;
import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
/**
* A value map contains the versioned values of a property. The key into this
@@ -61,9 +65,32 @@ class ValueMap {
if (map.isEmpty()) {
docs = doc.getPreviousDocs(property, null).iterator();
} else {
- docs = Iterators.concat(
- Iterators.singletonIterator(doc),
- doc.getPreviousDocs(property, null).iterator());
+ // merge sort local map into maps of previous documents
+ List<Iterator<NodeDocument>> iterators =
+ new ArrayList<Iterator<NodeDocument>>(2);
+ iterators.add(Iterators.singletonIterator(doc));
+ iterators.add(doc.getPreviousDocs(property, null).iterator());
+ docs = Iterators.mergeSorted(iterators, new Comparator<NodeDocument>() {
+ @Override
+ public int compare(NodeDocument o1,
+ NodeDocument o2) {
+ Revision r1 = getFirstRevision(o1);
+ Revision r2 = getFirstRevision(o2);
+ return c.compare(r1, r2);
+ }
+
+ private Revision getFirstRevision(NodeDocument d) {
+ Map<Revision, String> values;
+ if (Objects.equal(d.getId(), doc.getId())) {
+ // return local map for main document
+ values = d.getLocalMap(property);
+ } else {
+ values = d.getValueMap(property);
+ }
+ return values.keySet().iterator().next();
+ }
+
+ });
}
return new MergeSortedIterators<Map.Entry<Revision, String>>(
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/MergeSortedIterators.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/MergeSortedIterators.java
index 48bed08..3cb3dc5 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/MergeSortedIterators.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/util/MergeSortedIterators.java
@@ -100,7 +100,9 @@ public abstract class MergeSortedIterators<T> implements Iterator<T> {
PeekingIterator<T> pIt = Iterators.peekingIterator(it);
if (!iterators.isEmpty()
&& comparator.compare(pIt.peek(), lastPeek) < 0) {
- throw new IllegalStateException(description() + " First element of next iterator must be greater than previous iterator");
+ throw new IllegalStateException(description() +
+ " First element of next iterator (" + pIt.peek() + ")" +
+ " must be after previous iterator (" + lastPeek + ")");
}
lastPeek = pIt.peek();
iterators.add(pIt);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2433_7fca85bf.diff |
bugs-dot-jar_data_OAK-510_f63d745a | ---
BugID: OAK-510
Summary: Multivalued properties with array size 0 forget their type
Description: "thought i remember that i have seen a related TODO or issue before,
i\ncouldn't find it any more... sorry for that.\n\nwhile cleaning up the node type
code i found that one FIXME in the \nReadOnlyNodeTypeManager related to definition
generation was only needed\nbecause the TypeValidator failed upon validation of
an empty jcr:supertypes\ndefinition. not storing the super types if none has be
declared solved the\nproblem for the time being.\n\nhowever, it seems to me that
the underlying problem is in a completely\ndifferent area: namely that mv properties
with an empty value array\nforget their type.\n\nthis can be verified with the following
test:\n{code}\n @Test\n public void addEmptyMultiValueName() throws RepositoryException
{\n Node parentNode = getNode(TEST_PATH);\n Value[] values = new Value[0];\n\n
\ parentNode.setProperty(\"multi name\", values);\n parentNode.getSession().save();\n\n
\ Session session2 = createAnonymousSession();\n try {\n Property
property = session2.getProperty(TEST_PATH + \"/multi name\");\n assertTrue(property.isMultiple());\n
\ assertEquals(PropertyType.NAME, property.getType());\n Value[]
values2 = property.getValues();\n assertEquals(values.length, values2.length);\n
\ assertEquals(values[0], values2[0]);\n assertEquals(values[1],
values2[1]);\n } finally {\n session2.logout();\n }\n }\n{code}"
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemImpl.java
index d59feb5..ba16709 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/ItemImpl.java
@@ -430,8 +430,12 @@ abstract class ItemImpl<T extends ItemDelegate> implements Item {
Value[] nonNullValues = compact(values);
int targetType = getType(definition, type);
if (nonNullValues.length == 0) {
+ if (targetType == PropertyType.UNDEFINED) {
+ // default to string when no other type hints are available
+ targetType = PropertyType.STRING;
+ }
return MemoryPropertyBuilder
- .array(Type.fromTag(type, false), name)
+ .array(Type.fromTag(targetType, false), name)
.getPropertyState();
} else if (targetType == type) {
return PropertyStates.createProperty(name, Arrays.asList(nonNullValues));
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-510_f63d745a.diff |
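For an empty value array there is no value to infer a type from, so the patch falls back to STRING only when the effective target type gives no hint (UNDEFINED); a definition-supplied type such as NAME is kept. A reduced sketch of that defaulting rule, assuming the JCR API (javax.jcr) on the classpath:
{code}
import javax.jcr.PropertyType;

public class EmptyMultiValueType {

    // Decide the stored type for a multi-valued property with no values,
    // mirroring the defaulting added by the patch.
    static int effectiveType(int typeFromDefinitionOrHint) {
        if (typeFromDefinitionOrHint == PropertyType.UNDEFINED) {
            return PropertyType.STRING; // no hint at all: default to string
        }
        return typeFromDefinitionOrHint;
    }

    public static void main(String[] args) {
        System.out.println(PropertyType.nameFromValue(effectiveType(PropertyType.UNDEFINED))); // String
        System.out.println(PropertyType.nameFromValue(effectiveType(PropertyType.NAME)));      // Name
    }
}
{code}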
bugs-dot-jar_data_OAK-2249_6dde8e9d | ---
BugID: OAK-2249
Summary: 'XPath: Query with mixed full-text, "and", "or" conditions fails'
Description: "When performing a query like \n\n{noformat}\n //element(*, test:Asset)[\n
\ (\n jcr:contains(., 'summer')\n or\n jcr:content/metadata/@tags
= 'namespace:season/summer'\n ) and\n jcr:contains(jcr:content/metadata/@format,
  'image')\n ]\n\n{noformat}\n\nThe Lucene/Aggregate index also returns nodes
  that do not match all the\ncriteria.\n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/FullTextSearchImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/FullTextSearchImpl.java
index f922007..f93ed0e 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/FullTextSearchImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/FullTextSearchImpl.java
@@ -48,7 +48,7 @@ public class FullTextSearchImpl extends ConstraintImpl {
* instead, as in the spec, using double quotes.
*/
public static final boolean JACKRABBIT_2_SINGLE_QUOTED_PHRASE = true;
-
+
private final String selectorName;
private final String relativePath;
private final String propertyName;
@@ -251,8 +251,12 @@ public class FullTextSearchImpl extends ConstraintImpl {
public void restrict(FilterImpl f) {
if (propertyName != null) {
if (f.getSelector().equals(selector)) {
- String pn = normalizePropertyName(propertyName);
- f.restrictProperty(pn, Operator.NOT_EQUAL, null);
+ String p = propertyName;
+ if (relativePath != null) {
+ p = PathUtils.concat(p, relativePath);
+ }
+ p = normalizePropertyName(p);
+ f.restrictProperty(p, Operator.NOT_EQUAL, null);
}
}
f.restrictFulltextCondition(fullTextSearchExpression.currentValue().getValue(Type.STRING));
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
index 728973c..6f61a58 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/index/FilterImpl.java
@@ -401,7 +401,7 @@ public class FilterImpl implements Filter {
buff.append("query=").append(queryStatement);
}
if (fullTextConstraint != null) {
- buff.append("fullText=").append(fullTextConstraint);
+ buff.append(" fullText=").append(fullTextConstraint);
}
buff.append(", path=").append(getPathPlan());
if (!propertyRestrictions.isEmpty()) {
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
index 6113c0c..9763c13 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
@@ -83,9 +83,7 @@ public class Statement {
}
private static void addToUnionList(Expression condition, ArrayList<Expression> unionList) {
- if (condition.containsFullTextCondition()) {
- // do not use union
- } else if (condition instanceof OrCondition) {
+ if (condition instanceof OrCondition) {
OrCondition or = (OrCondition) condition;
// conditions of type
// @x = 1 or @y = 2
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/Cursors.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/Cursors.java
index 74060a1..10a459e 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/Cursors.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/Cursors.java
@@ -439,7 +439,12 @@ public class Cursors {
ConcatCursor(List<Cursor> cursors, QueryEngineSettings settings) {
this.cursors = cursors;
this.settings = settings;
- this.currentCursor = cursors.remove(0);
+ if (cursors.size() == 0) {
+ init = true;
+ closed = true;
+ } else {
+ this.currentCursor = cursors.remove(0);
+ }
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2249_6dde8e9d.diff |
bugs-dot-jar_data_OAK-1081_4ce4e3c9 | ---
BugID: OAK-1081
Summary: Node.getNodes throwing exception if user does not have access to any child
node
Description: "When trying to obtain child iterator via Node.getNodes {{InvalidItemStateException}}
is thrown if user does not have access to its content\n\n{code:java}\n @Test\n
\ public void testGetChildren() throws Exception {\n deny(path, privilegesFromName(PrivilegeConstants.JCR_ADD_CHILD_NODES));\n
\ NodeIterator it1 = testSession.getNode(path).getNodes();\n while(it1.hasNext()){\n
\ Node n = it1.nextNode();\n NodeIterator it2 = n.getNodes();\n
\ }\n }\n{code}\n\nExecuting above code leads to following exception\n\n{noformat}\njavax.jcr.InvalidItemStateException:
Item is stale\n\tat org.apache.jackrabbit.oak.jcr.delegate.NodeDelegate.getTree(NodeDelegate.java:827)\n\tat
org.apache.jackrabbit.oak.jcr.delegate.NodeDelegate.getChildren(NodeDelegate.java:336)\n\tat
org.apache.jackrabbit.oak.jcr.session.NodeImpl$8.perform(NodeImpl.java:546)\n\tat
org.apache.jackrabbit.oak.jcr.session.NodeImpl$8.perform(NodeImpl.java:543)\n\tat
org.apache.jackrabbit.oak.jcr.delegate.SessionDelegate.perform(SessionDelegate.java:125)\n\tat
org.apache.jackrabbit.oak.jcr.session.ItemImpl.perform(ItemImpl.java:113)\n\tat
org.apache.jackrabbit.oak.jcr.session.NodeImpl.getNodes(NodeImpl.java:543)\n\tat
org.apache.jackrabbit.oak.jcr.security.authorization.ReadPropertyTest.testGetChildren(ReadPropertyTest.java:135)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n\tat
org.apache.jackrabbit.test.AbstractJCRTest.run(AbstractJCRTest.java:464)\n\tat org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)\n\tat
org.junit.runner.JUnitCore.run(JUnitCore.java:157)\n\tat com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:77)\n\tat
com.intellij.rt.execution.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:195)\n\tat
com.intellij.rt.execution.junit.JUnitStarter.main(JUnitStarter.java:63)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native
Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n\tat
com.intellij.rt.execution.application.AppMain.main(AppMain.java:120)\n{noformat}\n\nThe
exception is thrown for path {{/testroot/node1/rep:policy}}. \n\nThe issue occurs
because the {{NodeIterator}} {{it1}} includes {{rep:policy}} and later when its
child are accessed security check leads to exception. Probably the {{it1}} should
not include {{rep:policy}} as part of child list and filter it out"
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
index 16c3150..9cbfaf0 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
@@ -338,7 +338,7 @@ public class NodeDelegate extends ItemDelegate {
filter(iterator, new Predicate<Tree>() {
@Override
public boolean apply(Tree tree) {
- return !tree.getName().startsWith(":");
+ return tree.exists();
}
}),
new Function<Tree, NodeDelegate>() {
@@ -447,7 +447,6 @@ public class NodeDelegate extends ItemDelegate {
/**
* Set a property
*
- * @param propertyState
* @return the set property
*/
@Nonnull
@@ -824,7 +823,7 @@ public class NodeDelegate extends ItemDelegate {
@Nonnull // FIXME this should be package private. OAK-672
public Tree getTree() throws InvalidItemStateException {
if (!tree.exists()) {
- throw new InvalidItemStateException("Item is stale");
+ throw new InvalidItemStateException("Item is stale " + tree.getPath());
}
return tree;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1081_4ce4e3c9.diff |
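The fix swaps the name-based check (hide everything starting with ':') for tree.exists(), so children the session cannot read, like the inaccessible rep:policy node, are filtered out of getNodes() instead of surfacing later as stale items. The same Guava filtering pattern in isolation (the Tree class below is a tiny stand-in, not Oak's):
{code}
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterators;

public class AccessibleChildrenExample {

    static class Tree {
        final String name;
        final boolean readable;
        Tree(String name, boolean readable) { this.name = name; this.readable = readable; }
        String getName() { return name; }
        boolean exists() { return readable; } // false when the session cannot read it
    }

    public static void main(String[] args) {
        List<Tree> children = Arrays.asList(
                new Tree("node1", true),
                new Tree("rep:policy", false), // not readable for this session
                new Tree("node2", true));

        Iterator<Tree> accessible = Iterators.filter(children.iterator(),
                new Predicate<Tree>() {
                    @Override
                    public boolean apply(Tree tree) {
                        return tree.exists();
                    }
                });

        while (accessible.hasNext()) {
            System.out.println(accessible.next().getName()); // node1, node2
        }
    }
}
{code}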