| id (stringlengths 33-40) | content (stringlengths 662-61.5k) | max_stars_repo_path (stringlengths 85-97) |
---|---|---|
bugs-dot-jar_data_ACCUMULO-2390_28294266 | ---
BugID: ACCUMULO-2390
Summary: TraceProxy.trace should not throw InvocationTargetException
Description: |-
In {{TraceProxy.trace}} there is the following code snippet:
{code}
try {
  return method.invoke(instance, args);
} catch (Throwable ex) {
  ex.printStackTrace();
  throw ex;
}
{code}
When the rethrown exception is an InvocationTargetException, it can really mess with the calling code's exception handling logic.
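A rough, self-contained sketch of the pattern the patch below applies, with illustrative names rather than the real Accumulo classes: the handler unwraps the InvocationTargetException and rethrows the method's real exception.
{code}
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

// Illustrative proxy helper: rethrow the real cause instead of the reflection wrapper.
public class UnwrappingProxy {
  @SuppressWarnings("unchecked")
  public static <T> T wrap(final T instance, Class<T> iface) {
    InvocationHandler handler = new InvocationHandler() {
      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        try {
          return method.invoke(instance, args);
        } catch (InvocationTargetException ite) {
          // Callers see the checked or unchecked exception the real method threw.
          throw ite.getCause() != null ? ite.getCause() : new RuntimeException(ite);
        } catch (IllegalAccessException e) {
          throw new RuntimeException(e);
        }
      }
    };
    return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] {iface}, handler);
  }
}
{code}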
diff --git a/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java b/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
index 67c4463..6b71361 100644
--- a/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
+++ b/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
@@ -17,43 +17,56 @@
package org.apache.accumulo.cloudtrace.instrument;
import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
+import org.apache.log4j.Logger;
+
public class TraceProxy {
- // private static final Logger log = Logger.getLogger(TraceProxy.class);
-
+ private static final Logger log = Logger.getLogger(TraceProxy.class);
+
static final Sampler ALWAYS = new Sampler() {
@Override
public boolean next() {
return true;
}
};
-
+
public static <T> T trace(T instance) {
return trace(instance, ALWAYS);
}
-
+
@SuppressWarnings("unchecked")
public static <T> T trace(final T instance, final Sampler sampler) {
InvocationHandler handler = new InvocationHandler() {
@Override
public Object invoke(Object obj, Method method, Object[] args) throws Throwable {
- if (!sampler.next()) {
- return method.invoke(instance, args);
+ Span span = null;
+ if (sampler.next()) {
+ span = Trace.on(method.getName());
}
- Span span = Trace.on(method.getName());
try {
return method.invoke(instance, args);
- } catch (Throwable ex) {
- ex.printStackTrace();
- throw ex;
+ // Can throw RuntimeException, Error, or any checked exceptions of the method.
+ } catch (InvocationTargetException ite) {
+ Throwable cause = ite.getCause();
+ if (cause == null) {
+ // This should never happen, but account for it anyway
+ log.error("Invocation exception during trace with null cause: ", ite);
+ throw new RuntimeException(ite);
+ }
+ throw cause;
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(e);
} finally {
- span.stop();
+ if (span != null) {
+ span.stop();
+ }
}
}
};
return (T) Proxy.newProxyInstance(instance.getClass().getClassLoader(), instance.getClass().getInterfaces(), handler);
}
-
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2390_28294266.diff |
bugs-dot-jar_data_ACCUMULO-366_db4a291f | ---
BugID: ACCUMULO-366
Summary: master killed a tablet server
Description: |+
Master killed a tablet server for having long hold times.
The tablet server had this error during minor compaction:
{noformat}
01 23:57:20,073 [security.ZKAuthenticator] ERROR: org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /accumulo/88cd0f63-a36a-4218-86b1-9ba1d2cccf08/users/user004
org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /accumulo/88cd0f63-a36a-4218-86b1-9ba1d2cccf08/users/user004
at org.apache.zookeeper.KeeperException.create(KeeperException.java:102)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:42)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1243)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1271)
at org.apache.accumulo.core.zookeeper.ZooUtil.recursiveDelete(ZooUtil.java:103)
at org.apache.accumulo.core.zookeeper.ZooUtil.recursiveDelete(ZooUtil.java:117)
at org.apache.accumulo.server.zookeeper.ZooReaderWriter.recursiveDelete(ZooReaderWriter.java:67)
at sun.reflect.GeneratedMethodAccessor53.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.accumulo.server.zookeeper.ZooReaderWriter$1.invoke(ZooReaderWriter.java:169)
at $Proxy4.recursiveDelete(Unknown Source)
at org.apache.accumulo.server.security.ZKAuthenticator.dropUser(ZKAuthenticator.java:252)
at org.apache.accumulo.server.security.Auditor.dropUser(Auditor.java:104)
at org.apache.accumulo.server.client.ClientServiceHandler.dropUser(ClientServiceHandler.java:136)
at sun.reflect.GeneratedMethodAccessor52.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at cloudtrace.instrument.thrift.TraceWrap$1.invoke(TraceWrap.java:58)
at $Proxy2.dropUser(Unknown Source)
at org.apache.accumulo.core.client.impl.thrift.ClientService$Processor$dropUser.process(ClientService.java:2257)
at org.apache.accumulo.core.tabletserver.thrift.TabletClientService$Processor.process(TabletClientService.java:2037)
at org.apache.accumulo.server.util.TServerUtils$TimedProcessor.process(TServerUtils.java:151)
at org.apache.thrift.server.TNonblockingServer$FrameBuffer.invoke(TNonblockingServer.java:631)
at org.apache.accumulo.server.util.TServerUtils$THsHaServer$Invocation.run(TServerUtils.java:199)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at org.apache.accumulo.core.util.LoggingRunnable.run(LoggingRunnable.java:34)
at java.lang.Thread.run(Thread.java:662)
{noformat}
This tablet was the result of a split that occurred during a delete. The master missed this tablet when taking tablets offline.
We need to do a consistency check on the offline tablets before deleting the table information in zookeeper.
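The patch below makes the flush and compaction id lookups surface KeeperException.NoNodeException so callers can treat a deleted table node as a non-fatal condition. A stand-alone sketch of that pattern follows; the class is illustrative, not Accumulo's Tablet code.
{code}
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: a missing per-table node means the table was deleted, not a fatal error.
class FlushIdReader {
  private final ZooKeeper zk;

  FlushIdReader(ZooKeeper zk) {
    this.zk = zk;
  }

  /** Returns the flush id, or null if the table's node no longer exists. */
  Long readFlushId(String zTablePath) throws InterruptedException {
    try {
      byte[] data = zk.getData(zTablePath, false, null);
      return Long.parseLong(new String(data));
    } catch (NoNodeException e) {
      // Table was deleted concurrently; caller should skip the minor compaction.
      return null;
    } catch (KeeperException e) {
      throw new RuntimeException(e);
    }
  }
}
{code}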
diff --git a/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java b/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
index 6931ea8..f5bdd6b 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
@@ -136,7 +136,7 @@ public class ZooCache {
}
log.warn("Zookeeper error, will retry", e);
} catch (InterruptedException e) {
- log.warn("Zookeeper error, will retry", e);
+ log.info("Zookeeper error, will retry", e);
} catch (ConcurrentModificationException e) {
log.debug("Zookeeper was modified, will retry");
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
index 54e47b6..06d1670 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
@@ -16,6 +16,9 @@
*/
package org.apache.accumulo.examples.wikisearch.ingest;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
import java.io.Reader;
import java.text.ParseException;
import java.text.SimpleDateFormat;
@@ -29,6 +32,7 @@ import javax.xml.stream.XMLStreamReader;
import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
import org.apache.accumulo.examples.wikisearch.normalizer.NumberNormalizer;
+import org.apache.hadoop.io.Writable;
public class ArticleExtractor {
@@ -37,13 +41,15 @@ public class ArticleExtractor {
private static NumberNormalizer nn = new NumberNormalizer();
private static LcNoDiacriticsNormalizer lcdn = new LcNoDiacriticsNormalizer();
- public static class Article {
+ public static class Article implements Writable {
int id;
String title;
long timestamp;
String comments;
String text;
+ public Article(){}
+
private Article(int id, String title, long timestamp, String comments, String text) {
super();
this.id = id;
@@ -90,6 +96,24 @@ public class ArticleExtractor {
fields.put("COMMENTS", lcdn.normalizeFieldValue("COMMENTS", this.comments));
return fields;
}
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ id = in.readInt();
+ title = in.readUTF();
+ timestamp = in.readLong();
+ comments = in.readUTF();
+ text = in.readUTF();
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(id);
+ out.writeUTF(title);
+ out.writeLong(timestamp);
+ out.writeUTF(comments);
+ out.writeUTF(text);
+ }
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
index d76d713..5a0aad4 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
@@ -48,6 +48,11 @@ public class WikipediaConfiguration {
public final static String NUM_GROUPS = "wikipedia.ingest.groups";
+ public final static String PARTITIONED_ARTICLES_DIRECTORY = "wikipedia.partitioned.directory";
+
+ public final static String RUN_PARTITIONER = "wikipedia.run.partitioner";
+ public final static String RUN_INGEST = "wikipedia.run.ingest";
+
public static String getUser(Configuration conf) {
return conf.get(USER);
@@ -117,6 +122,18 @@ public class WikipediaConfiguration {
return conf.getInt(NUM_GROUPS, 1);
}
+ public static Path getPartitionedArticlesPath(Configuration conf) {
+ return new Path(conf.get(PARTITIONED_ARTICLES_DIRECTORY));
+ }
+
+ public static boolean runPartitioner(Configuration conf) {
+ return conf.getBoolean(RUN_PARTITIONER, false);
+ }
+
+ public static boolean runIngest(Configuration conf) {
+ return conf.getBoolean(RUN_INGEST, true);
+ }
+
/**
* Helper method to get properties from Hadoop configuration
*
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
index e8b8b52..dd2eeb9 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
@@ -75,10 +75,14 @@ public class WikipediaInputFormat extends TextInputFormat {
Path file = new Path(in.readUTF());
long start = in.readLong();
long length = in.readLong();
- int numHosts = in.readInt();
- String[] hosts = new String[numHosts];
- for(int i = 0; i < numHosts; i++)
- hosts[i] = in.readUTF();
+ String [] hosts = null;
+ if(in.readBoolean())
+ {
+ int numHosts = in.readInt();
+ hosts = new String[numHosts];
+ for(int i = 0; i < numHosts; i++)
+ hosts[i] = in.readUTF();
+ }
fileSplit = new FileSplit(file, start, length, hosts);
partition = in.readInt();
}
@@ -89,10 +93,17 @@ public class WikipediaInputFormat extends TextInputFormat {
out.writeLong(fileSplit.getStart());
out.writeLong(fileSplit.getLength());
String [] hosts = fileSplit.getLocations();
- out.writeInt(hosts.length);
- for(String host:hosts)
+ if(hosts == null)
+ {
+ out.writeBoolean(false);
+ }
+ else
+ {
+ out.writeBoolean(true);
+ out.writeInt(hosts.length);
+ for(String host:hosts)
out.writeUTF(host);
- fileSplit.write(out);
+ }
out.writeInt(partition);
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java
new file mode 100644
index 0000000..e7493dc
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.IteratorSetting.Column;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.iterator.GlobalIndexUidCombiner;
+import org.apache.accumulo.examples.wikisearch.iterator.TextIndexCombiner;
+import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+public class WikipediaPartitionedIngester extends Configured implements Tool {
+
+ public final static String INGEST_LANGUAGE = "wikipedia.ingest_language";
+ public final static String SPLIT_FILE = "wikipedia.split_file";
+ public final static String TABLE_NAME = "wikipedia.table";
+
+ public static void main(String[] args) throws Exception {
+ int res = ToolRunner.run(new Configuration(), new WikipediaPartitionedIngester(), args);
+ System.exit(res);
+ }
+
+ private void createTables(TableOperations tops, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
+ TableExistsException {
+ // Create the shard table
+ String indexTableName = tableName + "Index";
+ String reverseIndexTableName = tableName + "ReverseIndex";
+ String metadataTableName = tableName + "Metadata";
+
+ // create the shard table
+ if (!tops.exists(tableName)) {
+ // Set a text index combiner on the given field names. No combiner is set if the option is not supplied
+ String textIndexFamilies = WikipediaMapper.TOKENS_FIELD_NAME;
+
+ tops.create(tableName);
+ if (textIndexFamilies.length() > 0) {
+ System.out.println("Adding content combiner on the fields: " + textIndexFamilies);
+
+ IteratorSetting setting = new IteratorSetting(10, TextIndexCombiner.class);
+ List<Column> columns = new ArrayList<Column>();
+ for (String family : StringUtils.split(textIndexFamilies, ',')) {
+ columns.add(new Column("fi\0" + family));
+ }
+ TextIndexCombiner.setColumns(setting, columns);
+ TextIndexCombiner.setLossyness(setting, true);
+
+ tops.attachIterator(tableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ // Set the locality group for the full content column family
+ tops.setLocalityGroups(tableName, Collections.singletonMap("WikipediaDocuments", Collections.singleton(new Text(WikipediaMapper.DOCUMENT_COLUMN_FAMILY))));
+
+ }
+
+ if (!tops.exists(indexTableName)) {
+ tops.create(indexTableName);
+ // Add the UID combiner
+ IteratorSetting setting = new IteratorSetting(19, "UIDAggregator", GlobalIndexUidCombiner.class);
+ GlobalIndexUidCombiner.setCombineAllColumns(setting, true);
+ GlobalIndexUidCombiner.setLossyness(setting, true);
+ tops.attachIterator(indexTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ if (!tops.exists(reverseIndexTableName)) {
+ tops.create(reverseIndexTableName);
+ // Add the UID combiner
+ IteratorSetting setting = new IteratorSetting(19, "UIDAggregator", GlobalIndexUidCombiner.class);
+ GlobalIndexUidCombiner.setCombineAllColumns(setting, true);
+ GlobalIndexUidCombiner.setLossyness(setting, true);
+ tops.attachIterator(reverseIndexTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ if (!tops.exists(metadataTableName)) {
+ // Add the SummingCombiner with VARLEN encoding for the frequency column
+ tops.create(metadataTableName);
+ IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setColumns(setting, Collections.singletonList(new Column("f")));
+ SummingCombiner.setEncodingType(setting, SummingCombiner.Type.VARLEN);
+ tops.attachIterator(metadataTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ Configuration conf = getConf();
+ if(WikipediaConfiguration.runPartitioner(conf))
+ {
+ int result = runPartitionerJob();
+ if(result != 0)
+ return result;
+ }
+ if(WikipediaConfiguration.runIngest(conf))
+ return runIngestJob();
+ return 0;
+ }
+
+ public int runPartitionerJob() throws Exception
+ {
+ Job partitionerJob = new Job(getConf(), "Partition Wikipedia");
+ Configuration partitionerConf = partitionerJob.getConfiguration();
+ partitionerConf.set("mapred.map.tasks.speculative.execution", "false");
+
+ configurePartitionerJob(partitionerJob);
+
+ List<Path> inputPaths = new ArrayList<Path>();
+ SortedSet<String> languages = new TreeSet<String>();
+ FileSystem fs = FileSystem.get(partitionerConf);
+ Path parent = new Path(partitionerConf.get("wikipedia.input"));
+ listFiles(parent, fs, inputPaths, languages);
+
+ System.out.println("Input files in " + parent + ":" + inputPaths.size());
+ Path[] inputPathsArray = new Path[inputPaths.size()];
+ inputPaths.toArray(inputPathsArray);
+
+ System.out.println("Languages:" + languages.size());
+
+ // setup input format
+
+ WikipediaInputFormat.setInputPaths(partitionerJob, inputPathsArray);
+
+ partitionerJob.setMapperClass(WikipediaPartitioner.class);
+ partitionerJob.setNumReduceTasks(0);
+
+ // setup output format
+ partitionerJob.setMapOutputKeyClass(Text.class);
+ partitionerJob.setMapOutputValueClass(Article.class);
+ partitionerJob.setOutputFormatClass(SequenceFileOutputFormat.class);
+ Path outputDir = WikipediaConfiguration.getPartitionedArticlesPath(partitionerConf);
+ SequenceFileOutputFormat.setOutputPath(partitionerJob, outputDir);
+
+ return partitionerJob.waitForCompletion(true) ? 0 : 1;
+ }
+
+ public int runIngestJob() throws Exception
+ {
+ Job ingestJob = new Job(getConf(), "Ingest Partitioned Wikipedia");
+ Configuration ingestConf = ingestJob.getConfiguration();
+ ingestConf.set("mapred.map.tasks.speculative.execution", "false");
+
+ String tablename = WikipediaConfiguration.getTableName(ingestConf);
+
+ String zookeepers = WikipediaConfiguration.getZookeepers(ingestConf);
+ String instanceName = WikipediaConfiguration.getInstanceName(ingestConf);
+
+ String user = WikipediaConfiguration.getUser(ingestConf);
+ byte[] password = WikipediaConfiguration.getPassword(ingestConf);
+ Connector connector = WikipediaConfiguration.getConnector(ingestConf);
+
+ TableOperations tops = connector.tableOperations();
+
+ createTables(tops, tablename);
+
+ // setup input format
+ ingestJob.setInputFormatClass(SequenceFileInputFormat.class);
+ SequenceFileInputFormat.setInputPaths(ingestJob, WikipediaConfiguration.getPartitionedArticlesPath(ingestConf));
+
+ // setup output format
+ ingestJob.setMapOutputKeyClass(Text.class);
+ ingestJob.setMapOutputValueClass(Mutation.class);
+ ingestJob.setOutputFormatClass(AccumuloOutputFormat.class);
+ AccumuloOutputFormat.setOutputInfo(ingestJob.getConfiguration(), user, password, true, tablename);
+ AccumuloOutputFormat.setZooKeeperInstance(ingestJob.getConfiguration(), instanceName, zookeepers);
+
+ return ingestJob.waitForCompletion(true) ? 0 : 1;
+ }
+
+ public final static PathFilter partFilter = new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ return path.getName().startsWith("part");
+ };
+ };
+
+ protected void configurePartitionerJob(Job job) {
+ Configuration conf = job.getConfiguration();
+ job.setJarByClass(WikipediaPartitionedIngester.class);
+ job.setInputFormatClass(WikipediaInputFormat.class);
+ conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
+ conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
+ }
+
+ protected static final Pattern filePattern = Pattern.compile("([a-z_]+).*.xml(.bz2)?");
+
+ protected void listFiles(Path path, FileSystem fs, List<Path> files, Set<String> languages) throws IOException {
+ for (FileStatus status : fs.listStatus(path)) {
+ if (status.isDir()) {
+ listFiles(status.getPath(), fs, files, languages);
+ } else {
+ Path p = status.getPath();
+ Matcher matcher = filePattern.matcher(p.getName());
+ if (matcher.matches()) {
+ languages.add(matcher.group(1));
+ files.add(p);
+ }
+ }
+ }
+ }
+}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
new file mode 100644
index 0000000..4d94c24
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.log4j.Logger;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+public class WikipediaPartitionedMapper extends Mapper<Text,Article,Text,Mutation> {
+
+ private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
+
+ public final static Charset UTF8 = Charset.forName("UTF-8");
+ public static final String DOCUMENT_COLUMN_FAMILY = "d";
+ public static final String METADATA_EVENT_COLUMN_FAMILY = "e";
+ public static final String METADATA_INDEX_COLUMN_FAMILY = "i";
+ public static final String TOKENS_FIELD_NAME = "TEXT";
+
+ private static final Value NULL_VALUE = new Value(new byte[0]);
+ private static final String cvPrefix = "all|";
+
+ private int numPartitions = 0;
+
+ private Text tablename = null;
+ private Text indexTableName = null;
+ private Text reverseIndexTableName = null;
+ private Text metadataTableName = null;
+
+ @Override
+ public void setup(Context context) {
+ Configuration conf = context.getConfiguration();
+ tablename = new Text(WikipediaConfiguration.getTableName(conf));
+ indexTableName = new Text(tablename + "Index");
+ reverseIndexTableName = new Text(tablename + "ReverseIndex");
+ metadataTableName = new Text(tablename + "Metadata");
+
+ numPartitions = WikipediaConfiguration.getNumPartitions(conf);
+ }
+
+ @Override
+ protected void map(Text language, Article article, Context context) throws IOException, InterruptedException {
+ String NULL_BYTE = "\u0000";
+ String colfPrefix = language.toString() + NULL_BYTE;
+ String indexPrefix = "fi" + NULL_BYTE;
+ ColumnVisibility cv = new ColumnVisibility(cvPrefix + language);
+
+ if (article != null) {
+ Text partitionId = new Text(Integer.toString(WikipediaMapper.getPartitionId(article, numPartitions)));
+
+ // Create the mutations for the document.
+ // Row is partition id, colf is language0articleid, colq is fieldName\0fieldValue
+ Mutation m = new Mutation(partitionId);
+ for (Entry<String,Object> entry : article.getFieldValues().entrySet()) {
+ m.put(colfPrefix + article.getId(), entry.getKey() + NULL_BYTE + entry.getValue().toString(), cv, article.getTimestamp(), NULL_VALUE);
+ // Create mutations for the metadata table.
+ Mutation mm = new Mutation(entry.getKey());
+ mm.put(METADATA_EVENT_COLUMN_FAMILY, language.toString(), cv, article.getTimestamp(), NULL_VALUE);
+ context.write(metadataTableName, mm);
+ }
+
+ // Tokenize the content
+ Set<String> tokens = getTokens(article);
+
+ // We are going to put the fields to be indexed into a multimap. This allows us to iterate
+ // over the entire set once.
+ Multimap<String,String> indexFields = HashMultimap.create();
+ // Add the normalized field values
+ LcNoDiacriticsNormalizer normalizer = new LcNoDiacriticsNormalizer();
+ for (Entry<String,String> index : article.getNormalizedFieldValues().entrySet())
+ indexFields.put(index.getKey(), index.getValue());
+ // Add the tokens
+ for (String token : tokens)
+ indexFields.put(TOKENS_FIELD_NAME, normalizer.normalizeFieldValue("", token));
+
+ for (Entry<String,String> index : indexFields.entries()) {
+ // Create mutations for the in partition index
+ // Row is partition id, colf is 'fi'\0fieldName, colq is fieldValue\0language\0article id
+ m.put(indexPrefix + index.getKey(), index.getValue() + NULL_BYTE + colfPrefix + article.getId(), cv, article.getTimestamp(), NULL_VALUE);
+
+ // Create mutations for the global index
+ // Create a UID object for the Value
+ Builder uidBuilder = Uid.List.newBuilder();
+ uidBuilder.setIGNORE(false);
+ uidBuilder.setCOUNT(1);
+ uidBuilder.addUID(Integer.toString(article.getId()));
+ Uid.List uidList = uidBuilder.build();
+ Value val = new Value(uidList.toByteArray());
+
+ // Create mutations for the global index
+ // Row is field value, colf is field name, colq is partitionid\0language, value is Uid.List object
+ Mutation gm = new Mutation(index.getValue());
+ gm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
+ context.write(indexTableName, gm);
+
+ // Create mutations for the global reverse index
+ Mutation grm = new Mutation(StringUtils.reverse(index.getValue()));
+ grm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
+ context.write(reverseIndexTableName, grm);
+
+ // Create mutations for the metadata table.
+ Mutation mm = new Mutation(index.getKey());
+ mm.put(METADATA_INDEX_COLUMN_FAMILY, language + NULL_BYTE + LcNoDiacriticsNormalizer.class.getName(), cv, article.getTimestamp(), NULL_VALUE);
+ context.write(metadataTableName, mm);
+
+ }
+ // Add the entire text to the document section of the table.
+ // row is the partition, colf is 'd', colq is language\0articleid, value is Base64 encoded GZIP'd document
+ m.put(DOCUMENT_COLUMN_FAMILY, colfPrefix + article.getId(), cv, article.getTimestamp(), new Value(Base64.encodeBase64(article.getText().getBytes())));
+ context.write(tablename, m);
+
+ } else {
+ context.getCounter("wikipedia", "invalid articles").increment(1);
+ }
+ context.progress();
+ }
+
+ /**
+ * Tokenize the wikipedia content
+ *
+ * @param article
+ * @return
+ * @throws IOException
+ */
+ private Set<String> getTokens(Article article) throws IOException {
+ Set<String> tokenList = new HashSet<String>();
+ WikipediaTokenizer tok = new WikipediaTokenizer(new StringReader(article.getText()));
+ TermAttribute term = tok.addAttribute(TermAttribute.class);
+ try {
+ while (tok.incrementToken()) {
+ String token = term.term();
+ if (!StringUtils.isEmpty(token))
+ tokenList.add(token);
+ }
+ } catch (IOException e) {
+ log.error("Error tokenizing text", e);
+ } finally {
+ try {
+ tok.end();
+ } catch (IOException e) {
+ log.error("Error calling end()", e);
+ } finally {
+ try {
+ tok.close();
+ } catch (IOException e) {
+ log.error("Error closing tokenizer", e);
+ }
+ }
+ }
+ return tokenList;
+ }
+
+}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
new file mode 100644
index 0000000..82af9fd
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.StringReader;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.IllegalFormatException;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
+import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.log4j.Logger;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+public class WikipediaPartitioner extends Mapper<LongWritable,Text,Text,Article> {
+
+ private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
+
+ public final static Charset UTF8 = Charset.forName("UTF-8");
+ public static final String DOCUMENT_COLUMN_FAMILY = "d";
+ public static final String METADATA_EVENT_COLUMN_FAMILY = "e";
+ public static final String METADATA_INDEX_COLUMN_FAMILY = "i";
+ public static final String TOKENS_FIELD_NAME = "TEXT";
+
+ private final static Pattern languagePattern = Pattern.compile("([a-z_]+).*.xml(.bz2)?");
+
+ private ArticleExtractor extractor;
+ private String language;
+
+ private int myGroup = -1;
+ private int numGroups = -1;
+
+ @Override
+ public void setup(Context context) {
+ Configuration conf = context.getConfiguration();
+
+ WikipediaInputSplit wiSplit = (WikipediaInputSplit)context.getInputSplit();
+ myGroup = wiSplit.getPartition();
+ numGroups = WikipediaConfiguration.getNumGroups(conf);
+
+ FileSplit split = wiSplit.getFileSplit();
+ String fileName = split.getPath().getName();
+ Matcher matcher = languagePattern.matcher(fileName);
+ if (matcher.matches()) {
+ language = matcher.group(1).replace('_', '-').toLowerCase();
+ } else {
+ throw new RuntimeException("Unknown ingest language! " + fileName);
+ }
+ extractor = new ArticleExtractor();
+ }
+
+ @Override
+ protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+ Article article = extractor.extract(new InputStreamReader(new ByteArrayInputStream(value.getBytes()), UTF8));
+ if (article != null) {
+ int groupId = WikipediaMapper.getPartitionId(article, numGroups);
+ if(groupId != myGroup)
+ return;
+ context.write(new Text(language), article);
+ } else {
+ context.getCounter("wikipedia", "invalid articles").increment(1);
+ context.progress();
+ }
+ }
+
+}
diff --git a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
index 3e719e6..e709704 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
@@ -123,6 +123,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import cloudtrace.instrument.Span;
import cloudtrace.instrument.Trace;
@@ -2274,6 +2276,7 @@ public class Tablet {
if (updateMetadata) {
synchronized (this) {
updatingFlushID = false;
+ this.notifyAll();
}
}
}
@@ -2281,8 +2284,19 @@ public class Tablet {
}
boolean initiateMinorCompaction() {
+ if (isClosed()) {
+ // don't bother trying to get flush id if closed... could be closed after this check but that is ok... just trying to cut down on uneeded log messages....
+ return false;
+ }
+
// get the flush id before the new memmap is made available for write
- long flushId = getFlushID();
+ long flushId;
+ try {
+ flushId = getFlushID();
+ } catch (NoNodeException e) {
+ log.info("Asked to initiate MinC when there was no flush id " + getExtent() + " " + e.getMessage());
+ return false;
+ }
return initiateMinorCompaction(flushId);
}
@@ -2338,23 +2352,39 @@ public class Tablet {
return true;
}
- long getFlushID() {
+ long getFlushID() throws NoNodeException {
try {
String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+ Constants.ZTABLE_FLUSH_ID;
return Long.parseLong(new String(ZooReaderWriter.getRetryingInstance().getData(zTablePath, null)));
- } catch (Exception e) {
+ } catch (InterruptedException e) {
throw new RuntimeException(e);
+ } catch (NumberFormatException nfe) {
+ throw new RuntimeException(nfe);
+ } catch (KeeperException ke) {
+ if (ke instanceof NoNodeException) {
+ throw (NoNodeException) ke;
+ } else {
+ throw new RuntimeException(ke);
+ }
}
}
- long getCompactionID() {
+ long getCompactionID() throws NoNodeException {
try {
String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+ Constants.ZTABLE_COMPACT_ID;
return Long.parseLong(new String(ZooReaderWriter.getRetryingInstance().getData(zTablePath, null)));
- } catch (Exception e) {
+ } catch (InterruptedException e) {
throw new RuntimeException(e);
+ } catch (NumberFormatException nfe) {
+ throw new RuntimeException(nfe);
+ } catch (KeeperException ke) {
+ if (ke instanceof NoNodeException) {
+ throw (NoNodeException) ke;
+ } else {
+ throw new RuntimeException(ke);
+ }
}
}
@@ -2557,13 +2587,25 @@ public class Tablet {
}
}
+ while (updatingFlushID) {
+ try {
+ this.wait(50);
+ } catch (InterruptedException e) {
+ log.error(e.toString());
+ }
+ }
+
if (!saveState || tabletMemory.getMemTable().getNumEntries() == 0) {
return;
}
tabletMemory.waitForMinC();
- mct = prepareForMinC(getFlushID());
+ try {
+ mct = prepareForMinC(getFlushID());
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
if (queueMinC) {
tabletResources.executeMinorCompaction(mct);
@@ -2612,7 +2654,11 @@ public class Tablet {
tabletMemory.waitForMinC();
if (saveState && tabletMemory.getMemTable().getNumEntries() > 0) {
- prepareForMinC(getFlushID()).run();
+ try {
+ prepareForMinC(getFlushID()).run();
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
}
if (saveState) {
@@ -3103,7 +3149,11 @@ public class Tablet {
Long compactionId = null;
if (!propogateDeletes) {
// compacting everything, so update the compaction id in !METADATA
- compactionId = getCompactionID();
+ try {
+ compactionId = getCompactionID();
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
}
// need to handle case where only one file is being major compacted
diff --git a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index e01ca07..94e8137 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -194,6 +194,7 @@ import org.apache.thrift.TProcessor;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.server.TServer;
import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import cloudtrace.instrument.Span;
import cloudtrace.instrument.Trace;
@@ -1887,7 +1888,13 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
if (flushID == null) {
// read the flush id once from zookeeper instead of reading
// it for each tablet
- flushID = tablet.getFlushID();
+ try {
+ flushID = tablet.getFlushID();
+ } catch (NoNodeException e) {
+ // table was probably deleted
+ log.info("Asked to flush table that has no flush id " + ke + " " + e.getMessage());
+ return;
+ }
}
tablet.flush(flushID);
}
@@ -1904,7 +1911,11 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
Tablet tablet = onlineTablets.get(new KeyExtent(textent));
if (tablet != null) {
log.info("Flushing " + tablet.getExtent());
- tablet.flush(tablet.getFlushID());
+ try {
+ tablet.flush(tablet.getFlushID());
+ } catch (NoNodeException nne) {
+ log.info("Asked to flush tablet that has no flush id " + new KeyExtent(textent) + " " + nne.getMessage());
+ }
}
}
@@ -1999,7 +2010,12 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
// all for the same table id, so only need to read
// compaction id once
if (compactionId == null)
- compactionId = tablet.getCompactionID();
+ try {
+ compactionId = tablet.getCompactionID();
+ } catch (NoNodeException e) {
+ log.info("Asked to compact table with no compaction id " + ke + " " + e.getMessage());
+ return;
+ }
tablet.compactAll(compactionId);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-366_db4a291f.diff |
bugs-dot-jar_data_ACCUMULO-3945_36225565 | ---
BugID: ACCUMULO-3945
Summary: In Accumulo 1.7.0, connecting to a minicluster started via bin/accumulo minicluster
doesn't work
Description: "In Accumulo 1.7.0, connecting to a minicluster started via \"bin/accumulo
minicluster\" doesn't work. When connecting, it appears to ignore the ZK port supplied
in the command and is attempting to connect to ZK on 2181.\n\nFor example:\naccumulo-1.7.0
> bin/accumulo minicluster\n…\nMini Accumulo Cluster\n\n Directory: /var/folders/rv/44k88tps4ql0dc1f68ck4d2w0000gn/T/1437925819514-0\n
\ Logs: /var/folders/rv/44k88tps4ql0dc1f68ck4d2w0000gn/T/1437925819514-0/logs\n
\ Instance Name: miniInstance\n Root Password: secret\n ZooKeeper:
\ localhost:56783\n Shutdown Port: 4445\n\n To connect with shell,
use the following command :\n accumulo shell -zh localhost:56783 -zi miniInstance
-u root\n\nSuccessfully started on Sun Jul 26 11:50:28 EDT 2015\n===================\n\nFrom
a new terminal:\n\naccumulo-1.7.0 > accumulo shell -zh localhost:56783 -zi miniInstance
-u root\nPassword: *******\n…. 60 seconds later ….\n2015-07-26 11:52:44,436 [tracer.ZooTraceClient]
ERROR: Unabled to get destination tracer hosts\nin ZooKeeper, will retry in 5000
milliseconds\njava.lang.RuntimeException: Failed to connect to zookeeper (localhost:2181)
within 2x zookeeper\ntimeout period 30000\n\tat org.apache.accumulo.fate.zookeeper.ZooSession.connect(ZooSession.java:124)\n\nShell
- Apache Accumulo Interactive Shell\n-\n- version: 1.7.0\n- instance name: miniInstance\n-
instance id: a371d4ac-8bc7-4a6a-865f-5f3c8e27fbe1\n-\n- type 'help' for a list of
available commands\n-\nroot@miniInstance>\n\n"
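The one-line fix below copies the user-supplied ZooKeeper hosts into the client configuration so that the trace client stops falling back to localhost:2181. A simplified sketch of the intent, using a plain map as a stand-in for Accumulo's ClientConfiguration (the property key instance.zookeeper.host is the only real name assumed here):
{code}
import java.util.HashMap;
import java.util.Map;

// Simplified sketch: hosts given on the command line override the site default, and the
// same value is what the trace client reads, so it no longer assumes port 2181.
class ClientConfigSketch {
  static final String INSTANCE_ZK_HOST = "instance.zookeeper.host";

  static Map<String,String> buildClientConfig(String siteZkHosts, String cliZkHosts) {
    Map<String,String> clientConfig = new HashMap<>();
    if (siteZkHosts != null) {
      clientConfig.put(INSTANCE_ZK_HOST, siteZkHosts);
    }
    // If the user provided the hosts (e.g. "-zh localhost:56783"), set them for tracing too.
    if (cliZkHosts != null) {
      clientConfig.put(INSTANCE_ZK_HOST, cliZkHosts);
    }
    return clientConfig;
  }
}
{code}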
diff --git a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
index 92ea1a5..01b7ce3 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
@@ -315,6 +315,11 @@ public class ShellOptionsJC {
clientConfig.withZkHosts(siteConf.get(Property.INSTANCE_ZK_HOST));
}
+ // If the user provided the hosts, set the ZK for tracing too
+ if (null != zooKeeperHosts) {
+ clientConfig.setProperty(ClientProperty.INSTANCE_ZK_HOST, zooKeeperHosts);
+ }
+
return clientConfig;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3945_36225565.diff |
bugs-dot-jar_data_ACCUMULO-1051_25cf3ccd | ---
BugID: ACCUMULO-1051
Summary: Authorizations has inconsistent serialization
Description: |-
The same set of authorizations may not serialize to the same value each time, if specified in a different order when constructed (like new Authorizations("a", "b") and new Authorizations("b", "a")), because serialization reproducibility depends on the insert order in the underlying HashSet.
So, one could get the following to happen:
{code:java}
true == auths1.equals(auths2) && !auths1.serialize().equals(auths2.serialize());
{code}
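A small, hypothetical check (not part of the patch) that reproduces the symptom: the two objects compare equal, but before the fix their serialized forms could differ.
{code}
import org.apache.accumulo.core.security.Authorizations;

// Hypothetical check reproducing the symptom described above.
public class AuthSerializationCheck {
  public static void main(String[] args) {
    Authorizations auths1 = new Authorizations("a", "b");
    Authorizations auths2 = new Authorizations("b", "a");

    System.out.println("equals:          " + auths1.equals(auths2));
    // With the bug, this could print false even though the two objects are equal.
    System.out.println("serialize equal: " + auths1.serialize().equals(auths2.serialize()));
  }
}
{code}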
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
index 5933325..a677f3f 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
@@ -23,10 +23,9 @@ import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
@@ -38,14 +37,14 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
private static final long serialVersionUID = 1L;
- private Set<ByteSequence> auths = new TreeSet<ByteSequence>();
+ private HashSet<ByteSequence> auths = new HashSet<ByteSequence>();
private List<byte[]> authsList = new ArrayList<byte[]>();
private List<byte[]> immutableList = Collections.unmodifiableList(authsList);
private static final boolean[] validAuthChars = new boolean[256];
public static final String HEADER = "!AUTH1:";
-
+
static {
for (int i = 0; i < 256; i++) {
validAuthChars[i] = false;
@@ -104,11 +103,11 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
* @param authorizations
* a serialized authorizations string produced by {@link #getAuthorizationsArray()} or {@link #serialize()}
*/
-
+
public Authorizations(byte[] authorizations) {
ArgumentChecker.notNull(authorizations);
-
+
String authsString = new String(authorizations);
if (authsString.startsWith(HEADER)) {
// its the new format
@@ -141,7 +140,7 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
public Authorizations(Charset charset, String... authorizations) {
setAuthorizations(charset, authorizations);
}
-
+
public Authorizations(String... authorizations) {
setAuthorizations(authorizations);
}
@@ -177,7 +176,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return ByteBufferUtil.toByteBuffers(immutableList);
}
- @Override
public String toString() {
StringBuilder sb = new StringBuilder();
String sep = "";
@@ -198,7 +196,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return auths.contains(auth);
}
- @Override
public boolean equals(Object o) {
if (o == null) {
return false;
@@ -213,7 +210,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return false;
}
- @Override
public int hashCode() {
int result = 0;
for (ByteSequence b : auths)
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1051_25cf3ccd.diff |
bugs-dot-jar_data_ACCUMULO-3634_9339ecf8 | ---
BugID: ACCUMULO-3634
Summary: AuthenticationTokenSecretManager might delete key while ZooAuthenticationKeyWatcher
enumerates existing keys
Description: |-
Noticed the following race condition.
The secret manager (in the master) on startup will enumerate the old keys used for creating delegation tokens and delete the keys that are expired.
At the same time, the watcher (in each tserver) might see some updates to these keys and update the secret manager. There's a race condition in which the watcher might try to read a key that the secret manager just deleted.
Need to catch the NoNodeException in the watcher and accept that it's OK if one of these children is deleted, to avoid a scary error in the monitor.
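A generic sketch of the pattern the patch applies: a child listed by getChildren() can be deleted before getData() runs, so a NoNodeException for an individual child is expected and skipped. The class below is illustrative, not the actual watcher.
{code}
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch: tolerate children vanishing between listing and reading them.
class KeyEnumeratorSketch {
  int loadKeys(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    int keysAdded = 0;
    List<String> children = zk.getChildren(path, false);
    for (String child : children) {
      String childPath = path + "/" + child;
      try {
        byte[] data = zk.getData(childPath, false, null);
        // deserialize and register the key from 'data' ...
        keysAdded++;
      } catch (NoNodeException e) {
        // The master expired the key after we listed it; not an error, just skip it.
      }
    }
    return keysAdded;
  }
}
{code}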
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
index 2913343..fe4407e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.accumulo.fate.zookeeper.ZooReader;
import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
@@ -131,10 +132,15 @@ public class ZooAuthenticationKeyWatcher implements Watcher {
int keysAdded = 0;
for (String child : zk.getChildren(path, this)) {
String childPath = path + "/" + child;
- // Get the node data and reset the watcher
- AuthenticationKey key = deserializeKey(zk.getData(childPath, this, null));
- secretManager.addKey(key);
- keysAdded++;
+ try {
+ // Get the node data and reset the watcher
+ AuthenticationKey key = deserializeKey(zk.getData(childPath, this, null));
+ secretManager.addKey(key);
+ keysAdded++;
+ } catch (NoNodeException e) {
+ // The master expired(deleted) the key between when we saw it in getChildren() and when we went to add it to our secret manager.
+ log.trace("{} was deleted when we tried to access it", childPath);
+ }
}
return keysAdded;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3634_9339ecf8.diff |
bugs-dot-jar_data_ACCUMULO-2520_a64151e6 | ---
BugID: ACCUMULO-2520
Summary: Garbage collector deleted everything when given bad input
Description: "Patch v3 of the upgrade from ACCUMULO-2145 had a test that did the following
before upgrade.\n\n{noformat}\nroot@testUp> table !METADATA\nroot@testUp !METADATA>
grant Table.WRITE -u root \nroot@testUp !METADATA> insert ~del testDel test valueTest\n{noformat}\n\nThis
is a malformed delete entry. Accumulo code should not delete such entries. When
the 1.5.1 garbage collector saw this it did the following.\n\n{noformat}\n2014-03-20
18:20:05,359 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables\n2014-03-20
18:20:05,359 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables/!0/default_tablet/F0000009.rf\n2014-03-20
18:20:05,360 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables/!0/table_info/F000000b.rf\n{noformat}\n\nGC
should validate that delete entries are paths of the expected length. I have confirmed
this bug exists in 1.5.1. I am assuming it exists in 1.4 and 1.6 branches."
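An illustrative sketch (not the actual GC code) of the validation idea: deletion candidates that do not resolve to a relative path of the expected depth are skipped with a warning instead of being acted on.
{code}
import java.util.Collection;
import java.util.SortedMap;
import java.util.TreeMap;

// Illustrative only: malformed candidates (such as the "~del testDel test" entry above)
// are skipped with a warning rather than being turned into a parent-directory delete.
class DeletionCandidateFilter {
  static SortedMap<String,String> relativize(Collection<String> candidates) {
    SortedMap<String,String> valid = new TreeMap<>();
    for (String candidate : candidates) {
      try {
        valid.put(makeRelative(candidate), candidate);
      } catch (IllegalArgumentException iae) {
        System.err.println("Ignoring invalid deletion candidate " + candidate);
      }
    }
    return valid;
  }

  // Expect .../tables/<tableId>/<tablet>/<file> or .../tables/<tableId>/<file>.
  static String makeRelative(String path) {
    String[] tokens = path.split("/");
    int n = tokens.length;
    if (n >= 4 && "tables".equals(tokens[n - 4])) {
      return tokens[n - 3] + "/" + tokens[n - 2] + "/" + tokens[n - 1];
    } else if (n >= 3 && "tables".equals(tokens[n - 3])) {
      return tokens[n - 2] + "/" + tokens[n - 1];
    }
    throw new IllegalArgumentException(path);
  }
}
{code}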
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 464d0d9..40fb847 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -85,10 +85,7 @@ public class GarbageCollectionAlgorithm {
tokens = tmp.toArray(new String[tmp.size()]);
}
- if (tokens.length > 3) {
- if (!path.contains(":"))
- throw new IllegalArgumentException(path);
-
+ if (tokens.length > 3 && path.contains(":")) {
if (tokens[tokens.length - 4].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 3)) {
relPath = tokens[tokens.length - 3] + "/" + tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
} else if (tokens[tokens.length - 3].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 2)) {
@@ -96,9 +93,9 @@ public class GarbageCollectionAlgorithm {
} else {
throw new IllegalArgumentException(path);
}
- } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3)) {
+ } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3) && !path.contains(":")) {
relPath = tokens[0] + "/" + tokens[1] + "/" + tokens[2];
- } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2)) {
+ } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2) && !path.contains(":")) {
relPath = tokens[0] + "/" + tokens[1];
} else {
throw new IllegalArgumentException(path);
@@ -112,7 +109,13 @@ public class GarbageCollectionAlgorithm {
SortedMap<String,String> ret = new TreeMap<String,String>();
for (String candidate : candidates) {
- String relPath = makeRelative(candidate, 0);
+ String relPath;
+ try {
+ relPath = makeRelative(candidate, 0);
+ } catch (IllegalArgumentException iae) {
+ log.warn("Ingoring invalid deletion candidate " + candidate);
+ continue;
+ }
ret.put(relPath, candidate);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2520_a64151e6.diff |
bugs-dot-jar_data_ACCUMULO-1044_ea2f9856 | ---
BugID: ACCUMULO-1044
Summary: bulk imported files showing up in metadata after bulk import fails
Description: |
Bulk import fails. The file is moved to the failures directory.
But references in the !METADATA table remain.
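A minimal sketch of the guard the patch introduces: a metadata mutation referencing a bulk-loaded file is rejected unless the bulk-load transaction that wrote it is still alive. The Arbitrator interface and the "bulk" type string below are stand-ins for Accumulo's ZooArbitrator and BULK_ARBITRATOR_TYPE.
{code}
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch; not the actual MetadataConstraints code.
class BulkFileConstraintSketch {
  /** Stand-in for Accumulo's transaction arbitrator. */
  interface Arbitrator {
    boolean transactionAlive(String type, long tid) throws Exception;
  }

  /** Adds violation code 8 unless the bulk-load transaction that wrote the file is alive. */
  static List<Short> checkBulkFileReference(Arbitrator arbitrator, String tidValue, List<Short> violations) {
    try {
      long tid = Long.parseLong(tidValue);
      if (!arbitrator.transactionAlive("bulk", tid)) {
        violations = addViolation(violations, 8);
      }
    } catch (Exception ex) {
      // Unparseable transaction id or arbitrator failure also rejects the mutation.
      violations = addViolation(violations, 8);
    }
    return violations;
  }

  static List<Short> addViolation(List<Short> violations, int code) {
    if (violations == null)
      violations = new ArrayList<Short>();
    violations.add((short) code);
    return violations;
  }
}
{code}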
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index bd19d1f..463b7b0 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.zookeeper.ZooCache;
import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
@@ -72,6 +73,22 @@ public class MetadataConstraints implements Constraint {
return false;
}
+ static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
+ if (lst == null)
+ lst = new ArrayList<Short>();
+ lst.add((short)violation);
+ return lst;
+ }
+
+ static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
+ if (lst == null)
+ return addViolation(lst, intViolation);
+ short violation = (short)intViolation;
+ if (!lst.contains(violation))
+ return addViolation(lst, intViolation);
+ return lst;
+ }
+
public List<Short> check(Environment env, Mutation mutation) {
ArrayList<Short> violations = null;
@@ -96,44 +113,30 @@ public class MetadataConstraints implements Constraint {
break;
if (!validTableNameChars[0xff & b]) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (!containsSemiC) {
// see if last row char is <
if (row.length == 0 || row[row.length - 1] != '<') {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
} else {
if (row.length == 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (row.length > 0 && row[0] == '!') {
if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
// ensure row is not less than Constants.METADATA_TABLE_ID
if (new Text(row).compareTo(new Text(Constants.METADATA_TABLE_ID)) < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 5);
+ violations = addViolation(violations, 5);
}
for (ColumnUpdate columnUpdate : colUpdates) {
@@ -141,17 +144,13 @@ public class MetadataConstraints implements Constraint {
if (columnUpdate.isDeleted()) {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
}
continue;
}
if (columnUpdate.getValue().length == 0 && !columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 6);
+ violations = addViolation(violations, 6);
}
if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
@@ -159,26 +158,49 @@ public class MetadataConstraints implements Constraint {
DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} catch (NumberFormatException nfe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
} catch (ArrayIndexOutOfBoundsException aiooe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} else if (columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+ } else if (columnFamily.equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+ if (!columnUpdate.isDeleted()) {
+ // splits, which also write the time reference, are allowed to write this reference even when
+ // the transaction is not running because the other half of the tablet is holding a reference
+ // to the file.
+ boolean isSplitMutation = false;
+ // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+ // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+ // See ACCUMULO-1230.
+ boolean isLocationMutation = false;
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ if (new ColumnFQ(update).equals(Constants.METADATA_TIME_COLUMN)) {
+ isSplitMutation = true;
+ }
+ if (update.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+ isLocationMutation = true;
+ }
+ }
+
+ if (!isSplitMutation && !isLocationMutation) {
+ String tidString = new String(columnUpdate.getValue());
+ long tid = Long.parseLong(tidString);
+ try {
+ if (!new ZooArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
+ violations = addViolation(violations, 8);
+ }
+ } catch (Exception ex) {
+ violations = addViolation(violations, 8);
+ }
+ }
+ }
} else {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
&& (violations == null || !violations.contains((short) 4))) {
KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
@@ -188,9 +210,7 @@ public class MetadataConstraints implements Constraint {
boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
if (!prevEndRowLessThanEndRow) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 3);
+ violations = addViolation(violations, 3);
}
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_LOCK_COLUMN)) {
if (zooCache == null) {
@@ -211,9 +231,7 @@ public class MetadataConstraints implements Constraint {
}
if (!lockHeld) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 7);
+ violations = addViolation(violations, 7);
}
}
@@ -221,7 +239,10 @@ public class MetadataConstraints implements Constraint {
}
if (violations != null) {
- log.debug(" violating metadata mutation : " + mutation);
+ log.debug("violating metadata mutation : " + new String(mutation.getRow()));
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+ }
}
return violations;
@@ -243,6 +264,8 @@ public class MetadataConstraints implements Constraint {
return "Empty values are not allowed for any " + Constants.METADATA_TABLE_NAME + " column";
case 7:
return "Lock not held in zookeeper by writer";
+ case 8:
+ return "Bulk load transaction no longer running";
}
return null;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1044_ea2f9856.diff |
bugs-dot-jar_data_ACCUMULO-1800_3143b9c5 | ---
BugID: ACCUMULO-1800
Summary: delete mutations not working through the Proxy
Description: |
Aru Sahni writes:
{quote}
I'm new to Accumulo and am still trying to wrap my head around its ways. To further that challenge, I'm using Pyaccumulo, which doesn't present much in terms of available reference material.
Right now I'm trying to understand how Accumulo manages record (key-value pair) deletions.
conn = Accumulo(host, port, user, password)
table = 'test_table'
conn.create_table(table)
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', value='car')
writer.add_mutation(mut)
writer.close()
conn.close()
Will generate a record (found via a shell scan):
mut_01 item:name [] car
However the subsequent mutation...
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', is_delete=True)
writer.add_mutation(mut)
writer.close()
Results in:
mut_01 item:name []
How should one expect the deleted row to be represented? That record sticks around even after I force a compaction of the table. I was expecting it to not show up in any iterators, or at least provide an easy way to see if the cell has been deleted.
{quote}
[~ecn] has confirmed the problem.
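For reference, a minimal sketch of how a delete is applied through the native Java client, which the proxy's {{is_delete}} flag is meant to map onto. The connector, table, and column names below are placeholders taken from the report; this is illustrative, not the proxy's own code.
{code}
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.io.Text;

public class DeleteExample {
  static void deleteName(Connector conn) throws TableNotFoundException, MutationsRejectedException {
    BatchWriter writer = conn.createBatchWriter("test_table", new BatchWriterConfig());
    Mutation mut = new Mutation(new Text("mut_01"));
    // A delete is its own entry type; it is not a put with an empty value,
    // which is what the pre-patch ProxyServer produced for is_delete=True.
    mut.putDelete(new Text("item"), new Text("name"));
    writer.addMutation(mut);
    writer.close();
  }
}
{code}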
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 3c5c88a..c6e74f1 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1126,14 +1126,14 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (update.isSetDeleteCell()) {
m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
} else {
- if (update.isSetDeleteCell()) {
- m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
- } else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp(), value);
+ m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, update.getTimestamp(), new Value(value));
}
- }
} else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, value);
+ if (update.isSetDeleteCell()) {
+ m.putDelete(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz);
+ } else {
+ m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, new Value(value));
+ }
}
}
try {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1800_3143b9c5.diff |
bugs-dot-jar_data_ACCUMULO-2962_023be574 | ---
BugID: ACCUMULO-2962
Summary: RangeInputSplit Writable methods don't serialize IteratorSettings
Description: |-
While trying to figure out why some information was getting lost on a RangeInputSplit after serialization, I found it was because the class's serialization and deserialization don't include the configured IteratorSettings.
This likely isn't a big problem for normal users: when no IteratorSettings are configured on the RangeInputSplit, it falls back to pulling them from the Configuration. With "non-standard" uses of mapreduce, however, that information could be missing from the Configuration the mappers receive, and the job would subsequently error.
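A minimal round-trip sketch of the failure, assuming the 1.6 mapreduce {{RangeInputSplit}} with its {{setIterators}}/{{getIterators}} accessors and Writable plumbing (constructor and setter details may differ slightly across 1.6.x; the iterator name and class are made up):
{code}
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.RangeInputSplit;

public class SplitRoundTrip {
  public static void main(String[] args) throws IOException {
    RangeInputSplit split = new RangeInputSplit();
    split.setIterators(Collections.singletonList(
        new IteratorSetting(50, "example", "org.example.SomeIterator")));

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    split.write(new DataOutputStream(buf));

    RangeInputSplit copy = new RangeInputSplit();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

    // Pre-patch this prints nothing useful: write()/readFields() silently drop the
    // iterators, so a mapper relying on them falls back to the Configuration (or fails).
    System.out.println(copy.getIterators());
  }
}
{code}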
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 73c9b59..05316a1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -205,6 +205,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
}
if (in.readBoolean()) {
+ int numIterators = in.readInt();
+ iterators = new ArrayList<IteratorSetting>(numIterators);
+ for (int i = 0; i < numIterators; i++) {
+ iterators.add(new IteratorSetting(in));
+ }
+ }
+
+ if (in.readBoolean()) {
level = Level.toLevel(in.readInt());
}
}
@@ -275,6 +283,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
out.writeUTF(zooKeepers);
}
+ out.writeBoolean(null != iterators);
+ if (null != iterators) {
+ out.writeInt(iterators.size());
+ for (IteratorSetting iterator : iterators) {
+ iterator.write(out);
+ }
+ }
+
out.writeBoolean(null != level);
if (null != level) {
out.writeInt(level.toInt());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2962_023be574.diff |
bugs-dot-jar_data_ACCUMULO-3385_a3267d3e | ---
BugID: ACCUMULO-3385
Summary: DateLexicoder fails to correctly order dates prior to 1970
Description: |-
DateLexicoder incorrectly sorts dates before 1970 after all other dates.
Consequently, the order was correct if the user only wrote dates before 1970, or only dates after 1970, but not if they wrote both.
The DateLexicoder should be fixed to store using a signed LongLexicoder internally, instead of the ULongLexicoder that it used before.
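A small sketch of the symptom, comparing the encoded bytes the way Accumulo sorts them (unsigned, lexicographic); Hadoop's {{WritableComparator}} is only used here for the byte comparison:
{code}
import java.util.Date;
import org.apache.accumulo.core.client.lexicoder.DateLexicoder;
import org.apache.hadoop.io.WritableComparator;

public class DateOrderCheck {
  public static void main(String[] args) {
    DateLexicoder lex = new DateLexicoder();
    byte[] before1970 = lex.encode(new Date(-1000L)); // one second before the epoch
    byte[] after1970 = lex.encode(new Date(1000L));   // one second after the epoch
    int cmp = WritableComparator.compareBytes(before1970, 0, before1970.length,
        after1970, 0, after1970.length);
    // With the unsigned ULongLexicoder encoding this is > 0 (the pre-1970 date sorts last);
    // with the signed LongLexicoder of the fix it is < 0, matching Date order.
    System.out.println(cmp);
  }
}
{code}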
diff --git a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
index c93ba70..8533bfe 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
@@ -20,20 +20,21 @@ import java.util.Date;
/**
* A lexicoder for date objects. It preserves the native Java sort order for Date.
+ *
* @since 1.6.0
*/
public class DateLexicoder implements Lexicoder<Date> {
-
- private ULongLexicoder longEncoder = new ULongLexicoder();
-
+
+ private LongLexicoder longEncoder = new LongLexicoder();
+
@Override
public byte[] encode(Date data) {
return longEncoder.encode(data.getTime());
}
-
+
@Override
public Date decode(byte[] data) {
return new Date(longEncoder.decode(data));
}
-
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3385_a3267d3e.diff |
bugs-dot-jar_data_ACCUMULO-3897_699b8bf0 | ---
BugID: ACCUMULO-3897
Summary: ShutdownTServer never sets requestedShutdown
Description: |-
ACCUMULO-1259 made ShutdownTServer a bit more sane with respect to what it was doing and the FATE repo interface.
One thing it attempts is to not repeatedly invoke shutdownTServer on the master.
Except {{requestedShutdown}} is never set to {{true}}.
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
index 11cd91b..171e312 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@ -58,6 +58,7 @@ public class ShutdownTServer extends MasterRepo {
// only send this request once
if (!requestedShutdown) {
master.shutdownTServer(server);
+ requestedShutdown = true;
}
if (master.onlineTabletServers().contains(server)) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3897_699b8bf0.diff |
bugs-dot-jar_data_ACCUMULO-1183_742960f1 | ---
BugID: ACCUMULO-1183
Summary: ProxyServer does not set column information on BatchScanner
Description: The createScanner method uses the options from the thrift request to
call fetchColumn() and fetchColumnFamily(). The createBatchScanner method should
have the same behavior, but those statements are absent from the code.
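What a proxy createBatchScanner call with column restrictions should be equivalent to on the native client; a sketch with placeholder table, column, and authorization values:
{code}
import java.util.Collections;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class BatchScanColumns {
  static BatchScanner scan(Connector conn) throws TableNotFoundException {
    BatchScanner bs = conn.createBatchScanner("mytable", new Authorizations(), 4);
    bs.setRanges(Collections.singletonList(new Range()));
    // These are the calls the pre-patch proxy never issued for a batch scanner,
    // so the ScanColumn restrictions in the request were silently ignored.
    bs.fetchColumnFamily(new Text("fam"));
    bs.fetchColumn(new Text("fam"), new Text("qual"));
    return bs;
  }
}
{code}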
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 911d187..167cecc 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -819,7 +819,17 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
scanner.setRanges(ranges);
+
+ if (opts.columns != null) {
+ for (ScanColumn col : opts.columns) {
+ if (col.isSetColQualifier())
+ scanner.fetchColumn(ByteBufferUtil.toText(col.colFamily), ByteBufferUtil.toText(col.colQualifier));
+ else
+ scanner.fetchColumnFamily(ByteBufferUtil.toText(col.colFamily));
+ }
+ }
}
+
UUID uuid = UUID.randomUUID();
ScannerPlusIterator spi = new ScannerPlusIterator();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1183_742960f1.diff |
bugs-dot-jar_data_ACCUMULO-4138_4d23d784 | ---
BugID: ACCUMULO-4138
Summary: CompactCommand description is incorrect
Description: "The compact command has the following description \n{code}\nroot@accumulo>
compact -?\nusage: compact [<table>{ <table>}] [-?] [-b <begin-row>] [--cancel]
[-e <end-row>] [-nf] [-ns <namespace> | -p <pattern> | -t <tableName>] [-pn <profile>]
\ [-w]\ndescription: sets all tablets for a table to major compact as soon as possible
(based on current time)\n -?,--help display this help\n -b,--begin-row
<begin-row> begin row (inclusive)\n --cancel cancel
user initiated compactions\n -e,--end-row <end-row> end row (inclusive)\n
\ -nf,--noFlush do not flush table data in memory before compacting.\n
\ -ns,--namespace <namespace> name of a namespace to operate on\n -p,--pattern
<pattern> regex pattern of table names to operate on\n -pn,--profile <profile>
\ iterator profile name\n -t,--table <tableName> name of a table
to operate on\n -w,--wait wait for compact to finish\n{code}\n\nHowever,
the --begin-row is not inclusive. Here is a simple demonstration.\n{code}\ncreatetable
compacttest\naddsplits a b c\ninsert \"a\" \"1\" \"\" \"\"\ninsert \"a\" \"2\" \"\"
\"\"\ninsert \"b\" \"3\" \"\" \"\"\ninsert \"b\" \"4\" \"\" \"\"\ninsert \"c\" \"5\"
\"\" \"\"\ninsert \"c\" \"6\" \"\" \"\"\nflush -w\nscan -t accumulo.metadata -np\ncompact
-b a -e c -t compacttest -w\nscan -t accumulo.metadata -np\ndeletetable compacttest
-f\n{code}\n\nYou will see that the file associated with the 'a' split is still an F
(flush) file, while the files in the 'b' and 'c' splits are A files.\n\nNot sure if
the fix is to update the command's description, which would be easy, or to make the
begin row actually inclusive."
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
index 432f17a..99e09e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
@@ -117,7 +117,7 @@ public abstract class OptUtil {
}
public static Option startRowOpt() {
- final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (NOT) inclusive");
+ final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (exclusive)");
o.setArgName("begin-row");
return o;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4138_4d23d784.diff |
bugs-dot-jar_data_ACCUMULO-1348_6ff92b12 | ---
BugID: ACCUMULO-1348
Summary: Accumulo Shell does not respect 'exit' when executing file
Description: |-
If there is an {{exit}} statement in the file given via {{accumulo shell -f file}}, the execution seems to skip it and go on to the next command instead of terminating.
To recreate:
{noformat}
[mike@home ~] cat bug.accumulo
exit
scan -np -t !METADATA
[mike@home ~] bin/accumulo shell -f /home/mike/bug.accumulo
{noformat}
Expected output: None
Actual output: A full scan of the !METADATA
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 1a3c518..4469d5c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@ -78,8 +78,13 @@ public class MockShell extends Shell {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
- while (scanner.hasNextLine())
- execCommand(scanner.nextLine(), true, isVerbose());
+ try {
+ while (scanner.hasNextLine() && !hasExited()) {
+ execCommand(scanner.nextLine(), true, isVerbose());
+ }
+ } finally {
+ scanner.close();
+ }
} else if (execCommand != null) {
for (String command : execCommand.split("\n")) {
execCommand(command, true, isVerbose());
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index ab08c32..75f7bd0 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -250,7 +250,7 @@ public class Shell extends ShellOptions {
if (sysUser == null)
sysUser = "root";
String user = cl.getOptionValue(usernameOption.getOpt(), sysUser);
-
+
String passw = cl.getOptionValue(passwOption.getOpt(), null);
tabCompletion = !cl.hasOption(tabCompleteOption.getLongOpt());
String[] loginOptions = cl.getOptionValues(loginOption.getOpt());
@@ -265,13 +265,13 @@ public class Shell extends ShellOptions {
if (loginOptions == null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Must supply '-" + loginOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (passw != null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Can not supply '-" + passwOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (user == null)
throw new MissingArgumentException(usernameOption);
-
+
if (loginOptions != null && cl.hasOption(tokenOption.getOpt())) {
Properties props = new Properties();
for (String loginOption : loginOptions)
@@ -283,7 +283,7 @@ public class Shell extends ShellOptions {
this.token = Class.forName(cl.getOptionValue(tokenOption.getOpt())).asSubclass(AuthenticationToken.class).newInstance();
this.token.init(props);
}
-
+
if (!cl.hasOption(fakeOption.getLongOpt())) {
DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), "shell", InetAddress.getLocalHost()
.getHostName());
@@ -447,8 +447,9 @@ public class Shell extends ShellOptions {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
try {
- while (scanner.hasNextLine())
+ while (scanner.hasNextLine() && !hasExited()) {
execCommand(scanner.nextLine(), true, isVerbose());
+ }
} finally {
scanner.close();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1348_6ff92b12.diff |
bugs-dot-jar_data_ACCUMULO-1518_df4b1985 | ---
BugID: ACCUMULO-1518
Summary: FileOperations expects RFile filenames to contain only 1 dot.
Description: |-
If I attempt to create or read an RFile that contains more than 1 dot in the filename, FileOperations throws an IllegalArgumentException("File name " + name + " has no extension").
Please allow creation/import of RFiles that have more than 1 dot in the filename.
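The parsing problem in a nutshell; the file name below is made up, but it shows why splitting on dots and requiring exactly two parts fails, while taking the last part always yields the extension:
{code}
public class ExtensionCheck {
  public static void main(String[] args) {
    String name = "I0000a_copy.1.rf"; // hypothetical RFile name containing two dots
    String[] sp = name.split("\\.");
    // Pre-patch: sp.length != 2, so an IllegalArgumentException("... has no extension")
    // was thrown even though the extension is perfectly recoverable.
    String extension = sp[sp.length - 1];
    System.out.println(extension); // "rf"
  }
}
{code}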
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 9f60725..17e540b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -44,14 +44,13 @@ class DispatchingFileFactory extends FileOperations {
if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
return new MapFileOperations();
}
-
String[] sp = name.split("\\.");
- if (sp.length != 2) {
+ if (sp.length < 2) {
throw new IllegalArgumentException("File name " + name + " has no extension");
}
- String extension = sp[1];
+ String extension = sp[sp.length - 1];
if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
return new MapFileOperations();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1518_df4b1985.diff |
bugs-dot-jar_data_ACCUMULO-2494_0dc92ca1 | ---
BugID: ACCUMULO-2494
Summary: Stat calculation of STDEV may be inaccurate
Description: |-
The math is sound, but it is susceptible to rounding errors. We should address that.
See http://www.strchr.com/standard_deviation_in_one_pass and http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
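A quick numeric illustration of the cancellation (plain JDK arithmetic, not Accumulo code): for two large, nearly equal samples, the textbook one-pass formula loses the answer entirely, while an updating formula in the spirit of the linked references (and of the commons-math {{StandardDeviation}} the patch switches to) keeps it. The population variance of {1e9, 1e9+1} is 0.25.
{code}
public class VarianceCancellation {
  public static void main(String[] args) {
    double a = 1e9, b = 1e9 + 1;

    // E[x^2] - E[x]^2: both terms are ~1e18, so their tiny true difference (0.25)
    // is swamped by double rounding error; for these inputs the result collapses to 0.0.
    double m = (a + b) / 2;
    double naive = (a * a + b * b) / 2 - m * m;

    // Welford-style single pass: numerically stable.
    double mean = 0, m2 = 0;
    int n = 0;
    for (double x : new double[] {a, b}) {
      n++;
      double delta = x - mean;
      mean += delta / n;
      m2 += delta * (x - mean);
    }
    double stable = m2 / n; // population variance

    System.out.println(naive + " vs " + stable); // 0.0 vs 0.25
  }
}
{code}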
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Stat.java b/core/src/main/java/org/apache/accumulo/core/util/Stat.java
index e65265c..d2d560e 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Stat.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Stat.java
@@ -16,54 +16,66 @@
*/
package org.apache.accumulo.core.util;
+import org.apache.commons.math.stat.descriptive.StorelessUnivariateStatistic;
+import org.apache.commons.math.stat.descriptive.moment.Mean;
+import org.apache.commons.math.stat.descriptive.moment.StandardDeviation;
+import org.apache.commons.math.stat.descriptive.rank.Max;
+import org.apache.commons.math.stat.descriptive.rank.Min;
+import org.apache.commons.math.stat.descriptive.summary.Sum;
+
public class Stat {
-
- long max = Long.MIN_VALUE;
- long min = Long.MAX_VALUE;
- long sum = 0;
- int count = 0;
- double partialStdDev = 0;
-
+ Min min;
+ Max max;
+ Sum sum;
+ Mean mean;
+ StandardDeviation sd;
+
+ StorelessUnivariateStatistic[] stats;
+
+ public Stat() {
+ min = new Min();
+ max = new Max();
+ sum = new Sum();
+ mean = new Mean();
+ sd = new StandardDeviation();
+
+ stats = new StorelessUnivariateStatistic[] {min, max, sum, mean, sd};
+ }
+
public void addStat(long stat) {
- if (stat > max)
- max = stat;
- if (stat < min)
- min = stat;
-
- sum += stat;
-
- partialStdDev += stat * stat;
-
- count++;
+ for (StorelessUnivariateStatistic statistic : stats) {
+ statistic.increment(stat);
+ }
}
-
+
public long getMin() {
- return min;
+ return (long) min.getResult();
}
-
+
public long getMax() {
- return max;
+ return (long) max.getResult();
+ }
+
+ public long getSum() {
+ return (long) sum.getResult();
}
-
+
public double getAverage() {
- return ((double) sum) / count;
+ return mean.getResult();
}
-
+
public double getStdDev() {
- return Math.sqrt(partialStdDev / count - getAverage() * getAverage());
+ return sd.getResult();
}
-
+
public String toString() {
- return String.format("%,d %,d %,.2f %,d", getMin(), getMax(), getAverage(), count);
+ return String.format("%,d %,d %,.2f %,d", getMin(), getMax(), getAverage(), mean.getN());
}
-
+
public void clear() {
- sum = 0;
- count = 0;
- partialStdDev = 0;
- }
-
- public long getSum() {
- return sum;
+ for (StorelessUnivariateStatistic statistic : stats) {
+ statistic.clear();
+ }
}
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2494_0dc92ca1.diff |
bugs-dot-jar_data_ACCUMULO-3475_7651b777 | ---
BugID: ACCUMULO-3475
Summary: Shell.config()'s return value is ignored.
Description: "{{Shell.config()}} returns a boolean which is true if there was an error
configuring the shell, but the value is never observed. This can result in other
unintended errors (like trying to use the ConsoleReader member when it's not initialized)."
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 5ff340b..0fbe879 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@ -44,15 +44,19 @@ public class MockShell extends Shell {
this.writer = writer;
}
+ @Override
public boolean config(String... args) {
- configError = super.config(args);
+ // If configuring the shell failed, fail quickly
+ if (!super.config(args)) {
+ return false;
+ }
// Update the ConsoleReader with the input and output "redirected"
try {
this.reader = new ConsoleReader(in, writer);
} catch (Exception e) {
printException(e);
- configError = true;
+ return false;
}
// Don't need this for testing purposes
@@ -61,7 +65,7 @@ public class MockShell extends Shell {
// Make the parsing from the client easier;
this.verbose = false;
- return configError;
+ return true;
}
@Override
@@ -71,9 +75,6 @@ public class MockShell extends Shell {
}
public int start() throws IOException {
- if (configError)
- return 1;
-
String input;
if (isVerbose())
printInfo();
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index cc2053f..808d340 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -185,7 +185,6 @@ public class Shell extends ShellOptions {
private Token rootToken;
public final Map<String,Command> commandFactory = new TreeMap<String,Command>();
public final Map<String,Command[]> commandGrouping = new TreeMap<String,Command[]>();
- protected boolean configError = false;
// exit if true
private boolean exit = false;
@@ -215,7 +214,11 @@ public class Shell extends ShellOptions {
this.writer = writer;
}
- // Not for client use
+ /**
+ * Configures the shell using the provided options. Not for client use.
+ *
+ * @return true if the shell was successfully configured, false otherwise.
+ */
public boolean config(String... args) {
CommandLine cl;
@@ -225,9 +228,9 @@ public class Shell extends ShellOptions {
throw new ParseException("Unrecognized arguments: " + cl.getArgList());
if (cl.hasOption(helpOpt.getOpt())) {
- configError = true;
printHelp("shell", SHELL_DESCRIPTION, opts);
- return true;
+ exitCode = 0;
+ return false;
}
setDebugging(cl.hasOption(debugOption.getLongOpt()));
@@ -238,10 +241,10 @@ public class Shell extends ShellOptions {
throw new MissingArgumentException(zooKeeperInstance);
} catch (Exception e) {
- configError = true;
printException(e);
printHelp("shell", SHELL_DESCRIPTION, opts);
- return true;
+ exitCode = 1;
+ return false;
}
// get the options that were parsed
@@ -316,7 +319,8 @@ public class Shell extends ShellOptions {
} catch (Exception e) {
printException(e);
- configError = true;
+ exitCode = 1;
+ return false;
}
// decide whether to execute commands from a file and quit
@@ -373,7 +377,7 @@ public class Shell extends ShellOptions {
for (Command cmd : otherCommands) {
commandFactory.put(cmd.getName(), cmd);
}
- return configError;
+ return true;
}
protected void setInstance(CommandLine cl) {
@@ -408,15 +412,14 @@ public class Shell extends ShellOptions {
public static void main(String args[]) throws IOException {
Shell shell = new Shell();
- shell.config(args);
+ if (!shell.config(args)) {
+ System.exit(shell.getExitCode());
+ }
System.exit(shell.start());
}
public int start() throws IOException {
- if (configError)
- return 1;
-
String input;
if (isVerbose())
printInfo();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3475_7651b777.diff |
bugs-dot-jar_data_ACCUMULO-1544_0cf2ff72 | ---
BugID: ACCUMULO-1544
Summary: Remove username from initialization
Description: This is an artifact from a brief transition period during the 1.5 development.
We have a flag for the user to set what the root username is, except it's never
used. We should remove both the variable and the flag for it.
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 7c27dd8..43fa6cb 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -337,7 +337,7 @@ public class MiniAccumuloCluster {
if (!initialized) {
// sleep a little bit to let zookeeper come up before calling init, seems to work better
UtilWaitThread.sleep(250);
- Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword(), "--username", "root");
+ Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword());
int ret = initProcess.waitFor();
if (ret != 0) {
throw new RuntimeException("Initialize process returned " + ret);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1544_0cf2ff72.diff |
bugs-dot-jar_data_ACCUMULO-1514_fb25913c | ---
BugID: ACCUMULO-1514
Summary: AccumuloVFSClassloader incorrectly treats folders as folders of jar files
Description: |
Specifying a directory of classes is incorrectly interpreted as a directory of jars in the general.dynamic.classpaths configuration property.
Example: adding a path such as *_$ACCUMULO_HOME/core/target/classes_* gets incorrectly interpreted as *_$ACCUMULO_HOME/core/target/classes/\*_* and evaluates to *_$ACCUMULO_HOME/core/target/classes/org_* and *_$ACCUMULO_HOME/core/target/classes/META-INF_*, but *NOT* to *_$ACCUMULO_HOME/core/target/classes_* as expected.
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index b1e829a..eb653bc 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -58,9 +58,9 @@ import org.apache.log4j.Logger;
*
*/
public class AccumuloVFSClassLoader {
-
+
public static class AccumuloVFSClassLoaderShutdownThread implements Runnable {
-
+
public void run() {
try {
AccumuloVFSClassLoader.close();
@@ -68,35 +68,35 @@ public class AccumuloVFSClassLoader {
// do nothing, we are shutting down anyway
}
}
-
+
}
-
+
private static List<WeakReference<DefaultFileSystemManager>> vfsInstances = Collections
.synchronizedList(new ArrayList<WeakReference<DefaultFileSystemManager>>());
-
+
public static final String DYNAMIC_CLASSPATH_PROPERTY_NAME = "general.dynamic.classpaths";
-
+
public static final String DEFAULT_DYNAMIC_CLASSPATH_VALUE = "$ACCUMULO_HOME/lib/ext/[^.].*.jar\n";
-
+
public static final String VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY = "general.vfs.classpaths";
-
+
public static final String VFS_CONTEXT_CLASSPATH_PROPERTY = "general.vfs.context.classpath.";
-
+
public static final String VFS_CACHE_DIR = "general.vfs.cache.dir";
-
+
private static ClassLoader parent = null;
private static volatile ReloadingClassLoader loader = null;
private static final Object lock = new Object();
-
+
private static ContextManager contextManager;
-
+
private static Logger log = Logger.getLogger(AccumuloVFSClassLoader.class);
-
+
static {
// Register the shutdown hook
Runtime.getRuntime().addShutdownHook(new Thread(new AccumuloVFSClassLoaderShutdownThread()));
}
-
+
public synchronized static <U> Class<? extends U> loadClass(String classname, Class<U> extension) throws ClassNotFoundException {
try {
return (Class<? extends U>) getClassLoader().loadClass(classname).asSubclass(extension);
@@ -104,40 +104,45 @@ public class AccumuloVFSClassLoader {
throw new ClassNotFoundException("IO Error loading class " + classname, e);
}
}
-
+
public static Class<?> loadClass(String classname) throws ClassNotFoundException {
return loadClass(classname, Object.class).asSubclass(Object.class);
}
-
+
static FileObject[] resolve(FileSystemManager vfs, String uris) throws FileSystemException {
return resolve(vfs, uris, new ArrayList<FileObject>());
}
-
+
static FileObject[] resolve(FileSystemManager vfs, String uris, ArrayList<FileObject> pathsToMonitor) throws FileSystemException {
if (uris == null)
return new FileObject[0];
-
+
ArrayList<FileObject> classpath = new ArrayList<FileObject>();
-
+
pathsToMonitor.clear();
-
+
for (String path : uris.split(",")) {
-
+
path = path.trim();
-
+
if (path.equals(""))
continue;
-
+
path = AccumuloClassLoader.replaceEnvVars(path, System.getenv());
-
+
FileObject fo = vfs.resolveFile(path);
-
+
switch (fo.getType()) {
case FILE:
- case FOLDER:
classpath.add(fo);
pathsToMonitor.add(fo);
break;
+ case FOLDER:
+ pathsToMonitor.add(fo);
+ for (FileObject child : fo.getChildren()) {
+ classpath.add(child);
+ }
+ break;
case IMAGINARY:
// assume its a pattern
String pattern = fo.getName().getBaseName();
@@ -157,67 +162,67 @@ public class AccumuloVFSClassLoader {
log.warn("ignoring classpath entry " + fo);
break;
}
-
+
}
-
+
return classpath.toArray(new FileObject[classpath.size()]);
}
-
+
private static ReloadingClassLoader createDynamicClassloader(final ClassLoader parent) throws FileSystemException, IOException {
String dynamicCPath = AccumuloClassLoader.getAccumuloString(DYNAMIC_CLASSPATH_PROPERTY_NAME, DEFAULT_DYNAMIC_CLASSPATH_VALUE);
-
+
String envJars = System.getenv("ACCUMULO_XTRAJARS");
if (null != envJars && !envJars.equals(""))
if (dynamicCPath != null && !dynamicCPath.equals(""))
dynamicCPath = dynamicCPath + "," + envJars;
else
dynamicCPath = envJars;
-
+
ReloadingClassLoader wrapper = new ReloadingClassLoader() {
@Override
public ClassLoader getClassLoader() {
return parent;
}
};
-
+
if (dynamicCPath == null || dynamicCPath.equals(""))
return wrapper;
-
+
// TODO monitor time for lib/ext was 1 sec... should this be configurable? - ACCUMULO-1301
return new AccumuloReloadingVFSClassLoader(dynamicCPath, generateVfs(), wrapper, 1000, true);
}
-
+
public static ClassLoader getClassLoader() throws IOException {
ReloadingClassLoader localLoader = loader;
while (null == localLoader) {
synchronized (lock) {
if (null == loader) {
-
+
FileSystemManager vfs = generateVfs();
-
+
// Set up the 2nd tier class loader
if (null == parent) {
parent = AccumuloClassLoader.getClassLoader();
}
-
+
FileObject[] vfsCP = resolve(vfs, AccumuloClassLoader.getAccumuloString(VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, ""));
-
+
if (vfsCP.length == 0) {
localLoader = createDynamicClassloader(parent);
loader = localLoader;
return localLoader.getClassLoader();
}
-
+
// Create the Accumulo Context ClassLoader using the DEFAULT_CONTEXT
localLoader = createDynamicClassloader(new VFSClassLoader(vfsCP, vfs, parent));
loader = localLoader;
}
}
}
-
+
return localLoader.getClassLoader();
}
-
+
public static FileSystemManager generateVfs() throws FileSystemException {
DefaultFileSystemManager vfs = new FinalCloseDefaultFileSystemManager();
vfs.addProvider("res", new org.apache.commons.vfs2.provider.res.ResourceFileProvider());
@@ -263,11 +268,11 @@ public class AccumuloVFSClassLoader {
vfsInstances.add(new WeakReference<DefaultFileSystemManager>(vfs));
return vfs;
}
-
+
public interface Printer {
void print(String s);
}
-
+
public static void printClassPath() {
printClassPath(new Printer() {
@Override
@@ -276,28 +281,28 @@ public class AccumuloVFSClassLoader {
}
});
}
-
+
public static void printClassPath(Printer out) {
try {
ClassLoader cl = getClassLoader();
ArrayList<ClassLoader> classloaders = new ArrayList<ClassLoader>();
-
+
while (cl != null) {
classloaders.add(cl);
cl = cl.getParent();
}
-
+
Collections.reverse(classloaders);
-
+
int level = 0;
-
+
for (ClassLoader classLoader : classloaders) {
if (level > 0)
out.print("");
level++;
-
+
String classLoaderDescription;
-
+
switch (level) {
case 1:
classLoaderDescription = level + ": Java System Classloader (loads Java system resources)";
@@ -316,16 +321,16 @@ public class AccumuloVFSClassLoader {
+ AccumuloVFSClassLoader.class.getName() + ")";
break;
}
-
+
if (classLoader instanceof URLClassLoader) {
// If VFS class loader enabled, but no contexts defined.
URLClassLoader ucl = (URLClassLoader) classLoader;
out.print("Level " + classLoaderDescription + " URL classpath items are:");
-
+
for (URL u : ucl.getURLs()) {
out.print("\t" + u.toExternalForm());
}
-
+
} else if (classLoader instanceof VFSClassLoader) {
out.print("Level " + classLoaderDescription + " VFS classpaths items are:");
VFSClassLoader vcl = (VFSClassLoader) classLoader;
@@ -336,12 +341,12 @@ public class AccumuloVFSClassLoader {
out.print("Unknown classloader configuration " + classLoader.getClass());
}
}
-
+
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
-
+
public static synchronized ContextManager getContextManager() throws IOException {
if (contextManager == null) {
getClassLoader();
@@ -356,10 +361,10 @@ public class AccumuloVFSClassLoader {
}
});
}
-
+
return contextManager;
}
-
+
public static void close() {
for (WeakReference<DefaultFileSystemManager> vfsInstance : vfsInstances) {
DefaultFileSystemManager ref = vfsInstance.get();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1514_fb25913c.diff |
bugs-dot-jar_data_ACCUMULO-3408_81d25bc2 | ---
BugID: ACCUMULO-3408
Summary: display the exact number of tablet servers
Description: "This is a regression of ACCUMULO-1140\n\n"
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
index 66f97e1..1642fc2 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
@@ -24,9 +24,16 @@ public class PreciseNumberType extends NumberType<Integer> {
super(warnMin, warnMax, errMin, errMax);
}
- public PreciseNumberType() {}
-
- public static String bigNumber(long big, String[] SUFFIXES, long base) {
- return String.format("%,d", big);
+ @Override
+ public String format(Object obj) {
+ int i = (Integer)obj;
+ String display = String.format("%,d", obj);
+ if (i < errMin || i > errMax)
+ return String.format("<span class='error'>%s</span>", display);
+ if (i < warnMin || i > warnMax)
+ return String.format("<span class='warning'>%s</span>", display);
+ return display;
}
+
+ public PreciseNumberType() {}
}
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
index d311603..b285727 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
@@ -20,7 +20,7 @@ import static org.apache.accumulo.core.util.NumUtil.bigNumberForQuantity;
public class NumberType<T extends Number> extends CellType<T> {
- private T warnMin, warnMax, errMin, errMax;
+ protected final T warnMin, warnMax, errMin, errMax;
public NumberType(T warnMin, T warnMax, T errMin, T errMax) {
this.warnMin = warnMin;
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3408_81d25bc2.diff |
bugs-dot-jar_data_ACCUMULO-193_c831e44d | ---
BugID: ACCUMULO-193
Summary: key.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS) can produce a key
with an invalid COLVIS
Description: Need a new algorithm for calculating the next biggest column visibility,
because tagging \0 to the end creates an invalid column visibility. We might be
able to minimize the timestamp for this (i.e. set timestamp to Long.MIN_VALUE, but
keep column and row elements the same).
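A sketch of the failure mode described above; it assumes the {{Key(Text, Text, Text, Text, long)}} and {{ColumnVisibility(Text)}} constructors from the public API of that era:
{code}
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

public class FollowingKeyCheck {
  public static void main(String[] args) {
    Key k = new Key(new Text("row"), new Text("cf"), new Text("cq"), new Text("A&B"), 42L);
    Key next = k.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS);
    // The following key's visibility is "A&B\0"; the appended null byte is not a legal
    // visibility expression, so parsing it (as the pre-patch Key.toString() did) throws.
    // The patch sidesteps the parse in toString() rather than changing followingKey itself.
    new ColumnVisibility(next.getColumnVisibility());
  }
}
{code}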
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
index 3d1f92d..afab887 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -22,6 +22,8 @@ package org.apache.accumulo.core.data;
*
*/
+import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
+
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@@ -38,8 +40,6 @@ import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
-import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
-
public class Key implements WritableComparable<Key>, Cloneable {
protected byte[] row;
@@ -444,8 +444,10 @@ public class Key implements WritableComparable<Key>, Cloneable {
}
public static String toPrintableString(byte ba[], int offset, int len, int maxLen) {
- StringBuilder sb = new StringBuilder();
-
+ return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
+ }
+
+ public static StringBuilder appendPrintableString(byte ba[], int offset, int len, int maxLen, StringBuilder sb) {
int plen = Math.min(len, maxLen);
for (int i = 0; i < plen; i++) {
@@ -460,26 +462,33 @@ public class Key implements WritableComparable<Key>, Cloneable {
sb.append("... TRUNCATED");
}
- return sb.toString();
+ return sb;
+ }
+
+ private StringBuilder rowColumnStringBuilder() {
+ StringBuilder sb = new StringBuilder();
+ appendPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" ");
+ appendPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(":");
+ appendPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" [");
+ appendPrintableString(colVisibility, 0, colVisibility.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append("]");
+ return sb;
}
public String toString() {
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = toPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT) + " "
- + toPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT) + ":"
- + toPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT) + " " + labelString + " " + Long.toString(timestamp) + " "
- + deleted;
- return s;
+ StringBuilder sb = rowColumnStringBuilder();
+ sb.append(" ");
+ sb.append(Long.toString(timestamp));
+ sb.append(" ");
+ sb.append(deleted);
+ return sb.toString();
}
public String toStringNoTime() {
-
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = new String(row, 0, row.length) + " " + new String(colFamily, 0, colFamily.length) + ":" + new String(colQualifier, 0, colQualifier.length) + " "
- + labelString;
- return s;
+ return rowColumnStringBuilder().toString();
}
public int getLength() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-193_c831e44d.diff |
bugs-dot-jar_data_ACCUMULO-3474_cfb832a1 | ---
BugID: ACCUMULO-3474
Summary: ProxyServer ignores value of isDeleted on ColumnUpdate
Description: |-
The ProxyServer ignores the actual boolean value of the isDeleted flag on a ColumnUpdate. If the isDeleted value is set, regardless of the actual boolean value, the ProxyServer marks the update as a delete.
The ProxyServer should be updated to check the value of the flag.
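A sketch of the problematic input using the proxy's Thrift-generated {{ColumnUpdate}} (setter spellings are the usual Thrift-generated ones; family, qualifier, and value are placeholders):
{code}
import org.apache.accumulo.proxy.thrift.ColumnUpdate;

public class DeleteFlagCheck {
  public static void main(String[] args) {
    ColumnUpdate cu = new ColumnUpdate();
    cu.setColFamily("fam".getBytes());
    cu.setColQualifier("qual".getBytes());
    cu.setValue("v".getBytes());
    cu.setDeleteCell(false); // explicitly "this is not a delete" ...
    // ... but isSetDeleteCell() is now true, and the pre-patch ProxyServer only
    // checked isSetDeleteCell(), so this update was still turned into a putDelete.
    System.out.println(cu.isSetDeleteCell() + " / " + cu.isDeleteCell());
  }
}
{code}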
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 0fedb1d..f873010 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1124,13 +1124,13 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (update.isSetValue())
value = update.getValue();
if (update.isSetTimestamp()) {
- if (update.isSetDeleteCell()) {
+ if (update.isSetDeleteCell() && update.isDeleteCell()) {
m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
} else {
m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, update.getTimestamp(), new Value(value));
}
} else {
- if (update.isSetDeleteCell()) {
+ if (update.isSetDeleteCell() && update.isDeleteCell()) {
m.putDelete(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz);
} else {
m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, new Value(value));
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3474_cfb832a1.diff |
bugs-dot-jar_data_ACCUMULO-795_9453bcfa | ---
BugID: ACCUMULO-795
Summary: MockTable doesn't obey useVersions parameter
Description: "The constructor for {{MockTable}} will call {{IteratorUtil.generateInitialTableProperties()}},
and thus set a versioning iterator on itself regardless of whether the useVersion
parameter is set to true or false. \n\nI believe {{MockTable}}'s constructor should
call IteratorUtil.generateInitialTableProperties() only if useVersions is true,
otherwise, it should populate {{settings}} with a new {{TreeMap}}"
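A sketch of the observable difference, using the 1.5-style mock client API (table name, credentials, and values are placeholders):
{code}
import java.util.Map.Entry;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class MockVersionsCheck {
  public static void main(String[] args) throws Exception {
    Connector conn = new MockInstance().getConnector("user", new PasswordToken(""));
    conn.tableOperations().create("t", false); // false = do not limit versions

    BatchWriter bw = conn.createBatchWriter("t", new BatchWriterConfig());
    Mutation m = new Mutation(new Text("row"));
    m.put(new Text("cf"), new Text("cq"), 1L, new Value("v1".getBytes()));
    m.put(new Text("cf"), new Text("cq"), 2L, new Value("v2".getBytes()));
    bw.addMutation(m);
    bw.close();

    int count = 0;
    Scanner s = conn.createScanner("t", new Authorizations());
    for (Entry<Key,Value> e : s)
      count++;
    // Expected 2 (both versions); the pre-patch MockTable installs the
    // VersioningIterator regardless, so only 1 comes back.
    System.out.println(count);
  }
}
{code}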
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
index c35d7fa..ea4f311 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
@@ -190,11 +190,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes()), ByteBuffer.wrap(timeType.name().getBytes()));
- Map<String,String> opts;
- if (limitVersion) {
- opts = IteratorUtil.generateInitialTableProperties();
- } else
- opts = Collections.emptyMap();
+ Map<String,String> opts = IteratorUtil.generateInitialTableProperties(limitVersion);
try {
doTableOperation(TableOperation.CREATE, args, opts);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index f558822..9289608 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -88,9 +88,9 @@ public class MockTable {
private TimeType timeType;
SortedSet<Text> splits = new TreeSet<Text>();
- MockTable(boolean useVersions, TimeType timeType) {
+ MockTable(boolean limitVersion, TimeType timeType) {
this.timeType = timeType;
- settings = IteratorUtil.generateInitialTableProperties();
+ settings = IteratorUtil.generateInitialTableProperties(limitVersion);
for (Entry<String,String> entry : AccumuloConfiguration.getDefaultConfiguration()) {
String key = entry.getKey();
if (key.startsWith(Property.TABLE_PREFIX.getKey()))
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 172fa63..9b1ca69 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -63,13 +63,22 @@ public class IteratorUtil {
}
- public static Map<String,String> generateInitialTableProperties() {
+ /**
+ * Generate the initial (default) properties for a table
+ * @param limitVersion
+ * include a VersioningIterator at priority 20 that retains a single version of a given K/V pair.
+ * @return A map of Table properties
+ */
+ public static Map<String,String> generateInitialTableProperties(boolean limitVersion) {
TreeMap<String,String> props = new TreeMap<String,String>();
- for (IteratorScope iterScope : IteratorScope.values()) {
- props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers", "20," + VersioningIterator.class.getName());
- props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers.opt.maxVersions", "1");
+ if (limitVersion) {
+ for (IteratorScope iterScope : IteratorScope.values()) {
+ props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers", "20," + VersioningIterator.class.getName());
+ props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers.opt.maxVersions", "1");
+ }
}
+
return props;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
index 83829a9..f2495cc 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
@@ -110,7 +110,7 @@ public class CreateTableCommand extends Command {
// context
if (cl.hasOption(createTableNoDefaultIters.getOpt())) {
- for (String key : IteratorUtil.generateInitialTableProperties().keySet()) {
+ for (String key : IteratorUtil.generateInitialTableProperties(true).keySet()) {
shellState.getConnector().tableOperations().removeProperty(tableName, key);
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-795_9453bcfa.diff |
bugs-dot-jar_data_ACCUMULO-1986_adee0f12 | ---
BugID: ACCUMULO-1986
Summary: Validity checks missing for readFields and Thrift deserialization
Description: Classes in o.a.a.core.data (and potentially elsewhere) that support construction
from a Thrift object and/or population from a {{DataInput}} (via a {{readFields()}}
method) often lack data validity checks that the classes' constructors enforce.
The missing checks make it possible for an attacker to create invalid objects by
manipulating the bytes being read. The situation is analogous to the need to check
objects deserialized from their Java serialized form within the {{readObject()}}
method.
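A sketch of the kind of hostile input the patch guards against: a Thrift {{TKey}} with a missing (null) row. Pre-patch, {{new Key(tkey)}} silently produced a Key with a null row; post-patch it throws. Setter spellings are the usual Thrift-generated ones.
{code}
import java.nio.ByteBuffer;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.thrift.TKey;

public class TKeyCheck {
  public static void main(String[] args) {
    TKey tkey = new TKey();                               // row left unset (null)
    tkey.setColFamily(ByteBuffer.wrap("cf".getBytes()));
    tkey.setColQualifier(ByteBuffer.wrap("cq".getBytes()));
    tkey.setColVisibility(ByteBuffer.wrap(new byte[0]));
    tkey.setTimestamp(0L);
    new Key(tkey); // throws IllegalArgumentException("null row") with the added check
  }
}
{code}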
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
index cfb0b5c..b6cfad7 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -291,6 +291,19 @@ public class Key implements WritableComparable<Key>, Cloneable {
this.colVisibility = toBytes(tkey.colVisibility);
this.timestamp = tkey.timestamp;
this.deleted = false;
+
+ if (row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (colFamily == null) {
+ throw new IllegalArgumentException("null column family");
+ }
+ if (colQualifier == null) {
+ throw new IllegalArgumentException("null column qualifier");
+ }
+ if (colVisibility == null) {
+ throw new IllegalArgumentException("null column visibility");
+ }
}
/**
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 3979da9..6b2c09f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -187,6 +187,13 @@ public class Mutation implements Writable {
this.data = ByteBufferUtil.toBytes(tmutation.data);
this.entries = tmutation.entries;
this.values = ByteBufferUtil.toBytesList(tmutation.values);
+
+ if (this.row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (this.data == null) {
+ throw new IllegalArgumentException("null serialized data");
+ }
}
public Mutation(Mutation m) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1986_adee0f12.diff |
bugs-dot-jar_data_ACCUMULO-776_dc9f23d9 | ---
BugID: ACCUMULO-776
Summary: TimestampFilter should serialize start and end as longs in the IteratorSetting
Description: "Although the TimestampFilter supports using longs to set the start or
end timestamp, it formats them as strings using SimpleDateFormat when storing or
retrieving them in the IteratorSetting.\n\nThis results in exceptions when the timestamps
being used aren't able to be formatted as _yyyyMMddHHmmssz_. For example, try {{setEnd(253402300800001,true)}}\n\nInstead,
{{setStart()}} and {{setEnd()}} could just as easily use {{String.valueOf(long i)}}
to store the values, and {{init()}} could retrieve them using {{Long.valueOf(String
s)}}. "
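A small sketch of the reported call; per the ticket, the pre-patch {{setEnd(long, boolean)}} formatted this value with {{yyyyMMddHHmmssz}} (a five-digit year) and {{init()}} then failed to parse it back, whereas post-patch the option is stored as {{LONG253402300800001}}:
{code}
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.user.TimestampFilter;

public class TsFilterCheck {
  public static void main(String[] args) {
    IteratorSetting is = new IteratorSetting(30, "tsfilter", TimestampFilter.class);
    // 253402300800001 ms is just past 9999-12-31, which the date format cannot round-trip.
    TimestampFilter.setEnd(is, 253402300800001L, true);
    System.out.println(is.getOptions());
  }
}
{code}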
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
index 2dbfe66..49f0146 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
@@ -17,8 +17,8 @@
package org.apache.accumulo.core.iterators.user;
import java.io.IOException;
+import java.text.ParseException;
import java.text.SimpleDateFormat;
-import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
* A Filter that matches entries whose timestamps fall within a range.
*/
public class TimestampFilter extends Filter {
+ private static final String LONG_PREFIX = "LONG";
private final SimpleDateFormat dateParser = initDateParser();
private static SimpleDateFormat initDateParser() {
@@ -86,10 +87,20 @@ public class TimestampFilter extends Filter {
throw new IllegalArgumentException("must have either start or end for " + TimestampFilter.class.getName());
try {
- if (hasStart)
- start = dateParser.parse(options.get(START)).getTime();
- if (hasEnd)
- end = dateParser.parse(options.get(END)).getTime();
+ if (hasStart) {
+ String s = options.get(START);
+ if (s.startsWith(LONG_PREFIX))
+ start = Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ start = dateParser.parse(s).getTime();
+ }
+ if (hasEnd) {
+ String s = options.get(END);
+ if (s.startsWith(LONG_PREFIX))
+ end = Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ end = dateParser.parse(s).getTime();
+ }
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
@@ -116,8 +127,8 @@ public class TimestampFilter extends Filter {
IteratorOptions io = super.describeOptions();
io.setName("tsfilter");
io.setDescription("TimestampFilter displays entries with timestamps between specified values");
- io.addNamedOption("start", "start timestamp (yyyyMMddHHmmssz)");
- io.addNamedOption("end", "end timestamp (yyyyMMddHHmmssz)");
+ io.addNamedOption("start", "start timestamp (yyyyMMddHHmmssz or LONG<longstring>)");
+ io.addNamedOption("end", "end timestamp (yyyyMMddHHmmssz or LONG<longstring>)");
io.addNamedOption("startInclusive", "true or false");
io.addNamedOption("endInclusive", "true or false");
return io;
@@ -126,11 +137,27 @@ public class TimestampFilter extends Filter {
@Override
public boolean validateOptions(Map<String,String> options) {
super.validateOptions(options);
+ boolean hasStart = false;
+ boolean hasEnd = false;
try {
- if (options.containsKey(START))
- dateParser.parse(options.get(START));
- if (options.containsKey(END))
- dateParser.parse(options.get(END));
+ if (options.containsKey(START)) {
+ hasStart = true;
+ String s = options.get(START);
+ if (s.startsWith(LONG_PREFIX))
+ Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ dateParser.parse(s);
+ }
+ if (options.containsKey(END)) {
+ hasEnd = true;
+ String s = options.get(END);
+ if (s.startsWith(LONG_PREFIX))
+ Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ dateParser.parse(s);
+ }
+ if (!hasStart && !hasEnd)
+ return false;
if (options.get(START_INCL) != null)
Boolean.parseBoolean(options.get(START_INCL));
if (options.get(END_INCL) != null)
@@ -185,8 +212,13 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the start is inclusive
*/
public static void setStart(IteratorSetting is, String start, boolean startInclusive) {
- is.addOption(START, start);
- is.addOption(START_INCL, Boolean.toString(startInclusive));
+ SimpleDateFormat dateParser = initDateParser();
+ try {
+ long startTS = dateParser.parse(start).getTime();
+ setStart(is, startTS, startInclusive);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException("couldn't parse " + start);
+ }
}
/**
@@ -200,8 +232,13 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the end is inclusive
*/
public static void setEnd(IteratorSetting is, String end, boolean endInclusive) {
- is.addOption(END, end);
- is.addOption(END_INCL, Boolean.toString(endInclusive));
+ SimpleDateFormat dateParser = initDateParser();
+ try {
+ long endTS = dateParser.parse(end).getTime();
+ setEnd(is, endTS, endInclusive);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException("couldn't parse " + end);
+ }
}
/**
@@ -248,8 +285,7 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the start is inclusive
*/
public static void setStart(IteratorSetting is, long start, boolean startInclusive) {
- SimpleDateFormat dateParser = initDateParser();
- is.addOption(START, dateParser.format(new Date(start)));
+ is.addOption(START, LONG_PREFIX + Long.toString(start));
is.addOption(START_INCL, Boolean.toString(startInclusive));
}
@@ -264,8 +300,7 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the end is inclusive
*/
public static void setEnd(IteratorSetting is, long end, boolean endInclusive) {
- SimpleDateFormat dateParser = initDateParser();
- is.addOption(END, dateParser.format(new Date(end)));
+ is.addOption(END, LONG_PREFIX + Long.toString(end));
is.addOption(END_INCL, Boolean.toString(endInclusive));
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-776_dc9f23d9.diff |
bugs-dot-jar_data_ACCUMULO-3718_73ce9cfb | ---
BugID: ACCUMULO-3718
Summary: not possible to create a Mutation object from scala w/o some extra helper
code
Description: "issue: \n\nit's not possible to create a Mutation object from scala
without employing a standalone java jar wrapper. the preferred method for creating
the object has you do it in two stages: create with table row, then employ Mutation.put()
to populate the object with the actual mutation data. when you do this in scala,
you get a\n\njava.lang.IllegalStateException: Can not add to mutation after serializing
it at org.apache.accumulo.core.data.Mutation.put(Mutation.java:168) at org.apache.accumulo.core.data.Mutation.put(Mutation.java:163)
at org.apache.accumulo.core.data.Mutation.put(Mutation.java:211)\n\nerror. I *think*
this has something to do with the byte array going out of scope in Scala but somehow
not in Java. If you concat the operations (constructor().put(data, data, ...)) you
don't run into the error, but scala sees a Unit return type, so you can't actually
add the mutation to a BatchWriter. The only way I was able to get around this was
to create a stand-alone jar with a method that created then returned a populated
mutation object. \n\nI wasn't sure whether or not to call this a bug or an enhancement.
given that you probably want Accumulo to play nice with Scala I decided to call
it a bug. \n\nbelow is a link to the stack overflow thread I created whilst figuring
all this out: \n\nhttp://stackoverflow.com/questions/29497547/odd-error-when-populating-accumulo-1-6-mutation-object-via-spark-notebook/29527189#29527189\n\n\n"
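A minimal Java sketch of the workaround described above (the helper class and method names are hypothetical, not part of Accumulo): build and return the fully populated Mutation from a single method, so Scala/Spark callers only ever receive a finished object and never call put() on a reference that has already been serialized.
{code}
import java.nio.charset.StandardCharsets;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

// Hypothetical helper; ship it in a plain Java jar and call it from Scala.
public class MutationFactory {
  public static Mutation newMutation(String row, String cf, String cq, String value) {
    Mutation m = new Mutation(new Text(row));
    // All puts happen here, before the object is handed back to the caller,
    // so the "Can not add to mutation after serializing it" state is never reached.
    m.put(new Text(cf), new Text(cq), new Value(value.getBytes(StandardCharsets.UTF_8)));
    return m;
  }
}
{code}
The returned Mutation can then be passed straight to BatchWriter.addMutation() from Scala.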
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 0861cc4..81ad531 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -191,6 +191,20 @@ public class Mutation implements Writable {
}
}
+ /* This is so hashCode & equals can be called without changing this object.
+ *
+ * It will return a copy of the current data buffer if serialized has not been
+ * called previously. Otherwise, this.data will be returned since the buffer is
+ * null and will not change.
+ */
+ private byte[] serializedSnapshot() {
+ if (buffer != null) {
+ return buffer.toArray();
+ } else {
+ return this.data;
+ }
+ }
+
/**
* @since 1.5.0
*/
@@ -691,13 +705,13 @@ public class Mutation implements Writable {
@Override
public int hashCode() {
- return toThrift().hashCode();
+ return toThrift(false).hashCode();
}
public boolean equals(Mutation m) {
- serialize();
- m.serialize();
- if (Arrays.equals(row, m.row) && entries == m.entries && Arrays.equals(data, m.data)) {
+ byte[] myData = serializedSnapshot();
+ byte[] otherData = m.serializedSnapshot();
+ if (Arrays.equals(row, m.row) && entries == m.entries && Arrays.equals(myData, otherData)) {
if (values == null && m.values == null)
return true;
@@ -716,7 +730,17 @@ public class Mutation implements Writable {
}
public TMutation toThrift() {
- serialize();
+ return toThrift(true);
+ }
+
+ private TMutation toThrift(boolean serialize) {
+ byte[] data;
+ if (serialize) {
+ this.serialize();
+ data = this.data;
+ } else {
+ data = serializedSnapshot();
+ }
return new TMutation(java.nio.ByteBuffer.wrap(row), java.nio.ByteBuffer.wrap(data), ByteBufferUtil.toByteBuffers(values), entries);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3718_73ce9cfb.diff |
bugs-dot-jar_data_ACCUMULO-1730_872b6db3 | ---
BugID: ACCUMULO-1730
Summary: ColumnVisibility parse tree nodes do not have correct location offsets for
AND and OR nodes
Description: |-
Trying to do some transformations on visibility strings and running into issues working with the parse tree:
Clojure 1.5.1
user=> (import [org.apache.accumulo.core.security ColumnVisibility])
org.apache.accumulo.core.security.ColumnVisibility
user=> (def vis (ColumnVisibility. "(W)|(U|V)"))
#'user/vis
user=> (.getTermStart (first (.getChildren (.getParseTree vis))))
1
user=> (.getTermEnd (first (.getChildren (.getParseTree vis))))
2
user=> (.getTermStart (second (.getChildren (.getParseTree vis))))
0
user=> (.getTermEnd (second (.getChildren (.getParseTree vis))))
8
Shouldn't those last two be 5 and 8?
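The same check can be reproduced directly in Java; this is a hedged sketch that only uses the accessors already exercised in the Clojure session above (getParseTree, getChildren, getTermStart, getTermEnd).
{code}
import java.util.List;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.ColumnVisibility.Node;

public class VisOffsets {
  public static void main(String[] args) {
    ColumnVisibility vis = new ColumnVisibility("(W)|(U|V)");
    List<Node> children = vis.getParseTree().getChildren();
    for (Node child : children) {
      // Expected offsets: 1..2 for the W term and 5..8 for the (U|V) subtree;
      // before the fix the second child reported 0..8.
      System.out.println(child.getTermStart() + " -> " + child.getTermEnd());
    }
  }
}
{code}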
diff --git a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
index 55763bc..f9c8382 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
@@ -302,6 +302,7 @@ public class ColumnVisibility {
result.add(c);
else
result.add(child);
+ result.end = index - 1;
return result;
}
case '"': {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1730_872b6db3.diff |
bugs-dot-jar_data_ACCUMULO-1044_9396979b | ---
BugID: ACCUMULO-1044
Summary: bulk imported files showing up in metadata after bulk import fails
Description: |
Bulk import fails. The file is moved to the failures directory.
But references in the !METADATA table remain.
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index bd19d1f..463b7b0 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.zookeeper.ZooCache;
import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
@@ -72,6 +73,22 @@ public class MetadataConstraints implements Constraint {
return false;
}
+ static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
+ if (lst == null)
+ lst = new ArrayList<Short>();
+ lst.add((short)violation);
+ return lst;
+ }
+
+ static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
+ if (lst == null)
+ return addViolation(lst, intViolation);
+ short violation = (short)intViolation;
+ if (!lst.contains(violation))
+ return addViolation(lst, intViolation);
+ return lst;
+ }
+
public List<Short> check(Environment env, Mutation mutation) {
ArrayList<Short> violations = null;
@@ -96,44 +113,30 @@ public class MetadataConstraints implements Constraint {
break;
if (!validTableNameChars[0xff & b]) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (!containsSemiC) {
// see if last row char is <
if (row.length == 0 || row[row.length - 1] != '<') {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
} else {
if (row.length == 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (row.length > 0 && row[0] == '!') {
if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
// ensure row is not less than Constants.METADATA_TABLE_ID
if (new Text(row).compareTo(new Text(Constants.METADATA_TABLE_ID)) < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 5);
+ violations = addViolation(violations, 5);
}
for (ColumnUpdate columnUpdate : colUpdates) {
@@ -141,17 +144,13 @@ public class MetadataConstraints implements Constraint {
if (columnUpdate.isDeleted()) {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
}
continue;
}
if (columnUpdate.getValue().length == 0 && !columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 6);
+ violations = addViolation(violations, 6);
}
if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
@@ -159,26 +158,49 @@ public class MetadataConstraints implements Constraint {
DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} catch (NumberFormatException nfe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
} catch (ArrayIndexOutOfBoundsException aiooe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} else if (columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+ } else if (columnFamily.equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+ if (!columnUpdate.isDeleted()) {
+ // splits, which also write the time reference, are allowed to write this reference even when
+ // the transaction is not running because the other half of the tablet is holding a reference
+ // to the file.
+ boolean isSplitMutation = false;
+ // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+ // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+ // See ACCUMULO-1230.
+ boolean isLocationMutation = false;
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ if (new ColumnFQ(update).equals(Constants.METADATA_TIME_COLUMN)) {
+ isSplitMutation = true;
+ }
+ if (update.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+ isLocationMutation = true;
+ }
+ }
+
+ if (!isSplitMutation && !isLocationMutation) {
+ String tidString = new String(columnUpdate.getValue());
+ long tid = Long.parseLong(tidString);
+ try {
+ if (!new ZooArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
+ violations = addViolation(violations, 8);
+ }
+ } catch (Exception ex) {
+ violations = addViolation(violations, 8);
+ }
+ }
+ }
} else {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
&& (violations == null || !violations.contains((short) 4))) {
KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
@@ -188,9 +210,7 @@ public class MetadataConstraints implements Constraint {
boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
if (!prevEndRowLessThanEndRow) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 3);
+ violations = addViolation(violations, 3);
}
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_LOCK_COLUMN)) {
if (zooCache == null) {
@@ -211,9 +231,7 @@ public class MetadataConstraints implements Constraint {
}
if (!lockHeld) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 7);
+ violations = addViolation(violations, 7);
}
}
@@ -221,7 +239,10 @@ public class MetadataConstraints implements Constraint {
}
if (violations != null) {
- log.debug(" violating metadata mutation : " + mutation);
+ log.debug("violating metadata mutation : " + new String(mutation.getRow()));
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+ }
}
return violations;
@@ -243,6 +264,8 @@ public class MetadataConstraints implements Constraint {
return "Empty values are not allowed for any " + Constants.METADATA_TABLE_NAME + " column";
case 7:
return "Lock not held in zookeeper by writer";
+ case 8:
+ return "Bulk load transaction no longer running";
}
return null;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1044_9396979b.diff |
bugs-dot-jar_data_ACCUMULO-1358_6c565dfb | ---
BugID: ACCUMULO-1358
Summary: Shell's setiter is not informative when using a bad class name
Description: In the shell, I did setiter using a class that wasn't found. Rather than
a message about it not being found, I just get told that I have an invalid argument.
Even turning on debug, I had to use the stack trace to figure out why it was erroring.
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
index 4c6d2d2..26e38e6 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
@@ -175,14 +175,23 @@ public class SetIterCommand extends Command {
clazz = classloader.loadClass(className).asSubclass(OptionDescriber.class);
skvi = clazz.newInstance();
} catch (ClassNotFoundException e) {
- throw new IllegalArgumentException(e.getMessage());
+ StringBuilder msg = new StringBuilder("Unable to load ").append(className);
+ if (className.indexOf('.') < 0) {
+ msg.append("; did you use a fully qualified package name?");
+ } else {
+ msg.append("; class not found.");
+ }
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
} catch (InstantiationException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (IllegalAccessException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (ClassCastException e) {
- throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, "Unable to load " + className + " as type " + OptionDescriber.class.getName()
- + "; configure with 'config' instead");
+ StringBuilder msg = new StringBuilder("Loaded ");
+ msg.append(className).append(" but it does not implement ");
+ msg.append(OptionDescriber.class.getSimpleName());
+ msg.append("; use 'config -s' instead.");
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
}
final IteratorOptions itopts = skvi.describeOptions();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1358_6c565dfb.diff |
bugs-dot-jar_data_ACCUMULO-217_46f62443 | ---
BugID: ACCUMULO-217
Summary: MockAccumulo doesn't throw informative errors
Description: Users are unable to tell if an error has occurred and whether it is due
to unimplemented features in MockAccumulo.
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
index 96a31e7..31f7405 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
@@ -48,6 +48,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void removeIterator(String tableName, String name, EnumSet<IteratorScope> scopes) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Map<String,String> copy = new TreeMap<String,String>();
for (Entry<String,String> property : this.getProperties(tableName)) {
copy.put(property.getKey(), property.getValue());
@@ -64,6 +66,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public IteratorSetting getIteratorSetting(String tableName, String name, IteratorScope scope) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
int priority = -1;
String classname = null;
Map<String,String> settings = new HashMap<String,String>();
@@ -90,6 +94,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public Set<String> listIterators(String tableName) throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Set<String> result = new HashSet<String>();
Set<String> lifecycles = new HashSet<String>();
for (IteratorScope scope : IteratorScope.values())
@@ -107,6 +113,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void checkIteratorConflicts(String tableName, IteratorSetting setting) throws AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
for (IteratorScope scope : setting.getScopes()) {
String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
String nameStr = String.format("%s.%s", scopeStr, setting.getName());
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 4063b76..1b2a3d0 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -81,6 +81,8 @@ public class MockTableOperations extends TableOperationsHelper {
if (!tableName.matches(Constants.VALID_TABLE_NAME_REGEX)) {
throw new IllegalArgumentException();
}
+ if (exists(tableName))
+ throw new TableExistsException(tableName, tableName, "");
acu.createTable(username, tableName, versioningIter, timeType);
}
@@ -90,30 +92,42 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void addAggregators(String tableName, List<? extends PerColumnIteratorConfig> aggregators) throws AccumuloSecurityException, TableNotFoundException,
AccumuloException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.addAggregators(tableName, aggregators);
}
@Override
- public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {}
+ public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+ throw new NotImplementedException();
+ }
@Override
- public Collection<Text> getSplits(String tableName) {
+ public Collection<Text> getSplits(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return Collections.emptyList();
}
@Override
- public Collection<Text> getSplits(String tableName, int maxSplits) {
- return Collections.emptyList();
+ public Collection<Text> getSplits(String tableName, int maxSplits) throws TableNotFoundException {
+ return getSplits(tableName);
}
@Override
public void delete(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.tables.remove(tableName);
}
@Override
public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
TableExistsException {
+ if (!exists(oldTableName))
+ throw new TableNotFoundException(oldTableName, oldTableName, "");
+ if (exists(newTableName))
+ throw new TableExistsException(newTableName, newTableName, "");
MockTable t = acu.tables.remove(oldTableName);
acu.tables.put(newTableName, t);
}
@@ -133,15 +147,19 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public Iterable<Entry<String,String>> getProperties(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return acu.tables.get(tableName).settings.entrySet();
}
@Override
- public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {}
+ public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName) throws AccumuloException, TableNotFoundException {
- return null;
+ throw new NotImplementedException();
}
@Override
@@ -163,13 +181,17 @@ public class MockTableOperations extends TableOperationsHelper {
}
@Override
- public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {}
+ public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {
+ throw new NotImplementedException();
+ }
@Override
public void online(String tableName) throws AccumuloSecurityException, AccumuloException {}
@Override
- public void clearLocatorCache(String tableName) throws TableNotFoundException {}
+ public void clearLocatorCache(String tableName) throws TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,String> tableIdMap() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-217_46f62443.diff |
bugs-dot-jar_data_ACCUMULO-1183_cfbf5999 | ---
BugID: ACCUMULO-1183
Summary: ProxyServer does not set column information on BatchScanner
Description: The createScanner method uses the options from the thrift request to
call fetchColumn() and fetchColumnFamily(). The createBatchScanner should have
the same feature, though the statements are absent from the code.
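For reference, this is roughly the column handling the proxy needed to mirror, shown on the native Java client API; a hedged sketch only, with the table name, authorizations and column names invented for illustration.
{code}
import java.util.Collections;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class ColumnFetchSketch {
  static void scan(Connector conn) throws Exception {
    BatchScanner bs = conn.createBatchScanner("mytable", new Authorizations(), 2);
    bs.setRanges(Collections.singleton(new Range()));
    bs.fetchColumnFamily(new Text("cf"));           // family-only column
    bs.fetchColumn(new Text("cf"), new Text("cq")); // family plus qualifier
    for (Entry<Key,Value> entry : bs) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
    bs.close();
  }
}
{code}
The patch below applies the same two calls inside createBatchScanner, keyed on whether the ScanColumn has a qualifier set.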
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 911d187..167cecc 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -819,7 +819,17 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
scanner.setRanges(ranges);
+
+ if (opts.columns != null) {
+ for (ScanColumn col : opts.columns) {
+ if (col.isSetColQualifier())
+ scanner.fetchColumn(ByteBufferUtil.toText(col.colFamily), ByteBufferUtil.toText(col.colQualifier));
+ else
+ scanner.fetchColumnFamily(ByteBufferUtil.toText(col.colFamily));
+ }
+ }
}
+
UUID uuid = UUID.randomUUID();
ScannerPlusIterator spi = new ScannerPlusIterator();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1183_cfbf5999.diff |
bugs-dot-jar_data_ACCUMULO-907_4aeaeb2a | ---
BugID: ACCUMULO-907
Summary: stacking combiners produces a strange result
Description: |+
Paste the following into your shell:
{noformat}
deletetable test
createtable test
setiter -t test -p 16 -scan -n test_1 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 17 -scan -n test_2 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 18 -scan -n test_3 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 10 -scan -n test_4 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count
STRING
insert row count a 1
insert row count a 1
insert row count b 1
insert row count b 1
insert row count b 1
insert row count c 1
scan
{noformat}
I expect:
{noformat}
row count:a [] 2
row count:b [] 3
row count:c [] 1
{noformat}
But instead, I get this:
{noformat}
row count:a [] 12
{noformat}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 6e72073..584eb14 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -63,7 +63,7 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
*/
public ValueIterator(SortedKeyValueIterator<Key,Value> source) {
this.source = source;
- topKey = source.getTopKey();
+ topKey = new Key(source.getTopKey());
hasNext = _hasNext();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-907_4aeaeb2a.diff |
bugs-dot-jar_data_ACCUMULO-412_be2fdba7 | ---
BugID: ACCUMULO-412
Summary: importdirectory failing on split table
Description: 'bulk import for the wikisearch example isn''t working properly: files
are not being assigned to partitions if there are splits.'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 4f95e1a..83283ac 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -65,7 +65,6 @@ public enum Property {
MASTER_RECOVERY_POOL("master.recovery.pool", "recovery", PropertyType.STRING, "Priority queue to use for log recovery map/reduce jobs."),
MASTER_RECOVERY_SORT_MAPREDUCE("master.recovery.sort.mapreduce", "false", PropertyType.BOOLEAN,
"If true, use map/reduce to sort write-ahead logs during recovery"),
- MASTER_BULK_SERVERS("master.bulk.server.max", "4", PropertyType.COUNT, "The number of servers to use during a bulk load"),
MASTER_BULK_RETRIES("master.bulk.retries", "3", PropertyType.COUNT, "The number of attempts to bulk-load a file before giving up."),
MASTER_BULK_THREADPOOL_SIZE("master.bulk.threadpool.size", "5", PropertyType.COUNT, "The number of threads to use when coordinating a bulk-import."),
MASTER_MINTHREADS("master.server.threads.minimum", "2", PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests."),
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
index 94daf03..a9ed76c 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
@@ -69,7 +69,7 @@ public abstract class Filter extends WrappingIterator implements OptionDescriber
* Iterates over the source until an acceptable key/value pair is found.
*/
protected void findTop() {
- while (getSource().hasTop() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
+ while (getSource().hasTop() && !getSource().getTopKey().isDeleted() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
try {
getSource().next();
} catch (IOException e) {
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
index 8bbf18a..edeaa1d 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
@@ -59,7 +59,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
boolean hasTop();
/**
- * Advances to the next K,V pair.
+ * Advances to the next K,V pair. Note that in minor compaction scope and in non-full major compaction scopes the iterator may see deletion entries. These
+ * entries should be preserved by all iterators except ones that are strictly scan-time iterators that will never be configured for the minc or majc scopes.
+ * Deletion entries are only removed during full major compactions.
*
* @throws IOException
* if an I/O error occurs.
@@ -88,7 +90,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException;
/**
- * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop().
+ * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop(). Note that in minor compaction scope and in non-full major
+ * compaction scopes the iterator may see deletion entries. These entries should be preserved by all iterators except ones that are strictly scan-time
+ * iterators that will never be configured for the minc or majc scopes. Deletion entries are only removed during full major compactions.
*
* @return <tt>K</tt>
* @exception IllegalStateException
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
index 5e82a7d..bb4ae64 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
@@ -42,14 +42,13 @@ import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
public class WikipediaPartitionedMapper extends Mapper<Text,Article,Text,Mutation> {
- private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
index 82af9fd..3507108 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
@@ -23,40 +23,21 @@ package org.apache.accumulo.examples.wikisearch.ingest;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
-import java.io.StringReader;
import java.nio.charset.Charset;
-import java.util.HashSet;
-import java.util.IllegalFormatException;
-import java.util.Map.Entry;
-import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
-import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.log4j.Logger;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
public class WikipediaPartitioner extends Mapper<LongWritable,Text,Text,Article> {
- private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
index d8c57c2..2738e2c 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
@@ -4,20 +4,18 @@ import java.io.IOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
public class SortingRFileOutputFormat extends OutputFormat<Text,Mutation> {
- private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
+ // private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
public static final String PATH_NAME = "sortingrfileoutputformat.path";
public static final String MAX_BUFFER_SIZE = "sortingrfileoutputformat.max.buffer.size";
diff --git a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 071b8bd..4ee5371 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -38,8 +38,8 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.ServerClient;
import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
+import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.thrift.ClientService;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -150,7 +150,7 @@ public class BulkImporter {
} catch (Exception ex) {
log.warn("Unable to find tablets that overlap file " + mapFile.toString());
}
-
+ log.debug("Map file " + mapFile + " found to overlap " + tabletsToAssignMapFileTo.size() + " tablets");
if (tabletsToAssignMapFileTo.size() == 0) {
List<KeyExtent> empty = Collections.emptyList();
completeFailures.put(mapFile, empty);
@@ -652,33 +652,41 @@ public class BulkImporter {
return findOverlappingTablets(acuConf, fs, locator, file, start, failed.getEndRow());
}
+ final static byte[] byte0 = {0};
+
public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, FileSystem fs, TabletLocator locator, Path file, Text startRow,
Text endRow) throws Exception {
List<TabletLocation> result = new ArrayList<TabletLocation>();
-
Collection<ByteSequence> columnFamilies = Collections.emptyList();
-
- FileSKVIterator reader = FileOperations.getInstance().openReader(file.toString(), true, fs, fs.getConf(), acuConf);
+ String filename = file.toString();
+ // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
+ FileSKVIterator reader = FileOperations.getInstance().openReader(filename, true, fs, fs.getConf(), acuConf);
try {
Text row = startRow;
if (row == null)
row = new Text();
while (true) {
+ // log.debug(filename + " Seeking to row " + row);
reader.seek(new Range(row, null), columnFamilies, false);
- if (!reader.hasTop())
+ if (!reader.hasTop()) {
+ // log.debug(filename + " not found");
break;
+ }
row = reader.getTopKey().getRow();
TabletLocation tabletLocation = locator.locateTablet(row, false, true);
+ // log.debug(filename + " found row " + row + " at location " + tabletLocation);
result.add(tabletLocation);
row = tabletLocation.tablet_extent.getEndRow();
- if (row != null && (endRow == null || row.compareTo(endRow) < 0))
- row = Range.followingPrefix(row);
- else
+ if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
+ row = new Text(row);
+ row.append(byte0, 0, byte0.length);
+ } else
break;
}
} finally {
reader.close();
}
+ // log.debug(filename + " to be sent to " + result);
return result;
}
diff --git a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
index c4a3f50..05c353d 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
@@ -19,11 +19,15 @@ package org.apache.accumulo.server.master.tableOps;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
@@ -41,12 +45,13 @@ import org.apache.accumulo.core.client.impl.thrift.TableOperation;
import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.master.state.tables.TableState;
import org.apache.accumulo.core.security.thrift.AuthInfo;
import org.apache.accumulo.core.util.CachedConfiguration;
import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.LoggingRunnable;
+import org.apache.accumulo.core.util.ThriftUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -370,7 +375,7 @@ class LoadFiles extends MasterRepo {
@Override
public Repo<Master> call(final long tid, Master master) throws Exception {
-
+ final SiteConfiguration conf = ServerConfiguration.getSiteConfiguration();
FileSystem fs = TraceFileSystem.wrap(org.apache.accumulo.core.file.FileUtil.getFileSystem(CachedConfiguration.getInstance(),
ServerConfiguration.getSiteConfiguration()));
List<FileStatus> files = new ArrayList<FileStatus>();
@@ -389,42 +394,68 @@ class LoadFiles extends MasterRepo {
}
fs.delete(writable, false);
- // group files into N-sized chunks, send the chunks to random servers
- final int SERVERS_TO_USE = Math.min(ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_SERVERS), master.onlineTabletServers()
- .size());
-
- log.debug("tid " + tid + " using " + SERVERS_TO_USE + " servers");
- // wait for success, repeat failures R times
final List<String> filesToLoad = Collections.synchronizedList(new ArrayList<String>());
for (FileStatus f : files)
filesToLoad.add(f.getPath().toString());
- final int RETRIES = Math.max(1, ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_RETRIES));
- for (int i = 0; i < RETRIES && filesToLoad.size() > 0; i++) {
- List<Future<?>> results = new ArrayList<Future<?>>();
- for (List<String> chunk : groupFiles(filesToLoad, SERVERS_TO_USE)) {
- final List<String> attempt = chunk;
- results.add(threadPool.submit(new LoggingRunnable(log, new Runnable() {
+
+ final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
+ for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
+ List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
+
+ // Figure out which files will be sent to which server
+ Set<TServerInstance> currentServers = Collections.synchronizedSet(new HashSet<TServerInstance>(master.onlineTabletServers()));
+ Map<String,List<String>> loadAssignments = new HashMap<String,List<String>>();
+ for (TServerInstance server : currentServers) {
+ loadAssignments.put(server.hostPort(), new ArrayList<String>());
+ }
+ int i = 0;
+ List<Entry<String,List<String>>> entries = new ArrayList<Entry<String,List<String>>>(loadAssignments.entrySet());
+ for (String file : filesToLoad) {
+ entries.get(i % entries.size()).getValue().add(file);
+ i++;
+ }
+
+ // Use the threadpool to assign files one-at-a-time to the server
+ for (Entry<String,List<String>> entry : entries) {
+ if (entry.getValue().isEmpty()) {
+ continue;
+ }
+ final Entry<String,List<String>> finalEntry = entry;
+ results.add(threadPool.submit(new Callable<List<String>>() {
@Override
- public void run() {
+ public List<String> call() {
+ if (log.isDebugEnabled()) {
+ log.debug("Asking " + finalEntry.getKey() + " to load " + sampleList(finalEntry.getValue(), 10));
+ }
+ List<String> failures = new ArrayList<String>();
ClientService.Iface client = null;
try {
- client = ServerClient.getConnection(HdfsZooInstance.getInstance());
- List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
- attempt.removeAll(fail);
- filesToLoad.removeAll(attempt);
+ client = ThriftUtil.getTServerClient(finalEntry.getKey(), conf);
+ for (String file : finalEntry.getValue()) {
+ List<String> attempt = Collections.singletonList(file);
+ log.debug("Asking " + finalEntry.getKey() + " to bulk import " + file);
+ List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
+ if (fail.isEmpty()) {
+ filesToLoad.remove(file);
+ } else {
+ failures.addAll(fail);
+ }
+ }
} catch (Exception ex) {
log.error(ex, ex);
} finally {
ServerClient.close(client);
}
+ return failures;
}
- })));
+ }));
}
- for (Future<?> f : results)
- f.get();
+ Set<String> failures = new HashSet<String>();
+ for (Future<List<String>> f : results)
+ failures.addAll(f.get());
if (filesToLoad.size() > 0) {
- log.debug("tid " + tid + " attempt " + (i + 1) + " " + filesToLoad + " failed");
+ log.debug("tid " + tid + " attempt " + (i + 1) + " " + sampleList(filesToLoad, 10) + " failed");
UtilWaitThread.sleep(100);
}
}
@@ -449,16 +480,24 @@ class LoadFiles extends MasterRepo {
return new CompleteBulkImport(tableId, source, bulk, errorDir);
}
- private List<List<String>> groupFiles(List<String> files, int groups) {
- List<List<String>> result = new ArrayList<List<String>>();
- Iterator<String> iter = files.iterator();
- for (int i = 0; i < groups && iter.hasNext(); i++) {
- List<String> group = new ArrayList<String>();
- for (int j = 0; j < Math.ceil(files.size() / (double) groups) && iter.hasNext(); j++) {
- group.add(iter.next());
+ static String sampleList(Collection<?> potentiallyLongList, int max) {
+ StringBuffer result = new StringBuffer();
+ result.append("[");
+ int i = 0;
+ for (Object obj : potentiallyLongList) {
+ result.append(obj);
+ if (i >= max) {
+ result.append("...");
+ break;
+ } else {
+ result.append(", ");
}
- result.add(group);
+ i++;
}
- return result;
+ if (i < max)
+ result.delete(result.length() - 2, result.length());
+ result.append("]");
+ return result.toString();
}
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-412_be2fdba7.diff |
bugs-dot-jar_data_ACCUMULO-189_6dbbdc21 | ---
BugID: ACCUMULO-189
Summary: RegExFilter deepCopy NullPointerException
Description: 'If any of the regex matcher objects are null (i.e. for example, if you
only specify a regex for the column family), the deepCopy call will throw a NullPointerException.
'
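A hedged sketch of a configuration that trips this: only the column family pattern is supplied, so the row, qualifier and value matchers stay null and a server-side deepCopy of the filter dereferences them. Priority, name and pattern are illustrative values.
{code}
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.user.RegExFilter;

public class ColfOnlyRegex {
  static IteratorSetting colfOnlyFilter() {
    IteratorSetting setting = new IteratorSetting(30, "colfOnly", RegExFilter.class);
    // Only the column family regex; row, qualifier and value terms are left null.
    RegExFilter.setRegexs(setting, null, "edge.*", null, null, false);
    return setting;
  }
}
{code}
Attaching this setting to a scan and letting the tablet server deepCopy the iterator triggered the NullPointerException before the fix.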
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index fcf77c4..0b3b73f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -39,10 +39,10 @@ public class RegExFilter extends Filter {
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
RegExFilter result = new RegExFilter();
result.setSource(getSource().deepCopy(env));
- result.rowMatcher = rowMatcher.pattern().matcher("");
- result.colfMatcher = colfMatcher.pattern().matcher("");
- result.colqMatcher = colqMatcher.pattern().matcher("");
- result.valueMatcher = valueMatcher.pattern().matcher("");
+ result.rowMatcher = copyMatcher(rowMatcher);
+ result.colfMatcher = copyMatcher(colfMatcher);
+ result.colqMatcher = copyMatcher(colqMatcher);
+ result.valueMatcher = copyMatcher(valueMatcher);
result.orFields = orFields;
return result;
}
@@ -61,6 +61,14 @@ public class RegExFilter extends Filter {
private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private Matcher copyMatcher(Matcher m)
+ {
+ if(m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
+ }
+
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
babcs.set(bs);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-189_6dbbdc21.diff |
bugs-dot-jar_data_ACCUMULO-189_cd7feb4d | ---
BugID: ACCUMULO-189
Summary: RegExFilter deepCopy NullPointerException
Description: 'If any of the regex matcher objects are null (i.e. for example, if you
only specify a regex for the column family), the deepCopy call will throw a NullPointerException.
'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index fcf77c4..0b3b73f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -39,10 +39,10 @@ public class RegExFilter extends Filter {
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
RegExFilter result = new RegExFilter();
result.setSource(getSource().deepCopy(env));
- result.rowMatcher = rowMatcher.pattern().matcher("");
- result.colfMatcher = colfMatcher.pattern().matcher("");
- result.colqMatcher = colqMatcher.pattern().matcher("");
- result.valueMatcher = valueMatcher.pattern().matcher("");
+ result.rowMatcher = copyMatcher(rowMatcher);
+ result.colfMatcher = copyMatcher(colfMatcher);
+ result.colqMatcher = copyMatcher(colqMatcher);
+ result.valueMatcher = copyMatcher(valueMatcher);
result.orFields = orFields;
return result;
}
@@ -61,6 +61,14 @@ public class RegExFilter extends Filter {
private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private Matcher copyMatcher(Matcher m)
+ {
+ if(m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
+ }
+
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
babcs.set(bs);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-189_cd7feb4d.diff |
bugs-dot-jar_data_ACCUMULO-2857_9fcca2ed | ---
BugID: ACCUMULO-2857
Summary: MockTableOperations.tableIdMap always returns tableName as ID
Description: |-
Noticed and fixed this during ACCUMULO-378.
An exception was thrown unexpectedly when trying to use tableIdMap with a MockInstance. Lift fix from 93c8bddc71d1ee190649eeab263205185d75421c into main tree.
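A small sketch of the failing expectation against a MockInstance (instance name and credentials are made up for illustration): before the fix, the returned map used the table name for both key and value instead of a generated id.
{code}
import java.util.Map;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class MockTableIdCheck {
  public static void main(String[] args) throws Exception {
    Connector conn = new MockInstance("test").getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("t1");
    Map<String,String> ids = conn.tableOperations().tableIdMap();
    // Expected a generated id such as "1"; before the fix this printed "t1".
    System.out.println(ids.get("t1"));
  }
}
{code}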
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
index 5977d1d..272d1af 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
@@ -21,6 +21,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.BatchScanner;
@@ -38,6 +39,7 @@ public class MockAccumulo {
final Map<String,String> systemProperties = new HashMap<String,String>();
Map<String,MockUser> users = new HashMap<String,MockUser>();
final FileSystem fs;
+ final AtomicInteger tableIdCounter = new AtomicInteger(0);
MockAccumulo(FileSystem fs) {
this.fs = fs;
@@ -76,7 +78,7 @@ public class MockAccumulo {
}
public void createTable(String username, String tableName, boolean useVersions, TimeType timeType) {
- MockTable t = new MockTable(useVersions, timeType);
+ MockTable t = new MockTable(useVersions, timeType, Integer.toString(tableIdCounter.incrementAndGet()));
t.userPermissions.put(username, EnumSet.allOf(TablePermission.class));
tables.put(tableName, t);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index 3dcab11..2e13d84 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -89,9 +89,11 @@ public class MockTable {
private TimeType timeType;
SortedSet<Text> splits = new ConcurrentSkipListSet<Text>();
Map<String,Set<Text>> localityGroups = new TreeMap<String, Set<Text>>();
+ private String tableId;
- MockTable(boolean limitVersion, TimeType timeType) {
+ MockTable(boolean limitVersion, TimeType timeType, String tableId) {
this.timeType = timeType;
+ this.tableId = tableId;
settings = IteratorUtil.generateInitialTableProperties(limitVersion);
for (Entry<String,String> entry : AccumuloConfiguration.getDefaultConfiguration()) {
String key = entry.getKey();
@@ -143,4 +145,8 @@ public class MockTable {
if (reAdd)
splits.add(start);
}
+
+ public String getTableId() {
+ return this.tableId;
+ }
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 64f8225..5b15351 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -296,8 +296,8 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public Map<String,String> tableIdMap() {
Map<String,String> result = new HashMap<String,String>();
- for (String table : acu.tables.keySet()) {
- result.put(table, table);
+ for (Entry<String,MockTable> entry : acu.tables.entrySet()) {
+ result.put(entry.getKey(), entry.getValue().getTableId());
}
return result;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2857_9fcca2ed.diff |
bugs-dot-jar_data_ACCUMULO-2899_31aea2ad | ---
BugID: ACCUMULO-2899
Summary: WAL handling fails to deal with 1.4 -> 1.5 -> 1.6
Description: |-
After doing a 1.4 -> 1.5 -> 1.6 upgrade that still has WALs for some tables, the 1.6 instance fails to correctly handle the 1.4 recovered WALs.
This can happen either through not waiting long enough after the upgrade to 1.5 or because of an offline table brought online on 1.6 (ala ACCUMULO-2816).
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 5c1194a..d4a2d4f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -529,8 +529,15 @@ public class VolumeManagerImpl implements VolumeManager {
@Override
public Path getFullPath(FileType fileType, String path) {
- if (path.contains(":"))
- return new Path(path);
+ int colon = path.indexOf(':');
+ if (colon > -1) {
+ // Check if this is really an absolute path or if this is a 1.4 style relative path for a WAL
+ if (fileType == FileType.WAL && path.charAt(colon + 1) != '/') {
+ path = path.substring(path.indexOf('/'));
+ } else {
+ return new Path(path);
+ }
+ }
// normalize the path
Path fullPath = new Path(defaultVolume.getBasePath(), fileType.getDirectory());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
index 1da945d..4a6638a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
@@ -34,8 +34,11 @@ public class RecoveryPath {
String uuid = walPath.getName();
// drop uuid
walPath = walPath.getParent();
- // drop server
- walPath = walPath.getParent();
+ // recovered 1.4 WALs won't have a server component
+ if (!walPath.getName().equals(FileType.WAL.getDirectory())) {
+ // drop server
+ walPath = walPath.getParent();
+ }
if (!walPath.getName().equals(FileType.WAL.getDirectory()))
throw new IllegalArgumentException("Bad path " + walPath);
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index ae850af..56a0fd5 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -281,7 +281,9 @@ public class GarbageCollectWriteAheadLogs {
while (iterator.hasNext()) {
for (String entry : iterator.next().logSet) {
- String uuid = new Path(entry).getName();
+ // old style WALs will have the IP:Port of their logger and new style will either be a Path either absolute or relative, in all cases
+ // the last "/" will mark a UUID file name.
+ String uuid = entry.substring(entry.lastIndexOf("/") + 1);
if (!isUUID(uuid)) {
// fully expect this to be a uuid, if its not then something is wrong and walog GC should not proceed!
throw new IllegalArgumentException("Expected uuid, but got " + uuid + " from " + entry);
@@ -327,8 +329,8 @@ public class GarbageCollectWriteAheadLogs {
continue;
for (FileStatus status : listing) {
String server = status.getPath().getName();
- servers.add(server);
if (status.isDir()) {
+ servers.add(server);
for (FileStatus file : fs.listStatus(new Path(walRoot, server))) {
if (isUUID(file.getPath().getName())) {
fileToServerMap.put(file.getPath(), server);
@@ -339,7 +341,9 @@ public class GarbageCollectWriteAheadLogs {
}
} else if (isUUID(server)) {
// old-style WAL are not under a directory
+ servers.add("");
fileToServerMap.put(status.getPath(), "");
+ nameToFileMap.put(server, status.getPath());
} else {
log.info("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
index f73d4ca..36b2289 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
@@ -1357,6 +1357,8 @@ public class Tablet {
tabletResources.setTablet(this, acuTableConf);
if (!logEntries.isEmpty()) {
log.info("Starting Write-Ahead Log recovery for " + this.extent);
+ // count[0] = entries used on tablet
+ // count[1] = track max time from walog entries wihtout timestamps
final long[] count = new long[2];
final CommitSession commitSession = tabletMemory.getCommitSession();
count[1] = Long.MIN_VALUE;
@@ -1388,6 +1390,7 @@ public class Tablet {
commitSession.updateMaxCommittedTime(tabletTime.getTime());
if (count[0] == 0) {
+ log.debug("No replayed mutations applied, removing unused entries for " + extent);
MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
logEntries.clear();
}
@@ -1403,7 +1406,7 @@ public class Tablet {
currentLogs = new HashSet<DfsLogger>();
for (LogEntry logEntry : logEntries) {
for (String log : logEntry.logSet) {
- currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), log));
+ currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), log, logEntry.getColumnQualifier().toString()));
}
}
@@ -3661,12 +3664,12 @@ public class Tablet {
for (DfsLogger logger : otherLogs) {
otherLogsCopy.add(logger.toString());
- doomed.add(logger.toString());
+ doomed.add(logger.getMeta());
}
for (DfsLogger logger : currentLogs) {
currentLogsCopy.add(logger.toString());
- doomed.remove(logger.toString());
+ doomed.remove(logger.getMeta());
}
otherLogs = Collections.emptySet();
@@ -3684,6 +3687,10 @@ public class Tablet {
log.debug("Logs for current memory: " + getExtent() + " " + logger);
}
+ for (String logger : doomed) {
+ log.debug("Logs to be destroyed: " + getExtent() + " " + logger);
+ }
+
return doomed;
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index cca2953..b152380 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -220,13 +220,21 @@ public class DfsLogger {
private String logPath;
private Daemon syncThread;
+ /* Track what's actually in +r/!0 for this logger ref */
+ private String metaReference;
+
public DfsLogger(ServerResources conf) throws IOException {
this.conf = conf;
}
- public DfsLogger(ServerResources conf, String filename) throws IOException {
+ /**
+ * Refernce a pre-existing log file.
+ * @param meta the cq for the "log" entry in +r/!0
+ */
+ public DfsLogger(ServerResources conf, String filename, String meta) throws IOException {
this.conf = conf;
this.logPath = filename;
+ metaReference = meta;
}
public static DFSLoggerInputStreams readHeaderAndReturnStream(VolumeManager fs, Path path, AccumuloConfiguration conf) throws IOException {
@@ -315,6 +323,7 @@ public class DfsLogger {
VolumeManager fs = conf.getFileSystem();
logPath = fs.choose(ServerConstants.getWalDirs()) + "/" + logger + "/" + filename;
+ metaReference = toString();
try {
short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
if (replication == 0)
@@ -400,6 +409,16 @@ public class DfsLogger {
return fileName;
}
+ /**
+ * get the cq needed to reference this logger's entry in +r/!0
+ */
+ public String getMeta() {
+ if (null == metaReference) {
+ throw new IllegalStateException("logger doesn't have meta reference. " + this);
+ }
+ return metaReference;
+ }
+
public String getFileName() {
return logPath.toString();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2899_31aea2ad.diff |
bugs-dot-jar_data_ACCUMULO-2544_7ec60f1b | ---
BugID: ACCUMULO-2544
Summary: Incorrect boundary matching for MockTableOperations.deleteRows
Description: "The API for deleteRows specifies: Delete rows between (start, end] but
the current MockTableOperations.deleteRows implementation behaves as
(start, end)\n\nHere is the failing test case\n\n{code:java}\npublic class TestDelete
{\n private static final String INSTANCE = \"mock\";\n private static final String
TABLE = \"foo\";\n private static final String USER = \"user\";\n private static
final String PASS = \"password\";\n private static final Authorizations AUTHS =
new Authorizations();\n\n @Test\n public void testDelete() throws TableNotFoundException,
AccumuloException,\n AccumuloSecurityException, TableExistsException {\n\n
\ MockInstance mockAcc = new MockInstance(INSTANCE);\n Connector conn = mockAcc.getConnector(USER,
new PasswordToken(PASS));\n conn.tableOperations().create(TABLE);\n conn.securityOperations().grantTablePermission(USER,
TABLE, TablePermission.READ);\n conn.securityOperations().grantTablePermission(USER,
TABLE, TablePermission.WRITE);\n\n Mutation mut = new Mutation(\"2\");\n mut.put(\"colfam\",
\"colqual\", \"value\");\n BatchWriter writer = conn.createBatchWriter(TABLE,
new BatchWriterConfig());\n writer.addMutation(mut);\n\n Scanner scan = conn.createScanner(TABLE,
AUTHS);\n scan.setRange(new Range(\"2\", \"2\"));\n\n assertEquals(1, countRecords(scan));\n
\ \n // this should delete (1,2] \n conn.tableOperations().deleteRows(TABLE,
new Text(\"1\"), new Text(\"2\"));\n\n scan = conn.createScanner(TABLE, AUTHS);\n
\ scan.setRange(new Range(\"2\", \"2\"));\n \n // this will fail if row
2 exists\n assertEquals(0, countRecords(scan));\n }\n\n private int countRecords(Scanner
scan) {\n int cnt = 0;\n for (Entry<Key, Value> entry : scan) {\n cnt++;\n
\ }\n scan.close();\n return cnt;\n }\n}\n{code}"
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index f088b1f..dc4a619 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -55,9 +55,9 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
public class MockTableOperations extends TableOperationsHelper {
-
- final private MockAccumulo acu;
- final private String username;
+ private static final byte[] ZERO = {0};
+ private final MockAccumulo acu;
+ private final String username;
MockTableOperations(MockAccumulo acu, String username) {
this.acu = acu;
@@ -314,7 +314,11 @@ public class MockTableOperations extends TableOperationsHelper {
if (!exists(tableName))
throw new TableNotFoundException(tableName, tableName, "");
MockTable t = acu.tables.get(tableName);
- Set<Key> keep = new TreeSet<Key>(t.table.tailMap(new Key(start)).headMap(new Key(end)).keySet());
+ Text startText = new Text(start);
+ Text endText = new Text(end);
+ startText.append(ZERO, 0, 1);
+ endText.append(ZERO, 0, 1);
+ Set<Key> keep = new TreeSet<Key>(t.table.subMap(new Key(startText), new Key(endText)).keySet());
t.table.keySet().removeAll(keep);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2544_7ec60f1b.diff |
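For the boundary issue above, a minimal sketch of why appending a zero byte to both bounds yields the promised (start, end] range. The tablet's sorted map is modelled here with a plain TreeMap of row strings; the class and values are illustrative, not code from the patch.

{code:java}
import java.util.SortedMap;
import java.util.TreeMap;

public class DeleteRowsBoundarySketch {
  public static void main(String[] args) {
    TreeMap<String,String> rows = new TreeMap<>();
    rows.put("a", "1");
    rows.put("b", "3");
    rows.put("c", "5");

    // subMap(from, to) is [from, to): inclusive of 'from', exclusive of 'to'.
    // Appending a \0 byte to both bounds shifts that window to (start, end]
    // over whole rows, which is what the deleteRows contract promises.
    SortedMap<String,String> doomed = rows.subMap("a" + "\0", "c" + "\0");
    System.out.println(doomed.keySet()); // [b, c] -- row "a" survives, row "c" is deleted
  }
}
{code}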
bugs-dot-jar_data_ACCUMULO-209_76d727f0 | ---
BugID: ACCUMULO-209
Summary: RegExFilter does not properly regex when using multi-byte characters
Description: "The current RegExFilter class uses a ByteArrayBackedCharSequence to
set the data to match against. The ByteArrayBackedCharSequence contains a line of
code that prevents the matcher from properly matching multi-byte characters.\n\nLine
49 of ByteArrayBackedCharSequence.java is:\nreturn (char) (0xff & data[offset +
index]); \n\nThis
incorrectly casts a single byte from the byte array to a char, which is 2 bytes
in Java. This prevents the RegExFilter from properly performing Regular Expressions
on multi-byte character encoded values.\n\nA patch for the RegExFilter.java file
has been created and will be submitted."
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index 0b3b73f..fb53801 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -17,6 +17,7 @@
package org.apache.accumulo.core.iterators.user;
import java.io.IOException;
+import java.io.UnsupportedEncodingException;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -28,7 +29,6 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.ByteArrayBackedCharSequence;
/**
* A Filter that matches entries based on Java regular expressions.
@@ -52,6 +52,9 @@ public class RegExFilter extends Filter {
public static final String COLQ_REGEX = "colqRegex";
public static final String VALUE_REGEX = "valueRegex";
public static final String OR_FIELDS = "orFields";
+ public static final String ENCODING = "encoding";
+
+ public static final String ENCODING_DEFAULT = "UTF-8";
private Matcher rowMatcher;
private Matcher colfMatcher;
@@ -59,33 +62,36 @@ public class RegExFilter extends Filter {
private Matcher valueMatcher;
private boolean orFields = false;
- private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private String encoding = ENCODING_DEFAULT;
- private Matcher copyMatcher(Matcher m)
- {
- if(m == null)
- return m;
- else
- return m.pattern().matcher("");
+ private Matcher copyMatcher(Matcher m) {
+ if (m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
}
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
- babcs.set(bs);
- matcher.reset(babcs);
- return matcher.matches();
+ try {
+ matcher.reset(new String(bs.getBackingArray(), bs.offset(), bs.length(), encoding));
+ return matcher.matches();
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
}
-
return !orFields;
}
private boolean matches(Matcher matcher, byte data[], int offset, int len) {
if (matcher != null) {
- babcs.set(data, offset, len);
- matcher.reset(babcs);
- return matcher.matches();
+ try {
+ matcher.reset(new String(data, offset, len, encoding));
+ return matcher.matches();
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
}
-
return !orFields;
}
@@ -130,6 +136,10 @@ public class RegExFilter extends Filter {
} else {
orFields = false;
}
+
+ if (options.containsKey(ENCODING)) {
+ encoding = options.get(ENCODING);
+ }
}
@Override
@@ -142,6 +152,7 @@ public class RegExFilter extends Filter {
io.addNamedOption(RegExFilter.COLQ_REGEX, "regular expression on column qualifier");
io.addNamedOption(RegExFilter.VALUE_REGEX, "regular expression on value");
io.addNamedOption(RegExFilter.OR_FIELDS, "use OR instread of AND when multiple regexes given");
+ io.addNamedOption(RegExFilter.ENCODING, "character encoding of byte array value (default is " + ENCODING_DEFAULT + ")");
return io;
}
@@ -160,6 +171,17 @@ public class RegExFilter extends Filter {
if (options.containsKey(VALUE_REGEX))
Pattern.compile(options.get(VALUE_REGEX)).matcher("");
+ if (options.containsKey(ENCODING)) {
+ try {
+ this.encoding = options.get(ENCODING);
+ @SuppressWarnings("unused")
+ String test = new String("test".getBytes(), encoding);
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ return false;
+ }
+ }
+
return true;
}
@@ -192,4 +214,19 @@ public class RegExFilter extends Filter {
si.addOption(RegExFilter.OR_FIELDS, "true");
}
}
+
+ /**
+ * Set the encoding string to use when interpreting characters
+ *
+ * @param si
+ * ScanIterator config to be updated
+ * @param encoding
+ * the encoding string to use for character interpretation.
+ *
+ */
+ public static void setEncoding(IteratorSetting si, String encoding) {
+ if (!encoding.isEmpty()) {
+ si.addOption(RegExFilter.ENCODING, encoding);
+ }
+ }
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-209_76d727f0.diff |
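A small, self-contained illustration of the casting problem described above: mapping each byte to its own char mangles multi-byte UTF-8 characters, while decoding with an explicit encoding (as the patched RegExFilter does) keeps the regex matching intact. The sample value and pattern are illustrative only.

{code:java}
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

public class MultiByteRegexSketch {
  public static void main(String[] args) {
    byte[] value = "h\u00e9llo".getBytes(StandardCharsets.UTF_8); // 'é' is two bytes in UTF-8

    // Byte-wise view, like the old ByteArrayBackedCharSequence: the two bytes
    // of 'é' become two unrelated chars, so the sequence is 6 chars long.
    StringBuilder byteWise = new StringBuilder();
    for (byte b : value)
      byteWise.append((char) (0xff & b));

    // Decoded view, like the patched filter: 5 real characters.
    String decoded = new String(value, StandardCharsets.UTF_8);

    Pattern p = Pattern.compile("h.llo");
    System.out.println(p.matcher(byteWise).matches()); // false
    System.out.println(p.matcher(decoded).matches());  // true
  }
}
{code}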
bugs-dot-jar_data_ACCUMULO-1505_b082fc1e | ---
BugID: ACCUMULO-1505
Summary: MockTable's addMutation does not check for empty mutation
Description: "When calling addMutation or addMutations on a MockBatchWriter, the updates
stored in the mutation are iterated over then committed in the MockTable class.
\n\nWhen this occurs in the TabletServerBatchWriter (eventually called from the
BatchWriterImpl), however, the mutation size is first checked and if the mutation
size is 0, an IllegalArgumentException is thrown.\n\nIn practice, if you have code
that tries to submit an empty mutation to a BatchWriter, it will fail and throw
an exception in the real world, but this will not be caught in tests against MockAccumulo."
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
index b33ebcb..d89a263 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.client.mock;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.util.ArgumentChecker;
public class MockBatchWriter implements BatchWriter {
@@ -32,11 +33,13 @@ public class MockBatchWriter implements BatchWriter {
@Override
public void addMutation(Mutation m) throws MutationsRejectedException {
+ ArgumentChecker.notNull(m);
acu.addMutation(tablename, m);
}
@Override
public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
+ ArgumentChecker.notNull(iterable);
for (Mutation m : iterable) {
acu.addMutation(tablename, m);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index 6d6d534..3dcab11 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -101,6 +101,8 @@ public class MockTable {
}
synchronized void addMutation(Mutation m) {
+ if (m.size() == 0)
+ throw new IllegalArgumentException("Can not add empty mutations");
long now = System.currentTimeMillis();
mutationCount++;
for (ColumnUpdate u : m.getUpdates()) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1505_b082fc1e.diff |
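A minimal sketch of the behavioural gap the patch closes, written against the mock client API used in the description; the instance, user, password and table names are illustrative.

{code:java}
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.io.Text;

public class EmptyMutationSketch {
  public static void main(String[] args) throws Exception {
    Connector conn = new MockInstance("test").getConnector("user", new PasswordToken("pass"));
    conn.tableOperations().create("foo");
    BatchWriter writer = conn.createBatchWriter("foo", new BatchWriterConfig());

    Mutation empty = new Mutation(new Text("row")); // no put() calls, so size() == 0

    // A real TabletServerBatchWriter rejects this with IllegalArgumentException;
    // before the patch the mock silently accepted it, after the patch both agree.
    writer.addMutation(empty);
    writer.close();
  }
}
{code}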
bugs-dot-jar_data_ACCUMULO-1348_ef0f6ddc | ---
BugID: ACCUMULO-1348
Summary: Accumulo Shell does not respect 'exit' when executing file
Description: |-
If there is an {{exit}} statement in the file given via {{accumulo shell -f file}}, the execution seems to skip it and go on to the next command instead of terminating.
To recreate:
{noformat}
[mike@home ~] cat bug.accumulo
exit
scan -np -t !METADATA
[mike@home ~] bin/accumulo shell -f /home/mike/bug.accumulo
{noformat}
Expected output: None
Actual output: A full scan of the !METADATA
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 1a3c518..4469d5c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@ -78,8 +78,13 @@ public class MockShell extends Shell {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
- while (scanner.hasNextLine())
- execCommand(scanner.nextLine(), true, isVerbose());
+ try {
+ while (scanner.hasNextLine() && !hasExited()) {
+ execCommand(scanner.nextLine(), true, isVerbose());
+ }
+ } finally {
+ scanner.close();
+ }
} else if (execCommand != null) {
for (String command : execCommand.split("\n")) {
execCommand(command, true, isVerbose());
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index abb324e..7efe5e6 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -246,7 +246,7 @@ public class Shell extends ShellOptions {
if (sysUser == null)
sysUser = "root";
String user = cl.getOptionValue(usernameOption.getOpt(), sysUser);
-
+
String passw = cl.getOptionValue(passwOption.getOpt(), null);
tabCompletion = !cl.hasOption(tabCompleteOption.getLongOpt());
String[] loginOptions = cl.getOptionValues(loginOption.getOpt());
@@ -261,13 +261,13 @@ public class Shell extends ShellOptions {
if (loginOptions == null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Must supply '-" + loginOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (passw != null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Can not supply '-" + passwOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (user == null)
throw new MissingArgumentException(usernameOption);
-
+
if (loginOptions != null && cl.hasOption(tokenOption.getOpt())) {
Properties props = new Properties();
for (String loginOption : loginOptions)
@@ -279,7 +279,7 @@ public class Shell extends ShellOptions {
this.token = Class.forName(cl.getOptionValue(tokenOption.getOpt())).asSubclass(AuthenticationToken.class).newInstance();
this.token.init(props);
}
-
+
if (!cl.hasOption(fakeOption.getLongOpt())) {
DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), "shell", InetAddress.getLocalHost()
.getHostName());
@@ -438,8 +438,13 @@ public class Shell extends ShellOptions {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
- while (scanner.hasNextLine())
- execCommand(scanner.nextLine(), true, isVerbose());
+ try {
+ while (scanner.hasNextLine() && !hasExited()) {
+ execCommand(scanner.nextLine(), true, isVerbose());
+ }
+ } finally {
+ scanner.close();
+ }
} else if (execCommand != null) {
for (String command : execCommand.split("\n")) {
execCommand(command, true, isVerbose());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1348_ef0f6ddc.diff |
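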
bugs-dot-jar_data_ACCUMULO-2952_11d11e0d | ---
BugID: ACCUMULO-2952
Summary: DefaultLoadBalancer takes a long time when tablets are highly unbalanced
Description: After creating a thousand splits on a large cluster, I noticed the master
was only moving tablets to one server at a time.
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
index 46b9b5f..3490405 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/DefaultLoadBalancer.java
@@ -86,7 +86,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
static class ServerCounts implements Comparable<ServerCounts> {
public final TServerInstance server;
- public final int count;
+ public int count;
public final TabletServerStatus status;
ServerCounts(int count, TServerInstance server, TabletServerStatus status) {
@@ -145,7 +145,7 @@ public class DefaultLoadBalancer extends TabletBalancer {
int end = totals.size() - 1;
int movedAlready = 0;
int tooManyIndex = 0;
- while (tooManyIndex < totals.size() && end > tooManyIndex) {
+ while (tooManyIndex < end) {
ServerCounts tooMany = totals.get(tooManyIndex);
int goal = even;
if (tooManyIndex < numServersOverEven) {
@@ -255,7 +255,8 @@ public class DefaultLoadBalancer extends TabletBalancer {
tooLittleCount = 0;
}
tooLittleMap.put(table, tooLittleCount + 1);
-
+ tooMuch.count--;
+ tooLittle.count++;
result.add(new TabletMigration(extent, tooMuch.server, tooLittle.server));
}
return result;
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2952_11d11e0d.diff |
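A toy model of the bookkeeping the fix adds: once planned migrations adjust the in-memory counts, a single balancing pass can spread tablets across every underloaded server instead of repeatedly choosing the same one. This is a simplification, not the actual DefaultLoadBalancer logic.

{code:java}
import java.util.Arrays;

public class BalancerCountSketch {
  public static void main(String[] args) {
    int[] counts = {9, 0, 0}; // tablets per server after a large split burst
    int even = 3;

    int donor = 0;
    for (int receiver = 1; receiver < counts.length; receiver++) {
      while (counts[donor] > even && counts[receiver] < even) {
        counts[donor]--;    // the fix: adjust both sides as each migration is planned
        counts[receiver]++;
        System.out.println("migrate one tablet: server" + donor + " -> server" + receiver);
      }
    }
    System.out.println(Arrays.toString(counts)); // [3, 3, 3]
  }
}
{code}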
bugs-dot-jar_data_ACCUMULO-3015_f848178e | ---
BugID: ACCUMULO-3015
Summary: RangeInputSplit doesn't serialize table name
Description: |-
Found another missed member in the serialization of RangeInputSplit: the table name.
Not a huge deal because the table information should still be in the Configuration for most users, but this does break in "advanced" uses of mapreduce. The workaround is to re-set the table in the RangeInputSplit in your overridden InputFormat.getRecordReader, or to make sure the Configuration is consistent between getRecordReader and getSplits.
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 05316a1..15c6185 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -144,6 +144,9 @@ public class RangeInputSplit extends InputSplit implements Writable {
@Override
public void readFields(DataInput in) throws IOException {
range.readFields(in);
+ if (in.readBoolean()) {
+ table = in.readUTF();
+ }
int numLocs = in.readInt();
locations = new String[numLocs];
for (int i = 0; i < numLocs; ++i)
@@ -220,6 +223,12 @@ public class RangeInputSplit extends InputSplit implements Writable {
@Override
public void write(DataOutput out) throws IOException {
range.write(out);
+
+ out.writeBoolean(null != table);
+ if (null != table) {
+ out.writeUTF(table);
+ }
+
out.writeInt(locations.length);
for (int i = 0; i < locations.length; ++i)
out.writeUTF(locations[i]);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3015_f848178e.diff |
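The patch uses the usual Writable idiom for an optional field: a presence flag followed by the value. A standalone sketch of that pattern (the helper names and the "mytable" value are illustrative):

{code:java}
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalFieldWritableSketch {
  static void writeNullableString(DataOutput out, String s) throws IOException {
    out.writeBoolean(s != null);
    if (s != null)
      out.writeUTF(s);
  }

  static String readNullableString(DataInput in) throws IOException {
    return in.readBoolean() ? in.readUTF() : null;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    writeNullableString(new DataOutputStream(bytes), "mytable");
    String table = readNullableString(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(table); // mytable
  }
}
{code}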
bugs-dot-jar_data_ACCUMULO-1120_474b2577 | ---
BugID: ACCUMULO-1120
Summary: 'stop-all doesn''t work: Error BAD_CREDENTIALS for user root'
Description: "{noformat}\n$ bin/accumulo admin stopAll\n2013-02-27 14:56:14,072 [util.Admin]
ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS
for user root - Username or Password is Invalid\norg.apache.accumulo.core.client.AccumuloSecurityException:
Error BAD_CREDENTIALS for user root - Username or Password is Invalid\n\tat org.apache.accumulo.core.client.impl.MasterClient.execute(MasterClient.java:119)\n\tat
org.apache.accumulo.server.util.Admin.stopServer(Admin.java:107)\n\tat org.apache.accumulo.server.util.Admin.main(Admin.java:95)\n\tat
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n\tat
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n\tat
java.lang.reflect.Method.invoke(Method.java:597)\n\tat org.apache.accumulo.start.Main$1.run(Main.java:97)\n\tat
java.lang.Thread.run(Thread.java:662)\nCaused by: ThriftSecurityException(user:root,
code:BAD_CREDENTIALS)\n\tat org.apache.accumulo.core.master.thrift.MasterClientService$shutdown_result$shutdown_resultStandardScheme.read(MasterClientService.java:8424)\n\tat
org.apache.accumulo.core.master.thrift.MasterClientService$shutdown_result$shutdown_resultStandardScheme.read(MasterClientService.java:8410)\n\tat
org.apache.accumulo.core.master.thrift.MasterClientService$shutdown_result.read(MasterClientService.java:8360)\n\tat
org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:78)\n\tat org.apache.accumulo.core.master.thrift.MasterClientService$Client.recv_shutdown(MasterClientService.java:312)\n\tat
org.apache.accumulo.core.master.thrift.MasterClientService$Client.shutdown(MasterClientService.java:297)\n\tat
org.apache.accumulo.server.util.Admin$1.execute(Admin.java:110)\n\tat org.apache.accumulo.server.util.Admin$1.execute(Admin.java:107)\n\tat
org.apache.accumulo.core.client.impl.MasterClient.execute(MasterClient.java:113)\n\t...
8 more\n\n{noformat}\n"
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
index 50d7398..e426c4a 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOpts.java
@@ -104,7 +104,7 @@ public class ClientOpts extends Help {
public String principal = System.getProperty("user.name");
@Parameter(names = "-p", converter = PasswordConverter.class, description = "Connection password")
- public Password password = new Password("secret");
+ public Password password = null;
@Parameter(names = "--password", converter = PasswordConverter.class, description = "Enter the connection password", password = true)
public Password securePassword = null;
@@ -112,7 +112,7 @@ public class ClientOpts extends Help {
public SecurityToken getToken() {
PasswordToken pt = new PasswordToken();
if (securePassword == null) {
- if (password.value == null)
+ if (password == null)
return null;
return pt.setPassword(password.value);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1120_474b2577.diff |
bugs-dot-jar_data_ACCUMULO-4113_27300d81 | ---
BugID: ACCUMULO-4113
Summary: Fix incorrect usage of ByteBuffer
Description: |+
While working on ACCUMULO-4098 I found one place where ByteBuffer was being used incorrectly. Looking around the code, I found other places that use ByteBuffer incorrectly. Some of the problems are as follows:
* Calling {{ByteBuffer.array()}} without calling {{ByteBuffer.hasArray()}}.
* Using {{ByteBuffer.position()}} or {{ByteBuffer.limit()}} without adding {{ByteBuffer.arrayOffset()}} when dealing with an array returned by {{ByteBuffer.array()}}.
* Using {{ByteBuffer.arrayOffset()}} without adding {{ByteBuffer.position()}} when dealing with an array returned by {{ByteBuffer.array()}}.
diff --git a/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java b/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
index d9ddc67..d040139 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/ArrayByteSequence.java
@@ -21,6 +21,8 @@ import static com.google.common.base.Charsets.UTF_8;
import java.io.Serializable;
import java.nio.ByteBuffer;
+import org.apache.accumulo.core.util.ByteBufferUtil;
+
public class ArrayByteSequence extends ByteSequence implements Serializable {
private static final long serialVersionUID = 1L;
@@ -52,15 +54,14 @@ public class ArrayByteSequence extends ByteSequence implements Serializable {
}
public ArrayByteSequence(ByteBuffer buffer) {
- this.length = buffer.remaining();
-
if (buffer.hasArray()) {
this.data = buffer.array();
- this.offset = buffer.position();
+ this.offset = buffer.position() + buffer.arrayOffset();
+ this.length = buffer.remaining();
} else {
- this.data = new byte[length];
this.offset = 0;
- buffer.get(data);
+ this.data = ByteBufferUtil.toBytes(buffer);
+ this.length = data.length;
}
}
@@ -118,6 +119,7 @@ public class ArrayByteSequence extends ByteSequence implements Serializable {
return copy;
}
+ @Override
public String toString() {
return new String(data, offset, length, UTF_8);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/ByteBufferUtil.java b/core/src/main/java/org/apache/accumulo/core/util/ByteBufferUtil.java
index be5cddf..cdde05c 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/ByteBufferUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/ByteBufferUtil.java
@@ -31,7 +31,15 @@ public class ByteBufferUtil {
public static byte[] toBytes(ByteBuffer buffer) {
if (buffer == null)
return null;
- return Arrays.copyOfRange(buffer.array(), buffer.position(), buffer.limit());
+ if (buffer.hasArray()) {
+ // did not use buffer.get() because it changes the position
+ return Arrays.copyOfRange(buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() + buffer.arrayOffset());
+ } else {
+ byte[] data = new byte[buffer.remaining()];
+ // duplicate inorder to avoid changing position
+ buffer.duplicate().get(data);
+ return data;
+ }
}
public static List<ByteBuffer> toByteBuffers(Collection<byte[]> bytesList) {
@@ -47,23 +55,32 @@ public class ByteBufferUtil {
public static List<byte[]> toBytesList(Collection<ByteBuffer> bytesList) {
if (bytesList == null)
return null;
- ArrayList<byte[]> result = new ArrayList<byte[]>();
+ ArrayList<byte[]> result = new ArrayList<byte[]>(bytesList.size());
for (ByteBuffer bytes : bytesList) {
result.add(toBytes(bytes));
}
return result;
}
- public static Text toText(ByteBuffer bytes) {
- if (bytes == null)
+ public static Text toText(ByteBuffer byteBuffer) {
+ if (byteBuffer == null)
return null;
- Text result = new Text();
- result.set(bytes.array(), bytes.position(), bytes.remaining());
- return result;
+
+ if (byteBuffer.hasArray()) {
+ Text result = new Text();
+ result.set(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), byteBuffer.remaining());
+ return result;
+ } else {
+ return new Text(toBytes(byteBuffer));
+ }
}
public static String toString(ByteBuffer bytes) {
- return new String(bytes.array(), bytes.position(), bytes.remaining(), UTF_8);
+ if (bytes.hasArray()) {
+ return new String(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining(), UTF_8);
+ } else {
+ return new String(toBytes(bytes), UTF_8);
+ }
}
public static ByteBuffer toByteBuffers(ByteSequence bs) {
@@ -73,7 +90,6 @@ public class ByteBufferUtil {
if (bs.isBackedByArray()) {
return ByteBuffer.wrap(bs.getBackingArray(), bs.offset(), bs.length());
} else {
- // TODO create more efficient impl
return ByteBuffer.wrap(bs.toArray());
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java b/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
index f353613..b776553 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
@@ -122,8 +122,8 @@ public class UnsynchronizedBuffer {
offset = buffer.arrayOffset() + buffer.position();
data = buffer.array();
} else {
- data = new byte[buffer.remaining()];
- buffer.get(data);
+ offset = 0;
+ data = ByteBufferUtil.toBytes(buffer);
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4113_27300d81.diff |
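A compact demonstration of the first two problem classes listed in the description, using a sliced buffer whose arrayOffset() is non-zero; the data is illustrative.

{code:java}
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class ByteBufferOffsetSketch {
  public static void main(String[] args) {
    byte[] backing = "XXXhello".getBytes(StandardCharsets.UTF_8);
    // slice() produces a buffer whose index 0 is index 3 of the backing array
    ByteBuffer buffer = ByteBuffer.wrap(backing, 3, 5).slice();

    // Incorrect: ignores arrayOffset(), so it copies from the start of the backing array.
    byte[] wrong = Arrays.copyOfRange(buffer.array(), buffer.position(), buffer.limit());

    // Correct: only touch array() when hasArray() is true, and add arrayOffset().
    byte[] right = buffer.hasArray()
        ? Arrays.copyOfRange(buffer.array(), buffer.arrayOffset() + buffer.position(),
            buffer.arrayOffset() + buffer.limit())
        : null; // a direct buffer would need duplicate().get(...) instead

    System.out.println(new String(wrong, StandardCharsets.UTF_8)); // XXXhe
    System.out.println(new String(right, StandardCharsets.UTF_8)); // hello
  }
}
{code}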
bugs-dot-jar_data_ACCUMULO-193_8ad5a888 | ---
BugID: ACCUMULO-193
Summary: key.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS) can produce a key
with an invalid COLVIS
Description: Need a new algorithm for calculating the next biggest column visibility,
because tagging \0 to the end creates an invalid column visibility. We might be
able to minimize the timestamp for this (i.e. set timestamp to Long.MIN_VALUE, but
keep column and row elements the same).
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
index 3d1f92d..afab887 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -22,6 +22,8 @@ package org.apache.accumulo.core.data;
*
*/
+import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
+
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@@ -38,8 +40,6 @@ import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
-import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
-
public class Key implements WritableComparable<Key>, Cloneable {
protected byte[] row;
@@ -444,8 +444,10 @@ public class Key implements WritableComparable<Key>, Cloneable {
}
public static String toPrintableString(byte ba[], int offset, int len, int maxLen) {
- StringBuilder sb = new StringBuilder();
-
+ return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
+ }
+
+ public static StringBuilder appendPrintableString(byte ba[], int offset, int len, int maxLen, StringBuilder sb) {
int plen = Math.min(len, maxLen);
for (int i = 0; i < plen; i++) {
@@ -460,26 +462,33 @@ public class Key implements WritableComparable<Key>, Cloneable {
sb.append("... TRUNCATED");
}
- return sb.toString();
+ return sb;
+ }
+
+ private StringBuilder rowColumnStringBuilder() {
+ StringBuilder sb = new StringBuilder();
+ appendPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" ");
+ appendPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(":");
+ appendPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" [");
+ appendPrintableString(colVisibility, 0, colVisibility.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append("]");
+ return sb;
}
public String toString() {
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = toPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT) + " "
- + toPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT) + ":"
- + toPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT) + " " + labelString + " " + Long.toString(timestamp) + " "
- + deleted;
- return s;
+ StringBuilder sb = rowColumnStringBuilder();
+ sb.append(" ");
+ sb.append(Long.toString(timestamp));
+ sb.append(" ");
+ sb.append(deleted);
+ return sb.toString();
}
public String toStringNoTime() {
-
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = new String(row, 0, row.length) + " " + new String(colFamily, 0, colFamily.length) + ":" + new String(colQualifier, 0, colQualifier.length) + " "
- + labelString;
- return s;
+ return rowColumnStringBuilder().toString();
}
public int getLength() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-193_8ad5a888.diff |
bugs-dot-jar_data_ACCUMULO-4138_eb0f9b41 | ---
BugID: ACCUMULO-4138
Summary: CompactCommand description is incorrect
Description: "The compact command has the following description \n{code}\nroot@accumulo>
compact -?\nusage: compact [<table>{ <table>}] [-?] [-b <begin-row>] [--cancel]
[-e <end-row>] [-nf] [-ns <namespace> | -p <pattern> | -t <tableName>] [-pn <profile>]
\ [-w]\ndescription: sets all tablets for a table to major compact as soon as possible
(based on current time)\n -?,--help display this help\n -b,--begin-row
<begin-row> begin row (inclusive)\n --cancel cancel
user initiated compactions\n -e,--end-row <end-row> end row (inclusive)\n
\ -nf,--noFlush do not flush table data in memory before compacting.\n
\ -ns,--namespace <namespace> name of a namespace to operate on\n -p,--pattern
<pattern> regex pattern of table names to operate on\n -pn,--profile <profile>
\ iterator profile name\n -t,--table <tableName> name of a table
to operate on\n -w,--wait wait for compact to finish\n{code}\n\nHowever,
the --begin-row is not inclusive. Here is a simple demonstration.\n{code}\ncreatetable
compacttest\naddsplits a b c\ninsert \"a\" \"1\" \"\" \"\"\ninsert \"a\" \"2\" \"\"
\"\"\ninsert \"b\" \"3\" \"\" \"\"\ninsert \"b\" \"4\" \"\" \"\"\ninsert \"c\" \"5\"
\"\" \"\"\ninsert \"c\" \"6\" \"\" \"\"\nflush -w\nscan -t accumulo.metadata -np\ncompact
-b a -e c -t compacttest -w\nscan -t accumulo.metadata -np\ndeletetable compacttest
-f\n{code}\n\nYou will see that the file associated with the 'a' split is still an F
(flush) file, while the files in the 'b' and 'c' splits are A files.\n\nNot sure if
the fix is to update the command's description, which would be easy, or to make the
begin row actually inclusive."
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
index 536d6e6..bcad3a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
@@ -260,7 +260,7 @@ public interface TableOperations {
* @param start
* first tablet to be compacted contains the row after this row, null means the first tablet in table
* @param end
- * last tablet to be merged contains this row, null means the last tablet in table
+ * last tablet to be compacted contains this row, null means the last tablet in table
* @param flush
* when true, table memory is flushed before compaction starts
* @param wait
@@ -276,7 +276,7 @@ public interface TableOperations {
* @param start
* first tablet to be compacted contains the row after this row, null means the first tablet in table
* @param end
- * last tablet to be merged contains this row, null means the last tablet in table
+ * last tablet to be compacted contains this row, null means the last tablet in table
* @param iterators
* A set of iterators that will be applied to each tablet compacted
* @param flush
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
index 64968f0..6ffa3f4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
@@ -54,9 +54,7 @@ public class DeleteRowsCommand extends Command {
public Options getOptions() {
final Options o = new Options();
forceOpt = new Option("f", "force", false, "delete data even if start or end are not specified");
- startRowOptExclusive = new Option(OptUtil.START_ROW_OPT, "begin-row", true, "begin row (exclusive)");
- startRowOptExclusive.setArgName("begin-row");
- o.addOption(startRowOptExclusive);
+ o.addOption(OptUtil.startRowOpt());
o.addOption(OptUtil.endRowOpt());
o.addOption(OptUtil.tableOpt("table to delete a row range from"));
o.addOption(forceOpt);
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
index 9213a06..18d519d 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
@@ -96,9 +96,7 @@ public class MergeCommand extends Command {
sizeOpt = new Option("s", "size", true, "merge tablets to the given size over the entire table");
forceOpt = new Option("f", "force", false, "merge small tablets to large tablets, even if it goes over the given size");
allOpt = new Option("", "all", false, "allow an entire table to be merged into one tablet without prompting the user for confirmation");
- Option startRowOpt = OptUtil.startRowOpt();
- startRowOpt.setDescription("begin row (NOT inclusive)");
- o.addOption(startRowOpt);
+ o.addOption(OptUtil.startRowOpt());
o.addOption(OptUtil.endRowOpt());
o.addOption(OptUtil.tableOpt("table to be merged"));
o.addOption(verboseOpt);
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
index 9915bdf..99e09e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
@@ -117,7 +117,7 @@ public abstract class OptUtil {
}
public static Option startRowOpt() {
- final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (inclusive)");
+ final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (exclusive)");
o.setArgName("begin-row");
return o;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
index 9a0026a..60ae0a7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
@@ -57,6 +57,7 @@ public class ScanCommand extends Command {
protected Option timestampOpt;
private Option optStartRowExclusive;
+ private Option optStartRowInclusive;
private Option optEndRowExclusive;
private Option timeoutOption;
private Option profileOpt;
@@ -318,7 +319,9 @@ public class ScanCommand extends Command {
o.addOption(scanOptAuths);
o.addOption(scanOptRow);
- o.addOption(OptUtil.startRowOpt());
+ optStartRowInclusive = new Option(OptUtil.START_ROW_OPT, "begin-row", true, "begin row (inclusive)");
+ optStartRowInclusive.setArgName("begin-row");
+ o.addOption(optStartRowInclusive);
o.addOption(OptUtil.endRowOpt());
o.addOption(optStartRowExclusive);
o.addOption(optEndRowExclusive);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4138_eb0f9b41.diff |
bugs-dot-jar_data_ACCUMULO-1358_4d10c92f | ---
BugID: ACCUMULO-1358
Summary: Shell's setiter is not informative when using a bad class name
Description: In the shell, I did setiter using a class that wasn't found. Rather than
a message about it not being found, I just get told that I have an invalid argument.
Even turning on debug, I had to use the stack trace to figure out why it was erroring.
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
index 4c6d2d2..26e38e6 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
@@ -175,14 +175,23 @@ public class SetIterCommand extends Command {
clazz = classloader.loadClass(className).asSubclass(OptionDescriber.class);
skvi = clazz.newInstance();
} catch (ClassNotFoundException e) {
- throw new IllegalArgumentException(e.getMessage());
+ StringBuilder msg = new StringBuilder("Unable to load ").append(className);
+ if (className.indexOf('.') < 0) {
+ msg.append("; did you use a fully qualified package name?");
+ } else {
+ msg.append("; class not found.");
+ }
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
} catch (InstantiationException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (IllegalAccessException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (ClassCastException e) {
- throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, "Unable to load " + className + " as type " + OptionDescriber.class.getName()
- + "; configure with 'config' instead");
+ StringBuilder msg = new StringBuilder("Loaded ");
+ msg.append(className).append(" but it does not implement ");
+ msg.append(OptionDescriber.class.getSimpleName());
+ msg.append("; use 'config -s' instead.");
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
}
final IteratorOptions itopts = skvi.describeOptions();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1358_4d10c92f.diff |
bugs-dot-jar_data_ACCUMULO-3218_1b35d263 | ---
BugID: ACCUMULO-3218
Summary: ZooKeeperInstance only uses first ZooKeeper in list of quorum
Description: |-
Had tests running which had a quorum of 3 ZooKeeper servers. One appears to have died and the test was then unable to connect to the Accumulo shell, hanging on trying to connect to ZooKeeper.
There was no client.conf file present, so a ClientConfiguration was constructed from accumulo-site.xml.
{code}
this.zooKeepers = clientConf.get(ClientProperty.INSTANCE_ZK_HOST);
{code}
When the Commons Configuration AbstractConfiguration class is used with the get() method, only the first element of the value is returned, because the implementation splits the value into a list on the default delimiter, a comma.
It's easily reproduced with the following:
{code}
ZooKeeperInstance inst = new ZooKeeperInstance("accumulo", "localhost,127.0.0.1");
System.out.println(inst.getZooKeepers());
{code}
The above will print
{noformat}
localhost
{noformat}
instead of the expected
{noformat}
localhost,127.0.0.1
{noformat}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
index b64fab4..17ad10b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
@@ -107,6 +107,8 @@ public class ClientConfiguration extends CompositeConfiguration {
public ClientConfiguration(List<? extends Configuration> configs) {
super(configs);
+ // Don't do list interpolation
+ this.setListDelimiter('\0');
}
/**
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3218_1b35d263.diff |
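The list-splitting behaviour can be reproduced with Commons Configuration 1.x alone, without Accumulo; setting the list delimiter to '\0', as the patch does, keeps the comma-separated ZooKeeper string intact. The property name and values below are illustrative.

{code:java}
import org.apache.commons.configuration.BaseConfiguration;

public class ListDelimiterSketch {
  public static void main(String[] args) {
    BaseConfiguration splitting = new BaseConfiguration();
    splitting.setProperty("instance.zookeeper.host", "localhost,127.0.0.1");
    // With the default ',' delimiter the value is stored as a list and
    // getString() returns only its first element.
    System.out.println(splitting.getString("instance.zookeeper.host")); // localhost

    BaseConfiguration intact = new BaseConfiguration();
    intact.setListDelimiter('\0'); // effectively disables list interpolation
    intact.setProperty("instance.zookeeper.host", "localhost,127.0.0.1");
    System.out.println(intact.getString("instance.zookeeper.host")); // localhost,127.0.0.1
  }
}
{code}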
bugs-dot-jar_data_ACCUMULO-1986_a5e3ed3b | ---
BugID: ACCUMULO-1986
Summary: Validity checks missing for readFields and Thrift deserialization
Description: Classes in o.a.a.core.data (and potentially elsewhere) that support construction
from a Thrift object and/or population from a {{DataInput}} (via a {{readFields()}}
method) often lack data validity checks that the classes' constructors enforce.
The missing checks make it possible for an attacker to create invalid objects by
manipulating the bytes being read. The situation is analogous to the need to check
objects deserialized from their Java serialized form within the {{readObject()}}
method.
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Key.java b/core/src/main/java/org/apache/accumulo/core/data/Key.java
index de9e22d..4b6867f 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -291,6 +291,19 @@ public class Key implements WritableComparable<Key>, Cloneable {
this.colVisibility = toBytes(tkey.colVisibility);
this.timestamp = tkey.timestamp;
this.deleted = false;
+
+ if (row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (colFamily == null) {
+ throw new IllegalArgumentException("null column family");
+ }
+ if (colQualifier == null) {
+ throw new IllegalArgumentException("null column qualifier");
+ }
+ if (colVisibility == null) {
+ throw new IllegalArgumentException("null column visibility");
+ }
}
/**
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1986_a5e3ed3b.diff |
bugs-dot-jar_data_ACCUMULO-3424_27d4ee21 | ---
BugID: ACCUMULO-3424
Summary: Token class option always requires token property
Description: |-
In testing out ACCUMULO-2815, I attempted to manually provide a KerberosToken to authenticate myself and then launch the shell, but ran into an issue. The KerberosToken (in its current state) needs no options: it's wholly functional on its own.
{{accumulo shell -tc org.apache.accumulo.core.client.security.tokens.KerberosToken}} gives an error
{noformat}
2014-12-16 11:41:09,712 [shell.Shell] ERROR: com.beust.jcommander.ParameterException: Must supply either both or neither of '--tokenClass' and '--tokenProperty'
{noformat}
And providing an empty option just prints the help message {{accumulo shell -tc org.apache.accumulo.core.client.security.tokens.KerberosToken -l ""}}
I'm guessing the latter is just how the JCommander DynamicParameter is implemented, but I don't see a reason why every authentication *must* have some properties provided to it.
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index b187a76..a7ab8db 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -286,8 +286,7 @@ public class Shell extends ShellOptions {
// process default parameters if unspecified
try {
- boolean hasToken = (token != null);
- boolean hasTokenOptions = !loginOptions.isEmpty();
+ final boolean hasToken = (token != null);
if (hasToken && password != null) {
throw new ParameterException("Can not supply '--pass' option with '--tokenClass' option");
@@ -300,16 +299,15 @@ public class Shell extends ShellOptions {
}
});
- // Need either both a token and options, or neither, but not just one.
- if (hasToken != hasTokenOptions) {
- throw new ParameterException("Must supply either both or neither of '--tokenClass' and '--tokenProperty'");
- } else if (hasToken) { // implied hasTokenOptions
+ if (hasToken) { // implied hasTokenOptions
// Fully qualified name so we don't shadow java.util.Properties
org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties props;
// and line wrap it because the package name is so long
props = new org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties();
- props.putAllStrings(loginOptions);
+ if (!loginOptions.isEmpty()) {
+ props.putAllStrings(loginOptions);
+ }
token.init(props);
} else {
// Read password if the user explicitly asked for it, or didn't specify anything at all
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3424_27d4ee21.diff |
bugs-dot-jar_data_ACCUMULO-2659_019edb16 | ---
BugID: ACCUMULO-2659
Summary: Incompatible API changes in 1.6.0
Description: "While examining API changes for 1.6.0 I noticed some non-deprecated
methods were removed. I am not sure how important these are, but technically these
methods are in the public API. Opening this issue to document what I found.\n\nI
compared 1.6.0 to 1.5.0.\n\nIn ACCUMULO-1674 the following methods were removed\n\n{noformat}\npackage
org.apache.accumulo.core.client.mapreduce.lib.util\nConfiguratorBase.getToken (
Class<?>, Configuration ) [static] : byte[ ]\nConfiguratorBase.getTokenClass (
Class<?> ,Configuration) [static] : String\n{noformat}\n\nIn ACCUMULO-391 the
following method was removed\n\n{noformat}\npackage org.apache.accumulo.core.client.mapreduce.lib.util\nInputConfigurator.getTabletLocator
( Class<?>, Configuration ) [static] : TabletLocator \n{noformat}\n\nIn ACCUMULO-391
the following method was removed and not properly fixed in ACCUMULO-2586\n\n{noformat}\naccumulo-core.jar,
RangeInputSplit.class\npackage org.apache.accumulo.core.client.mapred\nInputFormatBase.RangeInputSplit.InputFormatBase.RangeInputSplit
( String table, Range range, String[ ] locations )\npackage org.apache.accumulo.core.client.mapreduce\nInputFormatBase.RangeInputSplit.InputFormatBase.RangeInputSplit
( String table, Range range, String[ ] locations ) \n{noformat}\n\n It seems like
the following were removed in ACCUMULO-1854 \n\n{noformat}\npackage org.apache.accumulo.core.client.mapred\nInputFormatBase.RecordReaderBase<K.setupIterators
(JobConf job, Scanner scanner ) : void\npackage org.apache.accumulo.core.client.mapreduce\nInputFormatBase.RecordReaderBase<K.setupIterators
(TaskAttemptContext context, Scanner scanner) : void\n{noformat}\n\nIn ACCUMULO-1018
the following method was removed\n\n{noformat}\npackage org.apache.accumulo.core.client\nMutationsRejectedException.MutationsRejectedException
( List, HashMap, Set, Collection, int cause, Throwable cvsList ) \n{noformat}"
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
index 54b983f..0cee355 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
@@ -322,6 +322,9 @@ public abstract class InputFormatBase<K,V> extends AbstractInputFormat<K,V> {
iterators = getIterators(job);
} else {
iterators = split.getIterators();
+ if (null == iterators) {
+ iterators = getIterators(job);
+ }
}
setupIterators(iterators, scanner);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2659_019edb16.diff |
bugs-dot-jar_data_ACCUMULO-178_2f0643a9 | ---
BugID: ACCUMULO-178
Summary: Off-by-one error in FamilyIntersectingIterator
Description: In the buildDocKey() function within the FamilyIntersectingIterator there
is a bug that shortens the docID by 1. This causes the wrong doc's data to be returned
in the results of a query using this Iterator.
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
index 6df0e80..f870b30 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
@@ -151,7 +151,7 @@ public class FamilyIntersectingIterator extends IntersectingIterator {
if (log.isTraceEnabled())
log.trace(zeroIndex + " " + currentDocID.getLength());
Text colq = new Text();
- colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 2);
+ colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 1);
Key k = new Key(currentPartition, colf, colq);
if (log.isTraceEnabled())
log.trace("built doc key for seek: " + k.toString());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-178_2f0643a9.diff |
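The off-by-one can be seen with plain offset arithmetic on a doc key laid out as <type>\0<docId>: subtracting 2 from the length drops the last byte of the docID, while subtracting 1 (the fix) keeps it. The values below are illustrative.

{code:java}
import java.nio.charset.StandardCharsets;

public class DocKeyOffByOneSketch {
  public static void main(String[] args) {
    byte[] currentDocID = "wiki\0doc42".getBytes(StandardCharsets.UTF_8);
    int zeroIndex = 4; // position of the \0 separator

    int buggyLen = currentDocID.length - zeroIndex - 2; // old code: drops the final byte
    int fixedLen = currentDocID.length - zeroIndex - 1; // patched code: full docID

    System.out.println(new String(currentDocID, zeroIndex + 1, buggyLen, StandardCharsets.UTF_8)); // doc4
    System.out.println(new String(currentDocID, zeroIndex + 1, fixedLen, StandardCharsets.UTF_8)); // doc42
  }
}
{code}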
bugs-dot-jar_data_ACCUMULO-1800_8ec4cb84 | ---
BugID: ACCUMULO-1800
Summary: delete mutations not working through the Proxy
Description: |
Aru Sahni writes:
{quote}
I'm new to Accumulo and am still trying to wrap my head around its ways. To further that challenge, I'm using Pyaccumulo, which doesn't present much in terms of available reference material.
Right now I'm trying to understand how Accumulo manages record (key-value pair) deletions.
conn = Accumulo(host, port, user, password)
table = 'test_table'
conn.create_table(table)
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', value='car')
writer.add_mutation(mut)
writer.close()
conn.close()
Will generate a record (found via a shell scan):
mut_01 item:name [] car
However the subsequent mutation...
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', is_delete=True)
writer.add_mutation(mut)
writer.close()
Results in:
mut_01 item:name []
How should one expect the deleted row to be represented? That record sticks around even after I force a compaction of the table. I was expecting it to not show up in any iterators, or at least provide an easy way to see if the cell has been deleted.
{quote}
[~ecn] has confirmed the problem.
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index cec8cfc..ee993b9 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1174,14 +1174,14 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (update.isSetDeleteCell()) {
m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
} else {
- if (update.isSetDeleteCell()) {
- m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
- } else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp(), value);
- }
+ m.put(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp(), value);
}
} else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, value);
+ if (update.isSetDeleteCell()) {
+ m.putDelete(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz);
+ } else {
+ m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, new Value(value));
+ }
}
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1800_8ec4cb84.diff |
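On the client side, the distinction the patch restores looks like this: a deletion must be written with putDelete(), not put() with an empty value, otherwise the cell keeps showing up in scans exactly as reported above. A minimal sketch using the core client API; the row and column names are illustrative.

{code:java}
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.io.Text;

public class DeleteMutationSketch {
  public static Mutation deleteCell(String row, String family, String qualifier) {
    Mutation m = new Mutation(new Text(row));
    // Writing an empty value would store a cell that still scans;
    // putDelete writes a delete marker that hides the cell.
    m.putDelete(new Text(family), new Text(qualifier));
    return m;
  }
}
{code}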
bugs-dot-jar_data_ACCUMULO-1192_c489d866 | ---
BugID: ACCUMULO-1192
Summary: '"du" on a table without files does not report'
Description: |
{noformat}
shell> createtable t
shell> du t
shell>
{noformat}
expected:
{noformat}
shell> du t
0 t
shell>
{noformat}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java b/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
index 64d5970..6a61c50 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
@@ -144,12 +144,17 @@ public class TableDiskUsage {
tdu.addTable(tableId);
HashSet<String> tablesReferenced = new HashSet<String>(tableIds);
+ HashSet<String> emptyTableIds = new HashSet<String>();
for (String tableId : tableIds) {
Scanner mdScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
mdScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+ if(!mdScanner.iterator().hasNext()) {
+ emptyTableIds.add(tableId);
+ }
+
for (Entry<Key,Value> entry : mdScanner) {
String file = entry.getKey().getColumnQualifier().toString();
if (file.startsWith("../")) {
@@ -215,6 +220,14 @@ public class TableDiskUsage {
usage.put(tableNames, entry.getValue());
}
+
+ if(!emptyTableIds.isEmpty()) {
+ TreeSet<String> emptyTables = new TreeSet<String>();
+ for (String tableId : emptyTableIds) {
+ emptyTables.add(reverseTableIdMap.get(tableId));
+ }
+ usage.put(emptyTables, 0L);
+ }
for (Entry<TreeSet<String>,Long> entry : usage.entrySet())
printer.print(String.format("%,24d %s", entry.getValue(), entry.getKey()));
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1192_c489d866.diff |
bugs-dot-jar_data_ACCUMULO-1661_13eb19c2 | ---
BugID: ACCUMULO-1661
Summary: AccumuloInputFormat cannot fetch empty column family
Description: |-
The following fails:
{code:java}
Job job = new Job();
HashSet<Pair<Text,Text>> cols = new HashSet<Pair<Text,Text>>();
cols.add(new Pair<Text,Text>(new Text(""), null));
AccumuloInputFormat.fetchColumns(job, cols);
Set<Pair<Text,Text>> setCols = AccumuloInputFormat.getFetchedColumns(job);
assertEquals(cols.size(), setCols.size());
{code}
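The failure comes down to how the serialized column list is split back apart: an empty column family serializes to an empty token, and the default splitting drops it. A small, self-contained illustration of the String.split behavior the fix relies on (the sample value is made up):
{code:java}
import java.util.Arrays;

public class SplitKeepsTrailingEmpties {
  public static void main(String[] args) {
    // A fetched column with an empty family and no qualifier serializes to an
    // empty token, so the stored config value can end in (or be) an empty string.
    String confValue = "Zm9v,";   // illustrative: one non-empty entry, then an empty one

    // Default split silently discards trailing empty strings ...
    System.out.println(Arrays.asList(confValue.split(",")));      // [Zm9v]
    // ... while a negative limit keeps them, which is what the patch uses.
    System.out.println(Arrays.asList(confValue.split(",", -1)));  // [Zm9v, ]
  }
}
{code}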
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
index ff14107..b0e649b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
@@ -238,8 +238,15 @@ public class InputConfigurator extends ConfiguratorBase {
*/
public static Set<Pair<Text,Text>> getFetchedColumns(Class<?> implementingClass, Configuration conf) {
ArgumentChecker.notNull(conf);
-
- return deserializeFetchedColumns(conf.getStringCollection(enumToConfKey(implementingClass, ScanOpts.COLUMNS)));
+ String confValue = conf.get(enumToConfKey(implementingClass, ScanOpts.COLUMNS));
+ List<String> serialized = new ArrayList<String>();
+ if (confValue != null) {
+ // Split and include any trailing empty strings to allow empty column families
+ for (String val : confValue.split(",", -1)) {
+ serialized.add(val);
+ }
+ }
+ return deserializeFetchedColumns(serialized);
}
public static Set<Pair<Text,Text>> deserializeFetchedColumns(Collection<String> serialized) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1661_13eb19c2.diff |
bugs-dot-jar_data_ACCUMULO-1986_2d97b875 | ---
BugID: ACCUMULO-1986
Summary: Validity checks missing for readFields and Thrift deserialization
Description: Classes in o.a.a.core.data (and potentially elsewhere) that support construction
from a Thrift object and/or population from a {{DataInput}} (via a {{readFields()}}
method) often lack data validity checks that the classes' constructors enforce.
The missing checks make it possible for an attacker to create invalid objects by
manipulating the bytes being read. The situation is analogous to the need to check
objects deserialized from their Java serialized form within the {{readObject()}}
method.
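A minimal sketch of the pattern the patch applies, shown on a hypothetical Writable rather than Mutation itself: re-run the same validity checks the constructors would enforce after deserializing.
{code:java}
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical class for illustration; Mutation's actual fields differ.
public class GuardedRecord implements Writable {
  private byte[] row = new byte[0];

  @Override
  public void readFields(DataInput in) throws IOException {
    int len = in.readInt();
    if (len < 0)
      throw new IOException("negative row length: " + len);    // reject hostile input early
    row = new byte[len];
    in.readFully(row);
    if (row.length == 0)
      throw new IllegalArgumentException("null or empty row"); // mirror constructor-time checks
  }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(row.length);
    out.write(row);
  }
}
{code}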
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 407dbc0..56ae7a6 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -223,6 +223,13 @@ public class Mutation implements Writable {
this.data = ByteBufferUtil.toBytes(tmutation.data);
this.entries = tmutation.entries;
this.values = ByteBufferUtil.toBytesList(tmutation.values);
+
+ if (this.row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (this.data == null) {
+ throw new IllegalArgumentException("null serialized data");
+ }
}
public Mutation(Mutation m) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1986_2d97b875.diff |
bugs-dot-jar_data_ACCUMULO-4029_5ca779a0 | ---
BugID: ACCUMULO-4029
Summary: hashCode for Mutation has an unfortunate implementation
Description: |
While looking at how a tablet server processes constraint violations, I happened to look into Mutation's hashCode implementation:
{code}
@Override
public int hashCode() {
return toThrift(false).hashCode();
}
{code}
Clicking through to TMutation hashCode finds this gem:
{code}
@Override
public int hashCode() {
return 0;
}
{code}
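The practical consequence is that any hash-based collection keyed on Mutation degrades to linear probing of a single bucket. A quick self-contained demonstration with a stand-in key class whose hashCode is constant, like the generated TMutation one:
{code:java}
import java.util.HashSet;
import java.util.Set;

public class ConstantHashCodeCost {
  static final class BadKey {
    private final int id;
    BadKey(int id) { this.id = id; }
    @Override public boolean equals(Object o) { return o instanceof BadKey && ((BadKey) o).id == id; }
    @Override public int hashCode() { return 0; }   // every key collides, as with TMutation
  }

  public static void main(String[] args) {
    Set<BadKey> set = new HashSet<BadKey>();
    long start = System.nanoTime();
    for (int i = 0; i < 20000; i++)
      set.add(new BadKey(i));   // each insert walks one ever-growing bucket
    System.out.printf("20000 inserts with a constant hashCode: %d ms%n", (System.nanoTime() - start) / 1000000);
  }
}
{code}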
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 5b052c3..e4e229c 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -598,7 +598,7 @@ public class Mutation implements Writable {
@Override
public int hashCode() {
- return toThrift(false).hashCode();
+ return serializedSnapshot().hashCode();
}
/**
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4029_5ca779a0.diff |
bugs-dot-jar_data_ACCUMULO-1199_813109d7 | ---
BugID: ACCUMULO-1199
Summary: Verify all methods in the ProxyService that take table names actually throw
TableNotFoundException when the table is missing.
Description:
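The crux of the change is translating a wrapped thrift NOTFOUND error into the proxy's own TableNotFoundException rather than a generic AccumuloException. A minimal sketch of that unwrapping, assuming the thrift types referenced in the patch below:
{code:java}
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;

public class TableNotFoundTranslation {
  // Returns the proxy-level exception when the cause is a thrift NOTFOUND, else null.
  static org.apache.accumulo.proxy.thrift.TableNotFoundException asTableNotFound(AccumuloException e) {
    if (e.getCause() instanceof ThriftTableOperationException) {
      ThriftTableOperationException ttoe = (ThriftTableOperationException) e.getCause();
      if (ttoe.type == TableOperationExceptionType.NOTFOUND)
        return new org.apache.accumulo.proxy.thrift.TableNotFoundException(e.toString());
    }
    return null;   // caller falls back to the generic AccumuloException mapping
  }
}
{code}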
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 926c413..c0dee9a 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -50,6 +50,8 @@ import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.admin.ActiveCompaction;
import org.apache.accumulo.core.client.admin.ActiveScan;
import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.client.mock.MockInstance;
import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
@@ -144,7 +146,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
protected Cache<UUID,BatchWriterPlusException> writerCache;
public ProxyServer(Properties props) {
-
+
String useMock = props.getProperty("org.apache.accumulo.proxy.ProxyServer.useMockInstance");
if (useMock != null && Boolean.parseBoolean(useMock))
instance = new MockInstance();
@@ -157,7 +159,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
-
+
scannerCache = CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).maximumSize(1000).removalListener(new CloseScanner()).build();
writerCache = CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).maximumSize(1000).removalListener(new CloseWriter()).build();
@@ -175,21 +177,27 @@ public class ProxyServer implements AccumuloProxy.Iface {
try {
throw ex;
} catch (MutationsRejectedException e) {
- logger.debug(e,e);
- return new org.apache.accumulo.proxy.thrift.MutationsRejectedException(e.toString());
+ logger.debug(e, e);
+ return new org.apache.accumulo.proxy.thrift.MutationsRejectedException(e.toString());
} catch (AccumuloException e) {
- logger.debug(e,e);
+ if (e.getCause() instanceof ThriftTableOperationException) {
+ ThriftTableOperationException ttoe = (ThriftTableOperationException) e.getCause();
+ if (ttoe.type == TableOperationExceptionType.NOTFOUND) {
+ return new org.apache.accumulo.proxy.thrift.TableNotFoundException(e.toString());
+ }
+ }
+ logger.debug(e, e);
return new org.apache.accumulo.proxy.thrift.AccumuloException(e.toString());
} catch (AccumuloSecurityException e) {
- logger.debug(e,e);
+ logger.debug(e, e);
if (e.getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
return new org.apache.accumulo.proxy.thrift.TableNotFoundException(e.toString());
return new org.apache.accumulo.proxy.thrift.AccumuloSecurityException(e.toString());
} catch (TableNotFoundException e) {
- logger.debug(e,e);
+ logger.debug(e, e);
return new org.apache.accumulo.proxy.thrift.TableNotFoundException(e.toString());
} catch (TableExistsException e) {
- logger.debug(e,e);
+ logger.debug(e, e);
return new org.apache.accumulo.proxy.thrift.TableExistsException(e.toString());
} catch (RuntimeException e) {
if (e.getCause() != null) {
@@ -628,7 +636,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
@Override
- public boolean authenticateUser(ByteBuffer login, String principal, Map<String, String> properties) throws TException {
+ public boolean authenticateUser(ByteBuffer login, String principal, Map<String,String> properties) throws TException {
try {
return getConnector(login).securityOperations().authenticateUser(principal, getToken(principal, properties));
} catch (Exception e) {
@@ -840,7 +848,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
scanner.setRanges(ranges);
-
+
if (opts.columns != null) {
for (ScanColumn col : opts.columns) {
if (col.isSetColQualifier())
@@ -850,7 +858,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
}
-
+
UUID uuid = UUID.randomUUID();
ScannerPlusIterator spi = new ScannerPlusIterator();
@@ -1235,7 +1243,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
- private AuthenticationToken getToken(String principal, Map<String, String> properties) throws AccumuloSecurityException, AccumuloException {
+ private AuthenticationToken getToken(String principal, Map<String,String> properties) throws AccumuloSecurityException, AccumuloException {
Properties props = new Properties();
props.putAll(properties);
AuthenticationToken token;
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
index 857320a..f53b6ac 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
@@ -104,17 +104,17 @@ import org.slf4j.LoggerFactory;
public void onlineTable(ByteBuffer login, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
- public void removeConstraint(ByteBuffer login, String tableName, int constraint) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException;
+ public void removeConstraint(ByteBuffer login, String tableName, int constraint) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
public void removeIterator(ByteBuffer login, String tableName, String iterName, Set<IteratorScope> scopes) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
- public void removeTableProperty(ByteBuffer login, String tableName, String property) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException;
+ public void removeTableProperty(ByteBuffer login, String tableName, String property) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
public void renameTable(ByteBuffer login, String oldTableName, String newTableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException, org.apache.thrift.TException;
public void setLocalityGroups(ByteBuffer login, String tableName, Map<String,Set<String>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
- public void setTableProperty(ByteBuffer login, String tableName, String property, String value) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException;
+ public void setTableProperty(ByteBuffer login, String tableName, String property, String value) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
public Set<Range> splitRangeByTablets(ByteBuffer login, String tableName, Range range, int maxSplits) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
@@ -1206,7 +1206,7 @@ import org.slf4j.LoggerFactory;
return;
}
- public void removeConstraint(ByteBuffer login, String tableName, int constraint) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void removeConstraint(ByteBuffer login, String tableName, int constraint) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
send_removeConstraint(login, tableName, constraint);
recv_removeConstraint();
@@ -1221,7 +1221,7 @@ import org.slf4j.LoggerFactory;
sendBase("removeConstraint", args);
}
- public void recv_removeConstraint() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void recv_removeConstraint() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
removeConstraint_result result = new removeConstraint_result();
receiveBase(result, "removeConstraint");
@@ -1231,6 +1231,9 @@ import org.slf4j.LoggerFactory;
if (result.ouch2 != null) {
throw result.ouch2;
}
+ if (result.ouch3 != null) {
+ throw result.ouch3;
+ }
return;
}
@@ -1266,7 +1269,7 @@ import org.slf4j.LoggerFactory;
return;
}
- public void removeTableProperty(ByteBuffer login, String tableName, String property) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void removeTableProperty(ByteBuffer login, String tableName, String property) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
send_removeTableProperty(login, tableName, property);
recv_removeTableProperty();
@@ -1281,7 +1284,7 @@ import org.slf4j.LoggerFactory;
sendBase("removeTableProperty", args);
}
- public void recv_removeTableProperty() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void recv_removeTableProperty() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
removeTableProperty_result result = new removeTableProperty_result();
receiveBase(result, "removeTableProperty");
@@ -1291,6 +1294,9 @@ import org.slf4j.LoggerFactory;
if (result.ouch2 != null) {
throw result.ouch2;
}
+ if (result.ouch3 != null) {
+ throw result.ouch3;
+ }
return;
}
@@ -1359,7 +1365,7 @@ import org.slf4j.LoggerFactory;
return;
}
- public void setTableProperty(ByteBuffer login, String tableName, String property, String value) throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void setTableProperty(ByteBuffer login, String tableName, String property, String value) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
send_setTableProperty(login, tableName, property, value);
recv_setTableProperty();
@@ -1375,7 +1381,7 @@ import org.slf4j.LoggerFactory;
sendBase("setTableProperty", args);
}
- public void recv_setTableProperty() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException
+ public void recv_setTableProperty() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException
{
setTableProperty_result result = new setTableProperty_result();
receiveBase(result, "setTableProperty");
@@ -1385,6 +1391,9 @@ import org.slf4j.LoggerFactory;
if (result.ouch2 != null) {
throw result.ouch2;
}
+ if (result.ouch3 != null) {
+ throw result.ouch3;
+ }
return;
}
@@ -3575,7 +3584,7 @@ import org.slf4j.LoggerFactory;
prot.writeMessageEnd();
}
- public void getResult() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException {
+ public void getResult() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -3654,7 +3663,7 @@ import org.slf4j.LoggerFactory;
prot.writeMessageEnd();
}
- public void getResult() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException {
+ public void getResult() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -3771,7 +3780,7 @@ import org.slf4j.LoggerFactory;
prot.writeMessageEnd();
}
- public void getResult() throws AccumuloException, AccumuloSecurityException, org.apache.thrift.TException {
+ public void getResult() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -5983,6 +5992,8 @@ import org.slf4j.LoggerFactory;
result.ouch1 = ouch1;
} catch (AccumuloSecurityException ouch2) {
result.ouch2 = ouch2;
+ } catch (TableNotFoundException ouch3) {
+ result.ouch3 = ouch3;
}
return result;
}
@@ -6037,6 +6048,8 @@ import org.slf4j.LoggerFactory;
result.ouch1 = ouch1;
} catch (AccumuloSecurityException ouch2) {
result.ouch2 = ouch2;
+ } catch (TableNotFoundException ouch3) {
+ result.ouch3 = ouch3;
}
return result;
}
@@ -6121,6 +6134,8 @@ import org.slf4j.LoggerFactory;
result.ouch1 = ouch1;
} catch (AccumuloSecurityException ouch2) {
result.ouch2 = ouch2;
+ } catch (TableNotFoundException ouch3) {
+ result.ouch3 = ouch3;
}
return result;
}
@@ -39944,6 +39959,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField OUCH1_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final org.apache.thrift.protocol.TField OUCH2_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+ private static final org.apache.thrift.protocol.TField OUCH3_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch3", org.apache.thrift.protocol.TType.STRUCT, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -39953,11 +39969,13 @@ import org.slf4j.LoggerFactory;
public AccumuloException ouch1; // required
public AccumuloSecurityException ouch2; // required
+ public TableNotFoundException ouch3; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
OUCH1((short)1, "ouch1"),
- OUCH2((short)2, "ouch2");
+ OUCH2((short)2, "ouch2"),
+ OUCH3((short)3, "ouch3");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -39976,6 +39994,8 @@ import org.slf4j.LoggerFactory;
return OUCH1;
case 2: // OUCH2
return OUCH2;
+ case 3: // OUCH3
+ return OUCH3;
default:
return null;
}
@@ -40023,6 +40043,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
tmpMap.put(_Fields.OUCH2, new org.apache.thrift.meta_data.FieldMetaData("ouch2", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.OUCH3, new org.apache.thrift.meta_data.FieldMetaData("ouch3", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(removeConstraint_result.class, metaDataMap);
}
@@ -40032,11 +40054,13 @@ import org.slf4j.LoggerFactory;
public removeConstraint_result(
AccumuloException ouch1,
- AccumuloSecurityException ouch2)
+ AccumuloSecurityException ouch2,
+ TableNotFoundException ouch3)
{
this();
this.ouch1 = ouch1;
this.ouch2 = ouch2;
+ this.ouch3 = ouch3;
}
/**
@@ -40049,6 +40073,9 @@ import org.slf4j.LoggerFactory;
if (other.isSetOuch2()) {
this.ouch2 = new AccumuloSecurityException(other.ouch2);
}
+ if (other.isSetOuch3()) {
+ this.ouch3 = new TableNotFoundException(other.ouch3);
+ }
}
public removeConstraint_result deepCopy() {
@@ -40059,6 +40086,7 @@ import org.slf4j.LoggerFactory;
public void clear() {
this.ouch1 = null;
this.ouch2 = null;
+ this.ouch3 = null;
}
public AccumuloException getOuch1() {
@@ -40109,6 +40137,30 @@ import org.slf4j.LoggerFactory;
}
}
+ public TableNotFoundException getOuch3() {
+ return this.ouch3;
+ }
+
+ public removeConstraint_result setOuch3(TableNotFoundException ouch3) {
+ this.ouch3 = ouch3;
+ return this;
+ }
+
+ public void unsetOuch3() {
+ this.ouch3 = null;
+ }
+
+ /** Returns true if field ouch3 is set (has been assigned a value) and false otherwise */
+ public boolean isSetOuch3() {
+ return this.ouch3 != null;
+ }
+
+ public void setOuch3IsSet(boolean value) {
+ if (!value) {
+ this.ouch3 = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case OUCH1:
@@ -40127,6 +40179,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case OUCH3:
+ if (value == null) {
+ unsetOuch3();
+ } else {
+ setOuch3((TableNotFoundException)value);
+ }
+ break;
+
}
}
@@ -40138,6 +40198,9 @@ import org.slf4j.LoggerFactory;
case OUCH2:
return getOuch2();
+ case OUCH3:
+ return getOuch3();
+
}
throw new IllegalStateException();
}
@@ -40153,6 +40216,8 @@ import org.slf4j.LoggerFactory;
return isSetOuch1();
case OUCH2:
return isSetOuch2();
+ case OUCH3:
+ return isSetOuch3();
}
throw new IllegalStateException();
}
@@ -40188,6 +40253,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_ouch3 = true && this.isSetOuch3();
+ boolean that_present_ouch3 = true && that.isSetOuch3();
+ if (this_present_ouch3 || that_present_ouch3) {
+ if (!(this_present_ouch3 && that_present_ouch3))
+ return false;
+ if (!this.ouch3.equals(that.ouch3))
+ return false;
+ }
+
return true;
}
@@ -40224,6 +40298,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetOuch3()).compareTo(typedOther.isSetOuch3());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetOuch3()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ouch3, typedOther.ouch3);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -40259,6 +40343,14 @@ import org.slf4j.LoggerFactory;
sb.append(this.ouch2);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("ouch3:");
+ if (this.ouch3 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.ouch3);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -40320,6 +40412,15 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // OUCH3
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -40345,6 +40446,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.write(oprot);
oprot.writeFieldEnd();
}
+ if (struct.ouch3 != null) {
+ oprot.writeFieldBegin(OUCH3_FIELD_DESC);
+ struct.ouch3.write(oprot);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -40369,19 +40475,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOuch2()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetOuch3()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetOuch1()) {
struct.ouch1.write(oprot);
}
if (struct.isSetOuch2()) {
struct.ouch2.write(oprot);
}
+ if (struct.isSetOuch3()) {
+ struct.ouch3.write(oprot);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, removeConstraint_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.ouch1 = new AccumuloException();
struct.ouch1.read(iprot);
@@ -40392,6 +40504,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.read(iprot);
struct.setOuch2IsSet(true);
}
+ if (incoming.get(2)) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ }
}
}
@@ -42244,6 +42361,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField OUCH1_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final org.apache.thrift.protocol.TField OUCH2_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+ private static final org.apache.thrift.protocol.TField OUCH3_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch3", org.apache.thrift.protocol.TType.STRUCT, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -42253,11 +42371,13 @@ import org.slf4j.LoggerFactory;
public AccumuloException ouch1; // required
public AccumuloSecurityException ouch2; // required
+ public TableNotFoundException ouch3; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
OUCH1((short)1, "ouch1"),
- OUCH2((short)2, "ouch2");
+ OUCH2((short)2, "ouch2"),
+ OUCH3((short)3, "ouch3");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -42276,6 +42396,8 @@ import org.slf4j.LoggerFactory;
return OUCH1;
case 2: // OUCH2
return OUCH2;
+ case 3: // OUCH3
+ return OUCH3;
default:
return null;
}
@@ -42323,6 +42445,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
tmpMap.put(_Fields.OUCH2, new org.apache.thrift.meta_data.FieldMetaData("ouch2", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.OUCH3, new org.apache.thrift.meta_data.FieldMetaData("ouch3", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(removeTableProperty_result.class, metaDataMap);
}
@@ -42332,11 +42456,13 @@ import org.slf4j.LoggerFactory;
public removeTableProperty_result(
AccumuloException ouch1,
- AccumuloSecurityException ouch2)
+ AccumuloSecurityException ouch2,
+ TableNotFoundException ouch3)
{
this();
this.ouch1 = ouch1;
this.ouch2 = ouch2;
+ this.ouch3 = ouch3;
}
/**
@@ -42349,6 +42475,9 @@ import org.slf4j.LoggerFactory;
if (other.isSetOuch2()) {
this.ouch2 = new AccumuloSecurityException(other.ouch2);
}
+ if (other.isSetOuch3()) {
+ this.ouch3 = new TableNotFoundException(other.ouch3);
+ }
}
public removeTableProperty_result deepCopy() {
@@ -42359,6 +42488,7 @@ import org.slf4j.LoggerFactory;
public void clear() {
this.ouch1 = null;
this.ouch2 = null;
+ this.ouch3 = null;
}
public AccumuloException getOuch1() {
@@ -42409,6 +42539,30 @@ import org.slf4j.LoggerFactory;
}
}
+ public TableNotFoundException getOuch3() {
+ return this.ouch3;
+ }
+
+ public removeTableProperty_result setOuch3(TableNotFoundException ouch3) {
+ this.ouch3 = ouch3;
+ return this;
+ }
+
+ public void unsetOuch3() {
+ this.ouch3 = null;
+ }
+
+ /** Returns true if field ouch3 is set (has been assigned a value) and false otherwise */
+ public boolean isSetOuch3() {
+ return this.ouch3 != null;
+ }
+
+ public void setOuch3IsSet(boolean value) {
+ if (!value) {
+ this.ouch3 = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case OUCH1:
@@ -42427,6 +42581,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case OUCH3:
+ if (value == null) {
+ unsetOuch3();
+ } else {
+ setOuch3((TableNotFoundException)value);
+ }
+ break;
+
}
}
@@ -42438,6 +42600,9 @@ import org.slf4j.LoggerFactory;
case OUCH2:
return getOuch2();
+ case OUCH3:
+ return getOuch3();
+
}
throw new IllegalStateException();
}
@@ -42453,6 +42618,8 @@ import org.slf4j.LoggerFactory;
return isSetOuch1();
case OUCH2:
return isSetOuch2();
+ case OUCH3:
+ return isSetOuch3();
}
throw new IllegalStateException();
}
@@ -42488,6 +42655,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_ouch3 = true && this.isSetOuch3();
+ boolean that_present_ouch3 = true && that.isSetOuch3();
+ if (this_present_ouch3 || that_present_ouch3) {
+ if (!(this_present_ouch3 && that_present_ouch3))
+ return false;
+ if (!this.ouch3.equals(that.ouch3))
+ return false;
+ }
+
return true;
}
@@ -42524,6 +42700,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetOuch3()).compareTo(typedOther.isSetOuch3());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetOuch3()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ouch3, typedOther.ouch3);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -42559,6 +42745,14 @@ import org.slf4j.LoggerFactory;
sb.append(this.ouch2);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("ouch3:");
+ if (this.ouch3 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.ouch3);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -42620,6 +42814,15 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // OUCH3
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -42645,6 +42848,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.write(oprot);
oprot.writeFieldEnd();
}
+ if (struct.ouch3 != null) {
+ oprot.writeFieldBegin(OUCH3_FIELD_DESC);
+ struct.ouch3.write(oprot);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -42669,19 +42877,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOuch2()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetOuch3()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetOuch1()) {
struct.ouch1.write(oprot);
}
if (struct.isSetOuch2()) {
struct.ouch2.write(oprot);
}
+ if (struct.isSetOuch3()) {
+ struct.ouch3.write(oprot);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, removeTableProperty_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.ouch1 = new AccumuloException();
struct.ouch1.read(iprot);
@@ -42692,6 +42906,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.read(iprot);
struct.setOuch2IsSet(true);
}
+ if (incoming.get(2)) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ }
}
}
@@ -45818,6 +46037,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField OUCH1_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final org.apache.thrift.protocol.TField OUCH2_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+ private static final org.apache.thrift.protocol.TField OUCH3_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch3", org.apache.thrift.protocol.TType.STRUCT, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -45827,11 +46047,13 @@ import org.slf4j.LoggerFactory;
public AccumuloException ouch1; // required
public AccumuloSecurityException ouch2; // required
+ public TableNotFoundException ouch3; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
OUCH1((short)1, "ouch1"),
- OUCH2((short)2, "ouch2");
+ OUCH2((short)2, "ouch2"),
+ OUCH3((short)3, "ouch3");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -45850,6 +46072,8 @@ import org.slf4j.LoggerFactory;
return OUCH1;
case 2: // OUCH2
return OUCH2;
+ case 3: // OUCH3
+ return OUCH3;
default:
return null;
}
@@ -45897,6 +46121,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
tmpMap.put(_Fields.OUCH2, new org.apache.thrift.meta_data.FieldMetaData("ouch2", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.OUCH3, new org.apache.thrift.meta_data.FieldMetaData("ouch3", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setTableProperty_result.class, metaDataMap);
}
@@ -45906,11 +46132,13 @@ import org.slf4j.LoggerFactory;
public setTableProperty_result(
AccumuloException ouch1,
- AccumuloSecurityException ouch2)
+ AccumuloSecurityException ouch2,
+ TableNotFoundException ouch3)
{
this();
this.ouch1 = ouch1;
this.ouch2 = ouch2;
+ this.ouch3 = ouch3;
}
/**
@@ -45923,6 +46151,9 @@ import org.slf4j.LoggerFactory;
if (other.isSetOuch2()) {
this.ouch2 = new AccumuloSecurityException(other.ouch2);
}
+ if (other.isSetOuch3()) {
+ this.ouch3 = new TableNotFoundException(other.ouch3);
+ }
}
public setTableProperty_result deepCopy() {
@@ -45933,6 +46164,7 @@ import org.slf4j.LoggerFactory;
public void clear() {
this.ouch1 = null;
this.ouch2 = null;
+ this.ouch3 = null;
}
public AccumuloException getOuch1() {
@@ -45983,6 +46215,30 @@ import org.slf4j.LoggerFactory;
}
}
+ public TableNotFoundException getOuch3() {
+ return this.ouch3;
+ }
+
+ public setTableProperty_result setOuch3(TableNotFoundException ouch3) {
+ this.ouch3 = ouch3;
+ return this;
+ }
+
+ public void unsetOuch3() {
+ this.ouch3 = null;
+ }
+
+ /** Returns true if field ouch3 is set (has been assigned a value) and false otherwise */
+ public boolean isSetOuch3() {
+ return this.ouch3 != null;
+ }
+
+ public void setOuch3IsSet(boolean value) {
+ if (!value) {
+ this.ouch3 = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case OUCH1:
@@ -46001,6 +46257,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case OUCH3:
+ if (value == null) {
+ unsetOuch3();
+ } else {
+ setOuch3((TableNotFoundException)value);
+ }
+ break;
+
}
}
@@ -46012,6 +46276,9 @@ import org.slf4j.LoggerFactory;
case OUCH2:
return getOuch2();
+ case OUCH3:
+ return getOuch3();
+
}
throw new IllegalStateException();
}
@@ -46027,6 +46294,8 @@ import org.slf4j.LoggerFactory;
return isSetOuch1();
case OUCH2:
return isSetOuch2();
+ case OUCH3:
+ return isSetOuch3();
}
throw new IllegalStateException();
}
@@ -46062,6 +46331,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_ouch3 = true && this.isSetOuch3();
+ boolean that_present_ouch3 = true && that.isSetOuch3();
+ if (this_present_ouch3 || that_present_ouch3) {
+ if (!(this_present_ouch3 && that_present_ouch3))
+ return false;
+ if (!this.ouch3.equals(that.ouch3))
+ return false;
+ }
+
return true;
}
@@ -46098,6 +46376,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetOuch3()).compareTo(typedOther.isSetOuch3());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetOuch3()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ouch3, typedOther.ouch3);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -46133,6 +46421,14 @@ import org.slf4j.LoggerFactory;
sb.append(this.ouch2);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("ouch3:");
+ if (this.ouch3 == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.ouch3);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -46194,6 +46490,15 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // OUCH3
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -46219,6 +46524,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.write(oprot);
oprot.writeFieldEnd();
}
+ if (struct.ouch3 != null) {
+ oprot.writeFieldBegin(OUCH3_FIELD_DESC);
+ struct.ouch3.write(oprot);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -46243,19 +46553,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOuch2()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetOuch3()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetOuch1()) {
struct.ouch1.write(oprot);
}
if (struct.isSetOuch2()) {
struct.ouch2.write(oprot);
}
+ if (struct.isSetOuch3()) {
+ struct.ouch3.write(oprot);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, setTableProperty_result struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.ouch1 = new AccumuloException();
struct.ouch1.read(iprot);
@@ -46266,6 +46582,11 @@ import org.slf4j.LoggerFactory;
struct.ouch2.read(iprot);
struct.setOuch2IsSet(true);
}
+ if (incoming.get(2)) {
+ struct.ouch3 = new TableNotFoundException();
+ struct.ouch3.read(iprot);
+ struct.setOuch3IsSet(true);
+ }
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1199_813109d7.diff |
bugs-dot-jar_data_ACCUMULO-3229_891584fb | ---
BugID: ACCUMULO-3229
Summary: Shell displays authTimeout poorly
Description: |-
The authTimeout in the shell is displayed badly when executing {{about -v}}.
Even though it is configured in integer minutes, it is converted to seconds for display as a floating point number with 2 decimals. This makes no sense, since the decimals will always be {{.00}}.
We can keep the units in seconds, I guess, but this needs to be displayed with {{%ds}} not {{%.2fs}}. This was broken in ACCUMULO-3224 by using TimeUnit to convert the number, instead of dividing by 1000.0 as we were doing manually before.
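A short illustration of the two format strings side by side (10 minutes used as a sample timeout):
{code:java}
import java.util.concurrent.TimeUnit;

public class AuthTimeoutDisplay {
  public static void main(String[] args) {
    long authTimeout = TimeUnit.MINUTES.toNanos(10);            // stored internally in nanoseconds
    long seconds = TimeUnit.NANOSECONDS.toSeconds(authTimeout);

    // Old style: a floating point rendering whose decimals are always .00
    System.out.print(String.format("%.2fs%n", (double) seconds));  // 600.00s
    // Fixed style: a plain integral number of seconds
    System.out.print(String.format("%ds%n", seconds));             // 600s
  }
}
{code}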
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index bb3c06e..fa0f5d4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -493,7 +493,7 @@ public class Shell extends ShellOptions {
if (disableAuthTimeout)
sb.append("- Authorization timeout: disabled\n");
else
- sb.append("- Authorization timeout: ").append(String.format("%.2fs%n", TimeUnit.NANOSECONDS.toSeconds(authTimeout)));
+ sb.append("- Authorization timeout: ").append(String.format("%ds%n", TimeUnit.NANOSECONDS.toSeconds(authTimeout)));
sb.append("- Debug: ").append(isDebuggingEnabled() ? "on" : "off").append("\n");
if (!scanIteratorOptions.isEmpty()) {
for (Entry<String,List<IteratorSetting>> entry : scanIteratorOptions.entrySet()) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3229_891584fb.diff |
bugs-dot-jar_data_ACCUMULO-2928_f99b5654 | ---
BugID: ACCUMULO-2928
Summary: Missing toString, hashCode and equals methods on BatchWriterConfig
Description: Tried to test equality of two BatchWriterConfig objects and found they're missing all of the methods from Object that they should be overriding.
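A minimal reproduction of the expectation, using only setters BatchWriterConfig already exposes: two identically configured instances should compare equal and hash the same, which only holds once the methods below are added.
{code:java}
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriterConfig;

public class BatchWriterConfigEquality {
  public static void main(String[] args) {
    BatchWriterConfig a = new BatchWriterConfig().setMaxMemory(1024 * 1024).setTimeout(30, TimeUnit.SECONDS);
    BatchWriterConfig b = new BatchWriterConfig().setMaxMemory(1024 * 1024).setTimeout(30, TimeUnit.SECONDS);

    // Before the patch this falls back to Object identity and prints false/false;
    // with value-based equals/hashCode it prints true/true.
    System.out.println(a.equals(b));
    System.out.println(a.hashCode() == b.hashCode());
  }
}
{code}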
diff --git a/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java b/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
index d3ad3fe..28955f5 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
@@ -33,19 +34,19 @@ import org.apache.hadoop.util.StringUtils;
* @since 1.5.0
*/
public class BatchWriterConfig implements Writable {
-
+
private static final Long DEFAULT_MAX_MEMORY = 50 * 1024 * 1024l;
private Long maxMemory = null;
-
+
private static final Long DEFAULT_MAX_LATENCY = 2 * 60 * 1000l;
private Long maxLatency = null;
-
+
private static final Long DEFAULT_TIMEOUT = Long.MAX_VALUE;
private Long timeout = null;
-
+
private static final Integer DEFAULT_MAX_WRITE_THREADS = 3;
private Integer maxWriteThreads = null;
-
+
/**
* Sets the maximum memory to batch before writing. The smaller this value, the more frequently the {@link BatchWriter} will write.<br />
* If set to a value smaller than a single mutation, then it will {@link BatchWriter#flush()} after each added mutation. Must be non-negative.
@@ -65,7 +66,7 @@ public class BatchWriterConfig implements Writable {
this.maxMemory = maxMemory;
return this;
}
-
+
/**
* Sets the maximum amount of time to hold the data in memory before flushing it to servers.<br />
* For no maximum, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
@@ -89,7 +90,7 @@ public class BatchWriterConfig implements Writable {
public BatchWriterConfig setMaxLatency(long maxLatency, TimeUnit timeUnit) {
if (maxLatency < 0)
throw new IllegalArgumentException("Negative max latency not allowed " + maxLatency);
-
+
if (maxLatency == 0)
this.maxLatency = Long.MAX_VALUE;
else
@@ -97,7 +98,7 @@ public class BatchWriterConfig implements Writable {
this.maxLatency = Math.max(1, timeUnit.toMillis(maxLatency));
return this;
}
-
+
/**
* Sets the maximum amount of time an unresponsive server will be re-tried. When this timeout is exceeded, the {@link BatchWriter} should throw an exception.<br />
* For no timeout, set to zero, or {@link Long#MAX_VALUE} with {@link TimeUnit#MILLISECONDS}.
@@ -121,7 +122,7 @@ public class BatchWriterConfig implements Writable {
public BatchWriterConfig setTimeout(long timeout, TimeUnit timeUnit) {
if (timeout < 0)
throw new IllegalArgumentException("Negative timeout not allowed " + timeout);
-
+
if (timeout == 0)
this.timeout = Long.MAX_VALUE;
else
@@ -129,7 +130,7 @@ public class BatchWriterConfig implements Writable {
this.timeout = Math.max(1, timeUnit.toMillis(timeout));
return this;
}
-
+
/**
* Sets the maximum number of threads to use for writing data to the tablet servers.
*
@@ -145,27 +146,27 @@ public class BatchWriterConfig implements Writable {
public BatchWriterConfig setMaxWriteThreads(int maxWriteThreads) {
if (maxWriteThreads <= 0)
throw new IllegalArgumentException("Max threads must be positive " + maxWriteThreads);
-
+
this.maxWriteThreads = maxWriteThreads;
return this;
}
-
+
public long getMaxMemory() {
return maxMemory != null ? maxMemory : DEFAULT_MAX_MEMORY;
}
-
+
public long getMaxLatency(TimeUnit timeUnit) {
return timeUnit.convert(maxLatency != null ? maxLatency : DEFAULT_MAX_LATENCY, TimeUnit.MILLISECONDS);
}
-
+
public long getTimeout(TimeUnit timeUnit) {
return timeUnit.convert(timeout != null ? timeout : DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
}
-
+
public int getMaxWriteThreads() {
return maxWriteThreads != null ? maxWriteThreads : DEFAULT_MAX_WRITE_THREADS;
}
-
+
@Override
public void write(DataOutput out) throws IOException {
// write this out in a human-readable way
@@ -179,7 +180,7 @@ public class BatchWriterConfig implements Writable {
if (timeout != null)
addField(fields, "timeout", timeout);
String output = StringUtils.join(",", fields);
-
+
byte[] bytes = output.getBytes(Charset.forName("UTF-8"));
byte[] len = String.format("%6s#", Integer.toString(bytes.length, 36)).getBytes("UTF-8");
if (len.length != 7)
@@ -187,13 +188,13 @@ public class BatchWriterConfig implements Writable {
out.write(len);
out.write(bytes);
}
-
+
private void addField(List<String> fields, String name, Object value) {
String key = StringUtils.escapeString(name, '\\', new char[] {',', '='});
String val = StringUtils.escapeString(String.valueOf(value), '\\', new char[] {',', '='});
fields.add(key + '=' + val);
}
-
+
@Override
public void readFields(DataInput in) throws IOException {
byte[] len = new byte[7];
@@ -203,7 +204,7 @@ public class BatchWriterConfig implements Writable {
throw new IllegalStateException("length was not encoded correctly");
byte[] bytes = new byte[Integer.parseInt(strLen.substring(strLen.lastIndexOf(' ') + 1, strLen.length() - 1), 36)];
in.readFully(bytes);
-
+
String strFields = new String(bytes, Charset.forName("UTF-8"));
String[] fields = StringUtils.split(strFields, '\\', ',');
for (String field : fields) {
@@ -223,4 +224,70 @@ public class BatchWriterConfig implements Writable {
}
}
}
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof BatchWriterConfig) {
+ BatchWriterConfig other = (BatchWriterConfig) o;
+
+ if (null != maxMemory) {
+ if (!maxMemory.equals(other.maxMemory)) {
+ return false;
+ }
+ } else {
+ if (null != other.maxMemory) {
+ return false;
+ }
+ }
+
+ if (null != maxLatency) {
+ if (!maxLatency.equals(other.maxLatency)) {
+ return false;
+ }
+ } else {
+ if (null != other.maxLatency) {
+ return false;
+ }
+ }
+
+ if (null != maxWriteThreads) {
+ if (!maxWriteThreads.equals(other.maxWriteThreads)) {
+ return false;
+ }
+ } else {
+ if (null != other.maxWriteThreads) {
+ return false;
+ }
+ }
+
+ if (null != timeout) {
+ if (!timeout.equals(other.timeout)) {
+ return false;
+ }
+ } else {
+ if (null != other.timeout) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ HashCodeBuilder hcb = new HashCodeBuilder();
+ hcb.append(maxMemory).append(maxLatency).append(maxWriteThreads).append(timeout);
+ return hcb.toHashCode();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(32);
+ sb.append("[maxMemory=").append(getMaxMemory()).append(", maxLatency=").append(getMaxLatency(TimeUnit.MILLISECONDS)).append(", maxWriteThreads=")
+ .append(getMaxWriteThreads()).append(", timeout=").append(getTimeout(TimeUnit.MILLISECONDS)).append("]");
+ return sb.toString();
+ }
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2928_f99b5654.diff |
bugs-dot-jar_data_ACCUMULO-1192_9476b877 | ---
BugID: ACCUMULO-1192
Summary: '"du" on a table without files does not report'
Description: |
{noformat}
shell> createtable t
shell> du t
shell>
{noformat}
expected:
{noformat}
shell> du t
0 t
shell>
{noformat}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java b/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
index 64d5970..6a61c50 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/TableDiskUsage.java
@@ -144,12 +144,17 @@ public class TableDiskUsage {
tdu.addTable(tableId);
HashSet<String> tablesReferenced = new HashSet<String>(tableIds);
+ HashSet<String> emptyTableIds = new HashSet<String>();
for (String tableId : tableIds) {
Scanner mdScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
mdScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+ if(!mdScanner.iterator().hasNext()) {
+ emptyTableIds.add(tableId);
+ }
+
for (Entry<Key,Value> entry : mdScanner) {
String file = entry.getKey().getColumnQualifier().toString();
if (file.startsWith("../")) {
@@ -215,6 +220,14 @@ public class TableDiskUsage {
usage.put(tableNames, entry.getValue());
}
+
+ if(!emptyTableIds.isEmpty()) {
+ TreeSet<String> emptyTables = new TreeSet<String>();
+ for (String tableId : emptyTableIds) {
+ emptyTables.add(reverseTableIdMap.get(tableId));
+ }
+ usage.put(emptyTables, 0L);
+ }
for (Entry<TreeSet<String>,Long> entry : usage.entrySet())
printer.print(String.format("%,24d %s", entry.getValue(), entry.getKey()));
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1192_9476b877.diff |
bugs-dot-jar_data_ACCUMULO-1312_d9ab8449 | ---
BugID: ACCUMULO-1312
Summary: Don't cache credentials in client-side Connector
Description: |
AuthenticationToken objects are Destroyable. However, that capability cannot be exercised properly in client code, because the Connector immediately serializes the credentials and stores them for as long as the Connector lives.
It should be possible to destroy a token after creating a Connector, thereby forcing any further RPC calls initiated by that Connector to fail authentication. This means that serialization on the client side to a TCredentials object needs to occur just before each RPC call.
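A sketch of the client-side lifecycle this enables, assuming a ZooKeeper-backed instance (the instance and ZooKeeper names are placeholders):
{code:java}
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class DestroyTokenAfterConnect {
  public static void main(String[] args) throws Exception {
    Instance instance = new ZooKeeperInstance("myInstance", "zk1:2181");   // placeholder names
    PasswordToken token = new PasswordToken("secret");
    Connector conn = instance.getConnector("user", token);

    token.destroy();                           // AuthenticationToken is Destroyable
    System.out.println(token.isDestroyed());   // true

    // With serialization deferred to just before each RPC, any later call through
    // this Connector (e.g. conn.tableOperations().list()) should now fail to
    // authenticate instead of silently reusing a cached TCredentials copy.
  }
}
{code}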
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
index 14df55f..bd11569 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ConnectorImpl.java
@@ -55,6 +55,9 @@ public class ConnectorImpl extends Connector {
public ConnectorImpl(final Instance instance, Credentials cred) throws AccumuloException, AccumuloSecurityException {
ArgumentChecker.notNull(instance, cred);
+ if (cred.getToken().isDestroyed())
+ throw new AccumuloSecurityException(cred.getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED);
+
this.instance = instance;
this.credentials = cred;
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
index 4af2ea5..80ec513 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockConnector.java
@@ -18,6 +18,7 @@ package org.apache.accumulo.core.client.mock;
import java.util.concurrent.TimeUnit;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchDeleter;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.BatchWriter;
@@ -32,7 +33,10 @@ import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.admin.InstanceOperations;
import org.apache.accumulo.core.client.admin.SecurityOperations;
import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.NullToken;
import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.Credentials;
public class MockConnector extends Connector {
@@ -40,12 +44,14 @@ public class MockConnector extends Connector {
private final MockAccumulo acu;
private final Instance instance;
- MockConnector(String username, MockInstance instance) {
- this(username, new MockAccumulo(MockInstance.getDefaultFileSystem()), instance);
+ MockConnector(String username, MockInstance instance) throws AccumuloSecurityException {
+ this(new Credentials(username, new NullToken()), new MockAccumulo(MockInstance.getDefaultFileSystem()), instance);
}
- MockConnector(String username, MockAccumulo acu, MockInstance instance) {
- this.username = username;
+ MockConnector(Credentials credentials, MockAccumulo acu, MockInstance instance) throws AccumuloSecurityException {
+ if (credentials.getToken().isDestroyed())
+ throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED);
+ this.username = credentials.getPrincipal();
this.acu = acu;
this.instance = instance;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
index f37994d..2ba8c67 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockInstance.java
@@ -31,6 +31,7 @@ import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.security.Credentials;
import org.apache.accumulo.core.util.ByteBufferUtil;
import org.apache.accumulo.core.util.CachedConfiguration;
import org.apache.accumulo.core.util.TextUtil;
@@ -150,7 +151,7 @@ public class MockInstance implements Instance {
@Override
public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
- Connector conn = new MockConnector(principal, acu, this);
+ Connector conn = new MockConnector(new Credentials(principal, token), acu, this);
if (!acu.users.containsKey(principal))
conn.securityOperations().createLocalUser(principal, (PasswordToken) token);
else if (!acu.users.get(principal).token.equals(token))
diff --git a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
index c39fb8d..11bbf49 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/security/tokens/PasswordToken.java
@@ -46,7 +46,9 @@ public class PasswordToken implements AuthenticationToken {
/**
* Constructor for use with {@link Writable}. Call {@link #readFields(DataInput)}.
*/
- public PasswordToken() {}
+ public PasswordToken() {
+ password = new byte[0];
+ }
/**
* Constructs a token from a copy of the password. Destroying the argument after construction will not destroy the copy in this token, and destroying this
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
index 71a09f6..0552e7e 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Credentials.java
@@ -22,6 +22,7 @@ import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.AuthenticationTokenSerializer;
import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -58,7 +59,11 @@ public class Credentials {
* {@link AuthenticationToken}, so this should be used just before placing on the wire, and references to it should be tightly controlled.
*/
public TCredentials toThrift(Instance instance) {
- return new TCredentials(principal, token.getClass().getName(), ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(token)), instance.getInstanceID());
+ TCredentials tCreds = new TCredentials(getPrincipal(), getToken().getClass().getName(),
+ ByteBuffer.wrap(AuthenticationTokenSerializer.serialize(getToken())), instance.getInstanceID());
+ if (getToken().isDestroyed())
+ throw new RuntimeException("Token has been destroyed", new AccumuloSecurityException(getPrincipal(), SecurityErrorCode.TOKEN_EXPIRED));
+ return tCreds;
}
/**
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1312_d9ab8449.diff |
bugs-dot-jar_data_ACCUMULO-1732_941e3cb1 | ---
BugID: ACCUMULO-1732
Summary: Resolve table name to table id once in Accumulo input format
Description: "AccumuloInputFormat (and I suspect AccumuloOutputFormat) sends the table
name to each mapper. The mapper uses this table name to create a scanner. In the
case of the following events a map reduce job could read from two different table
ids. \n\n # start M/R job reading table A\n # rename table A (tableId=1) to table
C\n # rename table B (tableId=2) to table A\n\nIf the input format passed table
id 1 to the mappers, then the renames would not cause a problem."
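A minimal sketch of the idea behind the fix, using only the public client API (the helper class and method names are illustrative, not part of the patch): resolve the table name to its immutable id once at job-submission time, and carry the id rather than the name in the job configuration and splits.
{code}
import java.util.Map;

import org.apache.accumulo.core.client.Connector;

public class TableIdResolver {
  // Resolve the table name to its id exactly once. A later rename changes the
  // name-to-id mapping, but an already-resolved id keeps pointing at the same table.
  public static String resolveOnce(Connector conn, String tableName) {
    Map<String,String> idsByName = conn.tableOperations().tableIdMap();
    String tableId = idsByName.get(tableName);
    if (tableId == null) {
      throw new IllegalArgumentException("Table " + tableName + " does not exist");
    }
    return tableId;
  }
}
{code}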
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index c89c5d7..eaf99cb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -371,6 +371,8 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
log.debug("Authorizations are: " + authorizations);
if (tableConfig.isOfflineScan()) {
scanner = new OfflineScanner(instance, new Credentials(principal, token), split.getTableId(), authorizations);
+ } else if (instance instanceof MockInstance) {
+ scanner = instance.getConnector(principal, token).createScanner(split.getTableName(), authorizations);
} else {
scanner = new ScannerImpl(instance, new Credentials(principal, token), split.getTableId(), authorizations);
}
@@ -382,7 +384,7 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
log.info("Using local iterators");
scanner = new ClientSideIteratorScanner(scanner);
}
- setupIterators(job, scanner, split.getTableId());
+ setupIterators(job, scanner, split.getTableName());
} catch (Exception e) {
throw new IOException(e);
}
@@ -460,7 +462,11 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
TabletLocator tl;
try {
// resolve table name to id once, and use id from this point forward
- tableId = Tables.getTableId(getInstance(job), tableName);
+ Instance instance = getInstance(job);
+ if (instance instanceof MockInstance)
+ tableId = "";
+ else
+ tableId = Tables.getTableId(instance, tableName);
if (tableConfig.isOfflineScan()) {
binnedRanges = binOfflineTable(job, tableId, ranges);
while (binnedRanges == null) {
@@ -469,7 +475,6 @@ public abstract class AbstractInputFormat<K,V> implements InputFormat<K,V> {
binnedRanges = binOfflineTable(job, tableId, ranges);
}
} else {
- Instance instance = getInstance(job);
tl = getTabletLocator(job, tableId);
// its possible that the cache could contain complete, but old information about a tables tablets... so clear it
tl.invalidateCache();
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index 74f8f8b..d426caf 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -386,6 +386,8 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
log.debug("Authorizations are: " + authorizations);
if (tableConfig.isOfflineScan()) {
scanner = new OfflineScanner(instance, new Credentials(principal, token), split.getTableId(), authorizations);
+ } else if (instance instanceof MockInstance) {
+ scanner = instance.getConnector(principal, token).createScanner(split.getTableName(), authorizations);
} else {
scanner = new ScannerImpl(instance, new Credentials(principal, token), split.getTableId(), authorizations);
}
@@ -397,7 +399,7 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
log.info("Using local iterators");
scanner = new ClientSideIteratorScanner(scanner);
}
- setupIterators(attempt, scanner, split.getTableId());
+ setupIterators(attempt, scanner, split.getTableName());
} catch (Exception e) {
throw new IOException(e);
}
@@ -488,7 +490,11 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
TabletLocator tl;
try {
// resolve table name to id once, and use id from this point forward
- tableId = Tables.getTableId(getInstance(context), tableName);
+ Instance instance = getInstance(context);
+ if (instance instanceof MockInstance)
+ tableId = "";
+ else
+ tableId = Tables.getTableId(instance, tableName);
if (tableConfig.isOfflineScan()) {
binnedRanges = binOfflineTable(context, tableId, ranges);
while (binnedRanges == null) {
@@ -498,7 +504,6 @@ public abstract class AbstractInputFormat<K,V> extends InputFormat<K,V> {
}
} else {
- Instance instance = getInstance(context);
tl = getTabletLocator(context, tableId);
// its possible that the cache could contain complete, but old information about a tables tablets... so clear it
tl.invalidateCache();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1732_941e3cb1.diff |
bugs-dot-jar_data_ACCUMULO-218_15476a0d | ---
BugID: ACCUMULO-218
Summary: Mock Accumulo Inverts order of mutations w/ same timestamp
Description: Mock accumulo has different behavior than real accumulo when the same
key is updated in the same millisecond. The hidden in memory map counter in mock
accumulo needs to sort descending.
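A simplified model of the intended ordering (the class below is a hypothetical stand-in, not the real MockMemKey): when the same key is updated twice in the same millisecond, the entry with the higher insertion count must sort first so a scan sees the newest value.
{code}
final class CountedKey implements Comparable<CountedKey> {
  final String key;  // stand-in for an Accumulo Key
  final long count;  // insertion counter assigned by the in-memory map

  CountedKey(String key, long count) {
    this.key = key;
    this.count = count;
  }

  @Override
  public int compareTo(CountedKey other) {
    int cmp = key.compareTo(other.key);
    if (cmp != 0)
      return cmp;
    // descending by count: a later insertion (higher count) sorts earlier
    if (count < other.count)
      return 1;
    if (count > other.count)
      return -1;
    return 0;
  }
}
{code}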
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index ae16709..2fe637a 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -69,9 +69,9 @@ public class MockTable {
if (o instanceof MockMemKey) {
MockMemKey other = (MockMemKey) o;
if (count < other.count)
- return -1;
- if (count > other.count)
return 1;
+ if (count > other.count)
+ return -1;
} else {
return 1;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-218_15476a0d.diff |
bugs-dot-jar_data_ACCUMULO-334_9d8cc45d | ---
BugID: ACCUMULO-334
Summary: Bulk random walk test failed
Description: "The bulk random walk test failed while running on a 10 node cluster
w/ the following error message.\n\n{noformat}\n18 23:36:05,167 [bulk.Setup] INFO
: Starting bulk test on 459a04a0\n\n\n19 00:24:33,950 [randomwalk.Framework] ERROR:
Error during random walk\njava.lang.Exception: Error running node Bulk.xml\n at
org.apache.accumulo.server.test.randomwalk.Module.visit(Module.java:253)\n at
org.apache.accumulo.server.test.randomwalk.Framework.run(Framework.java:61)\n at
org.apache.accumulo.server.test.randomwalk.Framework.main(Framework.java:114)\n
\ at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)\n
\ at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)\n
\ at java.lang.reflect.Method.invoke(Method.java:597)\n at org.apache.accumulo.start.Main$1.run(Main.java:89)\n
\ at java.lang.Thread.run(Thread.java:662)\nCaused by: java.lang.Exception:
Error running node bulk.Verify\n at org.apache.accumulo.server.test.randomwalk.Module.visit(Module.java:253)\n
\ at org.apache.accumulo.server.test.randomwalk.Module.visit(Module.java:249)\n
\ ... 8 more\nCaused by: java.lang.Exception: Bad key at r0d646 cf:000 []
1326932285943 false -1\n at org.apache.accumulo.server.test.randomwalk.bulk.Verify.visit(Verify.java:51)\n
\ at org.apache.accumulo.server.test.randomwalk.Module.visit(Module.java:249)\n
\ ... 9 more\n{noformat}\n\nLooking at the table the rows [r0d646, r0edd9]
and [r0f056, r10467] all had -1 values. There was a tablet that overlapped the
first range of -1 rows exactly 268;r0edd9;r0d645. This tablet had only the following
activity on a tablet server and was then merged out of existence. The merge operation
was 268;r10eff;r093b1.\n\n{noformat}\n19 00:05:10,966 [tabletserver.Tablet] DEBUG:
Files for low split 268;r0edd9;r0d645 [/b-0001azp/I0001azt.rf, /b-0001azp/I0001azu.rf,
/t-0001ale/A0001an3.rf]\n19 00:05:10,974 [tabletserver.Tablet] TABLET_HIST: 268;r0f055;r0d645
split 268;r0edd9;r0d645 268;r0f055;r0edd9\n19 00:05:10,975 [tabletserver.Tablet]
TABLET_HIST: 268;r0edd9;r0d645 opened \n19 00:05:15,029 [tabletserver.Tablet] TABLET_HIST:
268;r0edd9;r0d645 import /b-0001azi/I0001azm.rf 17138 0\n19 00:05:15,103 [tabletserver.Tablet]
DEBUG: Starting MajC 268;r0edd9;r0d645 [/b-0001azi/I0001azm.rf, /b-0001azp/I0001azt.rf,
/b-0001azp/I0001azu.rf, /t-0001ale/A0001an3.rf] --> /t-0001apj/A0001bri.rf_tmp\n19
00:05:15,339 [tabletserver.Tablet] TABLET_HIST: 268;r0edd9;r0d645 import /b-0001azx/I0001azy.rf
16620 0\n19 00:05:15,651 [tabletserver.Compactor] DEBUG: Compaction 268;r0edd9;r0d645
181,080 read | 60,360 written | 553,761 entries/sec | 0.327 secs\n19 00:05:15,661
[tabletserver.Tablet] TABLET_HIST: 268;r0edd9;r0d645 MajC [/b-0001azi/I0001azm.rf,
/b-0001azp/I0001azt.rf, /b-0001azp/I0001azu.rf, /t-0001ale/A0001an3.rf] --> /t-0001apj/A0001bri.rf\n19
00:05:30,672 [tabletserver.Tablet] DEBUG: Starting MajC 268;r0edd9;r0d645 [/b-0001azx/I0001azy.rf]
--> /t-0001apj/C0001brn.rf_tmp\n19 00:05:30,810 [tabletserver.Compactor] DEBUG:
Compaction 268;r0edd9;r0d645 60,360 read | 60,360 written | 534,159 entries/sec
| 0.113 secs\n19 00:05:30,824 [tabletserver.Tablet] TABLET_HIST: 268;r0edd9;r0d645
MajC [/b-0001azx/I0001azy.rf] --> /t-0001apj/C0001brn.rf\n19 00:05:30,943 [tabletserver.Tablet]
DEBUG: initiateClose(saveState=true queueMinC=false disableWrites=false) 268;r0edd9;r0d645\n19
00:05:30,943 [tabletserver.Tablet] DEBUG: completeClose(saveState=true completeClose=true)
268;r0edd9;r0d645\n19 00:05:30,947 [tabletserver.Tablet] TABLET_HIST: 268;r0edd9;r0d645
closed\n19 00:05:30,947 [tabletserver.TabletServer] DEBUG: Unassigning 268;r0edd9;r0d645@(null,xxx.xxx.xxx.xxx:9997[134d7425fc59413],null)\n19
00:05:30,949 [tabletserver.TabletServer] INFO : unloaded 268;r0edd9;r0d645\n19 00:05:30,949
[tabletserver.TabletServer] INFO : unloaded 268;r0edd9;r0d645\n\n{noformat}\n\n\nFor
the second range of -1 values [r0f056, r10467], r0f056 corresponds to the split
point r0f055. However, there is no split point corresponding to r10467. All of
the tablets w/ a split of r0f055 lived on one tablet server. \n\n{noformat}\n19
00:02:21,262 [tabletserver.Tablet] TABLET_HIST: 268<;r0d645 split 268;r0f055;r0d645
268<;r0f055\n19 00:02:21,263 [tabletserver.Tablet] TABLET_HIST: 268;r0f055;r0d645
opened \n19 00:02:21,264 [tabletserver.Tablet] TABLET_HIST: 268<;r0f055 opened \n19
00:02:44,504 [tabletserver.Tablet] TABLET_HIST: 268<;r0f055 split 268;r11da6;r0f055
268<;r11da6\n19 00:02:44,505 [tabletserver.Tablet] TABLET_HIST: 268;r11da6;r0f055
opened \n19 00:05:10,974 [tabletserver.Tablet] TABLET_HIST: 268;r0f055;r0d645 split
268;r0edd9;r0d645 268;r0f055;r0edd9\n19 00:05:10,975 [tabletserver.Tablet] TABLET_HIST:
268;r0f055;r0edd9 opened \n19 00:05:15,023 [tabletserver.Tablet] TABLET_HIST: 268;r11da6;r0f055
split 268;r0f622;r0f055 268;r11da6;r0f622\n19 00:05:15,024 [tabletserver.Tablet]
TABLET_HIST: 268;r0f622;r0f055 opened \n{noformat}\n\nAll of the tablets mentioned
so far were all merged away in the same merge operation, making this operation a
possible place where data loss occurred. However, I cannot pinpoint the issue at
this point in time. Below is a little info about the merge from the master logs
showing which tablets were involved in the merge.\n\n{noformat}\n19 00:05:30,616
[master.EventCoordinator] INFO : Merge state of 268;r10eff;r093b1 set to WAITING_FOR_CHOPPED\n19
00:05:30,677 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc5940c]
to chop 268;r09927;r0903a\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc5940c]
to chop 268;r0ca9e;r09927\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc5940a]
to chop 268;r0d2b5;r0ca9e\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59412]
to chop 268;r0d645;r0d2b5\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r0edd9;r0d645\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r0f055;r0edd9\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r0f622;r0f055\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r0f68b;r0f622\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r10c14;r0f68b\n19 00:05:30,678 [master.Master] INFO : Asking xxx.xxx.xxx.xxx:9997[134d7425fc59413]
to chop 268;r110f7;r10c14\n{noformat}\n\nWhen this test verifies its data and detects
data loss, there is no easy way to determine at what time the data loss occurred.
\ It might be useful to modify the data in the bulk test such that it is easier
to determine the time when data was lost. For example the continuous ingest test
creates linked list and it is possible to determine tight time bounds when a node
was ingested. However that may change the nature of this test and the bugs that
it might find."
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 5281f4d..8532e56 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -228,6 +228,20 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
}
@Override
+ public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+ Combiner newInstance;
+ try {
+ newInstance = this.getClass().newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ newInstance.setSource(getSource().deepCopy(env));
+ newInstance.combiners = combiners;
+ newInstance.combineAllColumns = combineAllColumns;
+ return newInstance;
+ }
+
+ @Override
public IteratorOptions describeOptions() {
IteratorOptions io = new IteratorOptions("comb", "Combiners apply reduce functions to values with identical keys", null, null);
io.addNamedOption(ALL_OPTION, "set to true to apply Combiner to every column, otherwise leave blank. if true, " + COLUMNS_OPTION
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
index 628c9dd..7b8d636 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/TypedValueCombiner.java
@@ -145,6 +145,14 @@ public abstract class TypedValueCombiner<V> extends Combiner {
}
}
+ @SuppressWarnings("unchecked")
+ @Override
+ public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+ TypedValueCombiner<V> newInstance = (TypedValueCombiner<V>) super.deepCopy(env);
+ newInstance.setEncoder(encoder);
+ return newInstance;
+ }
+
@Override
public Value reduce(Key key, Iterator<Value> iter) {
return new Value(encoder.encode(typedReduce(key, new VIterator<V>(iter, encoder))));
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/VersioningIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/VersioningIterator.java
index e38c68e..9d92db3 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/VersioningIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/VersioningIterator.java
@@ -29,6 +29,8 @@ public class VersioningIterator extends org.apache.accumulo.core.iterators.user.
public VersioningIterator() {}
public VersioningIterator(SortedKeyValueIterator<Key,Value> iterator, int maxVersions) {
- super(iterator, maxVersions);
+ super();
+ this.setSource(iterator);
+ this.maxVersions = maxVersions;
}
}
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
index 9feffd8..486e6cb 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/AgeOffFilter.java
@@ -37,26 +37,6 @@ public class AgeOffFilter extends Filter {
private long threshold;
private long currentTime;
- public AgeOffFilter() {}
-
- /**
- * Constructs a filter that omits entries read from a source iterator if the Key's timestamp is less than currentTime - threshold.
- *
- * @param iterator
- * The source iterator.
- *
- * @param threshold
- * Maximum age in milliseconds of data to keep.
- *
- * @param threshold
- * Current time in milliseconds.
- */
- private AgeOffFilter(SortedKeyValueIterator<Key,Value> iterator, long threshold, long currentTime) {
- setSource(iterator);
- this.threshold = threshold;
- this.currentTime = currentTime;
- }
-
/**
* Accepts entries whose timestamps are less than currentTime - threshold.
*
@@ -93,7 +73,10 @@ public class AgeOffFilter extends Filter {
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
- return new AgeOffFilter(getSource(), threshold, currentTime);
+ AgeOffFilter copy = (AgeOffFilter) super.deepCopy(env);
+ copy.currentTime = currentTime;
+ copy.threshold = threshold;
+ return copy;
}
@Override
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
index 4bffbcb..8c95728 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/ColumnAgeOffFilter.java
@@ -36,15 +36,6 @@ import org.apache.hadoop.io.Text;
* Different thresholds are set for each column.
*/
public class ColumnAgeOffFilter extends Filter {
-
- public ColumnAgeOffFilter() {}
-
- private ColumnAgeOffFilter(SortedKeyValueIterator<Key,Value> iterator, TTLSet ttls, long currentTime) {
- setSource(iterator);
- this.ttls = ttls;
- this.currentTime = currentTime;
- }
-
public static class TTLSet extends ColumnToClassMapping<Long> {
public TTLSet(Map<String,String> objectStrings) {
super();
@@ -87,7 +78,10 @@ public class ColumnAgeOffFilter extends Filter {
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
- return new ColumnAgeOffFilter(getSource(), ttls, currentTime);
+ ColumnAgeOffFilter copy = (ColumnAgeOffFilter) super.deepCopy(env);
+ copy.currentTime = currentTime;
+ copy.ttls = ttls;
+ return copy;
}
public void overrideCurrentTime(long ts) {
@@ -123,7 +117,7 @@ public class ColumnAgeOffFilter extends Filter {
public static void addTTL(IteratorSetting is, IteratorSetting.Column column, Long ttl) {
is.addOption(ColumnSet.encodeColumns(column.getFirst(), column.getSecond()), Long.toString(ttl));
}
-
+
/**
* A convenience method for removing an age off threshold for a column.
*
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index fb53801..e508631 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -37,8 +37,7 @@ public class RegExFilter extends Filter {
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
- RegExFilter result = new RegExFilter();
- result.setSource(getSource().deepCopy(env));
+ RegExFilter result = (RegExFilter) super.deepCopy(env);
result.rowMatcher = copyMatcher(rowMatcher);
result.colfMatcher = copyMatcher(colfMatcher);
result.colqMatcher = copyMatcher(colqMatcher);
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
index 5b652aa..2dbfe66 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
@@ -54,17 +54,6 @@ public class TimestampFilter extends Filter {
public TimestampFilter() {}
- private TimestampFilter(SortedKeyValueIterator<Key,Value> iterator, boolean hasStart, long start, boolean startInclusive, boolean hasEnd, long end,
- boolean endInclusive) {
- setSource(iterator);
- this.start = start;
- this.startInclusive = startInclusive;
- this.hasStart = true;
- this.end = end;
- this.endInclusive = endInclusive;
- this.hasEnd = true;
- }
-
@Override
public boolean accept(Key k, Value v) {
long ts = k.getTimestamp();
@@ -112,7 +101,14 @@ public class TimestampFilter extends Filter {
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
- return new TimestampFilter(getSource(), hasStart, start, startInclusive, hasEnd, end, endInclusive);
+ TimestampFilter copy = (TimestampFilter) super.deepCopy(env);
+ copy.hasStart = hasStart;
+ copy.start = start;
+ copy.startInclusive = startInclusive;
+ copy.hasEnd = hasEnd;
+ copy.end = end;
+ copy.endInclusive = endInclusive;
+ return copy;
}
@Override
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
index 0b2c767..53833f4 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
@@ -37,25 +37,14 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
private Key currentKey = new Key();
private int numVersions;
- private int maxVersions;
+ protected int maxVersions;
@Override
public VersioningIterator deepCopy(IteratorEnvironment env) {
- return new VersioningIterator(this, env);
- }
-
- private VersioningIterator(VersioningIterator other, IteratorEnvironment env) {
- setSource(other.getSource().deepCopy(env));
- maxVersions = other.maxVersions;
- }
-
- public VersioningIterator() {}
-
- public VersioningIterator(SortedKeyValueIterator<Key,Value> iterator, int maxVersions) {
- if (maxVersions < 1)
- throw new IllegalArgumentException("maxVersions for versioning iterator must be >= 1");
- this.setSource(iterator);
- this.maxVersions = maxVersions;
+ VersioningIterator copy = new VersioningIterator();
+ copy.setSource(getSource().deepCopy(env));
+ copy.maxVersions = maxVersions;
+ return copy;
}
@Override
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-334_9d8cc45d.diff |
bugs-dot-jar_data_ACCUMULO-2713_6138a80f | ---
BugID: ACCUMULO-2713
Summary: Instance secret written out with other configuration items to RFiles and
WALogs when encryption is turned on
Description: |-
  The encryption at rest feature records configuration information into encrypted RFiles and WALogs so that, if the configuration changes, the files can still be read back. The code that does this recording hoovers up all the "instance.*" entries and does not pick out instance.secret as a special one not to write. Thus the instance secret goes into each file in the clear, which is non-ideal to say the least.
Patch forthcoming.
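For illustration only (the property name below is written out literally; the committed patch uses the Property enum), the shape of the fix is to drop the shared secret from the option map before it is persisted alongside the encrypted file:
{code}
import java.util.HashMap;
import java.util.Map;

public class CryptoOptionScrubber {
  // assumed literal key for illustration; the patch references Property.INSTANCE_SECRET
  private static final String INSTANCE_SECRET_KEY = "instance.secret";

  public static Map<String,String> withoutInstanceSecret(Map<String,String> options) {
    Map<String,String> safe = new HashMap<String,String>(options);
    safe.remove(INSTANCE_SECRET_KEY);
    return safe;
  }
}
{code}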
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
index 649bfc8..4d04125 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
@@ -258,6 +258,7 @@ public class CryptoModuleFactory {
// Get all the options from the configuration
Map<String,String> cryptoOpts = conf.getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
cryptoOpts.putAll(conf.getAllPropertiesWithPrefix(Property.INSTANCE_PREFIX));
+ cryptoOpts.remove(Property.INSTANCE_SECRET.getKey());
cryptoOpts.put(Property.CRYPTO_BLOCK_STREAM_SIZE.getKey(), Integer.toString((int) conf.getMemoryInBytes(Property.CRYPTO_BLOCK_STREAM_SIZE)));
return fillParamsObjectFromStringMap(params, cryptoOpts);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2713_6138a80f.diff |
bugs-dot-jar_data_ACCUMULO-151_b007b22e | ---
BugID: ACCUMULO-151
Summary: Combiner default behavior is dangerous
Description: "Currently if the users does not give the combiner any columns to work
against, it will work against all columns. This is dangerous, if a user accidentally
forgets to specify columns then their data could be unintentionally corrupted. Something
different needs to be done. \n\nAlso classes that extend combiner should call super.validateOptions(). "
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 4da51d3..173b00d 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -152,15 +152,15 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
private Key workKey = new Key();
/*
- * Sets the topKey and topValue based on the top key of the source. If the column of the source top key is in the set of combiners, or if there are no columns
- * in the set of combiners, topKey will be the top key of the source and topValue will be the result of the reduce method. Otherwise, topKey and topValue will
- * be null.
+ * Sets the topKey and topValue based on the top key of the source. If the column of the source top key is in the set of combiners, topKey will be the top key
+ * of the source and topValue will be the result of the reduce method. Otherwise, topKey and topValue will be unchanged. (They are always set to null before
+ * this method is called.)
*/
private void findTop() throws IOException {
// check if aggregation is needed
if (super.hasTop()) {
workKey.set(super.getTopKey());
- if (combiners.isEmpty() || combiners.contains(workKey)) {
+ if (combiners.contains(workKey)) {
if (workKey.isDeleted())
return;
topKey = workKey;
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-151_b007b22e.diff |
bugs-dot-jar_data_ACCUMULO-4138_50db442b | ---
BugID: ACCUMULO-4138
Summary: CompactCommand description is incorrect
Description: "The compact command has the following description \n{code}\nroot@accumulo>
compact -?\nusage: compact [<table>{ <table>}] [-?] [-b <begin-row>] [--cancel]
[-e <end-row>] [-nf] [-ns <namespace> | -p <pattern> | -t <tableName>] [-pn <profile>]
\ [-w]\ndescription: sets all tablets for a table to major compact as soon as possible
(based on current time)\n -?,--help display this help\n -b,--begin-row
<begin-row> begin row (inclusive)\n --cancel cancel
user initiated compactions\n -e,--end-row <end-row> end row (inclusive)\n
\ -nf,--noFlush do not flush table data in memory before compacting.\n
\ -ns,--namespace <namespace> name of a namespace to operate on\n -p,--pattern
<pattern> regex pattern of table names to operate on\n -pn,--profile <profile>
\ iterator profile name\n -t,--table <tableName> name of a table
to operate on\n -w,--wait wait for compact to finish\n{code}\n\nHowever,
the --begin-row is not inclusive. Here is a simple demonstration.\n{code}\ncreatetable
compacttest\naddsplits a b c\ninsert \"a\" \"1\" \"\" \"\"\ninsert \"a\" \"2\" \"\"
\"\"\ninsert \"b\" \"3\" \"\" \"\"\ninsert \"b\" \"4\" \"\" \"\"\ninsert \"c\" \"5\"
\"\" \"\"\ninsert \"c\" \"6\" \"\" \"\"\nflush -w\nscan -t accumulo.metadata -np\ncompact
-b a -e c -t compacttest -w\nscan -t accumulo.metadata -np\ndeletetable compacttest
-f\n{code}\n\nYou will see that the file associated with the 'a' split is still an F
flush file, while the files in the 'b' and 'c' splits are A files.\n\nNot sure if
the fix is to update the command's description, which would be easy, or to make the
begin row actually inclusive."
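As a small illustration of working with the current behavior (sketch only, not from the report): since the begin row is exclusive, passing a null start row is the simplest way to be sure the first tablet, including the one holding row 'a', gets compacted.
{code}
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.hadoop.io.Text;

public class InclusiveCompact {
  public static void compactThroughEndRow(Connector conn, String table, Text end)
      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    // a null start row means "from the first tablet", which sidesteps the
    // exclusive begin-row behavior described above
    conn.tableOperations().compact(table, null, end, true, true);
  }
}
{code}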
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
index 536d6e6..bcad3a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
@@ -260,7 +260,7 @@ public interface TableOperations {
* @param start
* first tablet to be compacted contains the row after this row, null means the first tablet in table
* @param end
- * last tablet to be merged contains this row, null means the last tablet in table
+ * last tablet to be compacted contains this row, null means the last tablet in table
* @param flush
* when true, table memory is flushed before compaction starts
* @param wait
@@ -276,7 +276,7 @@ public interface TableOperations {
* @param start
* first tablet to be compacted contains the row after this row, null means the first tablet in table
* @param end
- * last tablet to be merged contains this row, null means the last tablet in table
+ * last tablet to be compacted contains this row, null means the last tablet in table
* @param iterators
* A set of iterators that will be applied to each tablet compacted
* @param flush
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
index 64968f0..6ffa3f4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/DeleteRowsCommand.java
@@ -54,9 +54,7 @@ public class DeleteRowsCommand extends Command {
public Options getOptions() {
final Options o = new Options();
forceOpt = new Option("f", "force", false, "delete data even if start or end are not specified");
- startRowOptExclusive = new Option(OptUtil.START_ROW_OPT, "begin-row", true, "begin row (exclusive)");
- startRowOptExclusive.setArgName("begin-row");
- o.addOption(startRowOptExclusive);
+ o.addOption(OptUtil.startRowOpt());
o.addOption(OptUtil.endRowOpt());
o.addOption(OptUtil.tableOpt("table to delete a row range from"));
o.addOption(forceOpt);
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
index 9213a06..18d519d 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/MergeCommand.java
@@ -96,9 +96,7 @@ public class MergeCommand extends Command {
sizeOpt = new Option("s", "size", true, "merge tablets to the given size over the entire table");
forceOpt = new Option("f", "force", false, "merge small tablets to large tablets, even if it goes over the given size");
allOpt = new Option("", "all", false, "allow an entire table to be merged into one tablet without prompting the user for confirmation");
- Option startRowOpt = OptUtil.startRowOpt();
- startRowOpt.setDescription("begin row (NOT inclusive)");
- o.addOption(startRowOpt);
+ o.addOption(OptUtil.startRowOpt());
o.addOption(OptUtil.endRowOpt());
o.addOption(OptUtil.tableOpt("table to be merged"));
o.addOption(verboseOpt);
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
index 9915bdf..432f17a 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
@@ -117,7 +117,7 @@ public abstract class OptUtil {
}
public static Option startRowOpt() {
- final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (inclusive)");
+ final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (NOT) inclusive");
o.setArgName("begin-row");
return o;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
index 9a0026a..60ae0a7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/ScanCommand.java
@@ -57,6 +57,7 @@ public class ScanCommand extends Command {
protected Option timestampOpt;
private Option optStartRowExclusive;
+ private Option optStartRowInclusive;
private Option optEndRowExclusive;
private Option timeoutOption;
private Option profileOpt;
@@ -318,7 +319,9 @@ public class ScanCommand extends Command {
o.addOption(scanOptAuths);
o.addOption(scanOptRow);
- o.addOption(OptUtil.startRowOpt());
+ optStartRowInclusive = new Option(OptUtil.START_ROW_OPT, "begin-row", true, "begin row (inclusive)");
+ optStartRowInclusive.setArgName("begin-row");
+ o.addOption(optStartRowInclusive);
o.addOption(OptUtil.endRowOpt());
o.addOption(optStartRowExclusive);
o.addOption(optEndRowExclusive);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4138_50db442b.diff |
bugs-dot-jar_data_ACCUMULO-414_116d5928 | ---
BugID: ACCUMULO-414
Summary: Make sure iterators handle deletion entries properly
Description: In minor compaction scope and in non-full major compaction scopes the
iterator may see deletion entries. These entries should be preserved by all iterators
except ones that are strictly scan-time iterators that will never be configured
for the minc or majc scopes. Deletion entries are only removed during full major
compactions.
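A bare-bones custom filter sketch (an illustrative class, not part of the patch). Building on the Filter base class, as the patch does for GrepIterator, matters here because Filter is expected to pass deletion entries straight through rather than handing them to accept(), so delete markers survive minor compactions and partial major compactions.
{code}
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;

public class NonEmptyValueFilter extends Filter {
  @Override
  public boolean accept(Key k, Value v) {
    // keep only entries whose value is non-empty; deletion entries are
    // expected to be returned by the base class without reaching accept()
    return v.get().length > 0;
  }
}
{code}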
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/GrepIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/GrepIterator.java
index e768bff..ab8ca84 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/GrepIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/GrepIterator.java
@@ -17,35 +17,27 @@
package org.apache.accumulo.core.iterators.user;
import java.io.IOException;
+import java.util.Arrays;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SkippingIterator;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
/**
* This iterator provides exact string matching. It searches both the Key and Value for the string. The string to match is specified by the "term" option.
*/
-public class GrepIterator extends SkippingIterator {
+public class GrepIterator extends Filter {
private byte term[];
@Override
- protected void consume() throws IOException {
- while (getSource().hasTop()) {
- Key k = getSource().getTopKey();
- Value v = getSource().getTopValue();
-
- if (match(v.get()) || match(k.getRowData()) || match(k.getColumnFamilyData()) || match(k.getColumnQualifierData())) {
- break;
- }
-
- getSource().next();
- }
+ public boolean accept(Key k, Value v) {
+ return match(v.get()) || match(k.getRowData()) || match(k.getColumnFamilyData()) || match(k.getColumnQualifierData());
}
private boolean match(ByteSequence bs) {
@@ -88,7 +80,9 @@ public class GrepIterator extends SkippingIterator {
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
- throw new UnsupportedOperationException();
+ GrepIterator copy = (GrepIterator) super.deepCopy(env);
+ copy.term = Arrays.copyOf(term, term.length);
+ return copy;
}
@Override
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-414_116d5928.diff |
bugs-dot-jar_data_ACCUMULO-4098_a2c2d38a | ---
BugID: ACCUMULO-4098
Summary: ConditionalWriterIT is failing
Description: |-
  I noticed that the ConditionalWriterIT was failing in master. Using the following command with {{git bisect}}, I tracked it down to commit {{3af75fc}} for ACCUMULO-4077 as the change that broke the IT. I have not looked into why it's failing yet.
{noformat}
mvn clean verify -Dit.test=ConditionalWriterIT -Dfindbugs.skip -Dcheckstyle.skip -Dtest=foo -DfailIfNoTests=false
{noformat}
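For context, a tiny standalone illustration (values are made up) of the ByteBuffer shape the fix guards against: when position() is non-zero or the backing array extends past limit(), reading array().length bytes from arrayOffset() walks outside the logical window, so the fast path is only safe when arrayOffset() + limit() reaches the end of the backing array.
{code}
import java.nio.ByteBuffer;

public class BufferSliceDemo {
  public static void main(String[] args) {
    byte[] backing = {0, 1, 2, 3, 4, 5, 6, 7};
    ByteBuffer whole = ByteBuffer.wrap(backing);
    whole.position(2);
    whole.limit(6);
    ByteBuffer view = whole.slice();

    // prints: arrayOffset=2 limit=4 backingLength=8 -- the backing array holds
    // bytes outside the logical window, so offset and limit must both be honored
    System.out.println("arrayOffset=" + view.arrayOffset() + " limit=" + view.limit()
        + " backingLength=" + view.array().length);
  }
}
{code}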
diff --git a/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java b/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
index 6947d64..f353613 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/UnsynchronizedBuffer.java
@@ -118,8 +118,8 @@ public class UnsynchronizedBuffer {
}
public Reader(ByteBuffer buffer) {
- if (buffer.hasArray()) {
- offset = buffer.arrayOffset();
+ if (buffer.hasArray() && buffer.array().length == buffer.arrayOffset() + buffer.limit()) {
+ offset = buffer.arrayOffset() + buffer.position();
data = buffer.array();
} else {
data = new byte[buffer.remaining()];
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4098_a2c2d38a.diff |
bugs-dot-jar_data_ACCUMULO-3242_15e83709 | ---
BugID: ACCUMULO-3242
Summary: Consolidate ZK code WRT retries
Description: |-
A couple of general ZK things that should be fixed up:
# Multiple means of automatic retrying of recoverable ZooKeeper errors through use of an InvocationHandler and a Proxy around IZooReader(Writer)
# Encapsulate retry logic
# Switch over callers to use the retrying instance instead of the non-retrying instance
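As a generic illustration of the second item, encapsulating retry logic (this is not the project's actual Retry class), the basic shape is a bounded loop with a wait between attempts:
{code}
import java.util.concurrent.Callable;

public class SimpleRetry {
  public static <T> T callWithRetry(Callable<T> op, int maxAttempts, long waitMillis) throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return op.call();
      } catch (Exception e) {
        last = e;  // remember the most recent failure
        if (attempt < maxAttempts) {
          Thread.sleep(waitMillis);  // back off before the next attempt
        }
      }
    }
    throw last;
  }
}
{code}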
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
index b3dd19a..d72ac08 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooCache.java
@@ -162,7 +162,7 @@ public class ZooCache {
if (code == Code.NONODE) {
log.error("Looked up non-existent node in cache " + e.getPath(), e);
} else if (code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT || code == Code.SESSIONEXPIRED) {
- log.warn("Saw (possibly) transient exception communicating with ZooKeeper, wil retry", e);
+ log.warn("Saw (possibly) transient exception communicating with ZooKeeper, will retry", e);
continue;
}
log.warn("Zookeeper error, will retry", e);
diff --git a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
index 674c1d8..1a3af6b 100644
--- a/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
+++ b/fate/src/main/java/org/apache/accumulo/fate/zookeeper/ZooUtil.java
@@ -18,9 +18,11 @@ package org.apache.accumulo.fate.zookeeper;
import java.math.BigInteger;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.log4j.Logger;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
@@ -32,6 +34,8 @@ import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
+import com.google.common.base.Preconditions;
+
public class ZooUtil {
private static final Logger log = Logger.getLogger(ZooUtil.class);
@@ -87,11 +91,68 @@ public class ZooUtil {
byte[] auth;
public ZooKeeperConnectionInfo(String keepers, int timeout, String scheme, byte[] auth) {
+ Preconditions.checkNotNull(keepers);
this.keepers = keepers;
this.timeout = timeout;
this.scheme = scheme;
this.auth = auth;
}
+
+ @Override
+ public int hashCode() {
+ final HashCodeBuilder hcb = new HashCodeBuilder(31, 47);
+ hcb.append(keepers).append(timeout);
+ if (null != scheme) {
+ hcb.append(scheme);
+ }
+ if (null != auth) {
+ hcb.append(auth);
+ }
+ return hcb.toHashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof ZooKeeperConnectionInfo) {
+ ZooKeeperConnectionInfo other = (ZooKeeperConnectionInfo) o;
+ if (!keepers.equals(other.keepers) || timeout != other.timeout) {
+ return false;
+ }
+
+ if (null != scheme) {
+ if (null == other.scheme) {
+ // Ours is non-null, theirs is null
+ return false;
+ } else if (!scheme.equals(other.scheme)) {
+ // Both non-null but not equal
+ return false;
+ }
+ }
+
+ if (null != auth) {
+ if (null == other.auth) {
+ return false;
+ } else if (!Arrays.equals(auth, other.auth)) {
+ // both non-null but not equal
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(64);
+ sb.append("zookeepers=").append(keepers);
+ sb.append(", timeout=").append(timeout);
+ sb.append(", scheme=").append(scheme);
+ sb.append(", auth=").append(null == auth ? "null" : "REDACTED");
+ return sb.toString();
+ }
}
public static final List<ACL> PRIVATE;
@@ -206,8 +267,7 @@ public class ZooUtil {
}
public static boolean putPersistentData(ZooKeeperConnectionInfo info, String zPath, byte[] data, int version, NodeExistsPolicy policy)
- throws KeeperException,
- InterruptedException {
+ throws KeeperException, InterruptedException {
return putData(info, zPath, data, CreateMode.PERSISTENT, version, policy, PUBLIC);
}
@@ -216,8 +276,7 @@ public class ZooUtil {
return putData(info, zPath, data, CreateMode.PERSISTENT, version, policy, acls);
}
- private static boolean putData(ZooKeeperConnectionInfo info, String zPath, byte[] data, CreateMode mode, int version,
- NodeExistsPolicy policy, List<ACL> acls)
+ private static boolean putData(ZooKeeperConnectionInfo info, String zPath, byte[] data, CreateMode mode, int version, NodeExistsPolicy policy, List<ACL> acls)
throws KeeperException, InterruptedException {
if (policy == null)
policy = NodeExistsPolicy.FAIL;
@@ -347,8 +406,9 @@ public class ZooUtil {
}
retry.waitForNextAttempt();
}
- for (String child : children)
+ for (String child : children) {
recursiveCopyPersistent(info, source + "/" + child, destination + "/" + child, policy);
+ }
}
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3242_15e83709.diff |
bugs-dot-jar_data_ACCUMULO-3055_94c2a31f | ---
BugID: ACCUMULO-3055
Summary: calling MiniAccumuloCluster.stop multiple times fails with NPE
Description: "On the mailing list [~ctubbsii] mentioned seeing some NPEs in the stderr
for {{mvn verify}}.\n\nI see one here when running mvn verify with either hadoop
profile:\n\n{quote}\nException in thread \"Thread-0\" java.lang.NullPointerException\n\tat
org.apache.accumulo.minicluster.MiniAccumuloCluster.stopProcessWithTimeout(MiniAccumuloCluster.java:449)\n\tat
org.apache.accumulo.minicluster.MiniAccumuloCluster.stop(MiniAccumuloCluster.java:376)\n\tat
org.apache.accumulo.minicluster.MiniAccumuloCluster$1.run(MiniAccumuloCluster.java:318)\n{quote}\n\nThe
relevant piece of code (in 1.5.2-SNAP) is the {{executor.execute}} below\n\n{code}\n
\ private int stopProcessWithTimeout(final Process proc, long timeout, TimeUnit
unit) throws InterruptedException, ExecutionException, TimeoutException {\n FutureTask<Integer>
future = new FutureTask<Integer>(new Callable<Integer>() {\n @Override\n
\ public Integer call() throws InterruptedException {\n proc.destroy();\n
\ return proc.waitFor();\n }\n });\n\n executor.execute(future);\n\n
\ return future.get(timeout, unit);\n }\n{code}\n\nReading through the code for
stop, it nulls out executor when it's done. So the easy way to get an NPE is calling
stop() multiple times on a MAC instance. Since we have a shutdown hook that calls
stop, that means that a single user invocation of stop should result in a NPE later.\n\nSince
start() doesn't allow multiple starts, we probably shouldn't allow multiple stops.
That would mean adding logic to the shutdown hook to check if we're already stopped
or making a private unguarded version of stop that allows multiple calls and using
that from the hook.\n\ncriteria for closing this issue:\n\n* MAC should document
whether calling stop() multiple times is allowed\n* fix MAC.stop to either guard
against multiple calls or handle them gracefully\n* find out why this only gets
an NPE in one place. Do we rely on the shutdown hook everywhere?"
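A stripped-down sketch of the guarded-stop idea (illustrative only, not the MiniAccumuloCluster code): make stop() synchronized and return early once teardown has happened, so a later call from the shutdown hook becomes a no-op.
{code}
import java.util.concurrent.ExecutorService;

public class IdempotentStopper {
  private ExecutorService executor;  // set by start(), null once stopped

  public synchronized void stop() {
    if (executor == null) {
      return;  // already stopped, e.g. a user call followed by the shutdown hook
    }
    executor.shutdownNow();
    executor = null;
  }
}
{code}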
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 7a9bc0d..8246c51 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -53,6 +53,8 @@ import org.apache.accumulo.start.Main;
import org.apache.log4j.Logger;
import org.apache.zookeeper.server.ZooKeeperServerMain;
+import com.google.common.base.Preconditions;
+
/**
* A utility class that will create Zookeeper and Accumulo processes that write all of their data to a single local directory. This class makes it easy to test
* code against a real Accumulo instance. Its much more accurate for testing than MockAccumulo, but much slower than MockAccumulo.
@@ -306,7 +308,7 @@ public class MiniAccumuloCluster {
* @throws IllegalStateException
* if already started
*/
- public void start() throws IOException, InterruptedException {
+ public synchronized void start() throws IOException, InterruptedException {
if (zooKeeperProcess != null)
throw new IllegalStateException("Already started");
@@ -365,10 +367,15 @@ public class MiniAccumuloCluster {
}
/**
- * Stops Accumulo and Zookeeper processes. If stop is not called, there is a shutdown hook that is setup to kill the processes. Howerver its probably best to
+ * Stops Accumulo and Zookeeper processes. If stop is not called, there is a shutdown hook that is setup to kill the processes. However its probably best to
* call stop in a finally block as soon as possible.
*/
- public void stop() throws IOException, InterruptedException {
+ public synchronized void stop() throws IOException, InterruptedException {
+ if (null == executor) {
+ // keep repeated calls to stop() from failing
+ return;
+ }
+
if (zooKeeperProcess != null) {
try {
stopProcessWithTimeout(zooKeeperProcess, 30, TimeUnit.SECONDS);
@@ -436,6 +443,7 @@ public class MiniAccumuloCluster {
}
private int stopProcessWithTimeout(final Process proc, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
+ Preconditions.checkNotNull(executor, "Executor was already null");
FutureTask<Integer> future = new FutureTask<Integer>(new Callable<Integer>() {
@Override
public Integer call() throws InterruptedException {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3055_94c2a31f.diff |
bugs-dot-jar_data_ACCUMULO-821_a450ac2f | ---
BugID: ACCUMULO-821
Summary: MockBatchScanner inappropriately filters on ranges
Description: I believe I have a legitimate case where an iterator will return something
outside of the seeked-to range. This appears to work in a live system, but fails
to work in test cases using the MockBatchScanner. I believe this is because the
MockBatchScanner filters on the supplied ranges in addition to seeking the iterators
to each range. Either we need to remove this range filter, or fix the real system
to do the same thing. I prefer the former of course.
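For a concrete, hypothetical example of an iterator whose output can legitimately fall outside the seeked range, consider one that rewrites the row on the way out; post-filtering its results against the original ranges, as the mock batch scanner did, silently drops such entries.
{code}
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.WrappingIterator;
import org.apache.hadoop.io.Text;

public class RowPrefixingIterator extends WrappingIterator {
  @Override
  public Key getTopKey() {
    Key k = super.getTopKey();
    // re-emit the entry under a prefixed row, which may sort outside the
    // range the scanner seeked to (re-seek handling omitted for brevity)
    return new Key(new Text("agg_" + k.getRow()), k.getColumnFamily(), k.getColumnQualifier(),
        k.getColumnVisibility(), k.getTimestamp());
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    RowPrefixingIterator copy = new RowPrefixingIterator();
    copy.setSource(getSource().deepCopy(env));
    return copy;
  }
}
{code}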
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java
index c33599b..351fdf9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchScanner.java
@@ -24,10 +24,12 @@ import java.util.List;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.mock.MockScanner.RangeFilter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.SortedMapIterator;
import org.apache.accumulo.core.security.Authorizations;
@@ -53,7 +55,11 @@ public class MockBatchScanner extends MockScannerBase implements BatchScanner {
static class RangesFilter extends Filter {
List<Range> ranges;
- RangesFilter(SortedKeyValueIterator<Key,Value> iterator, List<Range> ranges) {
+ public RangesFilter deepCopy(IteratorEnvironment env) {
+ return new RangesFilter(getSource().deepCopy(env), ranges);
+ }
+
+ public RangesFilter(SortedKeyValueIterator<Key,Value> iterator, List<Range> ranges) {
setSource(iterator);
this.ranges = ranges;
}
@@ -77,9 +83,9 @@ public class MockBatchScanner extends MockScannerBase implements BatchScanner {
IteratorChain chain = new IteratorChain();
for (Range range : ranges) {
- SortedKeyValueIterator<Key,Value> i = new SortedMapIterator(table.table);
+ SortedKeyValueIterator<Key,Value> i = new RangesFilter(new SortedMapIterator(table.table), ranges);
try {
- i = new RangesFilter(createFilter(i), ranges);
+ i = createFilter(i);
i.seek(range, createColumnBSS(fetchedColumns), !fetchedColumns.isEmpty());
chain.addIterator(new IteratorAdapter(i));
} catch (IOException e) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
index 82b9b19..2d78bbf 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockScanner.java
@@ -26,8 +26,10 @@ import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.SortedMapIterator;
+import org.apache.accumulo.core.iterators.system.DeletingIterator;
import org.apache.accumulo.core.security.Authorizations;
public class MockScanner extends MockScannerBase implements Scanner {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-821_a450ac2f.diff |
bugs-dot-jar_data_ACCUMULO-3077_17654199 | ---
BugID: ACCUMULO-3077
Summary: File never picked up for replication
Description: |-
I was running some tests and noticed that a single file was getting ignored. The logs were warning that the Status message that was written to {{accumulo.metadata}} didn't have a createdTime on the Status record.
The odd part is that all other Status messages had a createdTime and were successfully replicated. Looking at the writes from the TabletServer logs, the expected record *was* written by the TabletServer, and writing a test with the full series of Status records written does net the correct Status (which was different than what was observed in the actual table).
Looking into it, the log which was subject to this error was the first WAL that was used when the instance was started. Because the table configurations are lazily configured when they are actually used, I believe that the StatusCombiner that is set on {{accumulo.metadata}} was not seen by the TabletServer, and the VersioningIterator "ate" the first record.
I need to come up with a way that I can be sure that all tservers will have seen the Combiner set on accumulo.metadata before any data is written to it to avoid losing a record like this.
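A general-purpose sketch of the safeguard (illustrative; the committed patch wires the equivalent into init for accumulo.metadata): attach the combiner to every iterator scope on a table before any data that depends on it is written, so no tserver can compact the table without seeing it.
{code}
import java.util.Collections;
import java.util.EnumSet;

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.iterators.Combiner;
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

public class CombinerGuard {
  public static void attachToAllScopes(Connector conn, String table, IteratorSetting combiner,
      IteratorSetting.Column column) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Combiner.setColumns(combiner, Collections.singletonList(column));
    // scan, minc, and majc all see the combiner, so no scope can drop updates
    conn.tableOperations().attachIterator(table, combiner, EnumSet.allOf(IteratorScope.class));
  }
}
{code}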
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 0a681c4..9b952ba 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -20,6 +20,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
@@ -31,6 +32,8 @@ import jline.console.ConsoleReader;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.cli.Help;
import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.IteratorSetting.Column;
import org.apache.accumulo.core.client.impl.Namespaces;
import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -41,6 +44,8 @@ import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.iterators.Combiner;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.user.VersioningIterator;
import org.apache.accumulo.core.master.state.tables.TableState;
import org.apache.accumulo.core.master.thrift.MasterGoalState;
@@ -65,10 +70,12 @@ import org.apache.accumulo.server.constraints.MetadataConstraints;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
+import org.apache.accumulo.server.replication.StatusCombiner;
import org.apache.accumulo.server.security.AuditedSecurityOperation;
import org.apache.accumulo.server.security.SystemCredentials;
import org.apache.accumulo.server.tables.TableManager;
import org.apache.accumulo.server.tablets.TabletTime;
+import org.apache.accumulo.server.util.ReplicationTableUtil;
import org.apache.accumulo.server.util.TablePropUtil;
import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
import org.apache.hadoop.conf.Configuration;
@@ -84,7 +91,7 @@ import com.beust.jcommander.Parameter;
/**
* This class is used to setup the directory structure and the root tablet to get an instance started
- *
+ *
*/
public class Initialize {
private static final Logger log = Logger.getLogger(Initialize.class);
@@ -102,7 +109,7 @@ public class Initialize {
/**
* Sets this class's ZooKeeper reader/writer.
- *
+ *
* @param izoo
* reader/writer
*/
@@ -112,7 +119,7 @@ public class Initialize {
/**
* Gets this class's ZooKeeper reader/writer.
- *
+ *
* @return reader/writer
*/
static IZooReaderWriter getZooReaderWriter() {
@@ -566,6 +573,23 @@ public class Initialize {
protected static void initMetadataConfig() throws IOException {
initMetadataConfig(RootTable.ID);
initMetadataConfig(MetadataTable.ID);
+
+ // ACCUMULO-3077 Set the combiner on accumulo.metadata during init to reduce the likelihood of a race
+ // condition where a tserver compacts away Status updates because it didn't see the Combiner configured
+ IteratorSetting setting = new IteratorSetting(9, ReplicationTableUtil.COMBINER_NAME, StatusCombiner.class);
+ Combiner.setColumns(setting, Collections.singletonList(new Column(MetadataSchema.ReplicationSection.COLF)));
+ try {
+ for (IteratorScope scope : IteratorScope.values()) {
+ String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase(), setting.getName());
+ for (Entry<String,String> prop : setting.getOptions().entrySet()) {
+ TablePropUtil.setTableProperty(MetadataTable.ID, root + ".opt." + prop.getKey(), prop.getValue());
+ }
+ TablePropUtil.setTableProperty(MetadataTable.ID, root, setting.getPriority() + "," + setting.getIteratorClass());
+ }
+ } catch (Exception e) {
+ log.fatal("Error talking to ZooKeeper", e);
+ throw new IOException(e);
+ }
}
private static void setMetadataReplication(int replication, String reason) throws IOException {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
index 2a9774d..ab5ee86 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
@@ -72,7 +72,7 @@ public class ReplicationTableUtil {
* For testing purposes only -- should not be called by server code
* <p>
* Allows mocking of a Writer for testing
- *
+ *
* @param creds
* Credentials
* @param writer
@@ -187,7 +187,7 @@ public class ReplicationTableUtil {
*/
public static void updateFiles(Credentials creds, KeyExtent extent, Collection<String> files, Status stat) {
if (log.isDebugEnabled()) {
- log.debug("Updating replication for " + extent + " with " + files + " using " + ProtobufUtil.toString(stat));
+ log.debug("Updating replication status for " + extent + " with " + files + " using " + ProtobufUtil.toString(stat));
}
// TODO could use batch writer, would need to handle failure and retry like update does - ACCUMULO-1294
if (files.isEmpty()) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
index b4f14ec..26e6891 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/TabletServerLogger.java
@@ -276,8 +276,8 @@ public class TabletServerLogger {
logs.add(logger.getFileName());
}
Status status = StatusUtil.fileCreated(System.currentTimeMillis());
- log.debug("Writing " + ProtobufUtil.toString(status) + " to replication table for " + logs);
- // Got some new WALs, note this in the replication table
+ log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for " + logs);
+ // Got some new WALs, note this in the metadata table
ReplicationTableUtil.updateFiles(SystemCredentials.get(), commitSession.getExtent(), logs, status);
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
index 5b46b7b..78a2ed6 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/DatafileManager.java
@@ -61,7 +61,7 @@ class DatafileManager {
// access to datafilesizes needs to be synchronized: see CompactionRunner#getNumFiles
private final Map<FileRef,DataFileValue> datafileSizes = Collections.synchronizedMap(new TreeMap<FileRef,DataFileValue>());
private final Tablet tablet;
-
+
// ensure we only have one reader/writer of our bulk file notes at at time
private final Object bulkFileImportLock = new Object();
@@ -80,7 +80,7 @@ class DatafileManager {
private boolean reservationsBlocked = false;
private final Set<FileRef> majorCompactingFiles = new HashSet<FileRef>();
-
+
static void rename(VolumeManager fs, Path src, Path dst) throws IOException {
if (!fs.rename(src, dst)) {
throw new IOException("Rename " + src + " to " + dst + " returned false ");
@@ -268,7 +268,7 @@ class DatafileManager {
dfv.setTime(bulkTime);
}
}
-
+
tablet.updatePersistedTime(bulkTime, paths, tid);
}
}
@@ -424,6 +424,9 @@ class DatafileManager {
// This WAL could still be in use by other Tablets *from the same table*, so we can only mark that there is data to replicate,
// but it is *not* closed
if (replicate) {
+ if (log.isDebugEnabled()) {
+ log.debug("Recording that data has been ingested into " + tablet.getExtent() + " using " + logFileOnly);
+ }
ReplicationTableUtil.updateFiles(SystemCredentials.get(), tablet.getExtent(), logFileOnly, StatusUtil.openWithUnknownLength());
}
} finally {
@@ -434,7 +437,7 @@ class DatafileManager {
try {
// the purpose of making this update use the new commit session, instead of the old one passed in,
// is because the new one will reference the logs used by current memory...
-
+
tablet.getTabletServer().minorCompactionFinished(tablet.getTabletMemory().getCommitSession(), newDatafile.toString(), commitSession.getWALogSeq() + 2);
break;
} catch (IOException e) {
@@ -449,19 +452,19 @@ class DatafileManager {
if (datafileSizes.containsKey(newDatafile)) {
log.error("Adding file that is already in set " + newDatafile);
}
-
+
if (dfv.getNumEntries() > 0) {
datafileSizes.put(newDatafile, dfv);
}
-
+
if (absMergeFile != null) {
datafileSizes.remove(absMergeFile);
}
-
+
unreserveMergingMinorCompactionFile(absMergeFile);
-
+
tablet.flushComplete(flushId);
-
+
t2 = System.currentTimeMillis();
}
@@ -597,9 +600,9 @@ class DatafileManager {
return Collections.unmodifiableSet(files);
}
}
-
+
public int getNumFiles() {
return datafileSizes.size();
}
-}
\ No newline at end of file
+}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3077_17654199.diff |
bugs-dot-jar_data_ACCUMULO-3150_72fd6bec | ---
BugID: ACCUMULO-3150
Summary: MiniAccumuloConfig doesn't set 0 for monitor log4j port
Description: MonitorLoggingIT will fail on a host if the monitor is already running
because MAC doesn't configure itself to use an ephemeral port. We haven't really
noticed this because MAC doesn't start a monitor by default.
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 8246c51..c9aa1f1 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -270,6 +270,7 @@ public class MiniAccumuloCluster {
mergePropWithRandomPort(siteConfig, Property.TSERV_CLIENTPORT.getKey());
mergePropWithRandomPort(siteConfig, Property.MONITOR_PORT.getKey());
mergePropWithRandomPort(siteConfig, Property.GC_PORT.getKey());
+ mergePropWithRandomPort(siteConfig, Property.MONITOR_LOG4J_PORT.getKey());
// since there is a small amount of memory, check more frequently for majc... setting may not be needed in 1.5
appendProp(fileWriter, Property.TSERV_MAJC_DELAY, "3", siteConfig);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3150_72fd6bec.diff |
bugs-dot-jar_data_ACCUMULO-3746_47c64d9a | ---
BugID: ACCUMULO-3746
Summary: ClientConfiguration.getAllPropertiesWithPrefix doesn't work
Description: I think I introduced this method for trace.span.receiver.*, and didn't
write a test for it. My mistake.
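A minimal sketch of how the broken behaviour shows up (imports and the pre-existing ClientConfiguration instance are omitted; the property name used is only illustrative, not taken from the report):
{code}
// clientConf is an existing ClientConfiguration with a span receiver option set,
// e.g. trace.span.receiver.zipkin.host=localhost (hypothetical key).
Map<String,String> receiverProps =
    clientConf.getAllPropertiesWithPrefix(ClientProperty.TRACE_SPAN_RECEIVER_PREFIX);
// Before the patch below this map comes back empty: the prefix key ends with a
// trailing dot ("trace.span.receiver."), which getKeys(prefix) never matches.
{code}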
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
index a926d35..7aab80c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ClientConfiguration.java
@@ -302,8 +302,12 @@ public class ClientConfiguration extends CompositeConfiguration {
public Map<String,String> getAllPropertiesWithPrefix(ClientProperty property) {
checkType(property, PropertyType.PREFIX);
- Map<String,String> propMap = new HashMap<String,String>();
- Iterator<?> iter = this.getKeys(property.getKey());
+ Map<String,String> propMap = new HashMap<>();
+ String prefix = property.getKey();
+ if (prefix.endsWith(".")) {
+ prefix = prefix.substring(0, prefix.length() - 1);
+ }
+ Iterator<?> iter = this.getKeys(prefix);
while (iter.hasNext()) {
String p = (String) iter.next();
propMap.put(p, getString(p));
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3746_47c64d9a.diff |
bugs-dot-jar_data_ACCUMULO-1505_994df698 | ---
BugID: ACCUMULO-1505
Summary: MockTable's addMutation does not check for empty mutation
Description: "When calling addMutation or addMutations on a MockBatchWriter, the updates
stored in the mutation are iterated over and then committed in the MockTable class.
\n\nWhen this occurs in the TabletServerBatchWriter (eventually called from the
BatchWriterImpl), however, the mutation size is first checked and if the mutation
size is 0, an IllegalArgumentException is thrown.\n\nIn practice, if you have code
that tries to submit an empty mutation to a BatchWriter, it will fail and throw
an exception in the real world, but this will not be caught in tests against MockAccumulo."
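A small sketch of the mismatch, assuming a mock instance and a freshly created table (imports and exception handling omitted; names are illustrative):
{code}
Connector conn = new MockInstance("test").getConnector("root", new PasswordToken(""));
conn.tableOperations().create("t");
BatchWriter bw = conn.createBatchWriter("t", new BatchWriterConfig());

Mutation m = new Mutation("row");  // no column updates added, so m.size() == 0
bw.addMutation(m);                 // real cluster: IllegalArgumentException; mock, pre-patch: silently accepted
bw.close();
{code}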
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
index b33ebcb..d89a263 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockBatchWriter.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.client.mock;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.util.ArgumentChecker;
public class MockBatchWriter implements BatchWriter {
@@ -32,11 +33,13 @@ public class MockBatchWriter implements BatchWriter {
@Override
public void addMutation(Mutation m) throws MutationsRejectedException {
+ ArgumentChecker.notNull(m);
acu.addMutation(tablename, m);
}
@Override
public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
+ ArgumentChecker.notNull(iterable);
for (Mutation m : iterable) {
acu.addMutation(tablename, m);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index 6d6d534..3dcab11 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -101,6 +101,8 @@ public class MockTable {
}
synchronized void addMutation(Mutation m) {
+ if (m.size() == 0)
+ throw new IllegalArgumentException("Can not add empty mutations");
long now = System.currentTimeMillis();
mutationCount++;
for (ColumnUpdate u : m.getUpdates()) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1505_994df698.diff |
bugs-dot-jar_data_ACCUMULO-414_ebf22df0 | ---
BugID: ACCUMULO-414
Summary: Make sure iterators handle deletion entries properly
Description: In minor compaction scope and in non-full major compaction scopes the
iterator may see deletion entries. These entries should be preserved by all iterators
except ones that are strictly scan-time iterators that will never be configured
for the minc or majc scopes. Deletion entries are only removed during full major
compactions.
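The guard this implies for iterators that run below the scan scope, shown as a simplified sketch (the patch below applies the same idea to Filter.findTop(), additionally honouring the negate option):
{code}
// Never consume or re-evaluate delete entries during minc/majc; pass them through
// so that only a full major compaction removes them.
while (getSource().hasTop()
    && !getSource().getTopKey().isDeleted()
    && !accept(getSource().getTopKey(), getSource().getTopValue())) {
  getSource().next();
}
{code}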
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
index 94daf03..a9ed76c 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
@@ -69,7 +69,7 @@ public abstract class Filter extends WrappingIterator implements OptionDescriber
* Iterates over the source until an acceptable key/value pair is found.
*/
protected void findTop() {
- while (getSource().hasTop() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
+ while (getSource().hasTop() && !getSource().getTopKey().isDeleted() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
try {
getSource().next();
} catch (IOException e) {
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
index 8bbf18a..edeaa1d 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
@@ -59,7 +59,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
boolean hasTop();
/**
- * Advances to the next K,V pair.
+ * Advances to the next K,V pair. Note that in minor compaction scope and in non-full major compaction scopes the iterator may see deletion entries. These
+ * entries should be preserved by all iterators except ones that are strictly scan-time iterators that will never be configured for the minc or majc scopes.
+ * Deletion entries are only removed during full major compactions.
*
* @throws IOException
* if an I/O error occurs.
@@ -88,7 +90,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException;
/**
- * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop().
+ * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop(). Note that in minor compaction scope and in non-full major
+ * compaction scopes the iterator may see deletion entries. These entries should be preserved by all iterators except ones that are strictly scan-time
+ * iterators that will never be configured for the minc or majc scopes. Deletion entries are only removed during full major compactions.
*
* @return <tt>K</tt>
* @exception IllegalStateException
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-414_ebf22df0.diff |
bugs-dot-jar_data_ACCUMULO-2671_17344890 | ---
BugID: ACCUMULO-2671
Summary: BlockedOutputStream can hit a StackOverflowError
Description: "This issue mostly came up after a resolution to ACCUMULO-2668 that allows
a byte[] to be passed directly to the underlying stream from the NoFlushOutputStream.\n\nThe
problem appears to be due to the BlockedOutputStream.write(byte[], int, int) implementation
that recursively writes out blocks/buffers. When the stream is passed a large
mutation (128MB was sufficient to trigger the error for me), this will cause a StackOverflowError.
\n\nThis appears to occur specifically with encryption at rest turned on.\n\nA simple
fix would be to unroll the recursion."
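An illustrative comparison of the two write patterns; buffer(), remaining() and flushBlock() are hypothetical stand-ins for the ByteBuffer operations in BlockedOutputStream:
{code}
// Recursive form: roughly len / blockSize stack frames, so a 128MB write
// against a small block size overflows the stack.
void writeRecursive(byte[] b, int off, int len) throws IOException {
  if (len <= remaining()) {
    buffer(b, off, len);
    if (remaining() == 0)
      flushBlock();
  } else {
    int r = remaining();
    writeRecursive(b, off, r);            // one extra frame per block
    writeRecursive(b, off + r, len - r);  // and another for the remainder
  }
}

// Unrolled form: constant stack depth regardless of len.
void writeIterative(byte[] b, int off, int len) throws IOException {
  while (len >= remaining()) {
    int r = remaining();
    buffer(b, off, r);
    flushBlock();
    off += r;
    len -= r;
  }
  buffer(b, off, len);
}
{code}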
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
index ca72055..3ce648e 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/BlockedOutputStream.java
@@ -72,15 +72,18 @@ public class BlockedOutputStream extends OutputStream {
@Override
public void write(byte b[], int off, int len) throws IOException {
- if (bb.remaining() >= len) {
- bb.put(b, off, len);
- if (bb.remaining() == 0)
- flush();
- } else {
+ // Can't recurse here in case the len is large and the blocksize is small (and the stack is small)
+ // So we'll just fill up the buffer over and over
+ while (len >= bb.remaining()) {
int remaining = bb.remaining();
- write(b, off, remaining);
- write(b, off + remaining, len - remaining);
+ bb.put(b, off, remaining);
+ // This is guaranteed to have the buffer filled, so we'll just flush it. No check needed
+ flush();
+ off += remaining;
+ len -= remaining;
}
+ // And then write the remainder (and this is guaranteed to not fill the buffer, so we won't flush afteward
+ bb.put(b, off, len);
}
@Override
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2671_17344890.diff |
bugs-dot-jar_data_ACCUMULO-1190_e29dc4f5 | ---
BugID: ACCUMULO-1190
Summary: The update() method on the ProxyServer should throw a MutationsRejectedException
Description:
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index ed8c71f..859b948 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -104,11 +104,19 @@ public class ProxyServer implements AccumuloProxy.Iface {
public Iterator<Map.Entry<Key,Value>> iterator;
}
- static class CloseWriter implements RemovalListener<UUID,BatchWriter> {
+ static protected class BatchWriterPlusException {
+ public BatchWriter writer;
+ public MutationsRejectedException exception = null;
+ }
+
+ static class CloseWriter implements RemovalListener<UUID,BatchWriterPlusException> {
@Override
- public void onRemoval(RemovalNotification<UUID,BatchWriter> notification) {
+ public void onRemoval(RemovalNotification<UUID,BatchWriterPlusException> notification) {
try {
- notification.getValue().close();
+ BatchWriterPlusException value = notification.getValue();
+ if (value.exception != null)
+ throw value.exception;
+ notification.getValue().writer.close();
} catch (MutationsRejectedException e) {
logger.warn(e, e);
}
@@ -131,7 +139,7 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
protected Cache<UUID,ScannerPlusIterator> scannerCache;
- protected Cache<UUID,BatchWriter> writerCache;
+ protected Cache<UUID,BatchWriterPlusException> writerCache;
public ProxyServer(Properties props) {
@@ -904,10 +912,12 @@ public class ProxyServer implements AccumuloProxy.Iface {
@Override
public void updateAndFlush(ByteBuffer login, String tableName, Map<ByteBuffer,List<ColumnUpdate>> cells) throws TException {
try {
- BatchWriter writer = getWriter(login, tableName, null);
- addCellsToWriter(cells, writer);
- writer.flush();
- writer.close();
+ BatchWriterPlusException bwpe = getWriter(login, tableName, null);
+ addCellsToWriter(cells, bwpe);
+ if (bwpe.exception != null)
+ throw bwpe.exception;
+ bwpe.writer.flush();
+ bwpe.writer.close();
} catch (Exception e) {
throw translateException(e);
}
@@ -915,7 +925,10 @@ public class ProxyServer implements AccumuloProxy.Iface {
private static final ColumnVisibility EMPTY_VIS = new ColumnVisibility();
- private void addCellsToWriter(Map<ByteBuffer,List<ColumnUpdate>> cells, BatchWriter writer) throws MutationsRejectedException {
+ private void addCellsToWriter(Map<ByteBuffer,List<ColumnUpdate>> cells, BatchWriterPlusException bwpe) throws MutationsRejectedException {
+ if (bwpe.exception != null)
+ return;
+
HashMap<Text,ColumnVisibility> vizMap = new HashMap<Text,ColumnVisibility>();
for (Entry<ByteBuffer,List<ColumnUpdate>> entry : cells.entrySet()) {
@@ -947,14 +960,18 @@ public class ProxyServer implements AccumuloProxy.Iface {
m.put(update.getColFamily(), update.getColQualifier(), viz, value);
}
}
- writer.addMutation(m);
+ try {
+ bwpe.writer.addMutation(m);
+ } catch (MutationsRejectedException mre) {
+ bwpe.exception = mre;
+ }
}
}
@Override
public String createWriter(ByteBuffer login, String tableName, WriterOptions opts) throws TException {
try {
- BatchWriter writer = getWriter(login, tableName, opts);
+ BatchWriterPlusException writer = getWriter(login, tableName, opts);
UUID uuid = UUID.randomUUID();
writerCache.put(uuid, writer);
return uuid.toString();
@@ -966,11 +983,11 @@ public class ProxyServer implements AccumuloProxy.Iface {
@Override
public void update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws TException {
try {
- BatchWriter batchwriter = writerCache.getIfPresent(UUID.fromString(writer));
- if (batchwriter == null) {
+ BatchWriterPlusException bwpe = writerCache.getIfPresent(UUID.fromString(writer));
+ if (bwpe == null) {
throw new UnknownWriter("Writer never existed or no longer exists");
}
- addCellsToWriter(cells, batchwriter);
+ addCellsToWriter(cells, bwpe);
} catch (Exception e) {
throw translateException(e);
}
@@ -979,11 +996,13 @@ public class ProxyServer implements AccumuloProxy.Iface {
@Override
public void flush(String writer) throws TException {
try {
- BatchWriter batchwriter = writerCache.getIfPresent(UUID.fromString(writer));
- if (batchwriter == null) {
+ BatchWriterPlusException bwpe = writerCache.getIfPresent(UUID.fromString(writer));
+ if (bwpe == null) {
throw new UnknownWriter("Writer never existed or no longer exists");
}
- batchwriter.flush();
+ if (bwpe.exception != null)
+ throw bwpe.exception;
+ bwpe.writer.flush();
} catch (Exception e) {
throw translateException(e);
}
@@ -992,18 +1011,20 @@ public class ProxyServer implements AccumuloProxy.Iface {
@Override
public void closeWriter(String writer) throws TException {
try {
- BatchWriter batchwriter = writerCache.getIfPresent(UUID.fromString(writer));
- if (batchwriter == null) {
+ BatchWriterPlusException bwpe = writerCache.getIfPresent(UUID.fromString(writer));
+ if (bwpe == null) {
throw new UnknownWriter("Writer never existed or no longer exists");
}
- batchwriter.close();
+ if (bwpe.exception != null)
+ throw bwpe.exception;
+ bwpe.writer.close();
writerCache.invalidate(UUID.fromString(writer));
} catch (Exception e) {
throw translateException(e);
}
}
- private BatchWriter getWriter(ByteBuffer login, String tableName, WriterOptions opts) throws Exception {
+ private BatchWriterPlusException getWriter(ByteBuffer login, String tableName, WriterOptions opts) throws Exception {
BatchWriterConfig cfg = new BatchWriterConfig();
if (opts != null) {
if (opts.maxMemory != 0)
@@ -1015,7 +1036,9 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (opts.latencyMs != 0)
cfg.setMaxLatency(opts.latencyMs, TimeUnit.MILLISECONDS);
}
- return getConnector(login).createBatchWriter(tableName, cfg);
+ BatchWriterPlusException result = new BatchWriterPlusException();
+ result.writer = getConnector(login).createBatchWriter(tableName, cfg);
+ return result;
}
private IteratorSetting getIteratorSetting(org.apache.accumulo.proxy.thrift.IteratorSetting setting) {
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
index 28d7e02..cc244f3 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/AccumuloProxy.java
@@ -182,7 +182,7 @@ import org.slf4j.LoggerFactory;
public String createWriter(ByteBuffer login, String tableName, WriterOptions opts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, org.apache.thrift.TException;
- public void update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException;
+ public void update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws org.apache.thrift.TException;
public void flush(String writer) throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException;
@@ -2358,10 +2358,9 @@ import org.slf4j.LoggerFactory;
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "createWriter failed: unknown result");
}
- public void update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException
+ public void update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws org.apache.thrift.TException
{
send_update(writer, cells);
- recv_update();
}
public void send_update(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells) throws org.apache.thrift.TException
@@ -2372,19 +2371,6 @@ import org.slf4j.LoggerFactory;
sendBase("update", args);
}
- public void recv_update() throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException
- {
- update_result result = new update_result();
- receiveBase(result, "update");
- if (result.ouch1 != null) {
- throw result.ouch1;
- }
- if (result.ouch2 != null) {
- throw result.ouch2;
- }
- return;
- }
-
public void flush(String writer) throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException
{
send_flush(writer);
@@ -4994,7 +4980,7 @@ import org.slf4j.LoggerFactory;
private String writer;
private Map<ByteBuffer,List<ColumnUpdate>> cells;
public update_call(String writer, Map<ByteBuffer,List<ColumnUpdate>> cells, org.apache.thrift.async.AsyncMethodCallback<update_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
- super(client, protocolFactory, transport, resultHandler, false);
+ super(client, protocolFactory, transport, resultHandler, true);
this.writer = writer;
this.cells = cells;
}
@@ -5008,13 +4994,12 @@ import org.slf4j.LoggerFactory;
prot.writeMessageEnd();
}
- public void getResult() throws UnknownWriter, MutationsRejectedException, org.apache.thrift.TException {
+ public void getResult() throws org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
- (new Client(prot)).recv_update();
}
}
@@ -7007,19 +6992,12 @@ import org.slf4j.LoggerFactory;
}
protected boolean isOneway() {
- return false;
+ return true;
}
- public update_result getResult(I iface, update_args args) throws org.apache.thrift.TException {
- update_result result = new update_result();
- try {
- iface.update(args.writer, args.cells);
- } catch (UnknownWriter ouch1) {
- result.ouch1 = ouch1;
- } catch (MutationsRejectedException ouch2) {
- result.ouch2 = ouch2;
- }
- return result;
+ public org.apache.thrift.TBase getResult(I iface, update_args args) throws org.apache.thrift.TException {
+ iface.update(args.writer, args.cells);
+ return null;
}
}
@@ -81630,464 +81608,6 @@ import org.slf4j.LoggerFactory;
}
- public static class update_result implements org.apache.thrift.TBase<update_result, update_result._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_result");
-
- private static final org.apache.thrift.protocol.TField OUCH1_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
- private static final org.apache.thrift.protocol.TField OUCH2_FIELD_DESC = new org.apache.thrift.protocol.TField("ouch2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
-
- private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
- static {
- schemes.put(StandardScheme.class, new update_resultStandardSchemeFactory());
- schemes.put(TupleScheme.class, new update_resultTupleSchemeFactory());
- }
-
- public UnknownWriter ouch1; // required
- public MutationsRejectedException ouch2; // required
-
- /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
- @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- OUCH1((short)1, "ouch1"),
- OUCH2((short)2, "ouch2");
-
- private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
- static {
- for (_Fields field : EnumSet.allOf(_Fields.class)) {
- byName.put(field.getFieldName(), field);
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, or null if its not found.
- */
- public static _Fields findByThriftId(int fieldId) {
- switch(fieldId) {
- case 1: // OUCH1
- return OUCH1;
- case 2: // OUCH2
- return OUCH2;
- default:
- return null;
- }
- }
-
- /**
- * Find the _Fields constant that matches fieldId, throwing an exception
- * if it is not found.
- */
- public static _Fields findByThriftIdOrThrow(int fieldId) {
- _Fields fields = findByThriftId(fieldId);
- if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
- return fields;
- }
-
- /**
- * Find the _Fields constant that matches name, or null if its not found.
- */
- public static _Fields findByName(String name) {
- return byName.get(name);
- }
-
- private final short _thriftId;
- private final String _fieldName;
-
- _Fields(short thriftId, String fieldName) {
- _thriftId = thriftId;
- _fieldName = fieldName;
- }
-
- public short getThriftFieldId() {
- return _thriftId;
- }
-
- public String getFieldName() {
- return _fieldName;
- }
- }
-
- // isset id assignments
- public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
- static {
- Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.OUCH1, new org.apache.thrift.meta_data.FieldMetaData("ouch1", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
- tmpMap.put(_Fields.OUCH2, new org.apache.thrift.meta_data.FieldMetaData("ouch2", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
- metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(update_result.class, metaDataMap);
- }
-
- public update_result() {
- }
-
- public update_result(
- UnknownWriter ouch1,
- MutationsRejectedException ouch2)
- {
- this();
- this.ouch1 = ouch1;
- this.ouch2 = ouch2;
- }
-
- /**
- * Performs a deep copy on <i>other</i>.
- */
- public update_result(update_result other) {
- if (other.isSetOuch1()) {
- this.ouch1 = new UnknownWriter(other.ouch1);
- }
- if (other.isSetOuch2()) {
- this.ouch2 = new MutationsRejectedException(other.ouch2);
- }
- }
-
- public update_result deepCopy() {
- return new update_result(this);
- }
-
- @Override
- public void clear() {
- this.ouch1 = null;
- this.ouch2 = null;
- }
-
- public UnknownWriter getOuch1() {
- return this.ouch1;
- }
-
- public update_result setOuch1(UnknownWriter ouch1) {
- this.ouch1 = ouch1;
- return this;
- }
-
- public void unsetOuch1() {
- this.ouch1 = null;
- }
-
- /** Returns true if field ouch1 is set (has been assigned a value) and false otherwise */
- public boolean isSetOuch1() {
- return this.ouch1 != null;
- }
-
- public void setOuch1IsSet(boolean value) {
- if (!value) {
- this.ouch1 = null;
- }
- }
-
- public MutationsRejectedException getOuch2() {
- return this.ouch2;
- }
-
- public update_result setOuch2(MutationsRejectedException ouch2) {
- this.ouch2 = ouch2;
- return this;
- }
-
- public void unsetOuch2() {
- this.ouch2 = null;
- }
-
- /** Returns true if field ouch2 is set (has been assigned a value) and false otherwise */
- public boolean isSetOuch2() {
- return this.ouch2 != null;
- }
-
- public void setOuch2IsSet(boolean value) {
- if (!value) {
- this.ouch2 = null;
- }
- }
-
- public void setFieldValue(_Fields field, Object value) {
- switch (field) {
- case OUCH1:
- if (value == null) {
- unsetOuch1();
- } else {
- setOuch1((UnknownWriter)value);
- }
- break;
-
- case OUCH2:
- if (value == null) {
- unsetOuch2();
- } else {
- setOuch2((MutationsRejectedException)value);
- }
- break;
-
- }
- }
-
- public Object getFieldValue(_Fields field) {
- switch (field) {
- case OUCH1:
- return getOuch1();
-
- case OUCH2:
- return getOuch2();
-
- }
- throw new IllegalStateException();
- }
-
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
- public boolean isSet(_Fields field) {
- if (field == null) {
- throw new IllegalArgumentException();
- }
-
- switch (field) {
- case OUCH1:
- return isSetOuch1();
- case OUCH2:
- return isSetOuch2();
- }
- throw new IllegalStateException();
- }
-
- @Override
- public boolean equals(Object that) {
- if (that == null)
- return false;
- if (that instanceof update_result)
- return this.equals((update_result)that);
- return false;
- }
-
- public boolean equals(update_result that) {
- if (that == null)
- return false;
-
- boolean this_present_ouch1 = true && this.isSetOuch1();
- boolean that_present_ouch1 = true && that.isSetOuch1();
- if (this_present_ouch1 || that_present_ouch1) {
- if (!(this_present_ouch1 && that_present_ouch1))
- return false;
- if (!this.ouch1.equals(that.ouch1))
- return false;
- }
-
- boolean this_present_ouch2 = true && this.isSetOuch2();
- boolean that_present_ouch2 = true && that.isSetOuch2();
- if (this_present_ouch2 || that_present_ouch2) {
- if (!(this_present_ouch2 && that_present_ouch2))
- return false;
- if (!this.ouch2.equals(that.ouch2))
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- return 0;
- }
-
- public int compareTo(update_result other) {
- if (!getClass().equals(other.getClass())) {
- return getClass().getName().compareTo(other.getClass().getName());
- }
-
- int lastComparison = 0;
- update_result typedOther = (update_result)other;
-
- lastComparison = Boolean.valueOf(isSetOuch1()).compareTo(typedOther.isSetOuch1());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetOuch1()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ouch1, typedOther.ouch1);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- lastComparison = Boolean.valueOf(isSetOuch2()).compareTo(typedOther.isSetOuch2());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetOuch2()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ouch2, typedOther.ouch2);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- return 0;
- }
-
- public _Fields fieldForId(int fieldId) {
- return _Fields.findByThriftId(fieldId);
- }
-
- public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
- schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
- schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
- }
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder("update_result(");
- boolean first = true;
-
- sb.append("ouch1:");
- if (this.ouch1 == null) {
- sb.append("null");
- } else {
- sb.append(this.ouch1);
- }
- first = false;
- if (!first) sb.append(", ");
- sb.append("ouch2:");
- if (this.ouch2 == null) {
- sb.append("null");
- } else {
- sb.append(this.ouch2);
- }
- first = false;
- sb.append(")");
- return sb.toString();
- }
-
- public void validate() throws org.apache.thrift.TException {
- // check for required fields
- // check for sub-struct validity
- }
-
- private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
- try {
- write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
- try {
- read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
- } catch (org.apache.thrift.TException te) {
- throw new java.io.IOException(te);
- }
- }
-
- private static class update_resultStandardSchemeFactory implements SchemeFactory {
- public update_resultStandardScheme getScheme() {
- return new update_resultStandardScheme();
- }
- }
-
- private static class update_resultStandardScheme extends StandardScheme<update_result> {
-
- public void read(org.apache.thrift.protocol.TProtocol iprot, update_result struct) throws org.apache.thrift.TException {
- org.apache.thrift.protocol.TField schemeField;
- iprot.readStructBegin();
- while (true)
- {
- schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
- break;
- }
- switch (schemeField.id) {
- case 1: // OUCH1
- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
- struct.ouch1 = new UnknownWriter();
- struct.ouch1.read(iprot);
- struct.setOuch1IsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- case 2: // OUCH2
- if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
- struct.ouch2 = new MutationsRejectedException();
- struct.ouch2.read(iprot);
- struct.setOuch2IsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
- default:
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- iprot.readFieldEnd();
- }
- iprot.readStructEnd();
-
- // check for required fields of primitive type, which can't be checked in the validate method
- struct.validate();
- }
-
- public void write(org.apache.thrift.protocol.TProtocol oprot, update_result struct) throws org.apache.thrift.TException {
- struct.validate();
-
- oprot.writeStructBegin(STRUCT_DESC);
- if (struct.ouch1 != null) {
- oprot.writeFieldBegin(OUCH1_FIELD_DESC);
- struct.ouch1.write(oprot);
- oprot.writeFieldEnd();
- }
- if (struct.ouch2 != null) {
- oprot.writeFieldBegin(OUCH2_FIELD_DESC);
- struct.ouch2.write(oprot);
- oprot.writeFieldEnd();
- }
- oprot.writeFieldStop();
- oprot.writeStructEnd();
- }
-
- }
-
- private static class update_resultTupleSchemeFactory implements SchemeFactory {
- public update_resultTupleScheme getScheme() {
- return new update_resultTupleScheme();
- }
- }
-
- private static class update_resultTupleScheme extends TupleScheme<update_result> {
-
- @Override
- public void write(org.apache.thrift.protocol.TProtocol prot, update_result struct) throws org.apache.thrift.TException {
- TTupleProtocol oprot = (TTupleProtocol) prot;
- BitSet optionals = new BitSet();
- if (struct.isSetOuch1()) {
- optionals.set(0);
- }
- if (struct.isSetOuch2()) {
- optionals.set(1);
- }
- oprot.writeBitSet(optionals, 2);
- if (struct.isSetOuch1()) {
- struct.ouch1.write(oprot);
- }
- if (struct.isSetOuch2()) {
- struct.ouch2.write(oprot);
- }
- }
-
- @Override
- public void read(org.apache.thrift.protocol.TProtocol prot, update_result struct) throws org.apache.thrift.TException {
- TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(2);
- if (incoming.get(0)) {
- struct.ouch1 = new UnknownWriter();
- struct.ouch1.read(iprot);
- struct.setOuch1IsSet(true);
- }
- if (incoming.get(1)) {
- struct.ouch2 = new MutationsRejectedException();
- struct.ouch2.read(iprot);
- struct.setOuch2IsSet(true);
- }
- }
- }
-
- }
-
public static class flush_args implements org.apache.thrift.TBase<flush_args, flush_args._Fields>, java.io.Serializable, Cloneable {
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flush_args");
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1190_e29dc4f5.diff |
bugs-dot-jar_data_ACCUMULO-844_692efde2 | ---
BugID: ACCUMULO-844
Summary: VisibilityFilter does not catch BadArgumentException
Description: If an invalid column visibility makes it into the system, then the VisibilityFilter
may not handle it properly. The accept method handles VisibilityParseException,
but some of the parse code throws a BadArgumentException which is not handled.
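The parsing happens when the filter rebuilds a ColumnVisibility from the key's bytes, and a malformed expression surfaces as the unchecked BadArgumentException rather than the checked VisibilityParseException that accept already handles. A one-line sketch (the expression is an arbitrary malformed example):
{code}
new ColumnVisibility("A&".getBytes());  // throws BadArgumentException, not VisibilityParseException
{code}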
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index a41f7be..4902e61 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@ -25,6 +25,7 @@ import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.VisibilityEvaluator;
import org.apache.accumulo.core.security.VisibilityParseException;
+import org.apache.accumulo.core.util.BadArgumentException;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.commons.collections.map.LRUMap;
import org.apache.hadoop.io.Text;
@@ -73,6 +74,9 @@ public class VisibilityFilter extends Filter {
} catch (VisibilityParseException e) {
log.error("Parse Error", e);
return false;
+ } catch (BadArgumentException e) {
+ log.error("Parse Error", e);
+ return false;
}
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-844_692efde2.diff |
bugs-dot-jar_data_ACCUMULO-217_add180fb | ---
BugID: ACCUMULO-217
Summary: MockAccumulo doesn't throw informative errors
Description: Users are unable to tell if an error has occurred and whether it is due
to unimplemented features in MockAccumulo.
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
index b533beb..dfb75ba 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
@@ -48,6 +48,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void removeIterator(String tableName, String name, EnumSet<IteratorScope> scopes) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Map<String,String> copy = new HashMap<String,String>();
for (Entry<String,String> property : this.getProperties(tableName)) {
copy.put(property.getKey(), property.getValue());
@@ -64,6 +66,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public IteratorSetting getIteratorSetting(String tableName, String name, IteratorScope scope) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
int priority = -1;
String classname = null;
Map<String,String> settings = new HashMap<String,String>();
@@ -90,6 +94,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public Set<String> listIterators(String tableName) throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Set<String> result = new HashSet<String>();
Set<String> lifecycles = new HashSet<String>();
for (IteratorScope scope : IteratorScope.values())
@@ -107,6 +113,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void checkIteratorConflicts(String tableName, IteratorSetting setting) throws AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
for (IteratorScope scope : setting.getScopes()) {
String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
String nameStr = String.format("%s.%s", scopeStr, setting.getName());
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 4353ab1..f2f32df 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -82,6 +82,8 @@ public class MockTableOperations extends TableOperationsHelper {
if (!tableName.matches(Constants.VALID_TABLE_NAME_REGEX)) {
throw new IllegalArgumentException();
}
+ if (exists(tableName))
+ throw new TableExistsException(tableName, tableName, "");
acu.createTable(username, tableName, versioningIter, timeType);
}
@@ -91,30 +93,42 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void addAggregators(String tableName, List<? extends PerColumnIteratorConfig> aggregators) throws AccumuloSecurityException, TableNotFoundException,
AccumuloException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.addAggregators(tableName, aggregators);
}
@Override
- public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {}
+ public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+ throw new NotImplementedException();
+ }
@Override
- public Collection<Text> getSplits(String tableName) {
+ public Collection<Text> getSplits(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return Collections.emptyList();
}
@Override
- public Collection<Text> getSplits(String tableName, int maxSplits) {
- return Collections.emptyList();
+ public Collection<Text> getSplits(String tableName, int maxSplits) throws TableNotFoundException {
+ return getSplits(tableName);
}
@Override
public void delete(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.tables.remove(tableName);
}
@Override
public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
TableExistsException {
+ if (!exists(oldTableName))
+ throw new TableNotFoundException(oldTableName, oldTableName, "");
+ if (exists(newTableName))
+ throw new TableExistsException(newTableName, newTableName, "");
MockTable t = acu.tables.remove(oldTableName);
acu.tables.put(newTableName, t);
}
@@ -134,15 +148,19 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public Iterable<Entry<String,String>> getProperties(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return acu.tables.get(tableName).settings.entrySet();
}
@Override
- public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {}
+ public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName) throws AccumuloException, TableNotFoundException {
- return null;
+ throw new NotImplementedException();
}
@Override
@@ -164,13 +182,17 @@ public class MockTableOperations extends TableOperationsHelper {
}
@Override
- public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {}
+ public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {
+ throw new NotImplementedException();
+ }
@Override
public void online(String tableName) throws AccumuloSecurityException, AccumuloException {}
@Override
- public void clearLocatorCache(String tableName) throws TableNotFoundException {}
+ public void clearLocatorCache(String tableName) throws TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,String> tableIdMap() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-217_add180fb.diff |
bugs-dot-jar_data_ACCUMULO-1518_dc95cb69 | ---
BugID: ACCUMULO-1518
Summary: FileOperations expects RFile filenames to contain only 1 dot.
Description: |-
If I attempt to create or read an RFile that contains more than 1 dot in the filename, FileOperations throws an IllegalArgumentException("File name " + name + " has no extension").
Please allow creation/import of RFiles that have more than 1 dot in the filename.
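An illustrative example of a name that should be accepted (the filename is made up); only the token after the last dot matters for picking the file operations implementation:
{code}
String name = "I0000a_1.copy.rf";        // hypothetical bulk-import file name
String[] sp = name.split("\\.");
String extension = sp[sp.length - 1];    // "rf" -> RFile operations
// The old check (sp.length != 2) rejected this name outright.
{code}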
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 9f60725..17e540b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -44,14 +44,13 @@ class DispatchingFileFactory extends FileOperations {
if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
return new MapFileOperations();
}
-
String[] sp = name.split("\\.");
- if (sp.length != 2) {
+ if (sp.length < 2) {
throw new IllegalArgumentException("File name " + name + " has no extension");
}
- String extension = sp[1];
+ String extension = sp[sp.length - 1];
if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
return new MapFileOperations();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1518_dc95cb69.diff |
bugs-dot-jar_data_ACCUMULO-209_397f86f6 | ---
BugID: ACCUMULO-209
Summary: RegExFilter does not properly regex when using multi-byte characters
Description: "The current RegExFilter class uses a ByteArrayBackedCharSequence to
set the data to match against. The ByteArrayBackedCharSequence contains a line of
code that prevents the matcher from properly matching multi-byte characters.\n\nLine
49 of ByteArrayBackedCharSequence.java is:\nreturn (char) (0xff & data[offset +
index]); \n\nThis
incorrectly casts a single byte from the byte array to a char, which is 2 bytes
in Java. This prevents the RegExFilter from properly performing regular expression
matching on values encoded with multi-byte characters.\n\nA patch for the RegExFilter.java file
has been created and will be submitted."
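A standalone illustration of why the per-byte cast breaks matching (imports omitted; the sample word and pattern are arbitrary):
{code}
byte[] utf8 = "häuser".getBytes(StandardCharsets.UTF_8);  // 7 bytes for 6 characters
StringBuilder sb = new StringBuilder();
for (byte b : utf8)
  sb.append((char) (0xff & b));                           // byte-at-a-time "decoding"

System.out.println("häuser".matches("h.user"));           // true
System.out.println(sb.toString().matches("h.user"));      // false: 'ä' became two chars
{code}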
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index 0b3b73f..86b2bde 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -17,6 +17,7 @@
package org.apache.accumulo.core.iterators.user;
import java.io.IOException;
+import java.io.UnsupportedEncodingException;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -28,7 +29,6 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.ByteArrayBackedCharSequence;
/**
* A Filter that matches entries based on Java regular expressions.
@@ -52,6 +52,9 @@ public class RegExFilter extends Filter {
public static final String COLQ_REGEX = "colqRegex";
public static final String VALUE_REGEX = "valueRegex";
public static final String OR_FIELDS = "orFields";
+ public static final String ENCODING = "encoding";
+
+ public static final String ENCODING_DEFAULT = "UTF-8";
private Matcher rowMatcher;
private Matcher colfMatcher;
@@ -59,33 +62,36 @@ public class RegExFilter extends Filter {
private Matcher valueMatcher;
private boolean orFields = false;
- private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private String encoding = ENCODING_DEFAULT;
- private Matcher copyMatcher(Matcher m)
- {
- if(m == null)
- return m;
- else
- return m.pattern().matcher("");
+ private Matcher copyMatcher(Matcher m) {
+ if (m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
}
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
- babcs.set(bs);
- matcher.reset(babcs);
- return matcher.matches();
+ try {
+ matcher.reset(new String(bs.getBackingArray(), encoding));
+ return matcher.matches();
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
}
-
return !orFields;
}
private boolean matches(Matcher matcher, byte data[], int offset, int len) {
if (matcher != null) {
- babcs.set(data, offset, len);
- matcher.reset(babcs);
- return matcher.matches();
+ try {
+ matcher.reset(new String(data, offset, len, encoding));
+ return matcher.matches();
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
}
-
return !orFields;
}
@@ -130,6 +136,10 @@ public class RegExFilter extends Filter {
} else {
orFields = false;
}
+
+ if (options.containsKey(ENCODING)) {
+ encoding = options.get(ENCODING);
+ }
}
@Override
@@ -142,6 +152,7 @@ public class RegExFilter extends Filter {
io.addNamedOption(RegExFilter.COLQ_REGEX, "regular expression on column qualifier");
io.addNamedOption(RegExFilter.VALUE_REGEX, "regular expression on value");
io.addNamedOption(RegExFilter.OR_FIELDS, "use OR instread of AND when multiple regexes given");
+ io.addNamedOption(RegExFilter.ENCODING, "character encoding of byte array value (default is " + ENCODING_DEFAULT + ")");
return io;
}
@@ -160,6 +171,17 @@ public class RegExFilter extends Filter {
if (options.containsKey(VALUE_REGEX))
Pattern.compile(options.get(VALUE_REGEX)).matcher("");
+ if (options.containsKey(ENCODING)) {
+ try {
+ this.encoding = options.get(ENCODING);
+ @SuppressWarnings("unused")
+ String test = new String("test".getBytes(), encoding);
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ return false;
+ }
+ }
+
return true;
}
@@ -192,4 +214,19 @@ public class RegExFilter extends Filter {
si.addOption(RegExFilter.OR_FIELDS, "true");
}
}
+
+ /**
+ * Set the encoding string to use when interpreting characters
+ *
+ * @param si
+ * ScanIterator config to be updated
+ * @param encoding
+ * the encoding string to use for character interpretation.
+ *
+ */
+ public static void setEncoding(IteratorSetting si, String encoding) {
+ if (!encoding.isEmpty()) {
+ si.addOption(RegExFilter.ENCODING, encoding);
+ }
+ }
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-209_397f86f6.diff |
bugs-dot-jar_data_ACCUMULO-178_efef09b0 | ---
BugID: ACCUMULO-178
Summary: Off-by-one error in FamilyIntersectingIterator
Description: In the buildDocKey() function within the FamilyIntersectingIterator there
is a bug that shortens the docID by 1. This causes the wrong doc's data to be returned
in the results of a query using this Iterator.
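The arithmetic in isolation (the qualifier layout below is a simplified stand-in for what the iterator actually builds):
{code}
byte[] q = "term\0doc1".getBytes(StandardCharsets.UTF_8);
int zeroIndex = 4;                          // position of the '\0' separator
int buggyLen = q.length - zeroIndex - 2;    // 3 -> "doc", last character lost
int fixedLen = q.length - zeroIndex - 1;    // 4 -> "doc1"
{code}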
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
index 6df0e80..f870b30 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/FamilyIntersectingIterator.java
@@ -151,7 +151,7 @@ public class FamilyIntersectingIterator extends IntersectingIterator {
if (log.isTraceEnabled())
log.trace(zeroIndex + " " + currentDocID.getLength());
Text colq = new Text();
- colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 2);
+ colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 1);
Key k = new Key(currentPartition, colf, colq);
if (log.isTraceEnabled())
log.trace("built doc key for seek: " + k.toString());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-178_efef09b0.diff |
bugs-dot-jar_data_ACCUMULO-3383_97f16db4 | ---
BugID: ACCUMULO-3383
Summary: AccumuloVFSClassloader creates conflicting local cache directory names when
vfs.cache.dir property is set.
Description: "When the vfs.cache.dir property is not set, the AccumuloVFSClassloader
will use java.io.tmpdir as a base directory for the local cache of jars and then
generate a unique directory name using a combination of the processid, hostname
and userid executing the JVM.\n\nWhen the vfs.cache.dir property is set, that value
is used as the base directory and an attempt to generate a unique directory is
made using an AtomicInteger. This isn't suitable because, for non-long-lived processes,
this will always be 1 - and there's a good chance that directory already exists,
is owned by another user, and is not writable by the user in question. \n\nThis
leads to a failure of the invoked accumulo component to start.\n\nModify the behavior
of the unique directory creation when vfs.cache.dir is set so that it employs the
same mechanism for unique directory naming that is used when it is not set.\n"
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index 983ad7c..bd1f943 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -92,8 +92,6 @@ public class AccumuloVFSClassLoader {
public static final String VFS_CACHE_DIR = "general.vfs.cache.dir";
- public static final AtomicInteger uniqueDirectoryGenerator = new AtomicInteger(0);
-
private static ClassLoader parent = null;
private static volatile ReloadingClassLoader loader = null;
private static final Object lock = new Object();
@@ -279,10 +277,7 @@ public class AccumuloVFSClassLoader {
vfs.addMimeTypeMap("application/zip", "zip");
vfs.setFileContentInfoFactory(new FileContentInfoFilenameFactory());
vfs.setFilesCache(new SoftRefFilesCache());
- String cacheDirPath = AccumuloClassLoader.getAccumuloString(VFS_CACHE_DIR, "");
File cacheDir = computeTopCacheDir();
- if (!cacheDirPath.isEmpty())
- cacheDir = new File(cacheDirPath, "" + uniqueDirectoryGenerator.getAndIncrement());
vfs.setReplicator(new UniqueFileReplicator(cacheDir));
vfs.setCacheStrategy(CacheStrategy.ON_RESOLVE);
vfs.init();
@@ -291,8 +286,9 @@ public class AccumuloVFSClassLoader {
}
private static File computeTopCacheDir() {
+ String cacheDirPath = AccumuloClassLoader.getAccumuloString(VFS_CACHE_DIR, System.getProperty("java.io.tmpdir"));
String procName = ManagementFactory.getRuntimeMXBean().getName();
- return new File(System.getProperty("java.io.tmpdir"), "accumulo-vfs-cache-" + procName + "-" + System.getProperty("user.name", "nouser"));
+ return new File(cacheDirPath, "accumulo-vfs-cache-" + procName + "-" + System.getProperty("user.name", "nouser"));
}
public interface Printer {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3383_97f16db4.diff |
bugs-dot-jar_data_ACCUMULO-2962_2fd7633f | ---
BugID: ACCUMULO-2962
Summary: RangeInputSplit Writable methods don't serialize IteratorSettings
Description: |-
Was trying to figure out why some information was getting lost on a RangeInputSplit after serialization, and found out it was because the serialization and deserialization of the class didn't include the configured IteratorSettings.
This likely isn't a big problem for normal users: when no IteratorSettings are configured on the RangeInputSplit, it falls back to pulling them from the Configuration. However, with "non-standard" uses of mapreduce, that information could be missing from the Configuration the mappers receive, and the job would subsequently error.
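A minimal, hypothetical Writable showing the symmetry the patch below restores — write() and readFields() must emit and consume the same presence flag and count (class and field names here are illustrative, not Accumulo's):
{code}
import java.io.*;
import java.util.*;
import org.apache.hadoop.io.Writable;

public class SettingsSplit implements Writable {
  private List<String> settings;          // stands in for the configured IteratorSettings

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeBoolean(settings != null);   // presence flag first
    if (settings != null) {
      out.writeInt(settings.size());
      for (String s : settings) {
        out.writeUTF(s);
      }
    }
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    if (in.readBoolean()) {               // must mirror write() exactly
      int n = in.readInt();
      settings = new ArrayList<String>(n);
      for (int i = 0; i < n; i++) {
        settings.add(in.readUTF());
      }
    }
  }
}
{code}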
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 73c9b59..05316a1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -205,6 +205,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
}
if (in.readBoolean()) {
+ int numIterators = in.readInt();
+ iterators = new ArrayList<IteratorSetting>(numIterators);
+ for (int i = 0; i < numIterators; i++) {
+ iterators.add(new IteratorSetting(in));
+ }
+ }
+
+ if (in.readBoolean()) {
level = Level.toLevel(in.readInt());
}
}
@@ -275,6 +283,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
out.writeUTF(zooKeepers);
}
+ out.writeBoolean(null != iterators);
+ if (null != iterators) {
+ out.writeInt(iterators.size());
+ for (IteratorSetting iterator : iterators) {
+ iterator.write(out);
+ }
+ }
+
out.writeBoolean(null != level);
if (null != level) {
out.writeInt(level.toInt());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2962_2fd7633f.diff |
bugs-dot-jar_data_ACCUMULO-2487_f2920c26 | ---
BugID: ACCUMULO-2487
Summary: Value implementation provides conflicting statements
Description: |-
The javadoc for the no-arg constructor for {{Value}} states that it "Creates a zero-size sequence." However, the implementation of get will error in this case.
{code}
public byte[] get() {
if (this.value == null) {
throw new IllegalStateException("Uninitialized. Null constructor " + "called w/o accompanying readFields invocation");
}
{code}
Either we need to change the javadoc to be more explicit or change the behaviour of various accessors in the class. I would consider both solutions to be breaking of the API contract since we are changing what clients can expect from us.
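The conflict in two lines (behaviour prior to the patch, as described above):
{code}
Value v = new Value();   // javadoc: "Creates a zero-size sequence."
byte[] b = v.get();      // threw IllegalStateException instead of returning a zero-length array
{code}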
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Value.java b/core/src/main/java/org/apache/accumulo/core/data/Value.java
index 39ebbd0..11e60e1 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Value.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Value.java
@@ -25,6 +25,8 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
+import com.google.common.base.Preconditions;
+
import org.apache.accumulo.core.Constants;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparable;
@@ -36,39 +38,52 @@ import org.apache.hadoop.io.WritableComparator;
* 'immutable'.
*/
public class Value implements WritableComparable<Object> {
+ private static final byte[] EMPTY = new byte[0];
protected byte[] value;
-
+
/**
* Create a zero-size sequence.
*/
public Value() {
- super();
+ this(EMPTY, false);
}
/**
* Create a Value using the byte array as the initial value.
*
- * @param bytes
- * This array becomes the backing storage for the object.
+ * @param bytes May not be null
*/
-
public Value(byte[] bytes) {
this(bytes, false);
}
+ /**
+ * Create a Value using a copy of the ByteBuffer's content.
+ *
+ * @param bytes May not be null
+ */
public Value(ByteBuffer bytes) {
+ /* TODO ACCUMULO-2509 right now this uses the entire backing array, which must be accessible. */
this(toBytes(bytes), false);
}
/**
+ * @param bytes may not be null
* @deprecated A copy of the bytes in the buffer is always made. Use {@link #Value(ByteBuffer)} instead.
*/
@Deprecated
public Value(ByteBuffer bytes, boolean copy) {
+ /* TODO ACCUMULO-2509 right now this uses the entire backing array, which must be accessible. */
this(toBytes(bytes), false);
}
+ /**
+ * Create a Value based on the given bytes.
+ * @param bytes may not be null
+ * @param copy signal if Value must make its own copy of bytes, or if it can use the array directly.
+ */
public Value(byte[] bytes, boolean copy) {
+ Preconditions.checkNotNull(bytes);
if (!copy) {
this.value = bytes;
} else {
@@ -81,8 +96,7 @@ public class Value implements WritableComparable<Object> {
/**
* Set the new Value to a copy of the contents of the passed <code>ibw</code>.
*
- * @param ibw
- * the value to set this Value to.
+ * @param ibw may not be null.
*/
public Value(final Value ibw) {
this(ibw.get(), 0, ibw.getSize());
@@ -91,55 +105,49 @@ public class Value implements WritableComparable<Object> {
/**
* Set the value to a copy of the given byte range
*
- * @param newData
- * the new values to copy in
+ * @param newData source of copy, may not be null
* @param offset
* the offset in newData to start at
* @param length
* the number of bytes to copy
*/
public Value(final byte[] newData, final int offset, final int length) {
+ Preconditions.checkNotNull(newData);
this.value = new byte[length];
System.arraycopy(newData, offset, this.value, 0, length);
}
/**
- * Get the data from the BytesWritable.
- *
- * @return The data is only valid between 0 and getSize() - 1.
+ * @return the underlying byte array directly.
*/
public byte[] get() {
- if (this.value == null) {
- throw new IllegalStateException("Uninitialized. Null constructor " + "called w/o accompanying readFields invocation");
- }
+ assert(null != value);
return this.value;
}
/**
- * @param b
- * Use passed bytes as backing array for this instance.
+ * @param b Use passed bytes as backing array for this instance, may not be null.
*/
public void set(final byte[] b) {
+ Preconditions.checkNotNull(b);
this.value = b;
}
/**
*
- * @param b
- * copy bytes
+ * @param b copy the given byte array, may not be null.
*/
public void copy(byte[] b) {
+ Preconditions.checkNotNull(b);
this.value = new byte[b.length];
System.arraycopy(b, 0, this.value, 0, b.length);
}
/**
- * @return the current size of the buffer.
+ * @return the current size of the underlying buffer.
*/
public int getSize() {
- if (this.value == null) {
- throw new IllegalStateException("Uninitialized. Null constructor " + "called w/o accompanying readFields invocation");
- }
+ assert(null != value);
return this.value.length;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2487_f2920c26.diff |
bugs-dot-jar_data_ACCUMULO-2974_5eceb10e | ---
BugID: ACCUMULO-2974
Summary: Unable to assign single tablet table migrated to 1.6.0
Description: |-
Sorry for the screen caps, no copy/paste from machines.
Background: several tables were migrated from 1.5.1 to 1.6.0, only one of which was a single tablet. Upon starting, we noticed that that single table was not loading and the master was reporting an unassigned tablet. Had a stack trace in the monitor (attached).
Also attached is a metadata scan of the table in question (ID: 12). I was able to get a functional copy of the table by offlining 12 and cloning it. It functioned without issues. Attached is a copy of its metadata scan as well (ID: 9o).
The stack trace leads me to it being a specific issue with the contents of srv:dir, and the only difference is the relative vs. absolute file names. This cluster was not changed to multiple namenodes and ../tables/default_tablet does not exist. There are other tables which still use the relative naming scheme, and the system does not seem to be having issues with them.
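For orientation, an editorial sketch of the two srv:dir forms involved and the table-ID prefixing the patch below applies (paths are illustrative):
{code}
// relative, pre-1.6 style value:   /default_tablet
// absolute, 1.6 style value:       hdfs://namenode/accumulo/tables/12/default_tablet
// The fix prepends the table ID before resolving a relative value:
String dir = "/default_tablet";                                  // value from the metadata entry
String withTable = Path.SEPARATOR + extent.getTableId() + dir;   // "/12/default_tablet"
Path full = fs.getFullPath(FileType.TABLE, withTable);
{code}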
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 9ebdef4..2cdd3fe 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.file.rfile.RFile;
import org.apache.accumulo.core.util.CachedConfiguration;
import org.apache.accumulo.core.volume.NonConfiguredVolume;
import org.apache.accumulo.core.volume.Volume;
@@ -55,6 +56,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
@@ -538,10 +540,30 @@ public class VolumeManagerImpl implements VolumeManager {
}
}
- // normalize the path
- Path fullPath = new Path(defaultVolume.getBasePath(), fileType.getDirectory());
if (path.startsWith("/"))
path = path.substring(1);
+
+ // ACCUMULO-2974 To ensure that a proper absolute path is created, the caller needs to include the table ID
+ // in the relative path. Fail when this doesn't appear to happen.
+ if (FileType.TABLE == fileType) {
+ // Trailing slash doesn't create an additional element
+ String[] pathComponents = StringUtils.split(path, Path.SEPARATOR_CHAR);
+
+ // Is an rfile
+ if (path.endsWith(RFile.EXTENSION)) {
+ if (pathComponents.length < 3) {
+ throw new IllegalArgumentException("Fewer components in file path than expected");
+ }
+ } else {
+ // is a directory
+ if (pathComponents.length < 2) {
+ throw new IllegalArgumentException("Fewer components in directory path than expected");
+ }
+ }
+ }
+
+ // normalize the path
+ Path fullPath = new Path(defaultVolume.getBasePath(), fileType.getDirectory());
fullPath = new Path(fullPath, path);
FileSystem fs = getVolumeByPath(fullPath).getFileSystem();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index d72abd2..fbc9738 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -80,6 +80,7 @@ import org.apache.accumulo.server.security.SystemCredentials;
import org.apache.accumulo.server.tables.TableManager;
import org.apache.accumulo.server.tablets.TabletTime;
import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.thrift.TException;
@@ -512,7 +513,10 @@ class TabletGroupWatcher extends Daemon {
} else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
- datafiles.add(new FileRef(entry.getValue().toString(), this.master.fs.getFullPath(FileType.TABLE, entry.getValue().toString())));
+ // ACCUMULO-2974 Need to include the TableID when converting a relative path to an absolute path.
+ // The value has the leading path separator already included so it doesn't need it included.
+ datafiles.add(new FileRef(entry.getValue().toString(), this.master.fs.getFullPath(FileType.TABLE, Path.SEPARATOR + extent.getTableId()
+ + entry.getValue().toString())));
if (datafiles.size() > 1000) {
MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get());
datafiles.clear();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2974_5eceb10e.diff |
bugs-dot-jar_data_ACCUMULO-3143_ddd2c3bc | ---
BugID: ACCUMULO-3143
Summary: InputTableConfig missing isOfflineScan field in Serializer
Description: InputTableConfig write(DataOutput dataOutput) forgets to write out the
  isOfflineScan field, which makes it always false when it gets deserialized.
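A sketch of the observable effect; the accessor names below are assumed from the field name and may not match the real API exactly:
{code}
ByteArrayOutputStream buf = new ByteArrayOutputStream();
InputTableConfig config = new InputTableConfig();
config.setOfflineScan(true);
config.write(new DataOutputStream(buf));        // pre-patch: offlineScan never hits the stream

InputTableConfig copy = new InputTableConfig();
copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
// copy.isOfflineScan() is silently false: the field reverts to its Java default
{code}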
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
index e59451e..fa3b7eb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputTableConfig.java
@@ -281,6 +281,7 @@ public class InputTableConfig implements Writable {
dataOutput.writeBoolean(autoAdjustRanges);
dataOutput.writeBoolean(useLocalIterators);
dataOutput.writeBoolean(useIsolatedScanners);
+ dataOutput.writeBoolean(offlineScan);
}
/**
@@ -325,6 +326,7 @@ public class InputTableConfig implements Writable {
autoAdjustRanges = dataInput.readBoolean();
useLocalIterators = dataInput.readBoolean();
useIsolatedScanners = dataInput.readBoolean();
+ offlineScan = dataInput.readBoolean();
}
@Override
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3143_ddd2c3bc.diff |
bugs-dot-jar_data_ACCUMULO-633_8dad5e0f | ---
BugID: ACCUMULO-633
Summary: FirstEntryInRowIterator is broken and has no test
Description: |-
In 1.4 and trunk, the iterator throws a NullPointerException when seeked.
In 1.3 the iterator runs, but there is a question as to what it should do when it is seeked to the middle of a row. Currently, it returns the first key found within the range. I believe this should be changed to ignore the remaining portion of that row and return the first key of the next row. Should this change be made in 1.3, or should I leave it as is and just change it in 1.4 and greater?
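The essence of the fix, paraphrased from the patch that follows (variable names are illustrative): widen the seek to the start of the row containing the range's start key, then skip that row entirely if the first key found is before the requested range.
{code}
Key startKey = range.getStartKey();
Range seekRange = new Range(startKey == null ? null : new Key(startKey.getRow()), true,
    range.getEndKey(), range.isEndKeyInclusive());
source.seek(seekRange, columnFamilies, inclusive);
if (source.hasTop() && range.beforeStartKey(source.getTopKey())) {
  // landed mid-row: consume the rest of this row and surface the next row's first entry
}
{code}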
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/FirstEntryInRowIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/FirstEntryInRowIterator.java
index 96c7b80..7507bff 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/FirstEntryInRowIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/FirstEntryInRowIterator.java
@@ -75,6 +75,8 @@ public class FirstEntryInRowIterator extends SkippingIterator implements OptionD
// this is only ever called immediately after getting "next" entry
@Override
protected void consume() throws IOException {
+ if (lastRowFound == null)
+ return;
int count = 0;
while (getSource().hasTop() && lastRowFound.equals(getSource().getTopKey().getRow())) {
@@ -101,10 +103,17 @@ public class FirstEntryInRowIterator extends SkippingIterator implements OptionD
latestRange = range;
latestColumnFamilies = columnFamilies;
latestInclusive = inclusive;
+ lastRowFound = null;
- // seek to first possible pattern in range
- super.seek(range, columnFamilies, inclusive);
- lastRowFound = getSource().hasTop() ? getSource().getTopKey().getRow() : null;
+ Key startKey = range.getStartKey();
+ Range seekRange = new Range(startKey == null ? null : new Key(startKey.getRow()), true, range.getEndKey(), range.isEndKeyInclusive());
+ super.seek(seekRange, columnFamilies, inclusive);
+
+ if (getSource().hasTop()) {
+ lastRowFound = getSource().getTopKey().getRow();
+ if (range.beforeStartKey(getSource().getTopKey()))
+ consume();
+ }
}
@Override
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-633_8dad5e0f.diff |
bugs-dot-jar_data_ACCUMULO-843_65390f8c | ---
BugID: ACCUMULO-843
Summary: Mock does not implement locality groups or merging
Description: |-
The Mock Instance does not implement locality groups and throws an exception if one attempts to set them. It would be useful for the unit tests that I am writing for the Accumulo proxy to have at least minimal locality group functionality in the Mock instance, for example simply storing the groups and returning the stored groups when asked for.
*Edit: Tablet merging would be useful as well.
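The minimal behaviour being asked for, as a hedged usage sketch against a mock connector (table and group names are made up):
{code}
Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
groups.put("g1", Collections.singleton(new Text("colfam1")));
conn.tableOperations().setLocalityGroups("mytable", groups);
// expectation: the stored groups come back as-is instead of a NotImplementedException
assert groups.equals(conn.tableOperations().getLocalityGroups("mytable"));
{code}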
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index 8fb9f0e..c4262c0 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -20,9 +20,11 @@ import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
+import java.util.Set;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.SortedSet;
+import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentSkipListMap;
@@ -86,6 +88,7 @@ public class MockTable {
Map<String,EnumSet<TablePermission>> userPermissions = new HashMap<String,EnumSet<TablePermission>>();
private TimeType timeType;
SortedSet<Text> splits = new TreeSet<Text>();
+ Map<String,Set<Text>> localityGroups = new TreeMap<String, Set<Text>>();
MockTable(boolean limitVersion, TimeType timeType) {
this.timeType = timeType;
@@ -122,4 +125,11 @@ public class MockTable {
public Collection<Text> getSplits() {
return splits;
}
+
+ public void setLocalityGroups(Map<String,Set<Text>> groups) {
+ localityGroups = groups;
+ }
+ public Map<String,Set<Text>> getLocalityGroups() {
+ return localityGroups;
+ }
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index b5ffc73..5da7d64 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -40,6 +40,7 @@ import org.apache.accumulo.core.client.admin.TimeType;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileOperations;
@@ -155,17 +156,23 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
+ acu.tables.get(tableName).setLocalityGroups(groups);
}
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName) throws AccumuloException, TableNotFoundException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
+ return acu.tables.get(tableName).getLocalityGroups();
}
@Override
public Set<Range> splitRangeByTablets(String tableName, Range range, int maxSplits) throws AccumuloException, AccumuloSecurityException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return Collections.singleton(range);
}
@@ -262,15 +269,20 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new AccumuloException(tableName + " does not exists");
}
@Override
- public void online(String tableName) throws AccumuloSecurityException, AccumuloException {}
+ public void online(String tableName) throws AccumuloSecurityException, AccumuloException {
+ if (!exists(tableName))
+ throw new AccumuloException(tableName + " does not exists");
+ }
@Override
public void clearLocatorCache(String tableName) throws TableNotFoundException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
}
@Override
@@ -284,24 +296,31 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void merge(String tableName, Text start, Text end) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- throw new NotImplementedException();
- }
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
+}
@Override
public void deleteRows(String tableName, Text start, Text end) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
+ MockTable t = acu.tables.get(tableName);
+ Set<Key> keep = new TreeSet<Key>(t.table.tailMap(new Key(start)).headMap(new Key(end)).keySet());
+ t.table.keySet().removeAll(keep);
}
@Override
public void compact(String tableName, Text start, Text end, boolean flush, boolean wait) throws AccumuloSecurityException, TableNotFoundException,
AccumuloException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
}
@Override
public void compact(String tableName, Text start, Text end, List<IteratorSetting> iterators, boolean flush, boolean wait) throws AccumuloSecurityException,
TableNotFoundException, AccumuloException {
- throw new NotImplementedException();
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
}
@Override
@@ -312,8 +331,9 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void flush(String tableName, Text start, Text end, boolean wait) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- throw new NotImplementedException();
- }
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
+ }
@Override
public Text getMaxRow(String tableName, Authorizations auths, Text startRow, boolean startInclusive, Text endRow, boolean endInclusive)
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-843_65390f8c.diff |
bugs-dot-jar_data_ACCUMULO-2742_1f7dd2d5 | ---
BugID: ACCUMULO-2742
Summary: History command incorrectly numbers commands
Description: |-
When you use the history command, it will provide you with a list of previous commands that have been executed, each with a command number. However, if you try to use history expansion by number to invoke one of those commands, you will be off by one.
I think this is because the history command is added to the list after it shows you the list, and pushes everything else up by one. Uncertain if this is something we do wrong, or if this is an upstream JLine bug.
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HistoryCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HistoryCommand.java
index 9531d90..d6068ba 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HistoryCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/HistoryCommand.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.core.util.shell.commands;
import java.io.IOException;
import java.util.Iterator;
-import java.util.ListIterator;
import jline.console.history.History.Entry;
@@ -27,39 +26,33 @@ import org.apache.accumulo.core.util.shell.Shell.Command;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
-import org.apache.commons.collections.iterators.AbstractIteratorDecorator;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Iterators;
public class HistoryCommand extends Command {
private Option clearHist;
private Option disablePaginationOpt;
- @SuppressWarnings("unchecked")
@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws IOException {
if (cl.hasOption(clearHist.getOpt())) {
shellState.getReader().getHistory().clear();
} else {
- ListIterator<Entry> it = shellState.getReader().getHistory().entries();
- shellState.printLines(new HistoryLineIterator(it), !cl.hasOption(disablePaginationOpt.getOpt()));
+ Iterator<Entry> source = shellState.getReader().getHistory().entries();
+ Iterator<String> historyIterator = Iterators.transform(source, new Function<Entry,String>() {
+ @Override
+ public String apply(Entry input) {
+ return String.format("%d: %s", input.index() + 1, input.value());
+ }
+ });
+
+ shellState.printLines(historyIterator, !cl.hasOption(disablePaginationOpt.getOpt()));
}
return 0;
}
- /**
- * Decorator that converts an Iterator<History.Entry> to an Iterator<String>.
- */
- private static class HistoryLineIterator extends AbstractIteratorDecorator {
- public HistoryLineIterator(Iterator<Entry> iterator) {
- super(iterator);
- }
-
- @Override
- public String next() {
- return super.next().toString();
- }
- }
-
@Override
public String description() {
return ("generates a list of commands previously executed");
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2742_1f7dd2d5.diff |
bugs-dot-jar_data_ACCUMULO-2748_ff8c2383 | ---
BugID: ACCUMULO-2748
Summary: MockTableOperations.deleteRow does not handle null for start or end keys
Description: "The deleteRow function does not check for null values for start or end
keys.\nThese null values are passed down into key constructor which will throw a
NullPointerException:\njava.lang.NullPointerException\n\tat org.apache.accumulo.core.data.Key.<init>(Key.java:103)\n\tat
org.apache.accumulo.core.client.mock.MockTableOperations.deleteRows(MockTableOperations.java:315)\n\nThe
API semantics dictate:\nif (start == null ) then start == Text()\nif (end == null
) then end == maxKey()\n\n\n\n"
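A minimal reproduction along the lines described above (editorial sketch; the table name is made up):
{code}
Connector conn = new MockInstance().getConnector("user", new PasswordToken(""));
conn.tableOperations().create("t");
conn.tableOperations().deleteRows("t", null, null);   // NPE in the Key constructor before the fix
{code}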
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index dc4a619..64f8225 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -314,8 +314,8 @@ public class MockTableOperations extends TableOperationsHelper {
if (!exists(tableName))
throw new TableNotFoundException(tableName, tableName, "");
MockTable t = acu.tables.get(tableName);
- Text startText = new Text(start);
- Text endText = new Text(end);
+ Text startText = start != null ? new Text(start) : new Text();
+ Text endText = end != null ? new Text(end) : new Text(t.table.lastKey().getRow().getBytes());
startText.append(ZERO, 0, 1);
endText.append(ZERO, 0, 1);
Set<Key> keep = new TreeSet<Key>(t.table.subMap(new Key(startText), new Key(endText)).keySet());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2748_ff8c2383.diff |
bugs-dot-jar_data_ACCUMULO-412_5594b2e0 | ---
BugID: ACCUMULO-412
Summary: importdirectory failing on split table
Description: 'bulk import for the wikisearch example isn''t working properly: files
are not being assigned to partitions if there are splits.'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 4f95e1a..83283ac 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -65,7 +65,6 @@ public enum Property {
MASTER_RECOVERY_POOL("master.recovery.pool", "recovery", PropertyType.STRING, "Priority queue to use for log recovery map/reduce jobs."),
MASTER_RECOVERY_SORT_MAPREDUCE("master.recovery.sort.mapreduce", "false", PropertyType.BOOLEAN,
"If true, use map/reduce to sort write-ahead logs during recovery"),
- MASTER_BULK_SERVERS("master.bulk.server.max", "4", PropertyType.COUNT, "The number of servers to use during a bulk load"),
MASTER_BULK_RETRIES("master.bulk.retries", "3", PropertyType.COUNT, "The number of attempts to bulk-load a file before giving up."),
MASTER_BULK_THREADPOOL_SIZE("master.bulk.threadpool.size", "5", PropertyType.COUNT, "The number of threads to use when coordinating a bulk-import."),
MASTER_MINTHREADS("master.server.threads.minimum", "2", PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests."),
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
index 5e82a7d..bb4ae64 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
@@ -42,14 +42,13 @@ import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
public class WikipediaPartitionedMapper extends Mapper<Text,Article,Text,Mutation> {
- private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
index 82af9fd..3507108 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
@@ -23,40 +23,21 @@ package org.apache.accumulo.examples.wikisearch.ingest;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
-import java.io.StringReader;
import java.nio.charset.Charset;
-import java.util.HashSet;
-import java.util.IllegalFormatException;
-import java.util.Map.Entry;
-import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
-import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.log4j.Logger;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
public class WikipediaPartitioner extends Mapper<LongWritable,Text,Text,Article> {
- private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
index d8c57c2..2738e2c 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
@@ -4,20 +4,18 @@ import java.io.IOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
public class SortingRFileOutputFormat extends OutputFormat<Text,Mutation> {
- private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
+ // private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
public static final String PATH_NAME = "sortingrfileoutputformat.path";
public static final String MAX_BUFFER_SIZE = "sortingrfileoutputformat.max.buffer.size";
diff --git a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 071b8bd..4ee5371 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -38,8 +38,8 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.ServerClient;
import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
+import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.thrift.ClientService;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -150,7 +150,7 @@ public class BulkImporter {
} catch (Exception ex) {
log.warn("Unable to find tablets that overlap file " + mapFile.toString());
}
-
+ log.debug("Map file " + mapFile + " found to overlap " + tabletsToAssignMapFileTo.size() + " tablets");
if (tabletsToAssignMapFileTo.size() == 0) {
List<KeyExtent> empty = Collections.emptyList();
completeFailures.put(mapFile, empty);
@@ -652,33 +652,41 @@ public class BulkImporter {
return findOverlappingTablets(acuConf, fs, locator, file, start, failed.getEndRow());
}
+ final static byte[] byte0 = {0};
+
public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, FileSystem fs, TabletLocator locator, Path file, Text startRow,
Text endRow) throws Exception {
List<TabletLocation> result = new ArrayList<TabletLocation>();
-
Collection<ByteSequence> columnFamilies = Collections.emptyList();
-
- FileSKVIterator reader = FileOperations.getInstance().openReader(file.toString(), true, fs, fs.getConf(), acuConf);
+ String filename = file.toString();
+ // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
+ FileSKVIterator reader = FileOperations.getInstance().openReader(filename, true, fs, fs.getConf(), acuConf);
try {
Text row = startRow;
if (row == null)
row = new Text();
while (true) {
+ // log.debug(filename + " Seeking to row " + row);
reader.seek(new Range(row, null), columnFamilies, false);
- if (!reader.hasTop())
+ if (!reader.hasTop()) {
+ // log.debug(filename + " not found");
break;
+ }
row = reader.getTopKey().getRow();
TabletLocation tabletLocation = locator.locateTablet(row, false, true);
+ // log.debug(filename + " found row " + row + " at location " + tabletLocation);
result.add(tabletLocation);
row = tabletLocation.tablet_extent.getEndRow();
- if (row != null && (endRow == null || row.compareTo(endRow) < 0))
- row = Range.followingPrefix(row);
- else
+ if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
+ row = new Text(row);
+ row.append(byte0, 0, byte0.length);
+ } else
break;
}
} finally {
reader.close();
}
+ // log.debug(filename + " to be sent to " + result);
return result;
}
diff --git a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
index c4a3f50..05c353d 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
@@ -19,11 +19,15 @@ package org.apache.accumulo.server.master.tableOps;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
@@ -41,12 +45,13 @@ import org.apache.accumulo.core.client.impl.thrift.TableOperation;
import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.master.state.tables.TableState;
import org.apache.accumulo.core.security.thrift.AuthInfo;
import org.apache.accumulo.core.util.CachedConfiguration;
import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.LoggingRunnable;
+import org.apache.accumulo.core.util.ThriftUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -370,7 +375,7 @@ class LoadFiles extends MasterRepo {
@Override
public Repo<Master> call(final long tid, Master master) throws Exception {
-
+ final SiteConfiguration conf = ServerConfiguration.getSiteConfiguration();
FileSystem fs = TraceFileSystem.wrap(org.apache.accumulo.core.file.FileUtil.getFileSystem(CachedConfiguration.getInstance(),
ServerConfiguration.getSiteConfiguration()));
List<FileStatus> files = new ArrayList<FileStatus>();
@@ -389,42 +394,68 @@ class LoadFiles extends MasterRepo {
}
fs.delete(writable, false);
- // group files into N-sized chunks, send the chunks to random servers
- final int SERVERS_TO_USE = Math.min(ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_SERVERS), master.onlineTabletServers()
- .size());
-
- log.debug("tid " + tid + " using " + SERVERS_TO_USE + " servers");
- // wait for success, repeat failures R times
final List<String> filesToLoad = Collections.synchronizedList(new ArrayList<String>());
for (FileStatus f : files)
filesToLoad.add(f.getPath().toString());
- final int RETRIES = Math.max(1, ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_RETRIES));
- for (int i = 0; i < RETRIES && filesToLoad.size() > 0; i++) {
- List<Future<?>> results = new ArrayList<Future<?>>();
- for (List<String> chunk : groupFiles(filesToLoad, SERVERS_TO_USE)) {
- final List<String> attempt = chunk;
- results.add(threadPool.submit(new LoggingRunnable(log, new Runnable() {
+
+ final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
+ for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
+ List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
+
+ // Figure out which files will be sent to which server
+ Set<TServerInstance> currentServers = Collections.synchronizedSet(new HashSet<TServerInstance>(master.onlineTabletServers()));
+ Map<String,List<String>> loadAssignments = new HashMap<String,List<String>>();
+ for (TServerInstance server : currentServers) {
+ loadAssignments.put(server.hostPort(), new ArrayList<String>());
+ }
+ int i = 0;
+ List<Entry<String,List<String>>> entries = new ArrayList<Entry<String,List<String>>>(loadAssignments.entrySet());
+ for (String file : filesToLoad) {
+ entries.get(i % entries.size()).getValue().add(file);
+ i++;
+ }
+
+ // Use the threadpool to assign files one-at-a-time to the server
+ for (Entry<String,List<String>> entry : entries) {
+ if (entry.getValue().isEmpty()) {
+ continue;
+ }
+ final Entry<String,List<String>> finalEntry = entry;
+ results.add(threadPool.submit(new Callable<List<String>>() {
@Override
- public void run() {
+ public List<String> call() {
+ if (log.isDebugEnabled()) {
+ log.debug("Asking " + finalEntry.getKey() + " to load " + sampleList(finalEntry.getValue(), 10));
+ }
+ List<String> failures = new ArrayList<String>();
ClientService.Iface client = null;
try {
- client = ServerClient.getConnection(HdfsZooInstance.getInstance());
- List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
- attempt.removeAll(fail);
- filesToLoad.removeAll(attempt);
+ client = ThriftUtil.getTServerClient(finalEntry.getKey(), conf);
+ for (String file : finalEntry.getValue()) {
+ List<String> attempt = Collections.singletonList(file);
+ log.debug("Asking " + finalEntry.getKey() + " to bulk import " + file);
+ List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
+ if (fail.isEmpty()) {
+ filesToLoad.remove(file);
+ } else {
+ failures.addAll(fail);
+ }
+ }
} catch (Exception ex) {
log.error(ex, ex);
} finally {
ServerClient.close(client);
}
+ return failures;
}
- })));
+ }));
}
- for (Future<?> f : results)
- f.get();
+ Set<String> failures = new HashSet<String>();
+ for (Future<List<String>> f : results)
+ failures.addAll(f.get());
if (filesToLoad.size() > 0) {
- log.debug("tid " + tid + " attempt " + (i + 1) + " " + filesToLoad + " failed");
+ log.debug("tid " + tid + " attempt " + (i + 1) + " " + sampleList(filesToLoad, 10) + " failed");
UtilWaitThread.sleep(100);
}
}
@@ -449,16 +480,24 @@ class LoadFiles extends MasterRepo {
return new CompleteBulkImport(tableId, source, bulk, errorDir);
}
- private List<List<String>> groupFiles(List<String> files, int groups) {
- List<List<String>> result = new ArrayList<List<String>>();
- Iterator<String> iter = files.iterator();
- for (int i = 0; i < groups && iter.hasNext(); i++) {
- List<String> group = new ArrayList<String>();
- for (int j = 0; j < Math.ceil(files.size() / (double) groups) && iter.hasNext(); j++) {
- group.add(iter.next());
+ static String sampleList(Collection<?> potentiallyLongList, int max) {
+ StringBuffer result = new StringBuffer();
+ result.append("[");
+ int i = 0;
+ for (Object obj : potentiallyLongList) {
+ result.append(obj);
+ if (i >= max) {
+ result.append("...");
+ break;
+ } else {
+ result.append(", ");
}
- result.add(group);
+ i++;
}
- return result;
+ if (i < max)
+ result.delete(result.length() - 2, result.length());
+ result.append("]");
+ return result.toString();
}
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-412_5594b2e0.diff |
bugs-dot-jar_data_ACCUMULO-218_3d55560a | ---
BugID: ACCUMULO-218
Summary: Mock Accumulo Inverts order of mutations w/ same timestamp
Description: Mock accumulo has different behavior than real accumulo when the same
key is updated in the same millisecond. The hidden in-memory map counter in mock
accumulo needs to sort in descending order.
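The intended tie-break, shown standalone (editorial sketch of the comparator semantics in the patch below):
{code}
// When two mock keys are otherwise equal, the one with the larger insertion counter
// (the later mutation) must sort first so a scan returns the most recent value,
// matching real Accumulo's behaviour for identical timestamps.
int compareCounts(long count, long otherCount) {
  if (count < otherCount)
    return 1;    // older entry sorts after
  if (count > otherCount)
    return -1;   // newer entry sorts before
  return 0;
}
{code}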
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index ae16709..2fe637a 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -69,9 +69,9 @@ public class MockTable {
if (o instanceof MockMemKey) {
MockMemKey other = (MockMemKey) o;
if (count < other.count)
- return -1;
- if (count > other.count)
return 1;
+ if (count > other.count)
+ return -1;
} else {
return 1;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-218_3d55560a.diff |
bugs-dot-jar_data_ACCUMULO-3006_d6472040 | ---
BugID: ACCUMULO-3006
Summary: Don't allow viewfs in instance.volumes
Description: |-
I think one of our folks put viewfs into instance.volumes by accident. File references in accumulo.root and accumulo.metadata were then written with viewfs in the path. The garbage collector then throws errors as compactions occur and it tries to delete and move the files to the HDFS user's trash directory.
viewfs should never be allowed in the instance.volumes property; setting it should fail.
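The guard the patch adds, shown standalone (the error message and the 'configuredVolumes' variable are illustrative additions; the actual patch throws an unadorned IllegalArgumentException):
{code}
// 'configuredVolumes' stands in for the instance.volumes values being validated.
for (String volumeUriOrDir : configuredVolumes) {
  if (volumeUriOrDir.startsWith("viewfs"))
    throw new IllegalArgumentException("viewfs volumes are not supported: " + volumeUriOrDir);
}
{code}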
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 0cfb457..877b9a6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -219,16 +219,6 @@ public class VolumeManagerImpl implements VolumeManager {
final String volumeName = entry.getKey();
FileSystem fs = entry.getValue().getFileSystem();
- if (ViewFSUtils.isViewFS(fs)) {
- try {
- FileSystem resolvedFs = ViewFSUtils.resolvePath(fs, new Path("/")).getFileSystem(fs.getConf());
- log.debug("resolved " + fs.getUri() + " to " + resolvedFs.getUri() + " for sync check");
- fs = resolvedFs;
- } catch (IOException e) {
- log.warn("Failed to resolve " + fs.getUri(), e);
- }
- }
-
if (fs instanceof DistributedFileSystem) {
final String DFS_DURABLE_SYNC = "dfs.durable.sync", DFS_SUPPORT_APPEND = "dfs.support.append";
final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";
@@ -410,6 +400,9 @@ public class VolumeManagerImpl implements VolumeManager {
// Cannot re-define the default volume
throw new IllegalArgumentException();
+ if (volumeUriOrDir.startsWith("viewfs"))
+ throw new IllegalArgumentException();
+
// We require a URI here, fail if it doesn't look like one
if (volumeUriOrDir.contains(":")) {
volumes.put(volumeUriOrDir, VolumeConfiguration.create(new Path(volumeUriOrDir), hadoopConf));
@@ -426,16 +419,6 @@ public class VolumeManagerImpl implements VolumeManager {
for (Volume volume : getFileSystems().values()) {
FileSystem fs = volume.getFileSystem();
- if (ViewFSUtils.isViewFS(fs)) {
- try {
- FileSystem resolvedFs = ViewFSUtils.resolvePath(fs, new Path("/")).getFileSystem(fs.getConf());
- log.debug("resolved " + fs.getUri() + " to " + resolvedFs.getUri() + " for ready check");
- fs = resolvedFs;
- } catch (IOException e) {
- log.warn("Failed to resolve " + fs.getUri(), e);
- }
- }
-
if (!(fs instanceof DistributedFileSystem))
continue;
DistributedFileSystem dfs = (DistributedFileSystem) fs;
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3006_d6472040.diff |
bugs-dot-jar_data_CAMEL-5720_4a05eccf | ---
BugID: CAMEL-5720
Summary: Aggregate EIP - Dynamic completion size should override fixed values if in
exchange
Description: |-
See nabble
http://camel.465427.n5.nabble.com/Bug-with-completionSize-on-AggregatorProcessor-tp5721307.html
diff --git a/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java b/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
index a5c592b..00cb744 100644
--- a/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
+++ b/camel-core/src/main/java/org/apache/camel/processor/aggregate/AggregateProcessor.java
@@ -313,6 +313,9 @@ public class AggregateProcessor extends ServiceSupport implements Processor, Nav
int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
if (size >= value) {
return "size";
+ } else {
+ // not completed yet
+ return null;
}
}
}
| bugs-dot-jar/camel_extracted_diff/developer-patch_bugs-dot-jar_CAMEL-5720_4a05eccf.diff |
bugs-dot-jar_data_CAMEL-7586_1f92fa42 | ---
BugID: CAMEL-7586
Summary: 'NotCompliantMBeanException : Attribute MessageHistory has more than one
getter'
Description: |-
Hello, I wasn't able to subscribe on the mailing list, so I'm posting my issue directly here.
In my project I need to use some _ManagedCamelContextMBean_, which I am trying to access through [JMX.newMBeanProxy|http://docs.oracle.com/javase/8/docs/api/javax/management/JMX.html#newMBeanProxy-javax.management.MBeanServerConnection-javax.management.ObjectName-java.lang.Class-]
However, it is not working as I'm getting a *NotCompliantMBeanException* because the attribute _MessageHistory_ is said to have more than one getter.
I checked the source code of newMBeanProxy, then the [JMX 1.4 specification|http://docs.oracle.com/javase/8/docs/technotes/guides/jmx/JMX_1_4_specification.pdf], and then Camel's source code, and it appears that ManagedCamelContextMBean is indeed not respecting the standard MBean conventions.
The problem is that two methods are defined in _ManagedCamelContextMBean_: isMessageHistory() and getMessageHistory()
Since the return type is boolean, isMessageHistory is considered to be a getter, which makes two getters according to the JMX specification and is blocking the newMBeanProxy() method.
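The JMX rule being tripped, reduced to a two-method interface (the interface name is made up):
{code}
public interface BrokenMBean {
  Boolean getMessageHistory();   // getter for attribute "MessageHistory"
  boolean isMessageHistory();    // also a getter for the same attribute -> "more than one getter"
}
{code}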
diff --git a/camel-core/src/main/java/org/apache/camel/api/management/mbean/ManagedCamelContextMBean.java b/camel-core/src/main/java/org/apache/camel/api/management/mbean/ManagedCamelContextMBean.java
index 700ae22..9a58c0d 100644
--- a/camel-core/src/main/java/org/apache/camel/api/management/mbean/ManagedCamelContextMBean.java
+++ b/camel-core/src/main/java/org/apache/camel/api/management/mbean/ManagedCamelContextMBean.java
@@ -80,9 +80,6 @@ public interface ManagedCamelContextMBean extends ManagedPerformanceCounterMBean
@ManagedAttribute(description = "Tracing")
void setTracing(Boolean tracing);
- @ManagedAttribute(description = "Message History")
- Boolean getMessageHistory();
-
@ManagedAttribute(description = "Current number of inflight Exchanges")
Integer getInflightExchanges();
diff --git a/camel-core/src/main/java/org/apache/camel/management/mbean/ManagedCamelContext.java b/camel-core/src/main/java/org/apache/camel/management/mbean/ManagedCamelContext.java
index a641b68..19a58c0 100644
--- a/camel-core/src/main/java/org/apache/camel/management/mbean/ManagedCamelContext.java
+++ b/camel-core/src/main/java/org/apache/camel/management/mbean/ManagedCamelContext.java
@@ -127,10 +127,6 @@ public class ManagedCamelContext extends ManagedPerformanceCounter implements Ti
context.setTracing(tracing);
}
- public Boolean getMessageHistory() {
- return context.isMessageHistory();
- }
-
public Integer getInflightExchanges() {
return context.getInflightRepository().size();
}
@@ -212,7 +208,7 @@ public class ManagedCamelContext extends ManagedPerformanceCounter implements Ti
}
public boolean isMessageHistory() {
- return context.isMessageHistory();
+ return context.isMessageHistory() != null ? context.isMessageHistory() : false;
}
public boolean isUseMDCLogging() {
| bugs-dot-jar/camel_extracted_diff/developer-patch_bugs-dot-jar_CAMEL-7586_1f92fa42.diff |