From c82d533887fec8afd52149e3834047bd7a4fa90c Mon Sep 17 00:00:00 2001 From: Bruce Mitchener Date: Wed, 9 Feb 2011 08:05:30 +0700 Subject: [PATCH] FLUME-513: Fix various typos. --- .../cloudera/flume/agent/AgentFailChainSink.java | 2 +- src/java/com/cloudera/flume/agent/FlumeNode.java | 8 ++++---- src/java/com/cloudera/flume/agent/LogicalNode.java | 8 ++++---- .../com/cloudera/flume/agent/MultiMasterRPC.java | 4 ++-- .../com/cloudera/flume/agent/WALAckManager.java | 4 ++-- .../agent/diskfailover/DiskFailoverManager.java | 2 +- .../diskfailover/NaiveFileFailoverManager.java | 2 +- .../flume/agent/durability/WALManager.java | 6 +++--- .../cloudera/flume/collector/CollectorSource.java | 4 ++-- .../com/cloudera/flume/conf/FlumeArgException.java | 2 +- src/java/com/cloudera/flume/conf/FlumeBuilder.java | 2 +- src/java/com/cloudera/flume/conf/PatternMatch.java | 2 +- .../com/cloudera/flume/conf/SinkFactoryImpl.java | 2 +- src/java/com/cloudera/flume/core/Attributes.java | 6 +++--- .../cloudera/flume/core/BackOffFailOverSink.java | 2 +- src/java/com/cloudera/flume/core/Event.java | 6 +++--- src/java/com/cloudera/flume/core/EventImpl.java | 6 +++--- src/java/com/cloudera/flume/core/EventSink.java | 2 +- src/java/com/cloudera/flume/core/EventSource.java | 2 +- .../flume/core/extractors/RegexExtractor.java | 2 +- .../flume/handlers/avro/AvroEventAdaptor.java | 4 ++-- .../flume/handlers/avro/AvroEventSource.java | 4 ++-- .../handlers/avro/AvroNativeFileOutputFormat.java | 2 +- .../flume/handlers/batch/BatchingDecorator.java | 2 +- .../flume/handlers/console/JLineStdinSource.java | 2 +- .../handlers/debug/BenchmarkReportDecorator.java | 2 +- .../flume/handlers/debug/BloomGeneratorDeco.java | 2 +- .../flume/handlers/debug/ChokeDecorator.java | 6 +++--- .../flume/handlers/debug/ChokeManager.java | 4 ++-- .../flume/handlers/debug/FlakeyEventSink.java | 2 +- .../handlers/debug/IntervalFlakeyEventSink.java | 2 +- .../flume/handlers/debug/NoNlASCIISynthSource.java | 2 +- .../flume/handlers/debug/NoNlSynthSource.java | 2 +- .../flume/handlers/exec/ExecNioSource.java | 4 ++-- .../flume/handlers/hdfs/CustomDfsSink.java | 2 +- .../flume/handlers/hdfs/WriteableEvent.java | 4 ++-- .../handlers/hive/HiveDirCreatedNotification.java | 2 +- .../cloudera/flume/handlers/rolling/RollSink.java | 2 +- .../cloudera/flume/handlers/text/TailSource.java | 2 +- .../thrift/PrioritizedThriftEventSource.java | 6 +++--- .../flume/handlers/thrift/ThriftEventAdaptor.java | 2 +- .../flume/handlers/thrift/ThriftEventSource.java | 4 ++-- .../com/cloudera/flume/master/CommandStatus.java | 2 +- .../com/cloudera/flume/master/ConfigManager.java | 4 ++-- .../flume/master/ConfigurationManager.java | 2 +- .../com/cloudera/flume/master/FlumeMaster.java | 2 +- .../flume/master/MasterClientServerAvro.java | 2 +- .../flume/master/MasterClientServerThrift.java | 2 +- .../master/TranslatingConfigurationManager.java | 4 ++-- .../flume/master/ZooKeeperConfigStore.java | 2 +- .../cloudera/flume/master/ZooKeeperService.java | 18 +++++++++--------- .../failover/FailoverConfigurationManager.java | 6 +++--- .../flume/master/flows/FlowConfigManager.java | 4 ++-- .../com/cloudera/flume/reporter/ReportEvent.java | 8 ++++---- .../histogram/RegexGroupHistogramSink.java | 2 +- .../flume/reporter/sampler/ProbabilitySampler.java | 2 +- .../com/cloudera/flume/shell/CommandBuilder.java | 2 +- 57 files changed, 100 insertions(+), 100 deletions(-) diff --git a/src/java/com/cloudera/flume/agent/AgentFailChainSink.java 
b/src/java/com/cloudera/flume/agent/AgentFailChainSink.java index 0e18d77..8606574 100644 --- a/src/java/com/cloudera/flume/agent/AgentFailChainSink.java +++ b/src/java/com/cloudera/flume/agent/AgentFailChainSink.java @@ -83,7 +83,7 @@ public class AgentFailChainSink extends EventSink.Base { break; } default: { - throw new FlumeSpecException("Unknown relability " + rel); + throw new FlumeSpecException("Unknown reliability " + rel); } } } diff --git a/src/java/com/cloudera/flume/agent/FlumeNode.java b/src/java/com/cloudera/flume/agent/FlumeNode.java index 24c9658..94cea8a 100644 --- a/src/java/com/cloudera/flume/agent/FlumeNode.java +++ b/src/java/com/cloudera/flume/agent/FlumeNode.java @@ -280,7 +280,7 @@ public class FlumeNode implements Reportable { } catch (IOException e) { LOG.error("Flume node failed: " + e.getMessage(), e); } catch (Throwable t) { - LOG.error("Unexcepted exception/error thrown! " + t.getMessage(), t); + LOG.error("Unexpected exception/error thrown! " + t.getMessage(), t); } } @@ -416,8 +416,8 @@ public class FlumeNode implements Reportable { /** * This function checks the agent logs dir to make sure that the process has - * the ability to the directory if necesary, that the path if it does exist is - * a directory, and that it can infact create files inside of the directory. + * the ability to create the directory if necessary, that the path if it does exist is + * a directory, and that it can in fact create files inside of the directory. * If it fails any of these, it throws an exception. * * Finally, it checks to see if the path is in /tmp and warns the user that @@ -497,7 +497,7 @@ public class FlumeNode implements Reportable { options.addOption("1", false, "Make flume node one shot (if closes or errors, exits)"); options.addOption("m", false, - "Have flume hard exit if in likey gc thrash situation"); + "Have flume hard exit if in likely GC thrash situation"); options.addOption("h", false, "Print help information"); options.addOption("v", false, "Print version information"); try { diff --git a/src/java/com/cloudera/flume/agent/LogicalNode.java b/src/java/com/cloudera/flume/agent/LogicalNode.java index 7419d0f..31024ef 100644 --- a/src/java/com/cloudera/flume/agent/LogicalNode.java +++ b/src/java/com/cloudera/flume/agent/LogicalNode.java @@ -58,7 +58,7 @@ import com.cloudera.util.NetUtils; * provided by FlumeConfigData for the logical node. It is assumed that there * are not multiple concurrent checkConfig calls. * - * If the config needs to be upadted, the logical node updates itself via + * If the config needs to be updated, the logical node updates itself via * loadConfig. If there is a previous configuration the driver, its source and * its sink is first closed. Configuration for a source and a sink are * instantiated and then instantiated into a new Driver. Any @@ -158,9 +158,9 @@ public class LogicalNode implements Reportable { */ synchronized void openLoadNode(EventSource newSrc, EventSink newSnk) throws IOException, InterruptedException { - // TODO HACK! This is to prevent heartbeat from hanging if one fo the + // TODO HACK! This is to prevent heartbeat from hanging if one of the // configs is unable to start due to open exception. It has the effect of - // defering any exceptions open would have triggered into the Driver thread. + // deferring any exceptions open would have triggered into the Driver thread. // This acts similarly to a 'future' concurrency concept. 
newSnk = new LazyOpenDecorator(newSnk); @@ -335,7 +335,7 @@ public class LogicalNode implements Reportable { // this configuration will open without errors! this.lastGoodCfg = cfg; - LOG.info("Node config sucessfully set to " + cfg); + LOG.info("Node config successfully set to " + cfg); } catch (InterruptedException e) { // TODO figure out what to do on interruption LOG.error("Load Config interrupted", e); diff --git a/src/java/com/cloudera/flume/agent/MultiMasterRPC.java b/src/java/com/cloudera/flume/agent/MultiMasterRPC.java index f40b19e..5f3116b 100644 --- a/src/java/com/cloudera/flume/agent/MultiMasterRPC.java +++ b/src/java/com/cloudera/flume/agent/MultiMasterRPC.java @@ -113,7 +113,7 @@ public class MultiMasterRPC implements MasterRPC { } else if (FlumeConfiguration.RPC_TYPE_AVRO.equals(rpcProtocol)) { out = new AvroMasterRPC(host.getLeft(), host.getRight()); } else { - LOG.error("No valid RPC protocl in configurations."); + LOG.error("No valid RPC protocol in configurations."); continue; } curHost = host.getLeft(); @@ -263,7 +263,7 @@ public class MultiMasterRPC implements MasterRPC { /** * This method returns the ChokeId->limit (in KB/sec) map for the given * physical node. This limit puts an approximate upperbound on the number of - * bytes which can be shipped accross a choke decorator. + * bytes which can be shipped across a choke decorator. */ public Map getChokeMap(final String physicalNode) throws IOException { diff --git a/src/java/com/cloudera/flume/agent/WALAckManager.java b/src/java/com/cloudera/flume/agent/WALAckManager.java index d6e3c16..5b14ec2 100644 --- a/src/java/com/cloudera/flume/agent/WALAckManager.java +++ b/src/java/com/cloudera/flume/agent/WALAckManager.java @@ -143,7 +143,7 @@ public class WALAckManager implements Reportable { for (Entry ack : pending.entrySet()) { long delta = now - ack.getValue(); if (delta > retransmitTime) { - // retransmit.. enqueue to retransimt.... move it back to agent dir.. + // retransmit.. enqueue to retransmit.... move it back to agent dir.. // (lame but good enough for now) try { LOG.info("Retransmitting " + ack.getKey() + " after being stale for " @@ -165,7 +165,7 @@ public class WALAckManager implements Reportable { long now = Clock.unixTime(); List retried = new ArrayList(); for (Entry ack : pending.entrySet()) { - // retransmit.. enqueue to retransimt.... move it back to agent dir.. + // retransmit.. enqueue to retransmit.... move it back to agent dir.. // (lame but good enough for now) try { LOG.info("Retransmitting " + ack.getKey()); diff --git a/src/java/com/cloudera/flume/agent/diskfailover/DiskFailoverManager.java b/src/java/com/cloudera/flume/agent/diskfailover/DiskFailoverManager.java index 64d0171..b02b76e 100644 --- a/src/java/com/cloudera/flume/agent/diskfailover/DiskFailoverManager.java +++ b/src/java/com/cloudera/flume/agent/diskfailover/DiskFailoverManager.java @@ -119,7 +119,7 @@ public interface DiskFailoverManager extends Reportable { * * TODO (jon) This interface is not quite right -- it should take a file and a * format as an arg. This will be revisited when we revist the Log4J, Log4Net, - * and avro serialization integration. + * and Avro serialization integration. 
*/ public void importData() throws IOException; diff --git a/src/java/com/cloudera/flume/agent/diskfailover/NaiveFileFailoverManager.java b/src/java/com/cloudera/flume/agent/diskfailover/NaiveFileFailoverManager.java index b844d87..94148a1 100644 --- a/src/java/com/cloudera/flume/agent/diskfailover/NaiveFileFailoverManager.java +++ b/src/java/com/cloudera/flume/agent/diskfailover/NaiveFileFailoverManager.java @@ -136,7 +136,7 @@ public class NaiveFileFailoverManager implements DiskFailoverManager, } synchronized public void open() throws IOException { - // TODO (jon) be less strict. ?? need to return on and figure out why thisis + // TODO (jon) be less strict. ?? need to return on and figure out why this is // wrong, add // latches. diff --git a/src/java/com/cloudera/flume/agent/durability/WALManager.java b/src/java/com/cloudera/flume/agent/durability/WALManager.java index 9d4c2f2..d555f6c 100644 --- a/src/java/com/cloudera/flume/agent/durability/WALManager.java +++ b/src/java/com/cloudera/flume/agent/durability/WALManager.java @@ -30,7 +30,7 @@ import com.cloudera.flume.reporter.Reportable; /** * This is the interface for providing durability of events until the reach the - * permanent store. This is intended fo use as a write ahead log option that + * permanent store. This is intended for use as a write ahead log option that * requires an ack before data can be eliminated. Different implementations can * be encapsulated by this interface. * @@ -124,8 +124,8 @@ public interface WALManager extends Reportable, WALCompletionNotifier { * testing. * * TODO (jon) This interface is not quite right -- it should take a file and a - * format as an arg. This will be revisited when we revist the Log4J, Log4Net, - * and avro serialization integration. + * format as an arg. This will be revisited when we revisit the Log4J, Log4Net, + * and Avro serialization integration. */ public void importData() throws IOException; diff --git a/src/java/com/cloudera/flume/collector/CollectorSource.java b/src/java/com/cloudera/flume/collector/CollectorSource.java index f813294..204e989 100644 --- a/src/java/com/cloudera/flume/collector/CollectorSource.java +++ b/src/java/com/cloudera/flume/collector/CollectorSource.java @@ -39,8 +39,8 @@ import com.google.common.base.Preconditions; * implementation details to may user configuration simpler. It has a default * options that come from flume-*.xml configuration file. * - * The actual implementation may change in the future (for example, thrift may - * be replaced with avro) but user configurations would not need to change. + * The actual implementation may change in the future (for example, Thrift may + * be replaced with Avro) but user configurations would not need to change. * * TODO (jon) auto version negotiation? 
(With agent sink) */ diff --git a/src/java/com/cloudera/flume/conf/FlumeArgException.java b/src/java/com/cloudera/flume/conf/FlumeArgException.java index 8063cee..e342223 100644 --- a/src/java/com/cloudera/flume/conf/FlumeArgException.java +++ b/src/java/com/cloudera/flume/conf/FlumeArgException.java @@ -18,7 +18,7 @@ package com.cloudera.flume.conf; /** - * This excpetion is thrown when an illegal or invalid argument is passed to a + * This exception is thrown when an illegal or invalid argument is passed to a * flume spec's sink/source/deco */ public class FlumeArgException extends FlumeSpecException { diff --git a/src/java/com/cloudera/flume/conf/FlumeBuilder.java b/src/java/com/cloudera/flume/conf/FlumeBuilder.java index 24baaff..e5ced77 100644 --- a/src/java/com/cloudera/flume/conf/FlumeBuilder.java +++ b/src/java/com/cloudera/flume/conf/FlumeBuilder.java @@ -184,7 +184,7 @@ public class FlumeBuilder { /** * This parses a aggregate configuration (name: src|snk; ...) and returns a * map from logical node name to a source sink pair. Context is required now - * because a Flumenode's PhysicalNode information may need to be passed in + * because a FlumeNode's PhysicalNode information may need to be passed in */ @SuppressWarnings("unchecked") public static Map> parseConf(Context ctx, diff --git a/src/java/com/cloudera/flume/conf/PatternMatch.java b/src/java/com/cloudera/flume/conf/PatternMatch.java index 2877cf4..ab25525 100644 --- a/src/java/com/cloudera/flume/conf/PatternMatch.java +++ b/src/java/com/cloudera/flume/conf/PatternMatch.java @@ -283,7 +283,7 @@ public class PatternMatch { /** * This method creates an nth child pattern match relation. For this to match, - * 'this' matches the parent, and the nth child of the parent maches the child + * 'this' matches the parent, and the nth child of the parent matches the child * pattern. */ public PatternMatch nth(int n, PatternMatch child) { diff --git a/src/java/com/cloudera/flume/conf/SinkFactoryImpl.java b/src/java/com/cloudera/flume/conf/SinkFactoryImpl.java index b28d08d..61d26b0 100644 --- a/src/java/com/cloudera/flume/conf/SinkFactoryImpl.java +++ b/src/java/com/cloudera/flume/conf/SinkFactoryImpl.java @@ -113,7 +113,7 @@ public class SinkFactoryImpl extends SinkFactory { { "agentBEChain", AgentFailChainSink.beBuilder() }, // autoE2EChain, autoDFOChain and autoBEChains are essentially node - // specific "macros", and use let expresion shadowing + // specific "macros", and use let expression shadowing { "autoBEChain", EventSink.StubSink.builder("autoBEChain") }, { "autoDFOChain", EventSink.StubSink.builder("autoDFOChain") }, { "autoE2EChain", EventSink.StubSink.builder("autoE2EChain") }, diff --git a/src/java/com/cloudera/flume/core/Attributes.java b/src/java/com/cloudera/flume/core/Attributes.java index e2778c5..867549d 100644 --- a/src/java/com/cloudera/flume/core/Attributes.java +++ b/src/java/com/cloudera/flume/core/Attributes.java @@ -113,7 +113,7 @@ public class Attributes { } /** - * This toStrng method is for human readable output. The html reports use this + * This toString method is for human readable output. The html reports use this * as opposed to the other. */ public static String toString(Event e, String attr) { @@ -138,7 +138,7 @@ public class Attributes { + readDouble(e, attr).toString(); } - // this is a simlar hack that prints in int and string format when there + // this is a similar hack that prints in int and string format when there // are 4 bytes. 
if (bytes.length == 4) { return readInt(e, attr).toString() + " '" + readString(e, attr) + "'"; @@ -171,7 +171,7 @@ public class Attributes { /** * This toString method strictly uses the Attribute type information. When - * there is an untyped attribute, it defaults to outputing the data as a byte + * there is an untyped attribute, it defaults to outputting the data as a byte * array. */ public static String toStringStrict(Event e, String attr) { diff --git a/src/java/com/cloudera/flume/core/BackOffFailOverSink.java b/src/java/com/cloudera/flume/core/BackOffFailOverSink.java index bd814a5..e379c14 100644 --- a/src/java/com/cloudera/flume/core/BackOffFailOverSink.java +++ b/src/java/com/cloudera/flume/core/BackOffFailOverSink.java @@ -47,7 +47,7 @@ import com.google.common.base.Preconditions; * attempt to reopen the primary and append to the primary. If the primary fails * again, backoff adjusted and we fall back to the secondary again. * - * If we reach the secodary and it fails, the append calls will throw an + * If we reach the secondary and it fails, the append calls will throw an * exception. * * These can be chained if multiple failovers are desired. (failover to another diff --git a/src/java/com/cloudera/flume/core/Event.java b/src/java/com/cloudera/flume/core/Event.java index a952550..45e04cb 100644 --- a/src/java/com/cloudera/flume/core/Event.java +++ b/src/java/com/cloudera/flume/core/Event.java @@ -183,7 +183,7 @@ abstract public class Event { * * All shorthands are Date format strings, currently. * - * Returns the empty string if an escape is not recognised. + * Returns the empty string if an escape is not recognized. * * Dates follow the same format as unix date, with a few exceptions. * @@ -295,7 +295,7 @@ abstract public class Event { * Replace all substrings of form %{tagname} with get(tagname).toString() and * all shorthand substrings of form %x with a special value. * - * Any unrecognised / not found tags will be replaced with the empty string. + * Any unrecognized / not found tags will be replaced with the empty string. * * TODO(henry): we may want to consider taking this out of Event and into a * more general class when we get more use cases for this pattern. @@ -330,7 +330,7 @@ abstract public class Event { /** * Instead of replacing escape sequences in a string, this method returns a - * mapping of an attibute name to the value based on the escape sequence found + * mapping of an attribute name to the value based on the escape sequence found * in the argument string. */ public Map getEscapeMapping(String in) { diff --git a/src/java/com/cloudera/flume/core/EventImpl.java b/src/java/com/cloudera/flume/core/EventImpl.java index 6d6f26c..be68e41 100644 --- a/src/java/com/cloudera/flume/core/EventImpl.java +++ b/src/java/com/cloudera/flume/core/EventImpl.java @@ -49,7 +49,7 @@ public class EventImpl extends EventBaseImpl { .getEventMaxSizeBytes(); /** - * Reflection based tools (like avro) require a null constructor + * Reflection based tools (like Avro) require a null constructor */ public EventImpl() { this(new byte[0], 0, Priority.INFO, 0, ""); @@ -57,7 +57,7 @@ public class EventImpl extends EventBaseImpl { /** * Copy constructor for converting events into EventImpl (required for - * reflection/avro) + * reflection/Avro) */ public EventImpl(Event e) { this(e.getBody(), e.getTimestamp(), e.getPriority(), e.getNanos(), e @@ -181,7 +181,7 @@ public class EventImpl extends EventBaseImpl { /** * This takes an event and a list of attribute names. 
It returns a new event * that has the same core event values and all of the attribute/values - * *except* for those attributes sepcified by the list. + * *except* for those attributes specified by the list. */ public static Event unselect(Event e, String... attrs) { Event e2 = new EventImpl(e.getBody(), e.getTimestamp(), e.getPriority(), e diff --git a/src/java/com/cloudera/flume/core/EventSink.java b/src/java/com/cloudera/flume/core/EventSink.java index 6af2583..b46cee5 100644 --- a/src/java/com/cloudera/flume/core/EventSink.java +++ b/src/java/com/cloudera/flume/core/EventSink.java @@ -201,7 +201,7 @@ public interface EventSink extends Reportable { */ @Override public void append(Event e) throws IOException, InterruptedException { - throw new IOException("Attemping to append to a Stub Sink!"); + throw new IOException("Attempting to append to a Stub Sink!"); } /** diff --git a/src/java/com/cloudera/flume/core/EventSource.java b/src/java/com/cloudera/flume/core/EventSource.java index 06ab6a6..e747226 100644 --- a/src/java/com/cloudera/flume/core/EventSource.java +++ b/src/java/com/cloudera/flume/core/EventSource.java @@ -110,7 +110,7 @@ public interface EventSource extends Reportable { "Too few arguments: expected at least " + minArgs + " but only had " + argv.length); Preconditions.checkArgument(argv.length <= maxArgs, - "Too many arguments : exepected at most " + maxArgs + " but had " + "Too many arguments : expected at most " + maxArgs + " but had " + argv.length); return new StubSource(); } diff --git a/src/java/com/cloudera/flume/core/extractors/RegexExtractor.java b/src/java/com/cloudera/flume/core/extractors/RegexExtractor.java index 9c86e02..e1ee84e 100644 --- a/src/java/com/cloudera/flume/core/extractors/RegexExtractor.java +++ b/src/java/com/cloudera/flume/core/extractors/RegexExtractor.java @@ -49,7 +49,7 @@ import com.google.common.base.Preconditions; * * NOTE: the NFA-based regex algorithm used by java.util.regex.* (and in this * class) is slow and does not scale. It is fully featured but has an - * exponential worst case runnning time. + * exponential worst case running time. */ public class RegexExtractor extends EventSinkDecorator { final String attr; diff --git a/src/java/com/cloudera/flume/handlers/avro/AvroEventAdaptor.java b/src/java/com/cloudera/flume/handlers/avro/AvroEventAdaptor.java index 6dfbca7..0644556 100644 --- a/src/java/com/cloudera/flume/handlers/avro/AvroEventAdaptor.java +++ b/src/java/com/cloudera/flume/handlers/avro/AvroEventAdaptor.java @@ -55,7 +55,7 @@ class AvroEventAdaptor extends Event { } public static Priority convert(com.cloudera.flume.handlers.avro.Priority p) { - Preconditions.checkNotNull(p, "Prioirity argument must be valid."); + Preconditions.checkNotNull(p, "Priority argument must be valid."); switch (p) { case FATAL: return Priority.FATAL; @@ -133,7 +133,7 @@ class AvroEventAdaptor extends Event { } /** - * This returns the FlumEvent corresponding to the AvroEvent passed in the + * This returns the FlumeEvent corresponding to the AvroEvent passed in the * constructor of this object. */ public Event toFlumeEvent() { diff --git a/src/java/com/cloudera/flume/handlers/avro/AvroEventSource.java b/src/java/com/cloudera/flume/handlers/avro/AvroEventSource.java index 967e63b..a41454f 100644 --- a/src/java/com/cloudera/flume/handlers/avro/AvroEventSource.java +++ b/src/java/com/cloudera/flume/handlers/avro/AvroEventSource.java @@ -166,7 +166,7 @@ public class AvroEventSource extends EventSource.Base { // no progress made, timeout and close it. 
LOG .warn("Close timed out due to no progress. Closing despite having " - + q.size() + " values still enqued"); + + q.size() + " values still enqueued"); return; } // there was some progress, go another cycle. @@ -210,7 +210,7 @@ public class AvroEventSource extends EventSource.Base { return e; } } catch (InterruptedException e) { - throw new IOException("Waiting for queue element was interupted! " + throw new IOException("Waiting for queue element was interrupted! " + e.getMessage(), e); } } diff --git a/src/java/com/cloudera/flume/handlers/avro/AvroNativeFileOutputFormat.java b/src/java/com/cloudera/flume/handlers/avro/AvroNativeFileOutputFormat.java index 5a1fdc7..d12b015 100644 --- a/src/java/com/cloudera/flume/handlers/avro/AvroNativeFileOutputFormat.java +++ b/src/java/com/cloudera/flume/handlers/avro/AvroNativeFileOutputFormat.java @@ -35,7 +35,7 @@ import com.google.common.base.Preconditions; /** * This writes native Avro formatted files out as an output format. * - * Note: There is a separate avro container that does encoding currently from + * Note: There is a separate Avro container that does encoding currently from * the AvroEventSource/Sinks. A separate patch will consolidate the two. */ public class AvroNativeFileOutputFormat extends AbstractOutputFormat { diff --git a/src/java/com/cloudera/flume/handlers/batch/BatchingDecorator.java b/src/java/com/cloudera/flume/handlers/batch/BatchingDecorator.java index 56c55a9..c37acd8 100644 --- a/src/java/com/cloudera/flume/handlers/batch/BatchingDecorator.java +++ b/src/java/com/cloudera/flume/handlers/batch/BatchingDecorator.java @@ -186,7 +186,7 @@ public class BatchingDecorator extends timeoutThreadDone = true; } catch (InterruptedException e) { // TODO verify this is correct - LOG.error("Interrupted exceptoin when ending batch", e); + LOG.error("Interrupted exception when ending batch", e); timeoutThreadDone = true; } } diff --git a/src/java/com/cloudera/flume/handlers/console/JLineStdinSource.java b/src/java/com/cloudera/flume/handlers/console/JLineStdinSource.java index ee6f055..64b5bc7 100644 --- a/src/java/com/cloudera/flume/handlers/console/JLineStdinSource.java +++ b/src/java/com/cloudera/flume/handlers/console/JLineStdinSource.java @@ -48,7 +48,7 @@ import com.google.common.base.Preconditions; * The normal StdinSource that uses System.in.readLine() only has a blocking * mode. It does not return if in the readLine call with no incoming data. * - * Here we use jline's readline which acts at a character by character. To close + * Here we use jline's readline which acts character by character. To close * the jline readline, we interpose a extra check on read() that will return * CTRL_D (EOF) and allow a pending readline to exit. * diff --git a/src/java/com/cloudera/flume/handlers/debug/BenchmarkReportDecorator.java b/src/java/com/cloudera/flume/handlers/debug/BenchmarkReportDecorator.java index 7de22ac..786a567 100644 --- a/src/java/com/cloudera/flume/handlers/debug/BenchmarkReportDecorator.java +++ b/src/java/com/cloudera/flume/handlers/debug/BenchmarkReportDecorator.java @@ -79,7 +79,7 @@ public class BenchmarkReportDecorator extends /** * Checks for Benchmark Tags. If there are not tags events are passed through. - * If ther are, there are three kinds - 'start' which instantiates a + * If there are, there are three kinds - 'start' which instantiates a * benchmark; 'first' which starts a benchmark; and 'stop' which ends a * benchmark. These are consumed by this decorator. 
*/ diff --git a/src/java/com/cloudera/flume/handlers/debug/BloomGeneratorDeco.java b/src/java/com/cloudera/flume/handlers/debug/BloomGeneratorDeco.java index b79d236..3c0033f 100644 --- a/src/java/com/cloudera/flume/handlers/debug/BloomGeneratorDeco.java +++ b/src/java/com/cloudera/flume/handlers/debug/BloomGeneratorDeco.java @@ -36,7 +36,7 @@ import com.google.common.base.Preconditions; /** * This decorator takes hashes of messages and then inserts them into a bloom * filter. On deco close, the bit map representation of the bloom filter is - * transmittted into the stream. + * transmitted into the stream. * * A corresponding BloomChecker can track received message and can approximately * verify that all messages injected were included. If the generator sends diff --git a/src/java/com/cloudera/flume/handlers/debug/ChokeDecorator.java b/src/java/com/cloudera/flume/handlers/debug/ChokeDecorator.java index 92c53d6..df929a8 100644 --- a/src/java/com/cloudera/flume/handlers/debug/ChokeDecorator.java +++ b/src/java/com/cloudera/flume/handlers/debug/ChokeDecorator.java @@ -28,8 +28,8 @@ import com.cloudera.flume.core.EventSinkDecorator; import com.google.common.base.Preconditions; /** - * This decorator adds a the capabilty to Throttle the data going out of the - * sink. Each Chokedecorator is associated with a chokeId, and all the + * This decorator adds the capability to throttle the data going out of the + * sink. Each ChokeDecorator is associated with a chokeId, and all the * choke-decorators with the same chokeId are throttled together with some max * data transfer limit. The mapping from the chokeId to limit is set by the * Master and passed to FlumeNodes using an RPC call called getChokeMap(). @@ -48,7 +48,7 @@ public class ChokeDecorator extends EventSinkDecorator { /** * This append can block for a little while if the number of bytes shipped - * accross this Choke has reached its limit. But it does not block forever. + * across this Choke has reached its limit. But it does not block forever. */ @Override public void append(Event e) throws IOException, InterruptedException { diff --git a/src/java/com/cloudera/flume/handlers/debug/ChokeManager.java b/src/java/com/cloudera/flume/handlers/debug/ChokeManager.java index 61e7034..8e4e213 100644 --- a/src/java/com/cloudera/flume/handlers/debug/ChokeManager.java +++ b/src/java/com/cloudera/flume/handlers/debug/ChokeManager.java @@ -32,7 +32,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; public class ChokeManager extends Thread { // Time quanta in millisecs. It is a constant right now, we can change this - // later. The main thread of the Chokemanager fills up the buckets + // later. The main thread of the ChokeManager fills up the buckets // corresponding to different choke-ids and the physical node after this time // quanta. @@ -185,7 +185,7 @@ public class ChokeManager extends Thread { // throttling policy. rwlChokeInfoMap.readLock().lock(); try { - // simple policy for now: if the chokeid is not there then simply return, + // simple policy for now: if the chokeId is not there then simply return, // essentially no throttling with an invalid chokeID. 
if (this.isChokeId(id) != false) { int loopCount = 0; diff --git a/src/java/com/cloudera/flume/handlers/debug/FlakeyEventSink.java b/src/java/com/cloudera/flume/handlers/debug/FlakeyEventSink.java index 7f3326a..52f124e 100644 --- a/src/java/com/cloudera/flume/handlers/debug/FlakeyEventSink.java +++ b/src/java/com/cloudera/flume/handlers/debug/FlakeyEventSink.java @@ -47,7 +47,7 @@ public class FlakeyEventSink extends EventSinkDecorator @Override public void append(Event e) throws IOException , InterruptedException { if (rand.nextDouble() < prob) { - throw new IOException("flakeyness struct and caused a failure"); + throw new IOException("flakiness struck and caused a failure"); } super.append(e); } diff --git a/src/java/com/cloudera/flume/handlers/debug/IntervalFlakeyEventSink.java b/src/java/com/cloudera/flume/handlers/debug/IntervalFlakeyEventSink.java index a2c5915..dab40d9 100644 --- a/src/java/com/cloudera/flume/handlers/debug/IntervalFlakeyEventSink.java +++ b/src/java/com/cloudera/flume/handlers/debug/IntervalFlakeyEventSink.java @@ -48,7 +48,7 @@ public class IntervalFlakeyEventSink extends count++; if (count % interval == 0) { count = 0; - throw new IOException("flakeyness struck and caused a failure"); + throw new IOException("flakiness struck and caused a failure"); } super.append(e); } diff --git a/src/java/com/cloudera/flume/handlers/debug/NoNlASCIISynthSource.java b/src/java/com/cloudera/flume/handlers/debug/NoNlASCIISynthSource.java index 35cb047..d5bad5a 100644 --- a/src/java/com/cloudera/flume/handlers/debug/NoNlASCIISynthSource.java +++ b/src/java/com/cloudera/flume/handlers/debug/NoNlASCIISynthSource.java @@ -40,7 +40,7 @@ public class NoNlASCIISynthSource extends SynthSource { } /** - * Converts all bytes into the ascii pritable range (32 >= 126), + * Converts all bytes into the ascii printable range (32 >= 126), */ static byte toAscii(byte b) { b &= 0x7f; diff --git a/src/java/com/cloudera/flume/handlers/debug/NoNlSynthSource.java b/src/java/com/cloudera/flume/handlers/debug/NoNlSynthSource.java index 089366a..1562f94 100644 --- a/src/java/com/cloudera/flume/handlers/debug/NoNlSynthSource.java +++ b/src/java/com/cloudera/flume/handlers/debug/NoNlSynthSource.java @@ -27,7 +27,7 @@ import com.cloudera.flume.core.EventSource; /** * Syslog parser depends on using '\n' as a record delimiter. When we just use * random data, there is a chance that '\n's are in the event and can foul - * benchmarks (due to forat exceptions) that assume clean data. + * benchmarks (due to format exceptions) that assume clean data. * * This wrapper just replaces '\n' with ' ' */ diff --git a/src/java/com/cloudera/flume/handlers/exec/ExecNioSource.java b/src/java/com/cloudera/flume/handlers/exec/ExecNioSource.java index 19962b9..3be1754 100644 --- a/src/java/com/cloudera/flume/handlers/exec/ExecNioSource.java +++ b/src/java/com/cloudera/flume/handlers/exec/ExecNioSource.java @@ -501,7 +501,7 @@ public class ExecNioSource extends EventSource.Base { proc = Runtime.getRuntime().exec(command); // Just reading from stdout and stderr can block, so we wrap them with - // InputSTreamPipe allows them to be nonblocking. + // InputStreamPipe allows them to be nonblocking. 
stdinISP = new InputStreamPipe(proc.getInputStream()); stderrISP = new InputStreamPipe(proc.getErrorStream()); stdout = (ReadableByteChannel) stdinISP.getChannel(); @@ -566,7 +566,7 @@ public class ExecNioSource extends EventSource.Base { /** * This builder creates a source that execs a long running program and takes - * each line of input as the body of an event. It takes one arguemnt, the + * each line of input as the body of an event. It takes one argument, the * command to run. If the command exits, the exec source returns null signally * end of records. */ diff --git a/src/java/com/cloudera/flume/handlers/hdfs/CustomDfsSink.java b/src/java/com/cloudera/flume/handlers/hdfs/CustomDfsSink.java index 26db6e5..fb43d57 100644 --- a/src/java/com/cloudera/flume/handlers/hdfs/CustomDfsSink.java +++ b/src/java/com/cloudera/flume/handlers/hdfs/CustomDfsSink.java @@ -149,7 +149,7 @@ public class CustomDfsSink extends EventSink.Base { writer = codec.createOutputStream(writer, cmp); } catch (NullPointerException npe) { // tries to find "native" version of codec, if that fails, then tries to - // find java version. If there is no java version, the createOutpuStream + // find java version. If there is no java version, the createOutputStream // exits via NPE. We capture this and convert it into a IOE with a more // useful error message. LOG.error("Unable to load compression codec " + codec); diff --git a/src/java/com/cloudera/flume/handlers/hdfs/WriteableEvent.java b/src/java/com/cloudera/flume/handlers/hdfs/WriteableEvent.java index 08ed666..25a18e2 100644 --- a/src/java/com/cloudera/flume/handlers/hdfs/WriteableEvent.java +++ b/src/java/com/cloudera/flume/handlers/hdfs/WriteableEvent.java @@ -41,7 +41,7 @@ import com.cloudera.flume.core.EventImpl; import com.google.common.base.Preconditions; /** - * A wrapper to make my events hadoop/hdfs writables. + * A wrapper to make my events hadoop/hdfs writeables. * */ public class WriteableEvent extends EventBaseImpl implements Writable { @@ -104,7 +104,7 @@ public class WriteableEvent extends EventBaseImpl implements Writable { } public void readFields(DataInput in) throws IOException { - // NOTE: NOT using read UTF8 becuase it is limited to 2^16 bytes (not + // NOTE: NOT using read UTF8 because it is limited to 2^16 bytes (not // characters). Char encoding will likely cause problems in edge cases. 
// String s = in.readUTF(); diff --git a/src/java/com/cloudera/flume/handlers/hive/HiveDirCreatedNotification.java b/src/java/com/cloudera/flume/handlers/hive/HiveDirCreatedNotification.java index 3c943ce..939b89f 100644 --- a/src/java/com/cloudera/flume/handlers/hive/HiveDirCreatedNotification.java +++ b/src/java/com/cloudera/flume/handlers/hive/HiveDirCreatedNotification.java @@ -33,7 +33,7 @@ public class HiveDirCreatedNotification { // hive table name and partitioning key/value metadata final String dir; // new dir added - final String table; // table that the dir should be added as a partiiton to + final String table; // table that the dir should be added as a partition to final Map meta; // partition key-value metadata /** diff --git a/src/java/com/cloudera/flume/handlers/rolling/RollSink.java b/src/java/com/cloudera/flume/handlers/rolling/RollSink.java index f5e6e4f..8a2dd27 100644 --- a/src/java/com/cloudera/flume/handlers/rolling/RollSink.java +++ b/src/java/com/cloudera/flume/handlers/rolling/RollSink.java @@ -115,7 +115,7 @@ public class RollSink extends EventSink.Base { startedLatch.countDown(); try { while (!isInterrupted()) { - // TODO there should probably be a lcok on Roll sink but until we + // TODO there should probably be a lock on Roll sink but until we // handle // interruptions throughout the code, we cannot because this causes a // deadlock diff --git a/src/java/com/cloudera/flume/handlers/text/TailSource.java b/src/java/com/cloudera/flume/handlers/text/TailSource.java index 267a288..e32e1c6 100644 --- a/src/java/com/cloudera/flume/handlers/text/TailSource.java +++ b/src/java/com/cloudera/flume/handlers/text/TailSource.java @@ -81,7 +81,7 @@ import com.google.common.base.Preconditions; * TestTailSource.readRotatePrexistingSameSizeWithNewModetime) * * Ideally this would use the inode number of file handle number but didn't find - * java api to get these, or Java 7's WatchSevice file watcher API. + * java api to get these, or Java 7's WatchService file watcher API. */ public class TailSource extends EventSource.Base { private static final Logger LOG = LoggerFactory.getLogger(TailSource.class); diff --git a/src/java/com/cloudera/flume/handlers/thrift/PrioritizedThriftEventSource.java b/src/java/com/cloudera/flume/handlers/thrift/PrioritizedThriftEventSource.java index 7dbe1e0..c90c712 100644 --- a/src/java/com/cloudera/flume/handlers/thrift/PrioritizedThriftEventSource.java +++ b/src/java/com/cloudera/flume/handlers/thrift/PrioritizedThriftEventSource.java @@ -42,7 +42,7 @@ import com.google.common.base.Preconditions; /** * This sets up the port that listens for incoming flume event rpc calls. In * this version events are prioritized based on event priority and then by age - * (older has higher priority). This doesn't have mechanims for dropping events + * (older has higher priority). This doesn't have a mechanism for dropping events * at the moment. * * There is a problem with the nonblocking server -- for some reason (it gets @@ -98,7 +98,7 @@ public class PrioritizedThriftEventSource extends EventSource.Base { final BlockingQueue q; /** - * Creates a new priotized event source on port port with event queue size + * Creates a new prioritized event source on port port with event queue size * qsize. 
*/ public PrioritizedThriftEventSource(int port, int qsize) { @@ -159,7 +159,7 @@ public class PrioritizedThriftEventSource extends EventSource.Base { return e; } catch (InterruptedException e) { e.printStackTrace(); - throw new IOException("Waiting for queue element was interupted! " + e); + throw new IOException("Waiting for queue element was interrupted! " + e); } } diff --git a/src/java/com/cloudera/flume/handlers/thrift/ThriftEventAdaptor.java b/src/java/com/cloudera/flume/handlers/thrift/ThriftEventAdaptor.java index f1500cf..346a2a9 100644 --- a/src/java/com/cloudera/flume/handlers/thrift/ThriftEventAdaptor.java +++ b/src/java/com/cloudera/flume/handlers/thrift/ThriftEventAdaptor.java @@ -70,7 +70,7 @@ class ThriftEventAdaptor extends Event { } public static Priority convert(com.cloudera.flume.handlers.thrift.Priority p) { - Preconditions.checkNotNull(p, "Prioirity argument must be valid."); + Preconditions.checkNotNull(p, "Priority argument must be valid."); switch (p) { case FATAL: diff --git a/src/java/com/cloudera/flume/handlers/thrift/ThriftEventSource.java b/src/java/com/cloudera/flume/handlers/thrift/ThriftEventSource.java index 5d666a7..233ed74 100644 --- a/src/java/com/cloudera/flume/handlers/thrift/ThriftEventSource.java +++ b/src/java/com/cloudera/flume/handlers/thrift/ThriftEventSource.java @@ -176,7 +176,7 @@ public class ThriftEventSource extends EventSource.Base { // no progress made, timeout and close it. LOG .warn("Close timed out due to no progress. Closing despite having " - + q.size() + " values still enqued"); + + q.size() + " values still enqueued"); return; } // there was some progress, go another cycle. @@ -219,7 +219,7 @@ public class ThriftEventSource extends EventSource.Base { return e; } } catch (InterruptedException e) { - throw new IOException("Waiting for queue element was interupted! " + throw new IOException("Waiting for queue element was interrupted! " + e.getMessage(), e); } } diff --git a/src/java/com/cloudera/flume/master/CommandStatus.java b/src/java/com/cloudera/flume/master/CommandStatus.java index ccf61fb..f71c21b 100644 --- a/src/java/com/cloudera/flume/master/CommandStatus.java +++ b/src/java/com/cloudera/flume/master/CommandStatus.java @@ -41,7 +41,7 @@ public class CommandStatus { QUEUED, EXECING, SUCCEEDED, FAILED }; - long cmdId; // uniq id for command. Used to check status of a command. + long cmdId; // unique id for command. Used to check status of a command. Command cmd; State curState; diff --git a/src/java/com/cloudera/flume/master/ConfigManager.java b/src/java/com/cloudera/flume/master/ConfigManager.java index a7cf273..b17d8d6 100644 --- a/src/java/com/cloudera/flume/master/ConfigManager.java +++ b/src/java/com/cloudera/flume/master/ConfigManager.java @@ -240,7 +240,7 @@ public class ConfigManager implements ConfigurationManager { tmp.deleteOnExit(); PrintWriter out = new PrintWriter(new FileWriter(tmp)); - // writh all specs to tmp. + // write all specs to tmp. Map cfgs = cfgStore.getConfigs(); for (Entry e : cfgs.entrySet()) { String name = e.getKey(); @@ -394,7 +394,7 @@ public class ConfigManager implements ConfigurationManager { } /** - * Unmaps all logical nodes from phsyical nodes except for the default one. + * Unmaps all logical nodes from physical nodes except for the default one. 
* (logical==physical) */ @Override diff --git a/src/java/com/cloudera/flume/master/ConfigurationManager.java b/src/java/com/cloudera/flume/master/ConfigurationManager.java index 5d3bd7b..11f1153 100644 --- a/src/java/com/cloudera/flume/master/ConfigurationManager.java +++ b/src/java/com/cloudera/flume/master/ConfigurationManager.java @@ -50,7 +50,7 @@ public interface ConfigurationManager extends Reportable { throws IOException, FlumeSpecException; /** - * Load configurations from file 'from'. This does not clear prexisting + * Load configurations from file 'from'. This does not clear pre-existing * configurations but may overwrite configurations for existing nodes. */ public void loadConfigFile(String from) throws IOException; diff --git a/src/java/com/cloudera/flume/master/FlumeMaster.java b/src/java/com/cloudera/flume/master/FlumeMaster.java index 1006738..2063d23 100644 --- a/src/java/com/cloudera/flume/master/FlumeMaster.java +++ b/src/java/com/cloudera/flume/master/FlumeMaster.java @@ -513,7 +513,7 @@ public class FlumeMaster implements Reportable { } if (cmd != null && cmd.hasOption("i")) { - // if manually overriden by command line, accept it, live with + // if manually overridden by command line, accept it, live with // consequences. String sid = cmd.getOptionValue("i"); LOG.info("Setting serverid from command line to be " + sid); diff --git a/src/java/com/cloudera/flume/master/MasterClientServerAvro.java b/src/java/com/cloudera/flume/master/MasterClientServerAvro.java index ff08c11..badcfe5 100644 --- a/src/java/com/cloudera/flume/master/MasterClientServerAvro.java +++ b/src/java/com/cloudera/flume/master/MasterClientServerAvro.java @@ -54,7 +54,7 @@ public class MasterClientServerAvro implements AvroFlumeClientServer, RPCServer public MasterClientServerAvro(MasterClientServer delegate) { Preconditions.checkArgument(delegate != null, - "MasterCleintServer is null in 'AvroMasterClientServer!"); + "MasterClientServer is null in 'MasterClientServerAvro!"); this.delegate = delegate; this.port = FlumeConfiguration.get().getMasterHeartbeatPort(); } diff --git a/src/java/com/cloudera/flume/master/MasterClientServerThrift.java b/src/java/com/cloudera/flume/master/MasterClientServerThrift.java index bb840b0..24652c6 100644 --- a/src/java/com/cloudera/flume/master/MasterClientServerThrift.java +++ b/src/java/com/cloudera/flume/master/MasterClientServerThrift.java @@ -53,7 +53,7 @@ public class MasterClientServerThrift extends ThriftServer implements public MasterClientServerThrift(MasterClientServer delegate) { Preconditions.checkArgument(delegate != null, - "MasterCleintServer is null in 'ThriftMasterClientServer!"); + "MasterClientServer is null in 'MasterClientServerThrift!"); this.delegate = delegate; this.port = FlumeConfiguration.get().getMasterHeartbeatPort(); } diff --git a/src/java/com/cloudera/flume/master/TranslatingConfigurationManager.java b/src/java/com/cloudera/flume/master/TranslatingConfigurationManager.java index aaff352..8d56a9f 100644 --- a/src/java/com/cloudera/flume/master/TranslatingConfigurationManager.java +++ b/src/java/com/cloudera/flume/master/TranslatingConfigurationManager.java @@ -49,7 +49,7 @@ import com.google.common.collect.Multimap; * If in-place changes are ok, use the same configuration manager as the parent * and self. If not, use a different configuration manager for parent and self. 
* - * Read method calls on a TranslatingConfiguraitonManager always read from the + * Read method calls on a TranslatingConfigurationManager always read from the * self manager. Write method calls write to the parent manager and write the * translated versions to the self manager. */ @@ -214,7 +214,7 @@ abstract public class TranslatingConfigurationManager implements } /** - * Returns the translations of all configuraitons + * Returns the translations of all configurations */ synchronized public Map getTranslatedConfigs() { return selfMan.getAllConfigs(); diff --git a/src/java/com/cloudera/flume/master/ZooKeeperConfigStore.java b/src/java/com/cloudera/flume/master/ZooKeeperConfigStore.java index 7224c7d..abd6ee4 100644 --- a/src/java/com/cloudera/flume/master/ZooKeeperConfigStore.java +++ b/src/java/com/cloudera/flume/master/ZooKeeperConfigStore.java @@ -100,7 +100,7 @@ public class ZooKeeperConfigStore extends ConfigStore implements Watcher { } /** - * Reads the standard configuration and initialises client and optionally + * Reads the standard configuration and initializes client and optionally * server accordingly. */ @Override diff --git a/src/java/com/cloudera/flume/master/ZooKeeperService.java b/src/java/com/cloudera/flume/master/ZooKeeperService.java index 6ba4bd2..9e88616 100644 --- a/src/java/com/cloudera/flume/master/ZooKeeperService.java +++ b/src/java/com/cloudera/flume/master/ZooKeeperService.java @@ -87,7 +87,7 @@ public class ZooKeeperService { } /** - * Initialises in standalone mode, creating an in-process ZK. + * Initializes in standalone mode, creating an in-process ZK. */ protected void startZKStandalone(int port, String dir) throws IOException, InterruptedException { @@ -97,7 +97,7 @@ public class ZooKeeperService { } /** - * Initialises in distributed mode, starting an in-process server + * Initializes in distributed mode, starting an in-process server */ protected void startZKDistributed(FlumeConfiguration cfg) throws IOException, InterruptedException, ConfigException { @@ -108,17 +108,17 @@ public class ZooKeeperService { /** * Returns the singleton ZooKeeperService, which will not be null. However, it - * may not be initialised and therefore attempts to connect to it with a + * may not be initialized and therefore attempts to connect to it with a * ZKClient may fail. Use getAndInit if you are not sure whether the service - * is initialised. + * is initialized. */ static public ZooKeeperService get() { return zkServiceSingleton; } /** - * Returns the singleton ZooKeeperService, initialising it if it has not been - * initialised already. + * Returns the singleton ZooKeeperService, initializing it if it has not been + * initialized already. */ synchronized public static ZooKeeperService getAndInit() throws IOException, InterruptedException { @@ -127,8 +127,8 @@ public class ZooKeeperService { } /** - * Returns the singleton ZooKeeperService, initialising it if it has not been - * initialised already, using the supplied configuration + * Returns the singleton ZooKeeperService, initializing it if it has not been + * initialized already, using the supplied configuration */ synchronized public static ZooKeeperService getAndInit(FlumeConfiguration cfg) throws IOException, InterruptedException { @@ -137,7 +137,7 @@ public class ZooKeeperService { } /** - * Returns a new ZKClient initialised for this service, but not connected. + * Returns a new ZKClient initialized for this service, but not connected. 
*/ synchronized public ZKClient createClient() throws IOException { if (!initialised) { diff --git a/src/java/com/cloudera/flume/master/failover/FailoverConfigurationManager.java b/src/java/com/cloudera/flume/master/failover/FailoverConfigurationManager.java index 06277b6..6513e41 100644 --- a/src/java/com/cloudera/flume/master/failover/FailoverConfigurationManager.java +++ b/src/java/com/cloudera/flume/master/failover/FailoverConfigurationManager.java @@ -222,7 +222,7 @@ public class FailoverConfigurationManager extends } // diskfailover's subsink needs to never give up. So we wrap it with an - // inistentAppend. But append can fail if its subsink is not open. So + // insistentAppend. But append can fail if its subsink is not open. So // we add a stubborn append (it closes and reopens a subsink) and retries // opening the chain using the insistentOpen String dfo = "< " + FlumeSpecGen.genEventSink(dfoPrimaryChain) @@ -299,7 +299,7 @@ public class FailoverConfigurationManager extends * expanded wal+end2end ack chain. * * This version at one point was different from substE2EChainSimple's - * implementation but they have not convernged. This one should likely be + * implementation but they have not converged. This one should likely be * removed in the future. */ @Deprecated @@ -364,7 +364,7 @@ public class FailoverConfigurationManager extends * * This pipeline writes data to the WAL adding ack tags. In the WAL's subsink * in a subservient DriverThread will attempt to send data to logical sink - * arg1, and then to logicla sink arg2, etc.. If all fail, stubbornAppend + * arg1, and then to logical sink arg2, etc. If all fail, stubbornAppend * causes the entire failover chain to be closed and then reopened. If all the * elements of the failover chain still fail, the insistentOpen ensures that * they are tried again after an backing off. diff --git a/src/java/com/cloudera/flume/master/flows/FlowConfigManager.java b/src/java/com/cloudera/flume/master/flows/FlowConfigManager.java index 9af0d65..c8c91c9 100644 --- a/src/java/com/cloudera/flume/master/flows/FlowConfigManager.java +++ b/src/java/com/cloudera/flume/master/flows/FlowConfigManager.java @@ -97,7 +97,7 @@ abstract public class FlowConfigManager implements ConfigurationManager { /** * This creates a specific instance of a Configuration manager that will only - * recieve FCD's for a particular flow. This shall never return null. THis + * receive FCD's for a particular flow. This shall never return null. This * does not need to be guarded by a lock. */ public abstract ConfigurationManager createConfigMan(); @@ -354,7 +354,7 @@ abstract public class FlowConfigManager implements ConfigurationManager { */ public static class FailoverFlowConfigManager extends FlowConfigManager { /** - * This is needed when constructing child ConfiguraitonManagers + * This is needed when constructing child ConfigurationManagers */ final StatusManager statman; diff --git a/src/java/com/cloudera/flume/reporter/ReportEvent.java b/src/java/com/cloudera/flume/reporter/ReportEvent.java index 88c91fb..b0fe08e 100644 --- a/src/java/com/cloudera/flume/reporter/ReportEvent.java +++ b/src/java/com/cloudera/flume/reporter/ReportEvent.java @@ -43,7 +43,7 @@ import com.google.common.base.Preconditions; * human readable or machine parseable formats. * * For now, we use a convention where a attribute starting with "rpt." is a a - * reported field. Later we may include a avro schema or something to include + * reported field. 
Later we may include an Avro schema or something to include * type information as well. */ public class ReportEvent extends EventImpl { @@ -204,7 +204,7 @@ public class ReportEvent extends EventImpl { } /** - * Serialises event as JSON string + * Serializes event as JSON string */ public void toJson(Writer o) throws IOException { PrintWriter pw = new PrintWriter(o); @@ -344,7 +344,7 @@ public class ReportEvent extends EventImpl { } /** - * Serialises event as text to supplied writer. + * Serializes event as text to supplied writer. */ public void toText(Writer o) throws IOException { o.write(StringEscapeUtils.escapeJava(this.toString())); @@ -435,7 +435,7 @@ public class ReportEvent extends EventImpl { } /** - * Return escaped String serialisation of this event + * Return escaped String serialization of this event */ public String toText() { return StringEscapeUtils.escapeJava(this.toString()); diff --git a/src/java/com/cloudera/flume/reporter/histogram/RegexGroupHistogramSink.java b/src/java/com/cloudera/flume/reporter/histogram/RegexGroupHistogramSink.java index d3e9da5..18596f7 100644 --- a/src/java/com/cloudera/flume/reporter/histogram/RegexGroupHistogramSink.java +++ b/src/java/com/cloudera/flume/reporter/histogram/RegexGroupHistogramSink.java @@ -42,7 +42,7 @@ import com.google.common.base.Preconditions; * * NOTE: the NFA-based regex algorithm used by java.util.regex.* (and in this * class) is slow and does not scale. It is fully featured but has an - * exponential worst case runnning time. This will be replaced with a faster but + * exponential worst case running time. This will be replaced with a faster but * more memory hungry and less featured DFA-based regex algorithm. (We will lose * capture groups). */ diff --git a/src/java/com/cloudera/flume/reporter/sampler/ProbabilitySampler.java b/src/java/com/cloudera/flume/reporter/sampler/ProbabilitySampler.java index e2cc255..4eb77cc 100644 --- a/src/java/com/cloudera/flume/reporter/sampler/ProbabilitySampler.java +++ b/src/java/com/cloudera/flume/reporter/sampler/ProbabilitySampler.java @@ -62,7 +62,7 @@ public class ProbabilitySampler extends public EventSinkDecorator build(Context context, String... argv) { Preconditions.checkArgument(argv.length == 1 || argv.length == 2, - "usage: proabilitySampler(prob[, seed])"); + "usage: probabilitySampler(prob[, seed])"); double prob = Double.parseDouble(argv[0]); long seed = Clock.unixTime(); diff --git a/src/java/com/cloudera/flume/shell/CommandBuilder.java b/src/java/com/cloudera/flume/shell/CommandBuilder.java index 95188fd..22e0e00 100644 --- a/src/java/com/cloudera/flume/shell/CommandBuilder.java +++ b/src/java/com/cloudera/flume/shell/CommandBuilder.java @@ -37,7 +37,7 @@ import com.cloudera.flume.shell.antlr.FlumeShellParser; * * Unquoted tokens can contain alphanumeric, '.',':','_', or '-'. Tokens * enclosed in '"' will be java string unescaped. Tokens enclosed in ''' (single - * quotes) are not unescaped at all and concontain any char except for '''. + * quotes) are not unescaped at all and can contain any char except for '''. * Exceptions are thrown if quotes are not properly matched or invalid chars * present in unquoted tokens. . */ -- 1.7.3.2