diff --git a/examples/java8/pom.xml b/examples/java8/pom.xml
index 8727f8d427ec..69e71a67bcfa 100644
--- a/examples/java8/pom.xml
+++ b/examples/java8/pom.xml
@@ -39,16 +39,6 @@
-      org.apache.maven.plugins
-      maven-javadoc-plugin
-        true
       maven-compiler-plugin
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
index 7814eb196c21..9c4316e25534 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/GameStats.java
@@ -102,7 +102,7 @@ public class GameStats extends LeaderBoard {
   /**
    * Filter out all but those users with a high clickrate, which we will consider as 'spammy' users.
    * We do this by finding the mean total score per user, then using that information as a side
-   * input to filter out all but those user scores that are > (mean * SCORE_WEIGHT)
+   * input to filter out all but those user scores that are greater than (mean * SCORE_WEIGHT).
    */
   // [START DocInclude_AbuseDetect]
   public static class CalculateSpammyUsers
@@ -193,6 +193,8 @@ static interface Options extends LeaderBoard.Options {
   /**
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is used to write information about team score sums.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map>> configureWindowedWrite() {
@@ -219,6 +221,8 @@ static interface Options extends LeaderBoard.Options {
   /**
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is used to write information about mean user session time.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map> configureSessionWindowWrite() {
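The CalculateSpammyUsers javadoc above describes an algorithm worth seeing in code: compute the mean score once, broadcast it as a side input, and keep only the above-threshold scores. The sketch below is illustrative, not part of the patch; it assumes the Beam SDK's Mean, side-input, and DoFn APIs as they existed at this revision, and the method name filterSpammyUsers and the scoreWeight parameter are invented for the example.

    import org.apache.beam.sdk.transforms.DoFn;
    import org.apache.beam.sdk.transforms.Mean;
    import org.apache.beam.sdk.transforms.ParDo;
    import org.apache.beam.sdk.transforms.Values;
    import org.apache.beam.sdk.values.KV;
    import org.apache.beam.sdk.values.PCollection;
    import org.apache.beam.sdk.values.PCollectionView;

    /** Keeps only the per-user scores above scoreWeight times the global mean. */
    static PCollection<KV<String, Integer>> filterSpammyUsers(
        PCollection<KV<String, Integer>> userScores, final double scoreWeight) {
      // Compute the mean total score once and make it available to all workers.
      final PCollectionView<Double> meanScore = userScores
          .apply(Values.<Integer>create())
          .apply(Mean.<Integer>globally().asSingletonView());
      return userScores.apply(ParDo
          .withSideInputs(meanScore)
          .of(new DoFn<KV<String, Integer>, KV<String, Integer>>() {
            @Override
            public void processElement(ProcessContext c) {
              // Emit only scores above the threshold: these are the 'spammy' users.
              if (c.element().getValue() > c.sideInput(meanScore) * scoreWeight) {
                c.output(c.element());
              }
            }
          }));
    }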
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
index 845c56fa0336..8663f904651e 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/HourlyTeamScore.java
@@ -119,6 +119,8 @@ static interface Options extends UserScore.Options {
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is passed to the {@link WriteWindowedToBigQuery} constructor to write team score sums and
    * includes information about window start time.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map>> configureWindowedTableWrite() {
@@ -142,6 +144,9 @@ static interface Options extends UserScore.Options {
   /**
    * Run a batch pipeline to do windowed analysis of the data.
+   *
+   * @param args The main arguments.
+   * @throws Exception in case of execution failure.
    */
   // [START DocInclude_HTSMain]
   public static void main(String[] args) throws Exception {
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
index d79403a88794..b635ea339eeb 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/LeaderBoard.java
@@ -72,7 +72,7 @@
 * results, e.g. for 'team prizes'. We're now outputting window results as they're
 * calculated, giving us much lower latency than with the previous batch examples.
 *
- * Run {@link injector.Injector} to generate pubsub data for this pipeline. The Injector
+ * Run {@code injector.Injector} to generate pubsub data for this pipeline. The Injector
 * documentation provides more detail on how to do this.
 *
 * To execute this pipeline using the Dataflow service, specify the pipeline configuration
@@ -128,6 +128,8 @@ static interface Options extends HourlyTeamScore.Options, DataflowExampleOptions
   /**
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is used to write team score sums and includes event timing information.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map>> configureWindowedTableWrite() {
@@ -158,6 +160,8 @@ static interface Options extends HourlyTeamScore.Options, DataflowExampleOptions
   /**
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is used to write user score sums.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map>> configureGlobalWindowBigQueryWrite() {
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
index 866adefdb886..133ab2b9b030 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/UserScore.java
@@ -70,7 +70,7 @@
 * where the BigQuery dataset you specify must already exist.
 *
 * Optionally include the --input argument to specify a batch input file.
- * See the --input default value for example batch data file, or use {@link injector.Injector} to
+ * See the --input default value for example batch data file, or use {@code injector.Injector} to
 * generate your own batch data.
 */
public class UserScore {
@@ -201,6 +201,8 @@ public static interface Options extends PipelineOptions {
   /**
    * Create a map of information that describes how to write pipeline output to BigQuery. This map
    * is passed to the {@link WriteToBigQuery} constructor to write user score sums.
+   *
+   * @return The map of fields to write to BigQuery.
    */
   protected static Map>> configureBigQueryWrite() {
@@ -216,6 +218,9 @@ public static interface Options extends PipelineOptions {
   /**
    * Run a batch pipeline.
+   *
+   * @param args The main arguments.
+   * @throws Exception in case of execution failure.
    */
   // [START DocInclude_USMain]
   public static void main(String[] args) throws Exception {
diff --git a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/utils/WriteToBigQuery.java b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/utils/WriteToBigQuery.java
index 5897e447e8b5..e1cd30896956 100644
--- a/examples/java8/src/main/java/org/apache/beam/examples/complete/game/utils/WriteToBigQuery.java
+++ b/examples/java8/src/main/java/org/apache/beam/examples/complete/game/utils/WriteToBigQuery.java
@@ -100,7 +100,10 @@ public void processElement(ProcessContext c) {
     }
   }
-  /** Build the output table schema. */
+  /** Build the output table schema.
+   *
+   * @return The {@link TableSchema}.
+   */
   protected TableSchema getSchema() {
     List fields = new ArrayList<>();
     for (Map.Entry> entry : fieldInfo.entrySet()) {
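The @return tags added above document builders for BigQuery write configuration, and WriteToBigQuery.getSchema() turns such a map into a table schema. As a self-contained illustration of what that construction looks like with the com.google.api.services.bigquery model classes (the field names here are invented for the example, not taken from the games pipelines):

    import com.google.api.services.bigquery.model.TableFieldSchema;
    import com.google.api.services.bigquery.model.TableSchema;
    import java.util.ArrayList;
    import java.util.List;

    public class SchemaSketch {
      public static void main(String[] args) {
        // One TableFieldSchema per entry of the field-info map, as getSchema() does.
        List<TableFieldSchema> fields = new ArrayList<>();
        fields.add(new TableFieldSchema().setName("user").setType("STRING"));
        fields.add(new TableFieldSchema().setName("total_score").setType("INTEGER"));
        TableSchema schema = new TableSchema().setFields(fields);
        System.out.println(schema);  // GenericJson prints the schema as JSON
      }
    }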

diff --git a/runners/flink/examples/pom.xml b/runners/flink/examples/pom.xml
index b000ab12a0c0..7f267fb3a1bb 100644
--- a/runners/flink/examples/pom.xml
+++ b/runners/flink/examples/pom.xml
@@ -86,16 +86,6 @@
     -->
-      org.apache.maven.plugins
-      maven-javadoc-plugin
-        true
       org.codehaus.mojo
       exec-maven-plugin
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
index 0afde0a57978..e3c509912265 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/TFIDF.java
@@ -110,6 +110,10 @@ private interface Options extends PipelineOptions, FlinkPipelineOptions {
   /**
    * Lists documents contained beneath the {@code options.input} prefix/directory.
+   *
+   * @param options the pipeline options.
+   * @throws URISyntaxException in case of a malformed URI.
+   * @throws IOException in case of an error while listing the documents.
    */
   public static Set listInputDocuments(Options options)
       throws URISyntaxException, IOException {
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
index 9d1168ba40ee..476cfad14fe6 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/AutoComplete.java
@@ -55,11 +55,11 @@
 /**
  * To run the example, first open a socket on a terminal by executing the command:
  * nc -lk 9999
  * and then launch the example. Now whatever you type in the terminal is going to be
  * the input to the program.
  */
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/JoinExamples.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/JoinExamples.java
index d3e963d91255..671f2788c8e0 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/JoinExamples.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/JoinExamples.java
@@ -42,14 +42,14 @@
 /**
  * To run the example, first open two sockets on two terminals by executing the commands:
  * nc -lk 9999, and
  * nc -lk 9998
  * and then launch the example. Now whatever you type in the terminal is going to be
  * the input to the program.
  */
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/KafkaIOExamples.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/KafkaIOExamples.java
index af6bb351f1ee..695f339a21bb 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/KafkaIOExamples.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/KafkaIOExamples.java
@@ -171,7 +171,8 @@ public static void main(String[] args) {
   /**
    * Serialization/Deserialization schema for Avro types
-   * @param
+   *
+   * @param <T> serialization type.
    */
   static class AvroSerializationDeserializationSchema
       implements SerializationSchema, DeserializationSchema {
diff --git a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/WindowedWordCount.java b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/WindowedWordCount.java
index e803e6ed80ea..83ca5ba50d3a 100644
--- a/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/WindowedWordCount.java
+++ b/runners/flink/examples/src/main/java/org/apache/beam/runners/flink/examples/streaming/WindowedWordCount.java
@@ -44,11 +44,11 @@
 /**
  * To run the example, first open a socket on a terminal by executing the command:
  * nc -lk 9999
  * and then launch the example. Now whatever you type in the terminal is going to be
  * the input to the program.
  */
diff --git a/runners/flink/runner/pom.xml b/runners/flink/runner/pom.xml
index 757ac9c4d154..e61333d4c1de 100644
--- a/runners/flink/runner/pom.xml
+++ b/runners/flink/runner/pom.xml
@@ -159,16 +159,6 @@
     -->
-      org.apache.maven.plugins
-      maven-javadoc-plugin
-        true
       org.apache.maven.plugins
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineExecutionEnvironment.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineExecutionEnvironment.java
index d31d7902186d..71fedaa8f192 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineExecutionEnvironment.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineExecutionEnvironment.java
@@ -120,6 +120,8 @@ private void createPipelineTranslator() {
    * the {@link org.apache.beam.sdk.values.PCollection} program into
    * a {@link org.apache.flink.api.java.DataSet} or {@link org.apache.flink.streaming.api.datastream.DataStream}
    * one.
+   *
+   * @param pipeline The pipeline to translate.
    * */
   public void translate(Pipeline pipeline) {
     checkInitializationState();
@@ -134,7 +136,10 @@ public void translate(Pipeline pipeline) {
   /**
    * Launches the program execution.
-   * */
+   *
+   * @return The job execution result.
+   * @throws Exception In case of an error during pipeline execution.
+   */
   public JobExecutionResult executePipeline() throws Exception {
     if (options.isStreaming()) {
       if (this.flinkStreamEnv == null) {
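The two members documented above, translate() and executePipeline(), are the whole lifecycle of this class. The sketch below shows that call sequence end to end; it is an assumption-laden illustration: in the real runner FlinkPipelineRunner.run() performs these calls, and the constructor's exact signature and visibility are not shown in this diff.

    import org.apache.beam.runners.flink.FlinkPipelineExecutionEnvironment;
    import org.apache.beam.runners.flink.FlinkPipelineOptions;
    import org.apache.beam.sdk.Pipeline;
    import org.apache.beam.sdk.options.PipelineOptionsFactory;
    import org.apache.beam.sdk.transforms.Create;
    import org.apache.flink.api.common.JobExecutionResult;

    public class TranslateAndRun {
      public static void main(String[] args) throws Exception {
        FlinkPipelineOptions options =
            PipelineOptionsFactory.create().as(FlinkPipelineOptions.class);
        options.setFlinkMaster("[local]");  // execute inside this JVM

        Pipeline pipeline = Pipeline.create(options);
        pipeline.apply(Create.of("a", "b", "c"));

        // The two calls documented above: translate the Beam pipeline into a
        // Flink plan, then launch it and wait for the result.
        FlinkPipelineExecutionEnvironment env =
            new FlinkPipelineExecutionEnvironment(options);
        env.translate(pipeline);
        JobExecutionResult result = env.executePipeline();
        System.out.println("net runtime: " + result.getNetRuntime() + " ms");
      }
    }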

diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
index c40473e10853..3435eb6bb119 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineOptions.java
@@ -39,11 +39,17 @@ public interface FlinkPipelineOptions extends PipelineOptions, ApplicationNameOptions, StreamingOptions {
   /**
+   *
    * List of local files to make available to workers.
+   *
    *
    * Jars are placed on the worker's classpath.
+   *
    *
    * The default value is the list of jars from the main program's classpath.
+   *
+   *
+   * @return The list of files to stage.
    */
   @Description("Jar-Files to send to all workers and put on the classpath. "
       + "The default value is all files from the classpath.")
@@ -53,6 +59,8 @@ public interface FlinkPipelineOptions extends PipelineOptions, ApplicationNameOp
   /**
    * The job name is used to identify jobs running on a Flink cluster.
+   *
+   * @return The Flink job name.
    */
   @Description("Flink job name, to uniquely identify active jobs. "
       + "Defaults to using the ApplicationName-UserName-Date.")
@@ -66,6 +74,8 @@ public interface FlinkPipelineOptions extends PipelineOptions, ApplicationNameOp
   /**
    * Strings "[local]", "[collection]" or "[auto]". "[local]" will start a local Flink
    * Cluster in the JVM, "[collection]" will execute the pipeline on Java Collections while
    * "[auto]" will let the system decide where to execute the pipeline based on the environment.
+   *
+   * @return The Flink master URL.
    */
   @Description("Address of the Flink Master where the Pipeline should be executed. Can"
       + " either be of the form \"host:port\" or one of the special values [local], " +
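To make the three documented options concrete, here is a minimal sketch of configuring them from a main method. The setter names (setFlinkMaster, setJobName, setFilesToStage) mirror the getters implied by the javadoc above but are assumptions, since the hunks only show documentation:

    import java.util.Collections;
    import org.apache.beam.runners.flink.FlinkPipelineOptions;
    import org.apache.beam.sdk.options.PipelineOptionsFactory;

    public class OptionsSketch {
      public static void main(String[] args) {
        FlinkPipelineOptions options =
            PipelineOptionsFactory.fromArgs(args).as(FlinkPipelineOptions.class);
        options.setFlinkMaster("[auto]");  // or "host:port", "[local]", "[collection]"
        options.setJobName("wordcount-" + System.currentTimeMillis());
        // Stage an extra jar on every worker's classpath.
        options.setFilesToStage(Collections.singletonList("target/extra-udfs.jar"));
        System.out.println(options.getJobName());
      }
    }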

diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineRunner.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineRunner.java
index b5ffced60d19..e81c95d2ae91 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineRunner.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/FlinkPipelineRunner.java
@@ -41,10 +41,11 @@ import java.util.Map;
 /**
+ *
  * A {@link PipelineRunner} that executes the operations in the
 * pipeline by first translating them to a Flink Plan and then executing them either locally
 * or on a Flink cluster, depending on the configuration.
- *
+ *
 */
public class FlinkPipelineRunner extends PipelineRunner {
@@ -133,6 +134,8 @@ public FlinkRunnerResult run(Pipeline pipeline) {
   /**
    * For testing.
+   *
+   * @return The {@link FlinkPipelineOptions}.
    */
   public FlinkPipelineOptions getPipelineOptions() {
     return options;
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkBatchTranslationContext.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkBatchTranslationContext.java
index ecc3a65c7965..778073043fae 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkBatchTranslationContext.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkBatchTranslationContext.java
@@ -98,7 +98,8 @@ public void setOutputDataSet(PValue value, DataSet> set) {
   /**
    * Sets the AppliedPTransform which carries input/output.
-   * @param currentTransform
+   *
+   * @param currentTransform The current applied {@link PTransform}.
    */
   public void setCurrentTransform(AppliedPTransform currentTransform) {
     this.currentTransform = currentTransform;
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingPipelineTranslator.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingPipelineTranslator.java
index 31b2bee63f72..16712e8a7df4 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingPipelineTranslator.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingPipelineTranslator.java
@@ -32,7 +32,7 @@
 * {@link org.apache.beam.sdk.values.PCollection}-based job into a
 * {@link org.apache.flink.streaming.api.datastream.DataStream} one.
 *
- * This is based on {@link org.apache.beam.runners.dataflow.DataflowPipelineTranslator}
+ * This is based on {@code org.apache.beam.runners.dataflow.DataflowPipelineTranslator}
 */
public class FlinkStreamingPipelineTranslator extends FlinkPipelineTranslator {
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingTranslationContext.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingTranslationContext.java
index 0cb80baa7cc8..ee4680aae29f 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingTranslationContext.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/FlinkStreamingTranslationContext.java
@@ -82,7 +82,8 @@ public void setOutputDataStream(PValue value, DataStream set) {
   /**
    * Sets the AppliedPTransform which carries input/output.
-   * @param currentTransform
+   *
+   * @param currentTransform The current applied {@link PTransform}.
    */
   public void setCurrentTransform(AppliedPTransform currentTransform) {
     this.currentTransform = currentTransform;
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/types/InspectableByteArrayOutputStream.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/types/InspectableByteArrayOutputStream.java
index 36b5ba319180..30381d1d4b4b 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/types/InspectableByteArrayOutputStream.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/types/InspectableByteArrayOutputStream.java
@@ -27,6 +27,8 @@ public class InspectableByteArrayOutputStream extends ByteArrayOutputStream {
   /**
    * Get the underlying byte array.
+   *
+   * @return a buffer as {@code byte[]}.
    */
   public byte[] getBuffer() {
     return buf;
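Context for getBuffer(): ByteArrayOutputStream.toByteArray() allocates and copies on every call, which serialization hot paths want to avoid; returning the protected buf field is a zero-copy alternative whose valid bytes are bounded by size(). A self-contained demonstration:

    import java.io.ByteArrayOutputStream;

    public class BufferDemo {
      static class InspectableByteArrayOutputStream extends ByteArrayOutputStream {
        /** Zero-copy access; only indexes in [0, size()) hold written data. */
        byte[] getBuffer() {
          return buf;
        }
      }

      public static void main(String[] args) {
        InspectableByteArrayOutputStream out = new InspectableByteArrayOutputStream();
        out.write(42);
        byte[] copied = out.toByteArray();  // length == size(), freshly allocated
        byte[] shared = out.getBuffer();    // length == capacity, no copy made
        System.out.println(copied.length + " vs " + shared.length
            + " (size = " + out.size() + ")");
      }
    }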

diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/FlinkGroupAlsoByWindowWrapper.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/FlinkGroupAlsoByWindowWrapper.java
index 9d2cad847a38..f12f742dbeb3 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/FlinkGroupAlsoByWindowWrapper.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/FlinkGroupAlsoByWindowWrapper.java
@@ -80,23 +80,27 @@ import java.util.Set;
 /**
+ *
 * This class is the key class implementing all the windowing/triggering logic of Apache Beam.
- * To provide full compatibility and support for all the windowing/triggering combinations offered by
- * Beam, we opted for a strategy that uses the SDK's code for doing these operations. See the code in
- * ({@link org.apache.beam.runners.core.GroupAlsoByWindowsDoFn}.
- *
+ * To provide full compatibility and support for all the windowing/triggering combinations
+ * offered by Beam, we opted for a strategy that uses the SDK's code for doing these operations.
+ * See the code in {@link org.apache.beam.sdk.util.GroupAlsoByWindowsDoFn}.
+ *
+ *
 * In a nutshell, when the execution arrives at this operator, we expect to have a stream already
 * grouped by key. Each of the elements that enter here registers a timer
 * (see {@link TimerInternals#setTimer(TimerInternals.TimerData)} in the
 * {@link FlinkGroupAlsoByWindowWrapper#activeTimers}.
 * This is essentially a timestamp indicating when to trigger the computation over the window this
 * element belongs to.
- *
+ *
+ *
 * When a watermark arrives, all the registered timers are checked to see which ones are ready to
- * fire (see {@link FlinkGroupAlsoByWindowWrapper#processWatermark(Watermark)}). These are deregistered from
- * the {@link FlinkGroupAlsoByWindowWrapper#activeTimers}
- * list, and are fed into the {@link org.apache.beam.runners.core.GroupAlsoByWindowsDoFn}
- * for furhter processing.
+ * fire (see {@link FlinkGroupAlsoByWindowWrapper#processWatermark(Watermark)}).
+ * These are deregistered from the {@link FlinkGroupAlsoByWindowWrapper#activeTimers}
+ * list, and are fed into the {@link org.apache.beam.sdk.util.GroupAlsoByWindowsDoFn}
+ * for further processing.
+ *
 */
public class FlinkGroupAlsoByWindowWrapper
    extends AbstractStreamOperator>>
@@ -132,19 +136,30 @@ public class FlinkGroupAlsoByWindowWrapper
  private FlinkTimerInternals timerInternals = new FlinkTimerInternals();
  /**
+   *
   * Creates a DataStream where elements are grouped in windows based on the specified windowing strategy.
+   *
+   *
+   * Creates a DataStream where elements are grouped in windows based on the
+   * specified windowing strategy.
   * This method assumes that elements are already grouped by key.
-   *
+   *
+   *
   * The difference with {@link #createForIterable(PipelineOptions, PCollection, KeyedStream)}
   * is that this method assumes that a combiner function is provided
   * (see {@link org.apache.beam.sdk.transforms.Combine.KeyedCombineFn}).
-   * A combiner helps at increasing the speed and, in most of the cases, reduce the per-window state.
+   *
   *
-   * @param options the general job configuration options.
-   * @param input the input Dataflow {@link org.apache.beam.sdk.values.PCollection}.
-   * @param groupedStreamByKey the input stream, it is assumed to already be grouped by key.
-   * @param combiner the combiner to be used.
-   * @param outputKvCoder the type of the output values.
+   * @param options The general job configuration options.
+   * @param input The input Beam {@link org.apache.beam.sdk.values.PCollection}.
+   * @param groupedStreamByKey The input stream; it is assumed to already be grouped by key.
+   * @param combiner The combiner to be used.
+   * @param outputKvCoder The type of the output values.
+   * @param <K> The key type.
+   * @param <VIN> The input type.
+   * @param <VACC> The accumulator type.
+   * @param <VOUT> The output type.
+   * @return a {@link DataStream} with {@link WindowedValue} key-value pairs.
   */
  public static DataStream>> create(
      PipelineOptions options,
@@ -175,16 +190,24 @@ public static DataStream>> create
    }
  /**
-   * Creates an DataStream where elements are grouped in windows based on the specified windowing strategy.
+   *
+   * Creates a DataStream where elements are grouped in windows based on the specified
+   * windowing strategy.
   * This method assumes that elements are already grouped by key.
-   *
   * The difference with {@link #create(PipelineOptions, PCollection, KeyedStream, Combine.KeyedCombineFn, KvCoder)}
+   *
+   *
+   * The difference with
+   * {@link #create(PipelineOptions, PCollection, KeyedStream, Combine.KeyedCombineFn, KvCoder)}
   * is that this method assumes no combiner function
   * (see {@link org.apache.beam.sdk.transforms.Combine.KeyedCombineFn}).
+   *
   *
-   * @param options the general job configuration options.
-   * @param input the input Dataflow {@link org.apache.beam.sdk.values.PCollection}.
+   * @param options the general job configuration options.
+   * @param input the input Beam {@link org.apache.beam.sdk.values.PCollection}.
   * @param groupedStreamByKey the input stream, it is assumed to already be grouped by key.
+   * @param <K> the key type.
+   * @param <VIN> the input type.
+   * @return a {@link DataStream} with the {@link WindowedValue} key-value pair.
   */
  public static DataStream>>> createForIterable(
      PipelineOptions options,
diff --git a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/io/UnboundedSourceWrapper.java b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/io/UnboundedSourceWrapper.java
index b816e2a40881..298dd2a0d29e 100644
--- a/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/io/UnboundedSourceWrapper.java
+++ b/runners/flink/runner/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/io/UnboundedSourceWrapper.java
@@ -370,6 +370,8 @@ private long getTimeToNextWatermark(long watermarkInterval) {
  /**
   * Visible so that we can check this in tests. Must not be used for anything else.
+   *
+   * @return a {@code List} of split sources.
   */
  @VisibleForTesting
  public List> getSplitSources() {
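To close, the timer/watermark contract described in the FlinkGroupAlsoByWindowWrapper javadoc reduces to a small, testable idea: timers accumulate as timestamps, and an arriving watermark fires and deregisters every timer at or before it. The toy below models just that contract; it is a conceptual illustration, not the operator's actual code.

    import java.util.PriorityQueue;

    public class WatermarkDemo {
      public static void main(String[] args) {
        // One 'timer' per element, holding the timestamp at which the window
        // computation for that element's window should be triggered.
        PriorityQueue<Long> activeTimers = new PriorityQueue<>();
        activeTimers.add(10L);
        activeTimers.add(25L);
        activeTimers.add(40L);

        long watermark = 30L;  // a watermark arrives
        while (!activeTimers.isEmpty() && activeTimers.peek() <= watermark) {
          long timer = activeTimers.poll();  // deregister the ready timer...
          System.out.println("firing window computation for timer " + timer);
        }
        // ...while timer 40 stays registered until a later watermark passes it.
      }
    }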