public static class SourceRDD.Unbounded<T,CheckpointMarkT extends UnboundedSource.CheckpointMark> extends org.apache.spark.rdd.RDD<scala.Tuple2<Source<T>,CheckpointMarkT>>
SourceRDD.Unbounded is the implementation of a micro-batch in a SourceDStream.

This RDD is made of P partitions, each containing a single pair-element of the partitioned MicrobatchSource and an optional starting UnboundedSource.CheckpointMark.
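For orientation, a minimal sketch of what one such pair-element carries; the class and method names here are hypothetical and not part of this API:

```java
import org.apache.beam.sdk.io.Source;
import org.apache.beam.sdk.io.UnboundedSource;
import scala.Tuple2;

class PairElementSketch {
  // Each RDD element couples one split of the partitioned source with the
  // checkpoint mark it should resume from. This sketch assumes the mark may
  // be null on the first micro-batch, matching "optional" above.
  static <T, CheckpointMarkT extends UnboundedSource.CheckpointMark> void inspect(
      Tuple2<Source<T>, CheckpointMarkT> element) {
    Source<T> partitionedSource = element._1();  // one partition's share of the source
    CheckpointMarkT startingMark = element._2(); // where to resume reading, if anywhere
    System.out.println(partitionedSource + " starts from " + startingMark);
  }
}
```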
| Constructor and Description |
|---|
| `Unbounded(org.apache.spark.SparkContext sc, org.apache.beam.runners.core.construction.SerializablePipelineOptions options, MicrobatchSource<T,CheckpointMarkT> microbatchSource, int initialNumPartitions)` |
| Modifier and Type | Method and Description |
|---|---|
| `scala.collection.Iterator<scala.Tuple2<Source<T>,CheckpointMarkT>>` | `compute(org.apache.spark.Partition split, org.apache.spark.TaskContext context)` |
| `org.apache.spark.Partition[]` | `getPartitions()` |
| `scala.Option<org.apache.spark.Partitioner>` | `partitioner()` |

Methods inherited from class org.apache.spark.rdd.RDD
$plus$plus, aggregate, cache, cartesian, checkpoint, checkpointData_$eq, checkpointData, clearDependencies, coalesce, coalesce$default$2, coalesce$default$3, coalesce$default$4, collect, collect, collectPartitions, computeOrReadCheckpoint, conf, context, count, countApprox, countApprox$default$2, countApproxDistinct, countApproxDistinct, countApproxDistinct$default$1, countByValue, countByValue$default$1, countByValueApprox, countByValueApprox$default$2, countByValueApprox$default$3, creationSite, dependencies, distinct, distinct, distinct$default$2, doCheckpoint, doubleRDDToDoubleRDDFunctions, elementClassTag, filter, first, firstParent, flatMap, fold, foreach, foreachPartition, getCheckpointFile, getCreationSite, getDependencies, getNarrowAncestors, getNumPartitions, getOrCompute, getPreferredLocations, getStorageLevel, glom, groupBy, groupBy, groupBy, groupBy$default$4, id, initializeLogIfNecessary, initializeLogIfNecessary, initializeLogIfNecessary$default$2, intersection, intersection, intersection, intersection$default$3, isCheckpointed, isCheckpointedAndMaterialized, isEmpty, isLocallyCheckpointed, isTraceEnabled, iterator, keyBy, localCheckpoint, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, map, mapPartitions, mapPartitions$default$2, mapPartitionsInternal, mapPartitionsInternal$default$2, mapPartitionsWithIndex, mapPartitionsWithIndex$default$2, mapPartitionsWithIndexInternal, mapPartitionsWithIndexInternal$default$2, markCheckpointed, max, min, name_$eq, name, numericRDDToDoubleRDDFunctions, org$apache$spark$internal$Logging$$log__$eq, org$apache$spark$internal$Logging$$log_, org$apache$spark$rdd$RDD$$checkpointAllMarkedAncestors, org$apache$spark$rdd$RDD$$debugString$1, org$apache$spark$rdd$RDD$$debugString$default$4$1, org$apache$spark$rdd$RDD$$dependencies__$eq, org$apache$spark$rdd$RDD$$dependencies_, org$apache$spark$rdd$RDD$$doCheckpointCalled_$eq, org$apache$spark$rdd$RDD$$doCheckpointCalled, org$apache$spark$rdd$RDD$$partitions__$eq, org$apache$spark$rdd$RDD$$partitions_, org$apache$spark$rdd$RDD$$sc, org$apache$spark$rdd$RDD$$visit$1, parent, partitions, persist, persist, pipe, pipe, pipe, pipe$default$2, pipe$default$3, pipe$default$4, pipe$default$5, pipe$default$6, pipe$default$7, preferredLocations, randomSampleWithRange, randomSplit, randomSplit$default$2, rddToAsyncRDDActions, rddToOrderedRDDFunctions, rddToPairRDDFunctions, rddToPairRDDFunctions$default$4, rddToSequenceFileRDDFunctions, reduce, repartition, repartition$default$2, retag, retag, sample, sample$default$3, saveAsObjectFile, saveAsTextFile, saveAsTextFile, scope, setName, sortBy, sortBy$default$2, sortBy$default$3, sparkContext, subtract, subtract, subtract, subtract$default$3, take, takeOrdered, takeSample, takeSample$default$3, toDebugString, toJavaRDD, toLocalIterator, top, toString, treeAggregate, treeAggregate$default$4, treeReduce, treeReduce$default$2, union, unpersist, unpersist$default$1, withScope, zip, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipPartitions, zipWithIndex, zipWithUniqueId
public Unbounded(org.apache.spark.SparkContext sc, org.apache.beam.runners.core.construction.SerializablePipelineOptions options, MicrobatchSource<T,CheckpointMarkT> microbatchSource, int initialNumPartitions)
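As a hedged illustration of the wiring (not taken from the runner's own code), the constructor could be invoked like this; the helper name is hypothetical and every argument is assumed to be supplied by surrounding runner code:

```java
import org.apache.beam.runners.core.construction.SerializablePipelineOptions;
import org.apache.beam.runners.spark.io.MicrobatchSource;
import org.apache.beam.runners.spark.io.SourceRDD;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.spark.SparkContext;

class UnboundedRddSketch {
  // Hypothetical helper: builds the RDD backing one micro-batch. The caller
  // provides the Spark context, the serialized pipeline options, the
  // partitioned micro-batch source, and the desired initial parallelism.
  static <T, CheckpointMarkT extends UnboundedSource.CheckpointMark>
      SourceRDD.Unbounded<T, CheckpointMarkT> microbatchRdd(
          SparkContext sc,
          SerializablePipelineOptions options,
          MicrobatchSource<T, CheckpointMarkT> microbatchSource,
          int initialNumPartitions) {
    return new SourceRDD.Unbounded<>(sc, options, microbatchSource, initialNumPartitions);
  }
}
```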
public org.apache.spark.Partition[] getPartitions()

Specified by:
getPartitions in class org.apache.spark.rdd.RDD<scala.Tuple2<Source<T>,CheckpointMarkT>>
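getPartitions() is framework-facing; driver code would normally go through the public accessors inherited from RDD. A small sketch, reusing the hypothetical helper above:

```java
import org.apache.beam.runners.spark.io.SourceRDD;
import org.apache.spark.Partition;

class PartitionsSketch {
  // Sketch: the partition array realizes the P splits described above.
  // getPartitions() itself is called by Spark; these inherited accessors
  // expose the same information to driver code.
  static void countPartitions(SourceRDD.Unbounded<?, ?> rdd) {
    int p = rdd.getNumPartitions();       // number of splits P
    Partition[] parts = rdd.partitions(); // the same partitions, materialized
    assert parts.length == p;
  }
}
```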
public scala.Option<org.apache.spark.Partitioner> partitioner()

Overrides:
partitioner in class org.apache.spark.rdd.RDD<scala.Tuple2<Source<T>,CheckpointMarkT>>
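A tiny sketch of reading the advertised partitioner; whether this RDD actually defines one is not stated here, so the sketch only inspects the scala.Option:

```java
import org.apache.beam.runners.spark.io.SourceRDD;
import org.apache.spark.Partitioner;
import scala.Option;

class PartitionerSketch {
  // Sketch: partitioner() returns a scala.Option; a defined value lets
  // downstream stages skip a shuffle when the partitioning already matches.
  static void describePartitioner(SourceRDD.Unbounded<?, ?> rdd) {
    Option<Partitioner> maybe = rdd.partitioner();
    System.out.println(
        maybe.isDefined() ? "partitioned by " + maybe.get() : "no partitioner");
  }
}
```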
public scala.collection.Iterator<scala.Tuple2<Source<T>,CheckpointMarkT>> compute(org.apache.spark.Partition split, org.apache.spark.TaskContext context)

Specified by:
compute in class org.apache.spark.rdd.RDD<scala.Tuple2<Source<T>,CheckpointMarkT>>
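compute(...) is invoked by Spark once per partition; driver code normally observes its output through an action. A hedged sketch using toJavaRDD() from the inherited methods listed above:

```java
import java.util.List;
import org.apache.beam.runners.spark.io.SourceRDD;
import org.apache.beam.sdk.io.Source;
import org.apache.beam.sdk.io.UnboundedSource;
import scala.Tuple2;

class ComputeSketch {
  // Sketch: collecting pulls every partition's single (source, checkpoint)
  // pair back to the driver. For illustration only; the real runner consumes
  // these pairs inside the micro-batch read rather than via collect().
  static <T, CheckpointMarkT extends UnboundedSource.CheckpointMark>
      List<Tuple2<Source<T>, CheckpointMarkT>> collectPairs(
          SourceRDD.Unbounded<T, CheckpointMarkT> rdd) {
    return rdd.toJavaRDD().collect();
  }
}
```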