public class TranslationContext
extends java.lang.Object

Base class that gives a context for PTransform translation: it keeps track of the datasets, the SparkSession, and the current transform being translated.

Constructor and Description |
---|
TranslationContext(SparkStructuredStreamingPipelineOptions options) |
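Below is a minimal sketch of constructing the context from command-line arguments. PipelineOptionsFactory is the standard Beam options API; the surrounding bootstrap class is hypothetical, and the import path for SparkStructuredStreamingPipelineOptions is assumed rather than quoted from this page.

```java
import org.apache.beam.sdk.options.PipelineOptionsFactory;
// Assumed package for the runner's options class (not stated on this page):
import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;

public class TranslationContextBootstrap {
  public static void main(String[] args) {
    // Parse and validate the runner-specific options (standard Beam API).
    SparkStructuredStreamingPipelineOptions options =
        PipelineOptionsFactory.fromArgs(args)
            .withValidation()
            .as(SparkStructuredStreamingPipelineOptions.class);

    // One context is created for the whole pipeline translation.
    TranslationContext context = new TranslationContext(options);
  }
}
```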
Modifier and Type | Method and Description |
---|---|
<T> org.apache.spark.sql.Dataset<T> | emptyDataset() |
org.apache.beam.sdk.runners.AppliedPTransform<?,?,?> | getCurrentTransform() |
<T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> | getDataset(PValue value) |
PValue | getInput() |
java.util.Map<TupleTag<?>,PCollection<?>> | getInputs() |
PValue | getOutput() |
java.util.Map<TupleTag<?>,Coder<?>> | getOutputCoders() |
java.util.Map<TupleTag<?>,PCollection<?>> | getOutputs() |
org.apache.beam.runners.core.construction.SerializablePipelineOptions | getSerializableOptions() |
<T> org.apache.spark.sql.Dataset<T> | getSideInputDataSet(PCollectionView<?> value) |
org.apache.spark.sql.SparkSession | getSparkSession() |
static void | printDatasetContent(org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue> dataset) |
<T> void | putDataset(PValue value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> dataset) |
void | putDatasetWildcard(PValue value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<?>> dataset) TODO: the three putDataset* methods are temporary and are used only for generics type checking. |
void | setCurrentTransform(org.apache.beam.sdk.runners.AppliedPTransform<?,?,?> currentTransform) |
<ViewT,ElemT> void | setSideInputDataset(PCollectionView<ViewT> value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<ElemT>> set) |
void | startPipeline() Starts the pipeline. |
public TranslationContext(SparkStructuredStreamingPipelineOptions options)
public org.apache.spark.sql.SparkSession getSparkSession()
public org.apache.beam.runners.core.construction.SerializablePipelineOptions getSerializableOptions()
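A hedged sketch of these two accessors: the shared SparkSession used to build datasets, and the options wrapped as SerializablePipelineOptions so they can travel inside Spark closures. The GlobalsSketch class is illustrative.

```java
import org.apache.beam.runners.core.construction.SerializablePipelineOptions;
import org.apache.spark.sql.SparkSession;

class GlobalsSketch {
  static void useGlobals(TranslationContext context) {
    // A single SparkSession is shared across the whole translation.
    SparkSession session = context.getSparkSession();
    // The pipeline options, wrapped so they serialize into Spark closures.
    SerializablePipelineOptions options = context.getSerializableOptions();
  }
}
```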
public void setCurrentTransform(org.apache.beam.sdk.runners.AppliedPTransform<?,?,?> currentTransform)
public org.apache.beam.sdk.runners.AppliedPTransform<?,?,?> getCurrentTransform()
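A hedged sketch of how a pipeline visitor might pair these two methods: the runner records each transform before invoking its translator, and the translator reads it back. The applyTransform method and VisitorSketch class are hypothetical.

```java
import org.apache.beam.sdk.runners.AppliedPTransform;

class VisitorSketch {
  // Hypothetical step performed for each node during pipeline traversal.
  static void applyTransform(AppliedPTransform<?, ?, ?> applied, TranslationContext context) {
    // Record which transform is currently being translated ...
    context.setCurrentTransform(applied);
    // ... so a translator (and getInput()/getOutputs() below) can resolve it.
    AppliedPTransform<?, ?, ?> current = context.getCurrentTransform();
  }
}
```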
public <T> org.apache.spark.sql.Dataset<T> emptyDataset()
public <T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> getDataset(PValue value)
public void putDatasetWildcard(PValue value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<?>> dataset)
public <T> void putDataset(PValue value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> dataset)
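A hedged sketch of the bookkeeping pattern getDataset/putDataset support: a translator fetches the Dataset registered for its input PValue, derives an output Dataset, and registers it under the output PValue for downstream translators. TranslatorSketch and its pass-through "transformation" are illustrative only.

```java
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.values.PValue;
import org.apache.spark.sql.Dataset;

class TranslatorSketch {
  static <T> void translate(PValue input, PValue output, TranslationContext context) {
    // Fetch the Dataset a previous translator registered for our input.
    Dataset<WindowedValue<T>> inputDataset = context.getDataset(input);

    // A real translator would apply Spark operations here; we pass through.
    Dataset<WindowedValue<T>> outputDataset = inputDataset;

    // Register the result so downstream translators can look it up.
    context.putDataset(output, outputDataset);
  }
}
```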
public <ViewT,ElemT> void setSideInputDataset(PCollectionView<ViewT> value, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<ElemT>> set)
public <T> org.apache.spark.sql.Dataset<T> getSideInputDataSet(PCollectionView<?> value)
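The side-input registry follows the same pattern, sketched below under illustrative names; note the differing capitalization between setSideInputDataset and getSideInputDataSet.

```java
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.values.PCollectionView;
import org.apache.spark.sql.Dataset;

class SideInputSketch {
  static <ViewT, ElemT> void register(
      PCollectionView<ViewT> view,
      Dataset<WindowedValue<ElemT>> dataset,
      TranslationContext context) {
    // Store the side input's Dataset under its view ...
    context.setSideInputDataset(view, dataset);
    // ... so a later translator (e.g. a ParDo with side inputs) can fetch it.
    Dataset<WindowedValue<ElemT>> retrieved = context.getSideInputDataSet(view);
  }
}
```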
public PValue getInput()
public java.util.Map<TupleTag<?>,PCollection<?>> getInputs()
public PValue getOutput()
public java.util.Map<TupleTag<?>,PCollection<?>> getOutputs()
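A hedged sketch of the four accessors, which all resolve against the transform most recently passed to setCurrentTransform; AccessorSketch is a hypothetical helper.

```java
import java.util.Map;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PValue;
import org.apache.beam.sdk.values.TupleTag;

class AccessorSketch {
  static void inspectCurrentTransform(TranslationContext context) {
    PValue mainInput = context.getInput();    // the single main input
    PValue mainOutput = context.getOutput();  // the single main output
    Map<TupleTag<?>, PCollection<?>> inputs = context.getInputs();   // all tagged inputs
    Map<TupleTag<?>, PCollection<?>> outputs = context.getOutputs(); // all tagged outputs
  }
}
```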
public void startPipeline()
public static void printDatasetContent(org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue> dataset)
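Finally, a hedged sketch of the end of translation: once every transform has been translated, startPipeline() launches the job, and printDatasetContent can be used beforehand as a debugging aid. The LaunchSketch wrapper is hypothetical.

```java
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.spark.sql.Dataset;

class LaunchSketch {
  @SuppressWarnings("rawtypes") // printDatasetContent takes a raw WindowedValue dataset
  static void runAfterTranslation(TranslationContext context,
                                  Dataset<WindowedValue> debugDataset) {
    // Optional debugging aid: dump a translated dataset's content.
    TranslationContext.printDatasetContent(debugDataset);
    // Launch the translated pipeline on Spark.
    context.startPipeline();
  }
}
```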