protected class TransformTranslator.Context extends java.lang.Object implements PipelineTranslator.TranslationState

Mutable context available while translating a PTransform. The context is backed by the shared PipelineTranslator.TranslationState of the PipelineTranslator.

Nested classes/interfaces inherited from interface EncoderProvider: EncoderProvider.Factory<T>

| Modifier and Type | Method and Description |
|---|---|
| <T> org.apache.spark.broadcast.Broadcast<T> | broadcast(T value) | 
| <T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> | createDataset(java.util.List<org.apache.beam.sdk.util.WindowedValue<T>> data, org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> enc) | 
| <T> org.apache.spark.sql.Encoder<T> | encoderOf(Coder<T> coder, EncoderProvider.Factory<T> factory) | 
| org.apache.beam.sdk.runners.AppliedPTransform<InT,OutT,TransformT> | getCurrentTransform() | 
| <T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> | getDataset(PCollection<T> pCollection) | 
| InT | getInput() | 
| java.util.Map<TupleTag<?>,PCollection<?>> | getInputs() | 
| PipelineOptions | getOptions() | 
| java.util.function.Supplier<PipelineOptions> | getOptionsSupplier() | 
| OutT | getOutput() | 
| <T> PCollection<T> | getOutput(TupleTag<T> tag) | 
| java.util.Map<TupleTag<?>,PCollection<?>> | getOutputs() | 
| <T> org.apache.spark.broadcast.Broadcast<SideInputValues<T>> | getSideInputBroadcast(PCollection<T> pCollection, SideInputValues.Loader<T> loader) | 
| org.apache.spark.sql.SparkSession | getSparkSession() | 
| boolean | isLeaf(PCollection<?> pCollection) | 
| <T> void | putDataset(PCollection<T> pCollection, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> dataset, boolean cache) | 
| <InputT,T> void | putUnresolved(PCollection<T> out, PipelineTranslator.UnresolvedTranslation<InputT,T> unresolved) | 
| <T1,T2> org.apache.spark.sql.Encoder<scala.Tuple2<T1,T2>> | tupleEncoder(org.apache.spark.sql.Encoder<T1> e1, org.apache.spark.sql.Encoder<T2> e2) | 
| <T> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> | windowedEncoder(Coder<T> coder) | 
| <T,W extends BoundedWindow> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> | windowedEncoder(Coder<T> coder, Coder<W> windowCoder) | 
| <T> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> | windowedEncoder(org.apache.spark.sql.Encoder<T> enc) | 
| org.apache.spark.sql.Encoder<BoundedWindow> | windowEncoder() | 
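Taken together, these methods let a translator resolve the Dataset of its input PCollection, derive Spark Encoders from Beam Coders, and register the translated Dataset for downstream transforms. The fragment below is a minimal, illustrative sketch only: it assumes it runs inside the translate(transform, cxt) implementation of a hypothetical translator for a PTransform<PCollection<String>, PCollection<String>>; the upper-casing logic and all names besides the Context methods are assumptions, not part of this API.

```java
// Minimal sketch, assumed to run inside a TransformTranslator subclass's
// translate(transform, cxt) method for a hypothetical
// PTransform<PCollection<String>, PCollection<String>> (illustrative only).
// Assumed imports: org.apache.beam.sdk.coders.StringUtf8Coder,
// org.apache.beam.sdk.util.WindowedValue,
// org.apache.spark.api.java.function.MapFunction,
// org.apache.spark.sql.Dataset, org.apache.spark.sql.Encoder.

// Resolve the Dataset registered for the input PCollection.
Dataset<WindowedValue<String>> input = cxt.getDataset(cxt.getInput());

// Derive a Spark Encoder for the output elements from the Beam coder.
Encoder<WindowedValue<String>> enc = cxt.windowedEncoder(StringUtf8Coder.of());

// Element-wise transformation; withValue keeps timestamp, windows and pane info.
Dataset<WindowedValue<String>> result =
    input.map(
        (MapFunction<WindowedValue<String>, WindowedValue<String>>)
            wv -> wv.withValue(wv.getValue().toUpperCase()),
        enc);

// Register the result for the output PCollection so downstream translators can
// resolve it via getDataset; cache = false leaves caching decisions elsewhere.
cxt.putDataset(cxt.getOutput(), result, false);
```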
Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface PipelineTranslator.TranslationState: putDataset

Methods inherited from interface EncoderProvider: encoderFactory, encoderOf, keyEncoderOf, kvEncoderOf, valueEncoderOf

public InT getInput()
public java.util.Map<TupleTag<?>,PCollection<?>> getInputs()
public java.util.Map<TupleTag<?>,PCollection<?>> getOutputs()
public OutT getOutput()
public <T> PCollection<T> getOutput(TupleTag<T> tag)
public org.apache.beam.sdk.runners.AppliedPTransform<InT,OutT,TransformT> getCurrentTransform()
public <T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> getDataset(PCollection<T> pCollection)
Specified by: getDataset in interface PipelineTranslator.TranslationState

public <T> org.apache.spark.broadcast.Broadcast<SideInputValues<T>> getSideInputBroadcast(PCollection<T> pCollection, SideInputValues.Loader<T> loader)

Specified by: getSideInputBroadcast in interface PipelineTranslator.TranslationState

public <T> void putDataset(PCollection<T> pCollection, org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> dataset, boolean cache)

Specified by: putDataset in interface PipelineTranslator.TranslationState

public <InputT,T> void putUnresolved(PCollection<T> out, PipelineTranslator.UnresolvedTranslation<InputT,T> unresolved)

Specified by: putUnresolved in interface PipelineTranslator.TranslationState

public boolean isLeaf(PCollection<?> pCollection)

Specified by: isLeaf in interface PipelineTranslator.TranslationState

public java.util.function.Supplier<PipelineOptions> getOptionsSupplier()

Specified by: getOptionsSupplier in interface PipelineTranslator.TranslationState

public PipelineOptions getOptions()

Specified by: getOptions in interface PipelineTranslator.TranslationState

public <T> org.apache.spark.sql.Dataset<org.apache.beam.sdk.util.WindowedValue<T>> createDataset(java.util.List<org.apache.beam.sdk.util.WindowedValue<T>> data, org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> enc)
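createDataset is handy when a translation materializes a small, in-memory collection of WindowedValues as a Dataset, for instance for a Create-style primitive. A minimal sketch, assumed to run inside a translate(transform, cxt) implementation; the values, coder, and output element type are illustrative assumptions:

```java
// Minimal sketch (illustrative): build a Dataset from in-memory windowed values.
// Assumed imports: java.util.Arrays, java.util.List,
// org.apache.beam.sdk.coders.VarIntCoder, org.apache.beam.sdk.util.WindowedValue,
// org.apache.spark.sql.Dataset.

List<WindowedValue<Integer>> data =
    Arrays.asList(
        WindowedValue.valueInGlobalWindow(1),
        WindowedValue.valueInGlobalWindow(2),
        WindowedValue.valueInGlobalWindow(3));

// The encoder is derived from the Beam coder of the element type.
Dataset<WindowedValue<Integer>> ds =
    cxt.createDataset(data, cxt.windowedEncoder(VarIntCoder.of()));

// Register the Dataset for the current transform's output PCollection,
// assuming that output is a PCollection<Integer>.
cxt.putDataset(cxt.getOutput(), ds, false);
```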
public <T> org.apache.spark.broadcast.Broadcast<T> broadcast(T value)
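broadcast ships a small, read-only value (for example a lookup table used inside a Spark function) to the executors once instead of serializing it with every task. A minimal sketch; the lookup map and the mapping function are assumptions for illustration only:

```java
// Minimal sketch (illustrative): broadcast a small lookup table and read it back
// inside a Spark function executed on the executors.
// Assumed imports: java.util.HashMap, java.util.Map,
// org.apache.beam.sdk.util.WindowedValue,
// org.apache.spark.api.java.function.MapFunction,
// org.apache.spark.broadcast.Broadcast.

Map<String, Integer> lookup = new HashMap<>();
lookup.put("a", 1);
lookup.put("b", 2);

// Broadcast the value once; executors read it on first access.
Broadcast<Map<String, Integer>> bc = cxt.broadcast(lookup);

// Inside a serializable Spark function, the broadcast value is read via bc.value().
MapFunction<WindowedValue<String>, WindowedValue<Integer>> toCode =
    wv -> wv.withValue(bc.value().getOrDefault(wv.getValue(), 0));
```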
public org.apache.spark.sql.SparkSession getSparkSession()
Specified by: getSparkSession in interface PipelineTranslator.TranslationState

public <T> org.apache.spark.sql.Encoder<T> encoderOf(Coder<T> coder, EncoderProvider.Factory<T> factory)

Specified by: encoderOf in interface EncoderProvider

public <T1,T2> org.apache.spark.sql.Encoder<scala.Tuple2<T1,T2>> tupleEncoder(org.apache.spark.sql.Encoder<T1> e1, org.apache.spark.sql.Encoder<T2> e2)
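tupleEncoder composes two Encoders into one for scala.Tuple2, which is useful when a translation stages keyed intermediate Datasets. A minimal sketch, assumed to run inside a translate(transform, cxt) implementation; the String key and Integer value shapes are assumptions:

```java
// Minimal sketch (illustrative): encoder for a keyed intermediate Dataset of
// Tuple2<String, WindowedValue<Integer>>.
// Assumed imports: org.apache.beam.sdk.coders.VarIntCoder,
// org.apache.beam.sdk.util.WindowedValue, org.apache.spark.sql.Encoder,
// org.apache.spark.sql.Encoders.

Encoder<scala.Tuple2<String, WindowedValue<Integer>>> keyedEnc =
    cxt.tupleEncoder(Encoders.STRING(), cxt.windowedEncoder(VarIntCoder.of()));
```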
public <T> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> windowedEncoder(Coder<T> coder)
public <T> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> windowedEncoder(org.apache.spark.sql.Encoder<T> enc)
public <T,W extends BoundedWindow> org.apache.spark.sql.Encoder<org.apache.beam.sdk.util.WindowedValue<T>> windowedEncoder(Coder<T> coder, Coder<W> windowCoder)
public org.apache.spark.sql.Encoder<BoundedWindow> windowEncoder()
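The two-coder windowedEncoder overload additionally takes the Coder of the concrete window type, e.g. as obtained from the input PCollection's WindowingStrategy, while windowEncoder yields an Encoder for bare BoundedWindow instances. A minimal sketch, assumed to run inside a translate(transform, cxt) implementation; the choice of VarIntCoder and IntervalWindow is illustrative:

```java
// Minimal sketch (illustrative): encoders for windowed values and bare windows.
// Assumed imports: org.apache.beam.sdk.coders.VarIntCoder,
// org.apache.beam.sdk.transforms.windowing.BoundedWindow,
// org.apache.beam.sdk.transforms.windowing.IntervalWindow,
// org.apache.beam.sdk.util.WindowedValue, org.apache.spark.sql.Encoder.

// Windowed encoder with an explicit window coder, e.g. for fixed or sliding
// windowing that produces IntervalWindows.
Encoder<WindowedValue<Integer>> enc =
    cxt.windowedEncoder(VarIntCoder.of(), IntervalWindow.getCoder());

// Encoder for window instances themselves, e.g. when windows are extracted into
// their own Dataset column.
Encoder<BoundedWindow> winEnc = cxt.windowEncoder();
```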