apache_beam.ml.inference package
A package with various modules for running inference and prediction with machine learning models. It includes built-in support for popular frameworks as well as a ModelHandler interface for adding frameworks that are not yet supported.
Note: in addition to the frameworks covered by the submodules below, Beam also supports a TensorFlow model handler via the tfx-bsl library. See https://beam.apache.org/documentation/ml/about-ml/#tensorflow for more information on using TensorFlow in Beam.
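For orientation, a minimal sketch of the common pattern: wrap a framework-specific model handler in the RunInference transform from apache_beam.ml.inference.base. The model path below is a placeholder for a pickled scikit-learn model.

```python
import numpy

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy

# Placeholder path; point this at a pickled scikit-learn model.
model_handler = SklearnModelHandlerNumpy(model_uri='gs://my-bucket/model.pkl')

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | 'CreateExamples' >> beam.Create([numpy.array([1.0, 2.0])])
        | 'RunInference' >> RunInference(model_handler)
        # Each output element is a PredictionResult(example, inference).
        | 'PrintResults' >> beam.Map(print))
```

Each element of the output PCollection is a PredictionResult pairing the input example with the model's inference.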
Submodules
- apache_beam.ml.inference.base module
  - PredictionResult
  - ModelMetadata
  - RunInferenceDLQ
  - KeyModelPathMapping
  - ModelHandler: load_model(), run_inference(), get_num_bytes(), get_metrics_namespace(), get_resource_hints(), batch_elements_kwargs(), validate_inference_args(), update_model_path(), update_model_paths(), get_preprocess_fns(), get_postprocess_fns(), should_skip_batching(), set_environment_vars(), with_preprocess_fn(), with_postprocess_fn(), with_no_batching(), share_model_across_processes(), model_copies(), override_metrics(), should_garbage_collect_on_timeout()
  - KeyModelMapping
  - KeyedModelHandler: load_model(), run_inference(), get_num_bytes(), get_metrics_namespace(), get_resource_hints(), batch_elements_kwargs(), validate_inference_args(), update_model_paths(), update_model_path(), share_model_across_processes(), model_copies(), override_metrics(), should_garbage_collect_on_timeout()
  - MaybeKeyedModelHandler: load_model(), run_inference(), get_num_bytes(), get_metrics_namespace(), get_resource_hints(), batch_elements_kwargs(), validate_inference_args(), update_model_path(), get_preprocess_fns(), get_postprocess_fns(), should_skip_batching(), share_model_across_processes(), model_copies()
  - RunInference
  - load_model_status()
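The ModelHandler methods listed above are the extension point mentioned in the package description. A minimal sketch of a handler for an unsupported framework, assuming a hypothetical my_framework_load() loader and a model object with a predict() method:

```python
from typing import Any, Dict, Iterable, Optional, Sequence

import numpy

from apache_beam.ml.inference.base import ModelHandler, PredictionResult


class MyFrameworkModelHandler(ModelHandler[numpy.ndarray, PredictionResult, Any]):
    """Sketch of a handler for a framework Beam does not support out of the box."""

    def __init__(self, model_uri: str):
        self._model_uri = model_uri

    def load_model(self) -> Any:
        # Called once per worker (or shared process) to build the model object.
        return my_framework_load(self._model_uri)  # hypothetical loader

    def run_inference(
        self,
        batch: Sequence[numpy.ndarray],
        model: Any,
        inference_args: Optional[Dict[str, Any]] = None,
    ) -> Iterable[PredictionResult]:
        # Score the whole batch at once and pair each input with its prediction.
        predictions = model.predict(numpy.stack(batch))  # hypothetical model API
        return [PredictionResult(x, y) for x, y in zip(batch, predictions)]
```

Wrapping any handler in KeyedModelHandler (or relying on MaybeKeyedModelHandler) lets (key, example) pairs flow through RunInference with their keys preserved.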
- apache_beam.ml.inference.huggingface_inference module
  - HuggingFaceModelHandlerTensor: load_model(), run_inference(), update_model_path(), get_num_bytes(), batch_elements_kwargs(), share_model_across_processes(), model_copies(), get_metrics_namespace()
  - HuggingFaceModelHandlerKeyedTensor: load_model(), run_inference(), update_model_path(), get_num_bytes(), batch_elements_kwargs(), share_model_across_processes(), model_copies(), get_metrics_namespace()
  - HuggingFacePipelineModelHandler: load_model(), run_inference(), update_model_path(), get_num_bytes(), batch_elements_kwargs(), share_model_across_processes(), model_copies(), get_metrics_namespace()
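A usage sketch for the pipeline-based handler; the task and checkpoint shown are illustrative, and any Hugging Face pipeline()-compatible model should work:

```python
import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.huggingface_inference import HuggingFacePipelineModelHandler

# The task and checkpoint are illustrative placeholders.
model_handler = HuggingFacePipelineModelHandler(
    task='text-classification',
    model='distilbert-base-uncased-finetuned-sst-2-english')

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create(['RunInference keeps the pipeline code small.'])
        | RunInference(model_handler)
        | beam.Map(print))
```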
- apache_beam.ml.inference.onnx_inference module
- apache_beam.ml.inference.pytorch_inference module
  - PytorchModelHandlerTensor: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), validate_inference_args(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
  - PytorchModelHandlerKeyedTensor: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), validate_inference_args(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
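A sketch for the PyTorch tensor handler. The LinearRegression module and the state-dict path are illustrative; model_class and model_params must match how the saved weights were produced.

```python
import torch

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerTensor


class LinearRegression(torch.nn.Module):
    """Toy model; model_class and model_params must match the saved state dict."""

    def __init__(self, input_dim=1, output_dim=1):
        super().__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)


# Placeholder path to weights saved with torch.save(model.state_dict(), ...).
model_handler = PytorchModelHandlerTensor(
    state_dict_path='gs://my-bucket/linear_regression.pt',
    model_class=LinearRegression,
    model_params={'input_dim': 1, 'output_dim': 1})

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create([torch.Tensor([10.0])])
        | RunInference(model_handler)
        | beam.Map(print))
```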
- apache_beam.ml.inference.sklearn_inference module
  - SklearnModelHandlerNumpy: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
  - SklearnModelHandlerPandas: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
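A sketch for the pandas variant, assuming a pickled scikit-learn model trained on DataFrame input and elements that are single-row DataFrames with the model's feature columns (path and column names are placeholders):

```python
import pandas

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerPandas

# Placeholder path to a pickled scikit-learn model trained on DataFrame input.
model_handler = SklearnModelHandlerPandas(model_uri='gs://my-bucket/model.pkl')

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        # Each element is a single-row DataFrame with the model's feature columns.
        | beam.Create([pandas.DataFrame({'bedrooms': [3], 'bathrooms': [2]})])
        | RunInference(model_handler)
        | beam.Map(print))
```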
- apache_beam.ml.inference.tensorflow_inference module
  - TFModelHandlerNumpy: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), validate_inference_args(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
  - TFModelHandlerTensor: load_model(), update_model_path(), run_inference(), get_num_bytes(), get_metrics_namespace(), validate_inference_args(), batch_elements_kwargs(), share_model_across_processes(), model_copies()
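A sketch for the NumPy TensorFlow handler, assuming model_uri points at a SavedModel directory (the path and input shape are placeholders):

```python
import numpy

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.tensorflow_inference import TFModelHandlerNumpy

# Placeholder path to a TensorFlow SavedModel directory.
model_handler = TFModelHandlerNumpy(model_uri='gs://my-bucket/saved_model/')

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create([numpy.array([1.0, 2.0, 3.0], dtype=numpy.float32)])
        | RunInference(model_handler)
        | beam.Map(print))
```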
- apache_beam.ml.inference.tensorrt_inference module
  - TensorRTEngine
  - TensorRTEngineHandlerNumPy: batch_elements_kwargs(), load_model(), load_onnx(), build_engine(), run_inference(), get_num_bytes(), get_metrics_namespace(), share_model_across_processes(), model_copies()
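A sketch for the TensorRT handler, assuming a pre-built serialized engine is supplied via engine_path and batch sizes that match how the engine was built (path and input shape are placeholders):

```python
import numpy

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.tensorrt_inference import TensorRTEngineHandlerNumPy

# Placeholder engine path; batch sizes must match how the engine was built.
model_handler = TensorRTEngineHandlerNumPy(
    min_batch_size=1,
    max_batch_size=1,
    engine_path='gs://my-bucket/model.trt')

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | beam.Create([numpy.zeros((3, 224, 224), dtype=numpy.float32)])
        | RunInference(model_handler)
        | beam.Map(print))
```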
- apache_beam.ml.inference.utils module
- apache_beam.ml.inference.vertex_ai_inference module
- apache_beam.ml.inference.vllm_inference module
- apache_beam.ml.inference.xgboost_inference module