From bb1f953cd9d151dbcaea972dc9605331f8ecf1a4 Mon Sep 17 00:00:00 2001
From: Antonio Velazquez
Date: Thu, 9 Jul 2020 00:58:36 -0700
Subject: [PATCH] Update OnnxTransformer Docs

---
 .../OnnxCatalog.cs   | 27 ++++++++++++++++---
 .../OnnxTransform.cs | 11 ++++----
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs b/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
index 1188496e2f..9734baaa90 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
@@ -15,6 +15,8 @@ public static class OnnxCatalog
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
         /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <remarks>
         /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
@@ -40,6 +42,8 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the input column.
         /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <remarks>
         /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
@@ -47,7 +51,10 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
         /// </remarks>
         /// <param name="catalog">The transform's catalog.</param>
         /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-        /// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+        /// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+        /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+        /// is particularly useful for working with variable dimension inputs and outputs.
+        /// </param>
         /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
         /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
         /// <example>
@@ -67,6 +74,8 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <param name="catalog">The transform's catalog.</param>
         /// <param name="outputColumnName">The output column resulting from the transformation.</param>
@@ -92,12 +101,17 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnName"/> column.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <param name="catalog">The transform's catalog.</param>
         /// <param name="outputColumnName">The output column resulting from the transformation.</param>
         /// <param name="inputColumnName">The input column.</param>
         /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-        /// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+        /// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+        /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+        /// is particularly useful for working with variable dimension inputs and outputs.
+        /// </param>
         /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
         /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
         /// <example>
@@ -119,6 +133,8 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <param name="catalog">The transform's catalog.</param>
         /// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
@@ -137,12 +153,17 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
 
         /// <summary>
         /// Create a <see cref="OnnxScoringEstimator"/>, which applies a pre-trained Onnx model to the <paramref name="inputColumnNames"/> columns.
+        /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+        /// and how to run it on a GPU.
         /// </summary>
         /// <param name="catalog">The transform's catalog.</param>
         /// <param name="outputColumnNames">The output columns resulting from the transformation.</param>
         /// <param name="inputColumnNames">The input columns.</param>
         /// <param name="modelFile">The path of the file containing the ONNX model.</param>
-        /// <param name="shapeDictionary">ONNX shape should be used to over those loaded from <paramref name="modelFile"/>.</param>
+        /// <param name="shapeDictionary">ONNX shapes to be used over those loaded from <paramref name="modelFile"/>.
+        /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+        /// is particularly useful for working with variable dimension inputs and outputs.
+        /// </param>
         /// <param name="gpuDeviceId">Optional GPU device ID to run execution on, <see langword="null" /> to run on CPU.</param>
         /// <param name="fallbackToCpu">If GPU error, raise exception or fallback to CPU.</param>
         public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs b/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
index 2cfb8f6366..9b454fd41c 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
@@ -36,6 +36,8 @@ namespace Microsoft.ML.Transforms.Onnx
 {
     /// <summary>
     /// <see cref="ITransformer" /> resulting from fitting an <see cref="OnnxScoringEstimator"/>.
+    /// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
+    /// and how to run it on a GPU.
     /// </summary>
     public sealed class OnnxTransformer : RowToRowTransformerBase
     {
@@ -719,6 +721,9 @@ public NamedOnnxValue GetNamedOnnxValue()
     /// | Required NuGet in addition to Microsoft.ML | Microsoft.ML.OnnxTransformer (always), either Microsoft.ML.OnnxRuntime 1.3.0 (for CPU processing) or Microsoft.ML.OnnxRuntime.Gpu 1.3.0 (for GPU processing if GPU is available) |
     /// | Exportable to ONNX | No |
     ///
+    /// To create this estimator use the following APIs:
+    /// [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*)
+    ///
     /// Supports inferencing of models in ONNX 1.6 format (opset 11), using the [Microsoft.ML.OnnxRuntime](https://www.nuget.org/packages/Microsoft.ML.OnnxRuntime/) library.
     /// Models are scored on CPU if the project references Microsoft.ML.OnnxRuntime and on the GPU if the project references Microsoft.ML.OnnxRuntime.Gpu.
     /// Every project using the OnnxScoringEstimator must reference one of the above two packages.
@@ -726,7 +731,7 @@ public NamedOnnxValue GetNamedOnnxValue()
     /// To run on a GPU, use the
     /// NuGet package [Microsoft.ML.OnnxRuntime.Gpu](https://www.nuget.org/packages/Microsoft.ML.OnnxRuntime.Gpu/) instead of the Microsoft.ML.OnnxRuntime nuget (which is for CPU processing). Microsoft.ML.OnnxRuntime.Gpu
     /// requires a [CUDA supported GPU](https://developer.nvidia.com/cuda-gpus#compute), the [CUDA 10.1 Toolkit](https://developer.nvidia.com/cuda-downloads), and [cuDNN 7.6.5](https://developer.nvidia.com/cudnn) (as indicated on [Onnxruntime's documentation](https://github.com/Microsoft/onnxruntime#default-gpu-cuda)).
-    /// Set parameter 'gpuDeviceId' to a valid non-negative integer. Typical device ID values are 0 or 1.
+    /// When creating the estimator through [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*), set the parameter 'gpuDeviceId' to a valid non-negative integer. Typical device ID values are 0 or 1. If the GPU device isn't found but 'fallbackToCpu = true' then the estimator will run on the CPU. If the GPU device isn't found and 'fallbackToCpu = false' then the estimator will throw an exception.
     ///
     /// The inputs and outputs of the ONNX models must be Tensor type. Sequence and Maps are not yet supported.
     ///
@@ -734,10 +739,6 @@ public NamedOnnxValue GetNamedOnnxValue()
     /// Visit [ONNX Models](https://github.com/onnx/models) to see a list of readily available models to get started with.
     /// Refer to [ONNX](http://onnx.ai) for more information.
     ///
-    /// To create this estimator use the following:
-    /// [ApplyOnnxModel](xref:Microsoft.ML.OnnxCatalog.ApplyOnnxModel*)
-    ///
-    /// Check the See Also section for links to usage examples.
     /// ]]>
     /// </format>
     /// </remarks>
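
For context, here is a minimal usage sketch of the ApplyOnnxModel overload documented above, showing how shapeDictionary, gpuDeviceId, and fallbackToCpu fit together. The model path "model.onnx", the "input"/"output" tensor names, and the example shapes are hypothetical placeholders, not values taken from this patch.

// Minimal sketch only: "model.onnx", the "input"/"output" names, and the shapes
// below are hypothetical placeholders.
using System.Collections.Generic;
using Microsoft.ML;

public static class OnnxScoringSketch
{
    public static ITransformer FitOnnxScorer(MLContext mlContext, IDataView data)
    {
        // Shapes keyed by the tensor names as stated in the ONNX model, e.g. "input";
        // useful when the model declares variable (unknown) dimensions.
        var shapeDictionary = new Dictionary<string, int[]>
        {
            { "input", new[] { 1, 3, 224, 224 } },
            { "output", new[] { 1, 1000 } }
        };

        var pipeline = mlContext.Transforms.ApplyOnnxModel(
            outputColumnNames: new[] { "output" },
            inputColumnNames: new[] { "input" },
            modelFile: "model.onnx",
            shapeDictionary: shapeDictionary,
            gpuDeviceId: 0,        // requires Microsoft.ML.OnnxRuntime.Gpu; pass null to stay on CPU
            fallbackToCpu: true);  // run on CPU instead of throwing if the GPU isn't available

        return pipeline.Fit(data);
    }
}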