Description
Exporting the model to ONNX format, following the process described in PyTorch's documentation, fails.
The returned error message is attached below:
/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/cuda/__init__.py:82: UserWarning: CUDA initialization: Unexpected error from cudaGetDeviceCount(). Did you run some cuda functions before calling NumCudaDevices() that might have already set an error? Error 804: forward compatibility was attempted on non supported HW (Triggered internally at ../c10/cuda/CUDAFunctions.cpp:112.)
return torch._C._cuda_getDeviceCount() > 0
/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py:432: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
x_idx = np.arange(-left, w + right)
/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py:432: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
x_idx = np.arange(-left, w + right)
/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py:433: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
y_idx = np.arange(-top, h + bottom)
/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py:433: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
y_idx = np.arange(-top, h + bottom)
Traceback (most recent call last):
File "/home/dkarageo/development/ManTraNet-pytorch/MantraNet/export_onnx_mantranet.py", line 37, in <module>
torch.onnx.export(model,
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/__init__.py", line 305, in export
return utils.export(model, args, f, export_params, verbose, training,
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/utils.py", line 118, in export
_export(model, args, f, export_params, verbose, training, input_names, output_names,
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/utils.py", line 719, in _export
_model_to_graph(model, args, verbose, input_names,
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/utils.py", line 499, in _model_to_graph
graph, params, torch_out, module = _create_jit_graph(model, args)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/utils.py", line 440, in _create_jit_graph
graph, torch_out = _trace_and_get_graph_from_model(model, args)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/onnx/utils.py", line 391, in _trace_and_get_graph_from_model
torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/jit/_trace.py", line 1166, in _get_trace_graph
outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/jit/_trace.py", line 127, in forward
graph, out = torch._C._create_graph_by_tracing(
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/jit/_trace.py", line 118, in wrapper
outs.append(self.inner(*trace_inputs))
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
result = self.forward(*input, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py", line 653, in forward
return self.AnomalyDetector(self.IMTFE(x))
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
result = self.forward(*input, **kwargs)
File "/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py", line 524, in forward
x = symm_pad(x, (2, 2, 2, 2))
File "/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py", line 435, in symm_pad
x_pad = reflect(x_idx, -0.5, w - 0.5)
File "/home/dkarageo/development/ManTraNet-pytorch/MantraNet/mantranet.py", line 425, in reflect
out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
TypeError: '>=' not supported between instances of 'numpy.ndarray' and 'Tensor'
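The TypeError comes from mixing NumPy index arrays with traced Tensors: during tracing, w inside symm_pad is a Tensor, so reflect ends up comparing a numpy.ndarray against a Tensor inside np.where. A possible workaround is to rewrite reflect and symm_pad with torch operations only. The sketch below is hypothetical (function names and signatures are inferred from the traceback, not taken from mantranet.py) and may still bake input sizes in as constants under tracing:
import torch
def reflect_torch(idx, minx, maxx):
    # Reflect indices into the interval [minx, maxx] using torch ops only
    # (hypothetical torch-only rewrite of reflect from mantranet.py).
    rng = maxx - minx
    double_rng = 2 * rng
    mod = torch.fmod(idx - minx, double_rng)
    normed_mod = torch.where(mod < 0, mod + double_rng, mod)
    return torch.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
def symm_pad_torch(im, padding):
    # Symmetric padding via torch index arithmetic instead of np.arange/np.where
    # (hypothetical torch-only rewrite of symm_pad).
    left, right, top, bottom = padding
    h, w = im.shape[-2:]
    x_idx = torch.arange(-left, w + right, device=im.device)
    y_idx = torch.arange(-top, h + bottom, device=im.device)
    x_pad = reflect_torch(x_idx, -0.5, w - 0.5).long()
    y_pad = reflect_torch(y_idx, -0.5, h - 0.5).long()
    return im[..., y_pad[:, None], x_pad[None, :]]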
Code used for exporting to ONNX:
from pathlib import Path
import onnx
import torch.onnx
from mantranet import device, pre_trained_model
EXPORT_PATH: Path = Path("models/mantranet_v4.onnx")
BATCH_SIZE: int = 1
# Load pretrained model to default device.
model = pre_trained_model()
model.to(device)
model.eval()
# Initialize a random dummy input tensor for tracing and testing the model.
x: torch.Tensor = torch.rand(BATCH_SIZE, 3, 600, 600, requires_grad=True) * 255
# Export model
EXPORT_PATH.parent.mkdir(parents=True, exist_ok=True)
torch.onnx.export(model,
                  x,
                  EXPORT_PATH,
                  export_params=True,
                  opset_version=15,
                  do_constant_folding=True,
                  input_names=["input"],
                  output_names=["output"],
                  dynamic_axes={"input": {0: "batch_size",
                                          2: "height",
                                          3: "width"},
                                "output": {0: "batch_size",
                                           2: "height",
                                           3: "width"}})
It would be really useful to support the increasingly popular ONNX Runtime for inference.
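For reference, running the exported model through ONNX Runtime would look roughly like this (hypothetical usage, assuming the input/output names and export path from the script above):
import numpy as np
import onnxruntime as ort
# Load the exported model and run a dummy forward pass.
session = ort.InferenceSession("models/mantranet_v4.onnx")
dummy = (np.random.rand(1, 3, 600, 600) * 255).astype(np.float32)
outputs = session.run(["output"], {"input": dummy})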