Closed as not planned
🐛 Describe the bug
Code:

import torch
from torch import nn
import torchaudio


class DataCov(nn.Module):
    def __init__(self):
        super(DataCov, self).__init__()
        self.transform = nn.Sequential(
            torchaudio.transforms.MelSpectrogram(sample_rate=48000, n_fft=1536, hop_length=768, f_min=20, f_max=20000)
        )

    def forward(self, x1):
        x1 = self.transform(x1)
        return x1


def export():
    model = DataCov().to(torch.float32)
    model.eval()
    input = torch.rand((1, 1, 12 * 48000), dtype=torch.float32)
    torch.onnx.dynamo_export(model, (input), "DataCov.onnx", verbose=False,
                             input_names=['input1'], output_names=['output1'], opset_version=18)


if __name__ == '__main__':
    export()
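For reference, a minimal sketch of the call pattern documented for the dynamo-based exporter in the 2.1 nightlies (assumptions: the installed build matches, and "DataCov_dynamo.onnx" is just an illustrative output name). dynamo_export takes the model and its example inputs plus an optional ExportOptions, and returns an export result whose save() writes the .onnx file. The output path and the input_names/output_names/opset_version keywords used above come from the TorchScript-based torch.onnx.export signature; if I read the dynamo_export signature correctly, they would be forwarded to the model rather than treated as exporter options. This is separate from the failures reported below, which occur before those arguments matter.

import torch

# Sketch only (not the reported repro): documented dynamo_export call pattern.
# Reuses the DataCov class defined above; "DataCov_dynamo.onnx" is a
# hypothetical output name for illustration.
model = DataCov().eval()
example_input = torch.rand((1, 1, 12 * 48000), dtype=torch.float32)

export_output = torch.onnx.dynamo_export(
    model,
    example_input,
    export_options=torch.onnx.ExportOptions(dynamic_shapes=False),
)
export_output.save("DataCov_dynamo.onnx")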
Linux error:
Traceback (most recent call last):
File "/root/autodl-tmp/./main.py", line 27, in <module>
export()
File "/root/autodl-tmp/./main.py", line 22, in export
torch.onnx.dynamo_export(model, (input), "DataCov.onnx", verbose=False,
^^^^^^^^^^
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/torch
C86C
/__init__.py", line 1827, in __getattr__
return importlib.import_module(f".{name}", __name__)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/test_onnx/lib/python3.11/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/torch/onnx/__init__.py", line 48, in <module>
from ._internal.exporter import ( # usort:skip. needs to be last to avoid circular import
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/torch/onnx/_internal/exporter.py", line 65, in <module>
from torch.onnx._internal.fx import diagnostics
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/torch/onnx/_internal/fx/diagnostics.py", line 10, in <module>
import onnxscript # type: ignore[import]
^^^^^^^^^^^^^^^^^
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/onnxscript/__init__.py", line 7, in <module>
from .backend.onnx_export import export2python as proto2python
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/onnxscript/backend/onnx_export.py", line 14, in <module>
import onnxscript.onnx_types
File "/root/miniconda3/envs/test_onnx/lib/python3.11/site-packages/onnxscript/onnx_types.py", line 177, in <module>
class FLOAT8E4M3FN(TensorType, dtype=onnx.TensorProto.FLOAT8E4M3FN):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: FLOAT8E4M3FN
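The Linux traceback fails while importing onnxscript, at its reference to onnx.TensorProto.FLOAT8E4M3FN. That enum value only exists in sufficiently recent onnx releases, so this looks like a version mismatch between the installed onnx package and onnxscript-preview rather than anything in the model itself (an assumption; the onnx version is not listed in the environment output below). A quick check, as a sketch:

import onnx

print(onnx.__version__)
# FLOAT8E4M3FN was added to TensorProto's data types in newer onnx releases;
# False here would mean the installed onnx predates the value onnxscript expects.
print(hasattr(onnx.TensorProto, "FLOAT8E4M3FN"))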
Windows error:
C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\onnx\_internal\exporter.py:130: UserWarning: torch.onnx.dynamo_export only implements opset version 18 for now. If you need to use a different opset version, please register them with register_custom_op.
warnings.warn(
Traceback (most recent call last):
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\onnx\_internal\exporter.py", line 1091, in dynamo_export
).export()
^^^^^^^^
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\onnx\_internal\exporter.py", line 892, in export
graph_module = self.options.fx_tracer.generate_fx(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\onnx\_internal\fx\dynamo_graph_extractor.py", line 199, in generate_fx
graph_module, graph_guard = torch._dynamo.export(
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\_dynamo\eval_frame.py", line 1018, in inner
check_if_dynamo_supported()
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\_dynamo\eval_frame.py", line 533, in check_if_dynamo_supported
raise RuntimeError("Windows not yet supported for torch.compile")
RuntimeError: Windows not yet supported for torch.compile
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\work\pytorch_to_onnx\main.py", line 27, in <module>
export()
File "C:\work\pytorch_to_onnx\main.py", line 22, in export
torch.onnx.dynamo_export(model, (input), "DataCov.onnx", verbose=False,
File "C:\Users\dell\miniconda3\envs\onnx_export\Lib\site-packages\torch\onnx\_internal\exporter.py", line 1102, in dynamo_export
raise OnnxExporterError(
torch.onnx.OnnxExporterError: Failed to export the model to ONNX. Generating SARIF report at {sarif_report_path}. SARIF is a standard format for the output of static analysis tools. SARIF log can be loaded in VS Code SARIF viewer extension, or SARIF web viewer(https://microsoft.github.io/sarif-web-component/).Please report a bug on PyTorch Github: https://github.com/pytorch/pytorch/issues
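The Windows failure is expected with this build: dynamo_export routes through torch._dynamo, which raises "Windows not yet supported for torch.compile". As a possible Windows-only workaround, here is a sketch using the TorchScript-based torch.onnx.export, which does not depend on dynamo; whether the STFT inside MelSpectrogram exports cleanly through that path depends on the opset and operator support in the installed build.

import torch

# Sketch of the TorchScript-based exporter as a Windows fallback; it takes the
# output path and input/output names directly. Reuses DataCov from the repro.
model = DataCov().eval()
example_input = torch.rand((1, 1, 12 * 48000), dtype=torch.float32)

torch.onnx.export(
    model,
    example_input,
    "DataCov.onnx",
    input_names=["input1"],
    output_names=["output1"],
    opset_version=17,
)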
Versions
Linux environment:
Collecting environment information...
PyTorch version: 2.1.0.dev20230824
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.3 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: version 3.22.1
Libc version: glibc-2.35
Python version: 3.11.4 (main, Jul 5 2023, 13:45:01) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-146-generic-x86_64-with-glibc2.35
Is CUDA available: False
CUDA runtime version: 11.8.89
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 4090
Nvidia driver version: 525.105.17
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.6.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.6.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 57 bits virtual
Byte Order: Little Endian
CPU(s): 128
On-line CPU(s) list: 0-127
Vendor ID: GenuineIntel
Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
CPU family: 6
Model: 106
Thread(s) per core: 2
Core(s) per socket: 32
Socket(s): 2
Stepping: 6
Frequency boost: enabled
CPU max MHz: 2901.0000
CPU min MHz: 800.0000
BogoMIPS: 5800.00
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 invpcid_single ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local wbnoinvd dtherm ida arat pln pts avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear pconfig flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 3 MiB (64 instances)
L1i cache: 2 MiB (64 instances)
L2 cache: 80 MiB (64 instances)
L3 cache: 108 MiB (2 instances)
NUMA node(s): 2
NUMA node0 CPU(s): 0-31,64-95
NUMA node1 CPU(s): 32-63,96-127
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==1.24.3
[pip3] torch==2.1.0.dev20230824
[pip3] torchaudio==2.1.0.dev20230824
[pip3] torchvision==0.16.0.dev20230824
[conda] blas 1.0 mkl https://mirrors.ustc.edu.cn/anaconda/pkgs/main
[conda] brotlipy 0.7.0 py311h9bf148f_1002 pytorch-nightly
[conda] cffi 1.15.1 py311h9bf148f_3 pytorch-nightly
[conda] cpuonly 2.0 0 pytorch-nightly
[conda] cryptography 38.0.4 py311h46ebde7_0 pytorch-nightly
[conda] filelock 3.9.0 py311_0 pytorch-nightly
[conda] libjpeg-turbo 2.0.0 h9bf148f_0 pytorch-nightly
[conda] mkl 2021.4.0 h06a4308_640 https://mirrors.ustc.edu.cn/anaconda/pkgs/main
[conda] mkl-service 2.4.0 py311h9bf148f_0 pytorch-nightly
[conda] mkl_fft 1.3.1 py311hc796f24_0 pytorch-nightly
[conda] mkl_random 1.2.2 py311hbba84a0_0 pytorch-nightly
[conda] mpmath 1.2.1 py311_0 pytorch-nightly
[conda] numpy 1.24.3 py311hc206e33_0 https://mirrors.ustc.edu.cn/anaconda/pkgs/main
[conda] numpy-base 1.24.3 py311hfd5febd_0 https://mirrors.ustc.edu.cn/anaconda/pkgs/main
[conda] pillow 9.3.0 py311h3fd9d12_2 pytorch-nightly
[conda] pysocks 1.7.1 py311_0 pytorch-nightly
[conda] pytorch 2.1.0.dev20230824 py3.11_cpu_0 pytorch-nightly
[conda] pytorch-mutex 1.0 cpu pytorch-nightly
[conda] requests 2.28.1 py311_0 pytorch-nightly
[conda] torchaudio 2.1.0.dev20230824 py311_cpu pytorch-nightly
[conda] torchvision 0.16.0.dev20230824 py311_cpu pytorch-nightly
[conda] urllib3 1.26.14 py311_0 pytorch-nightly
onnxscript-preview in /root/miniconda3/envs/test_onnx/lib/python3.11/site-packages (0.1.0.dev20230814)
Windows environment:
Collecting environment information...
PyTorch version: 2.1.0.dev20230824
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 10 Pro
GCC version: (Rev5, Built by MSYS2 project) 13.1.0
Clang version: Could not collect
CMake version: version 3.26.4
Libc version: N/A
Python version: 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 17:59:51) [MSC v.1935 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.19045-SP0
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Revision=
Versions of relevant libraries:
[pip3] numpy==1.24.4
[pip3] torch==2.1.0.dev20230824
[pip3] torchaudio==2.1.0.dev20230824
[pip3] torchvision==0.16.0.dev20230824
[conda] blas 1.0 mkl defaults
[conda] cpuonly 2.0 0 pytorch-nightly
[conda] mkl 2023.1.0 h6b88ed4_46357 defaults
[conda] mkl-service 2.4.0 py311h2bbff1b_1 defaults
[conda] mkl_fft 1.3.6 py311hf62ec03_1 defaults
[conda] mkl_random 1.2.2 py311hf62ec03_1 defaults
[conda] mpmath 1.2.1 py311_0 pytorch-nightly
[conda] numpy 1.24.4 pypi_0 pypi
[conda] numpy-base 1.25.2 py311hd01c5d8_0 defaults
[conda] pytorch 2.1.0.dev20230824 py3.11_cpu_0 pytorch-nightly
[conda] pytorch-mutex 1.0 cpu pytorch-nightly
[conda] torchaudio 2.1.0.dev20230824 py311_cpu pytorch-nightly
[conda] torchvision 0.16.0.dev20230824 py311_cpu pytorch-nightly
onnxscript-preview in c:\users\dell\miniconda3\envs\onnx_export\lib\site-packages (0.1.0.dev20230814)