🐛 Describe the bug
g++: error: /tmp/torchinductor_XXXXtaomiao/lj/cljv3ce3ownrxxmaw4yjvbkwmvb3h5icpvobo5f5gglcgkelhw3c.cpp: file not found
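The failing script is not attached (see "Minified repro" below), so the following is only an illustrative sketch reconstructed from the frame names in the traceback (`opt_mm`, `self.p1`, `forward(positions, cond)`); the module bodies, tensor shapes, and the `dynamic=True` flag are assumptions, not the reporter's actual code:

```python
import torch
import torch.nn as nn

class P1(nn.Module):
    # Placeholder body; the real p1 is unknown.
    def forward(self, positions, cond):
        return positions + cond, positions.sum()

class MM(nn.Module):
    def __init__(self):
        super().__init__()
        self.p1 = P1()

    def forward(self, positions, cond):
        positions, gt = self.p1(positions, cond)  # corresponds to line 35 in the traceback
        return positions, gt

mm = MM()
opt_mm = torch.compile(mm, backend="inductor", dynamic=True)  # dynamic=True assumed from the script name
input_tensors = (torch.randn(16, 3), torch.randn(16, 3))      # shapes are made up
opt_mm(*input_tensors)  # corresponds to line 48; fails in Inductor's C++ compile on the reporter's machine
```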
Error logs
Traceback (most recent call last):
File "/home/XXXX/taomiao/FxGo/tests/dynamic_shape_compile.py", line 48, in
opt_mm(*input_tensors)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1519, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1528, in _call_impl
return forward_call(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/eval_frame.py", line 401, in _fn
return fn(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1519, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1528, in _call_impl
return forward_call(*args, **kwargs)
File "/home/XXXX/taomiao/FxGo/tests/dynamic_shape_compile.py", line 35, in forward
positions, gt = self.p1(positions, cond)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1519, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/nn/modules/module.py", line 1528, in _call_impl
return forward_call(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/eval_frame.py", line 549, in catch_errors
return callback(frame, cache_entry, hooks, frame_state)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 643, in _convert_frame
result = inner_convert(frame, cache_entry, hooks, frame_state)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 142, in _fn
return fn(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 384, in _convert_frame_assert
return _compile(
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 570, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 492, in compile_inner
out_code = transform_code_object(code, transform)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/bytecode_transformation.py", line 1028, in transform_code_object
transformations(instructions, code_options)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/convert_frame.py", line 462, in transform
tracer.run()
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/symbolic_convert.py", line 2107, in run
super().run()
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/symbolic_convert.py", line 747, in run
and self.step()
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/symbolic_convert.py", line 710, in step
getattr(self, inst.opname)(inst)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/symbolic_convert.py", line 455, in wrapper
self.output.compile_subgraph(self, reason=reason)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/output_graph.py", line 885, in compile_subgraph
self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
File "/home/XXXX/taomiao/Downloads/conda/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/output_graph.py", line 987, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/output_graph.py", line 1054, in call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
File "/home/XXXX/taomiao/pytorch/torch/dynamo/output_graph.py", line 1039, in call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "/home/XXXX/taomiao/pytorch/torch/dynamo/repro/after_dynamo.py", line 117, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/XXXX/taomiao/pytorch/torch/init.py", line 1604, in call
return compile_fx(model, inputs, config_patches=self.config)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/compile_fx.py", line 1171, in compile_fx
return aot_autograd(
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/backends/common.py", line 55, in compiler_fn
cg = aot_module_simplified(gm, example_inputs, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_functorch/aot_autograd.py", line 3922, in aot_module_simplified
compiled_fn = create_aot_dispatcher_function(
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_functorch/aot_autograd.py", line 3460, in create_aot_dispatcher_function
compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config, fw_metadata=fw_metadata)
File "/home/XXXX/taomiao/pytorch/torch/_functorch/aot_autograd.py", line 2243, in aot_wrapper_dedupe
return compiler_fn(flat_fn, leaf_flat_args, aot_config, fw_metadata=fw_metadata)
File "/home/XXXX/taomiao/pytorch/torch/_functorch/aot_autograd.py", line 2423, in aot_wrapper_synthetic_base
return compiler_fn(flat_fn, flat_args, aot_config, fw_metadata=fw_metadata)
File "/home/XXXX/taomiao/pytorch/torch/_functorch/aot_autograd.py", line 1604, in aot_dispatch_base
compiled_fw = compiler(fw_module, flat_args)
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/compile_fx.py", line 1108, in fw_compiler_base
return inner_compile(
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/repro/after_aot.py", line 80, in debug_wrapper
inner_compiled_fn = compiler_fn(gm, example_inputs)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/debug.py", line 297, in inner
return fn(*args, **kwargs)
File "/home/XXXX/taomiao/Downloads/conda/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/compile_fx.py", line 350, in compile_fx_inner
compiled_graph = fx_codegen_and_compile(
File "/home/XXXX/taomiao/pytorch/torch/_inductor/compile_fx.py", line 563, in fx_codegen_and_compile
compiled_fn = graph.compile_to_fn()
File "/home/XXXX/taomiao/pytorch/torch/_inductor/graph.py", line 1022, in compile_to_fn
return self.compile_to_module().call
File "/home/XXXX/taomiao/pytorch/torch/_dynamo/utils.py", line 221, in time_wrapper
r = func(*args, **kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/graph.py", line 977, in compile_to_module
mod = PyCodeCache.load_by_key_path(
File "/home/XXXX/taomiao/pytorch/torch/_inductor/codecache.py", line 1618, in load_by_key_path
exec(code, mod.__dict__, mod.__dict__)
File "/tmp/torchinductor_XXXX\taomiao/dy/cdyeuzfxk4z24mhc3722xrwhhalf4kerfxdjcujrhgrgzlam3gge.py", line 42, in
async_compile.wait(globals())
File "/home/XXXX/taomiao/pytorch/torch/_inductor/codecache.py", line 2190, in wait
scope[key] = result.result()
File "/home/XXXX/taomiao/Downloads/conda/lib/python3.10/concurrent/futures/_base.py", line 458, in result
return self.__get_result()
File "/home/XXXX/taomiao/Downloads/conda/lib/python3.10/concurrent/futures/_base.py", line 403, in __get_result
raise self._exception
File "/home/XXXX/taomiao/Downloads/conda/lib/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/codecache.py", line 2161, in task
return CppCodeCache.load(source_code).kernel
File "/home/XXXX/taomiao/pytorch/torch/_inductor/codecache.py", line 1570, in load
compile_file(input_path, output_path, cmd)
File "/home/XXXX/taomiao/pytorch/torch/_inductor/codecache.py", line 1523, in compile_file
raise exc.CppCompileError(cmd, output) from e
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
CppCompileError: C++ compile error
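The traceback bottoms out in Inductor's CppCodeCache: the generated kernel source is written under /tmp/torchinductor_<user> and then handed to g++, which reports the .cpp file as missing. As a hedged diagnostic sketch (not a confirmed fix), Inductor's on-disk cache location can be redirected via the TORCHINDUCTOR_CACHE_DIR environment variable to rule out /tmp cleanup or permission problems; the directory path below is only an example:

```python
import os

# Point Inductor's code cache at a directory we control instead of /tmp/torchinductor_<user>.
# Must be set before torch._inductor writes any generated code.
os.environ["TORCHINDUCTOR_CACHE_DIR"] = "/home/XXXX/taomiao/inductor_cache"  # example path, not from the report

import torch

@torch.compile(backend="inductor")
def f(x):
    return x * 2 + 1

f(torch.randn(4))  # generated C++/Python artifacts now land under the directory set above
```

If the compile still fails with the redirected cache, the referenced .cpp file can be inspected in place, which helps distinguish a missing/cleaned-up file from an actual C++ compilation error.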
Minified repro
No response
Versions
PyTorch version: 2.2.0a0+gitddb0c26
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.5 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: 10.0.0-4ubuntu1
CMake version: version 3.26.4
Libc version: glibc-2.31
Python version: 3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-148-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 11.7.99
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce GTX 1660 Ti
Nvidia driver version: 525.116.04
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.3
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.3
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 39 bits physical, 48 bits virtual
CPU(s): 16
On-line CPU(s) list: 0-15
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 167
Model name: 11th Gen Intel(R) Core(TM) i7-11700 @ 2.50GHz
Stepping: 1
CPU MHz: 4195.157
CPU max MHz: 4900.0000
CPU min MHz: 800.0000
BogoMIPS: 4992.00
Virtualization: VT-x
L1d cache: 384 KiB
L1i cache: 256 KiB
L2 cache: 4 MiB
L3 cache: 16 MiB
NUMA node0 CPU(s): 0-15
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap avx512ifma clflushopt intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.23.0
[pip3] onnx==1.14.0
[pip3] onnxruntime==1.14.1
[pip3] optree==0.9.2
[pip3] pytorch-triton==2.1.0+7d1a95b046
[pip3] torch==2.2.0a0+gitddb0c26
[pip3] torchaudio==2.1.0.dev20230505+cu117
[pip3] torchinfo==1.8.0
[pip3] torchvision==0.16.0.dev20230505+cu117
[conda] magma-cuda110 2.5.2 1 pytorch
[conda] mkl 2023.1.0 h213fc3f_46343
[conda] mkl-include 2023.1.0 h06a4308_46343
[conda] numpy 1.23.0 pypi_0 pypi
[conda] optree 0.9.2 pypi_0 pypi
[conda] pytorch-triton 2.1.0+7d1a95b046 pypi_0 pypi
[conda] torch 2.2.0a0+gitddb0c26 dev_0
[conda] torchaudio 2.1.0.dev20230505+cu117 pypi_0 pypi
[conda] torchinfo 1.8.0 pypi_0 pypi
[conda] torchvision 0.16.0.dev20230505+cu117 pypi_0 pypi