[BE] Replace `std::runtime_error` with `TORCH_CHECK` [1/N] (#151880) · pytorch/pytorch@b32b002
shink authored and pytorchmergebot committed
[BE] Replace std::runtime_error with TORCH_CHECK [1/N] (#151880)
Part of: #148114

Pull Request resolved: #151880
Approved by: https://github.com/albanD, https://github.com/Skylion007, https://github.com/cyyever
1 parent 6d28d61 commit b32b002
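
Every hunk below applies the same mechanical pattern: an explicit `if`/`throw std::runtime_error(...)` becomes a `TORCH_CHECK(cond, msg...)`, which throws `c10::Error` on failure. A minimal sketch of the pattern, assuming a translation unit that can include the c10 headers (`check_positive` and `value` are illustrative names, not part of this commit):

// A minimal sketch of the conversion pattern, assuming a build that can
// include the c10 headers; `check_positive` and `value` are illustrative
// names, not from this commit.
#include <c10/util/Exception.h>
#include <cstdint>

void check_positive(int64_t value) {
  // Before:
  //   if (value <= 0) {
  //     throw std::runtime_error("expected a positive value");
  //   }
  // After: TORCH_CHECK throws c10::Error on failure and concatenates its
  // trailing arguments into the message, so runtime values can be inlined.
  TORCH_CHECK(value > 0, "expected a positive value, got ", value);
}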

File tree

9 files changed: +70, -62 lines

aten/src/ATen/Context.cpp

Lines changed: 1 addition & 1 deletion
@@ -694,7 +694,7 @@ void Context::setAllowFP16ReductionCPU(bool b) {
 #else
     if (true)
 #endif
-      throw std::runtime_error("Float16 arithmetic is not supported by the CPU!");
+      TORCH_CHECK(false, "Float16 arithmetic is not supported by the CPU!");
   }
   allow_fp16_reduction_cpu = b;
 }

aten/src/ATen/Context.h

Lines changed: 2 additions & 1 deletion
@@ -550,7 +550,8 @@ inline size_t getNumGPUs() {
   // devices for a specific device type, add that function to the
   // relevant library (e.g., similar to at::cuda::device_count())
   if (hasCUDA() && hasHIP()) {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
         "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
         "means HIP. Rebuild PyTorch with one or the other disabled.");

aten/src/ATen/autocast_mode.h

Lines changed: 4 additions & 2 deletions
@@ -195,7 +195,8 @@ inline DispatchKey get_autocast_dispatch_key_from_device_type(
     case c10::DeviceType::MPS:
       return DispatchKey::AutocastMPS;
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
   }
 }
@@ -216,7 +217,8 @@ inline at::ScalarType get_lower_precision_fp_from_device_type(
   if (is_autocast_available(device_type)) {
     return get_autocast_dtype(device_type);
   } else {
-    throw std::runtime_error(
+    TORCH_CHECK(
+        false,
         "unknown device type for autocast in get_lower_precision_fp_from_device_type");
   }
 }

aten/src/ATen/core/Dict_inl.h

Lines changed: 1 addition & 2 deletions
@@ -53,8 +53,7 @@ inline size_t DictKeyHash::operator()(const IValue& ivalue) const {
   } else if (ivalue.isDevice()) {
     return std::hash<Device>()(ivalue.toDevice());
   } else {
-    throw std::runtime_error(
-        "Can't hash IValues with tag '" + ivalue.tagKind() + "'");
+    TORCH_CHECK(false, "Can't hash IValues with tag '", ivalue.tagKind(), "'");
   }
 }
 

aten/src/ATen/core/Tensor.cpp

Lines changed: 2 additions & 3 deletions
@@ -51,9 +51,8 @@ TensorBase TensorBase::to(
 }
 
 void TensorBase::enforce_invariants() {
-  if (impl_.get() == nullptr) {
-    throw std::runtime_error("TensorImpl with nullptr is not supported");
-  }
+  TORCH_CHECK(
+      impl_.get() != nullptr, "TensorImpl with nullptr is not supported");
   // Following line throws if the method is not a POD data type or is not
   // supported by ATen
   scalar_type();

aten/src/ATen/core/function_schema_inl.h

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ inline void FunctionSchema::checkAndNormalizeInputs(
     for(const auto& k : kwargs) {
      names.emplace_back(k.first);
     }
-    throw std::runtime_error(findErrorInKwargs(names));
+    TORCH_CHECK(false, findErrorInKwargs(names));
   }
 }
 

aten/src/ATen/miopen/Descriptors.cpp

Lines changed: 13 additions & 3 deletions
@@ -17,7 +17,9 @@ inline miopenDataType_t getDataType(const at::Tensor& t) {
   } else if (scalar_type == at::kBFloat16) {
     return miopenBFloat16;
   } else {
-    throw std::runtime_error("TensorDescriptor only supports float, half and bfloat16 tensors");
+    TORCH_CHECK(
+        false,
+        "TensorDescriptor only supports float, half and bfloat16 tensors");
   }
 }
 
@@ -35,7 +37,11 @@ void TensorDescriptor::set(miopenDataType_t datatype, IntArrayRef t_sizes, IntAr
   if (dim > MIOPEN_DIM_MAX || pad > MIOPEN_DIM_MAX)
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
   int size[MIOPEN_DIM_MAX];
@@ -96,7 +102,11 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
   if (dim > static_cast<int64_t>(MIOPEN_DIM_MAX) || pad > static_cast<int64_t>(MIOPEN_DIM_MAX)) {
 #define _STR(X) #X
 #define STR(X) _STR(X)
-    throw std::runtime_error("MIOpen supports only up to " STR(MIOPEN_DIM_MAX) " dimensions");
+    TORCH_CHECK(
+        false,
+        "MIOpen supports only up to ",
+        STR(MIOPEN_DIM_MAX),
+        " dimensions");
 #undef _STR
 #undef STR
 }
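
The two MIOpen hunks keep the `_STR`/`STR` macro pair because stringifying `MIOPEN_DIM_MAX` needs two macro levels: the outer `STR` expands its argument first, then `_STR` applies `#`. A standalone sketch of that behavior (`DIM_MAX` is an illustrative stand-in, not the real MIOpen macro):

// Two-level stringification: STR expands its argument first, then _STR
// applies #. DIM_MAX here is an illustrative stand-in for MIOPEN_DIM_MAX.
#include <cstdio>

#define DIM_MAX 8
#define _STR(X) #X
#define STR(X) _STR(X)

int main() {
  std::puts("up to " STR(DIM_MAX) " dimensions");   // prints: up to 8 dimensions
  std::puts("up to " _STR(DIM_MAX) " dimensions");  // prints: up to DIM_MAX dimensions
  return 0;
}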

aten/src/ATen/mkl/Descriptors.h

Lines changed: 4 additions & 6 deletions
@@ -17,9 +17,8 @@ struct DftiDescriptorDeleter {
 class DftiDescriptor {
  public:
   void init(DFTI_CONFIG_VALUE precision, DFTI_CONFIG_VALUE signal_type, MKL_LONG signal_ndim, MKL_LONG* sizes) {
-    if (desc_ != nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR can only be initialized once");
-    }
+    TORCH_CHECK(
+        desc_ == nullptr, "DFTI DESCRIPTOR can only be initialized once");
     DFTI_DESCRIPTOR *raw_desc;
     if (signal_ndim == 1) {
       MKL_DFTI_CHECK(DftiCreateDescriptor(&raw_desc, precision, signal_type, 1, sizes[0]));
@@ -30,9 +29,8 @@ class DftiDescriptor {
   }
 
   DFTI_DESCRIPTOR *get() const {
-    if (desc_ == nullptr) {
-      throw std::runtime_error("DFTI DESCRIPTOR has not been initialized");
-    }
+    TORCH_CHECK(
+        desc_ != nullptr, "DFTI DESCRIPTOR has not been initialized");
     return desc_.get();
   }
 
aten/src/ATen/native/NNPACK.cpp

Lines changed: 42 additions & 43 deletions
@@ -25,8 +25,7 @@ at::Tensor _nnpack_spatial_convolution(
     const Tensor& weight, const std::optional<Tensor>& bias_opt,
     const IntArrayRef padding,
     const IntArrayRef stride) {
-  throw std::runtime_error(
-      "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
+  TORCH_CHECK(false, "nnpack_spatial_convolution: ATen not compiled with NNPACK support");
 }
 
 bool _nnpack_available() {
@@ -143,51 +142,51 @@ Tensor _nnpack_spatial_convolution(
       input.options());
 
   // Our input Tensor must be in the form N,C,H,W
-  if (input.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
-  }
+  TORCH_CHECK(
+      input.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D input Tensor N,C,H,W");
+
   // Our weight Tensor must be in the form oC,iC,kH,kW
-  if (weight.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
-  }
+  TORCH_CHECK(
+      weight.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D weight Tensor oC,iC,kH,kW");
+
   // Our output Tensor must be in the form N,oC,oH,oW
-  if (output.ndimension() != 4) {
-    throw std::runtime_error(
-        "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");
-  }
+  TORCH_CHECK(
+      output.ndimension() == 4,
+      "NNPack convolutionOutput expects 4D output Tensor N,oC,oH,oW");
 
   // Some basic shape checking, not comprehensive
-  if (input.size(1) != weight.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of input channels in input Tensor ("
-        << input.size(1) << ") and weight Tensor (" << weight.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (weight.size(0) != output.size(1)) {
-    std::stringstream err;
-    err << "Mismatch between number of output channels in weight Tensor ("
-        << weight.size(0) << ") and output Tensor (" << output.size(1)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
-  if (input.size(0) != output.size(0)) {
-    std::stringstream err;
-    err << "Mismatch between batch size in input Tensor (" << input.size(0)
-        << ") and output Tensor (" << output.size(0)
-        << ") in NNPack convolutionOutput";
-    throw std::runtime_error(err.str());
-  }
+  TORCH_CHECK(
+      input.size(1) == weight.size(1),
+      "Mismatch between number of input channels in input Tensor (",
+      input.size(1),
+      ") and weight Tensor (",
+      weight.size(1),
+      ") in NNPack convolutionOutput");
+
+  TORCH_CHECK(
+      weight.size(0) == output.size(1),
+      "Mismatch between number of output channels in weight Tensor (",
+      weight.size(0),
+      ") and output Tensor (",
+      output.size(1),
+      ") in NNPack convolutionOutput");
+
+  TORCH_CHECK(
+      input.size(0) == output.size(0),
+      "Mismatch between batch size in input Tensor (",
+      input.size(0),
+      ") and output Tensor (",
+      output.size(0),
+      ") in NNPack convolutionOutput");
 
   // All Tensors must be float Tensors
   if (input.device().type() != kCPU || input.scalar_type() != kFloat ||
       weight.device().type() != kCPU || weight.scalar_type() != kFloat ||
       output.device().type() != kCPU || output.scalar_type() != kFloat ||
       (bias.defined() && (bias.device().type() != kCPU || bias.scalar_type() != kFloat))) {
-    throw std::runtime_error(
-        "Mismatched Tensor types in NNPack convolutionOutput");
+    TORCH_CHECK(false, "Mismatched Tensor types in NNPack convolutionOutput");
   }
 
   const auto algorithm = nnp_convolution_algorithm_auto;
@@ -281,9 +280,9 @@ Tensor _nnpack_spatial_convolution(
   auto size_and_allocate_ws = [&]() {
     // Run a single pass to get the size of memory workspace buffer
    const auto status = compute(batch_size);
-    if (status != nnp_status_success) {
-      throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed");
-    }
+    TORCH_CHECK(
+        status == nnp_status_success,
+        "NNPACK SpatialConvolution_updateOutput failed");
     workspace.allocate();
   };
 
@@ -304,9 +303,9 @@ Tensor _nnpack_spatial_convolution(
     status = compute(batch_size);
   }
 
-  if (status != nnp_status_success) {
-    throw std::runtime_error("NNPACK SpatialConvolution_updateOutput failed");
-  }
+  TORCH_CHECK(
+      status == nnp_status_success,
+      "NNPACK SpatialConvolution_updateOutput failed");
 
   return output;
 }
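
The shape-check hunks above also replace `std::stringstream` message assembly with `TORCH_CHECK`'s variadic message arguments, which are formatted only when the check fails, so the passing path does no string work. A minimal sketch under the same c10 assumption as above (`check_channels` and its parameters are illustrative names, not from the diff):

// Variadic message arguments replace manual std::stringstream assembly;
// they are stringified and concatenated only in the failure branch.
#include <c10/util/Exception.h>
#include <cstdint>

void check_channels(int64_t input_channels, int64_t weight_channels) {
  TORCH_CHECK(
      input_channels == weight_channels,
      "Mismatch between number of input channels in input Tensor (",
      input_channels,
      ") and weight Tensor (",
      weight_channels,
      ")");
}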

0 commit comments