Renaming scales parameter for interpolate (#31526) · pytorch/pytorch@c4f10e0 · GitHub
Commit c4f10e0

BowenBao authored and facebook-github-bot committed
Renaming scales parameter for interpolate (#31526)
Summary: PR separated from #31274.
Pull Request resolved: #31526
Reviewed By: zou3519
Differential Revision: D19221931
Pulled By: gchanan
fbshipit-source-id: 81958a9910867ac9d62f2b47abc49384526c4e51
1 parent 236b0a3 commit c4f10e0

39 files changed: +576 −570 lines

aten/src/ATen/native/Pooling.cpp

Lines changed: 0 additions & 9 deletions
@@ -138,14 +138,5 @@ Tensor max_pool3d(
       self, kernel_size, stride, padding, dilation, ceil_mode);
   return std::get<0>(output_and_indices);
 }
-
-Tensor _test_optional_float(const Tensor & self, c10::optional<double> scale) {
-  if (scale.has_value()) {
-    return at::full({}, scale.value(), self.options());
-  } else {
-    return at::empty({0}, self.options());
-  }
-}
-
 } // namespace native
 } // namespace at
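The deleted _test_optional_float helper exercised c10::optional<double> arguments; with this PR the upsample operators take that argument type for real, so the test hook is no longer needed. For orientation, a minimal sketch of the optional-argument pattern used throughout the new signatures (describe_scale is an illustrative name, not code from this commit, and the c10/util/Optional.h include is an assumption about where c10::optional lives):

#include <c10/util/Optional.h>

// Illustrative only: an absent optional replaces the old sentinel default (scale = -1.0).
double describe_scale(c10::optional<double> scale) {
  return scale.has_value() ? scale.value() : -1.0;
}

// describe_scale(c10::nullopt) -> -1.0  ("no scale supplied")
// describe_scale(0.5)          -> 0.5   (a plain double converts implicitly)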

aten/src/ATen/native/UpSample.h

Lines changed: 14 additions & 12 deletions
@@ -8,22 +8,23 @@
  * Note [compute_scales_value]
  * Note [area_pixel_compute_scale]
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Interpolate with scale_factors can have different behaviors
- * depending on the value of use_scale_factor:
+ * Interpolate with scale_factor can have different behaviors
+ * depending on the value of recompute_scale_factor:
  *
- * - With use_scale_factor = False (current default behavior):
- *   the scale_factors provided by the user, are used to calculate
+ * - With recompute_scale_factor = True (current default behavior):
+ *   the scale_factor, when provided by the user, are used to calculate
  *   the output size. The input size and the computed output_size
  *   are then used to infer new values for the scales which are
- *   used in the interpolation.
+ *   used in the interpolation. Because floating-point math is not exact,
+ *   this may be a different value from the user-supplied scales.
  *
- * - With use_scale_factor = True (which will be the default
+ * - With recompute_scale_factor = False (which will be the default
  *   behavior starting 1.5.0):
  *   the behavior follows opencv logic, and the scales provided by
  *   the user are the ones used in the interpolation calculations.
  *
- * If the scales are not available or if they are available but
- * use_scale_factor is set to False (default behavior), the scales
+ * If the scales are not provided or if they are provided but
+ * recompute_scale_factor is set to True (default behavior), the scales
 * are computed from the input and the output size;
  *
  *
@@ -150,12 +151,13 @@ static inline void upsample_3d_shape_check(
 
 template <typename scalar_t>
 static inline scalar_t compute_scales_value(
-    const double scale,
+    const c10::optional<double> scale,
     int64_t input_size,
     int64_t output_size) {
   // see Note [compute_scales_value]
-  return (scale > 0.)
-      ? static_cast<scalar_t>(1.0 / scale)
+  // FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults.
+  return (scale.has_value() && scale.value() > 0.)
+      ? static_cast<scalar_t>(1.0 / scale.value())
       : (static_cast<scalar_t>(input_size) / output_size);
 }
 
@@ -164,7 +166,7 @@ static inline scalar_t area_pixel_compute_scale(
     int64_t input_size,
     int64_t output_size,
     bool align_corners,
-    const double scale=-1.0) {
+    const c10::optional<double> scale) {
   // see Note [area_pixel_compute_scale]
   if (output_size > 1) {
     return align_corners
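The two helpers above decide which step size the interpolation kernels use: a user-supplied positive scale wins, otherwise the step is derived from the input and output sizes, with align_corners changing the derivation. A self-contained sketch of that logic follows; the align_corners branch is filled in as an assumption, since its body is cut off by the hunk above, so treat this as an approximation rather than the exact header contents (the _sketch names are mine):

#include <cstdint>
#include <c10/util/Optional.h>

// Approximation of compute_scales_value: prefer an explicit positive
// user-supplied scale, otherwise derive the step from the two sizes.
template <typename scalar_t>
scalar_t compute_scales_value_sketch(
    c10::optional<double> scale, int64_t input_size, int64_t output_size) {
  return (scale.has_value() && scale.value() > 0.)
      ? static_cast<scalar_t>(1.0 / scale.value())
      : static_cast<scalar_t>(input_size) / output_size;
}

// Approximation of area_pixel_compute_scale: with align_corners the end
// points of the two grids coincide, so the step is (in - 1) / (out - 1);
// otherwise defer to the helper above.
template <typename scalar_t>
scalar_t area_pixel_compute_scale_sketch(
    int64_t input_size,
    int64_t output_size,
    bool align_corners,
    c10::optional<double> scale) {
  if (output_size > 1) {
    return align_corners
        ? static_cast<scalar_t>(input_size - 1) / (output_size - 1)
        : compute_scales_value_sketch<scalar_t>(scale, input_size, output_size);
  }
  return static_cast<scalar_t>(0);
}

// area_pixel_compute_scale_sketch<double>(4, 8, false, c10::nullopt) == 0.5
// area_pixel_compute_scale_sketch<double>(4, 8, false, 2.0)          == 0.5  (1 / scale)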

aten/src/ATen/native/UpSampleBicubic2d.cpp

Lines changed: 28 additions & 28 deletions
@@ -17,8 +17,8 @@ static void upsample_bicubic2d_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   // Special case: input/output same size, just copy
   if (input_height == output_height && input_width == output_width) {
     for (int64_t output_y = 0; output_y < output_height; output_y++) {
@@ -38,9 +38,9 @@ static void upsample_bicubic2d_out_frame(
 
   // Bicubic interpolation
   const scalar_t height_scale = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
   const scalar_t width_scale = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t output_y = 0; output_y < output_height; output_y++) {
     for (int64_t output_x = 0; output_x < output_width; output_x++) {
@@ -99,8 +99,8 @@ static void upsample_bicubic2d_backward_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   channels = channels * nbatch;
 
   // Special case: input/output same size, just copy
@@ -120,9 +120,9 @@ static void upsample_bicubic2d_backward_out_frame(
   }
 
   const scalar_t height_scale = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
   const scalar_t width_scale = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t output_y = 0; output_y < output_height; output_y++) {
     for (int64_t output_x = 0; output_x < output_width; output_x++) {
@@ -170,8 +170,8 @@ static void upsample_bicubic2d_out_cpu_template(
     const Tensor& input_,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -214,8 +214,8 @@ static void upsample_bicubic2d_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 
@@ -225,8 +225,8 @@ static void upsample_bicubic2d_backward_out_cpu_template(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -275,8 +275,8 @@ static void upsample_bicubic2d_backward_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 } // namespace
@@ -286,22 +286,22 @@ Tensor& upsample_bicubic2d_out_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   upsample_bicubic2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
 Tensor upsample_bicubic2d_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   auto output = at::empty({0}, input.options());
   upsample_bicubic2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
@@ -311,10 +311,10 @@ Tensor& upsample_bicubic2d_backward_out_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   upsample_bicubic2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
 
@@ -323,11 +323,11 @@ Tensor upsample_bicubic2d_backward_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   auto grad_input = at::zeros(input_size, grad_output.options());
   upsample_bicubic2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
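The height_scale and width_scale values computed above feed area_pixel_compute_source_index, which maps each output pixel back to a fractional input coordinate. That function is not part of this diff, so the following is a hedged sketch of the mapping rather than the file's code; the _sketch name marks it as illustrative:

#include <cstdint>

// Map an output index to a fractional input coordinate using a
// per-dimension scale (height_scale or width_scale above).
template <typename scalar_t>
scalar_t source_index_sketch(
    scalar_t scale, int64_t dst_index, bool align_corners, bool cubic) {
  if (align_corners) {
    // grid end points coincide, so the mapping is a pure scaling
    return scale * dst_index;
  }
  // otherwise treat indices as pixel centers: shift by half a pixel
  scalar_t src_idx =
      scale * (dst_index + static_cast<scalar_t>(0.5)) - static_cast<scalar_t>(0.5);
  // assume linear/nearest clamp negative coordinates to 0, while bicubic
  // keeps them and relies on bounds-checked reads inside its kernel
  return (!cubic && src_idx < static_cast<scalar_t>(0))
      ? static_cast<scalar_t>(0)
      : src_idx;
}

Because scales_h and scales_w are now separate optionals, the height and width mappings can legitimately use different user-supplied factors.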

aten/src/ATen/native/UpSampleBilinear2d.cpp

Lines changed: 28 additions & 28 deletions
@@ -20,8 +20,8 @@ static void upsample_bilinear2d_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   channels = channels * nbatch;
 
   // special case: just copy
@@ -44,10 +44,10 @@ static void upsample_bilinear2d_out_frame(
     return;
   }
   const scalar_t rheight = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
 
   const scalar_t rwidth = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t h2 = 0; h2 < output_height; ++h2) {
     const scalar_t h1r = area_pixel_compute_source_index<scalar_t>(
@@ -94,8 +94,8 @@ static void upsample_bilinear2d_backward_out_frame(
     int64_t nbatch,
     int64_t channels,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   channels = channels * nbatch;
 
   // special case: same-size matching grids
@@ -118,9 +118,9 @@ static void upsample_bilinear2d_backward_out_frame(
   }
 
   const scalar_t rheight = area_pixel_compute_scale<scalar_t>(
-      input_height, output_height, align_corners, scales_1);
+      input_height, output_height, align_corners, scales_h);
   const scalar_t rwidth = area_pixel_compute_scale<scalar_t>(
-      input_width, output_width, align_corners, scales_2);
+      input_width, output_width, align_corners, scales_w);
 
   for (int64_t h2 = 0; h2 < output_height; ++h2) {
     const scalar_t h1r = area_pixel_compute_source_index<scalar_t>(
@@ -163,8 +163,8 @@ static void upsample_bilinear2d_out_cpu_template(
     const Tensor& input_,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -211,8 +211,8 @@ static void upsample_bilinear2d_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 
@@ -222,8 +222,8 @@ static void upsample_bilinear2d_backward_out_cpu_template(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
@@ -272,8 +272,8 @@ static void upsample_bilinear2d_backward_out_cpu_template(
         nbatch,
         channels,
         align_corners,
-        scales_1,
-        scales_2);
+        scales_h,
+        scales_w);
   });
 }
 } // namespace
@@ -283,22 +283,22 @@ Tensor& upsample_bilinear2d_out_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   upsample_bilinear2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
 Tensor upsample_bilinear2d_cpu(
     const Tensor& input,
     IntArrayRef output_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   auto output = at::empty({0}, input.options());
   upsample_bilinear2d_out_cpu_template(
-      output, input, output_size, align_corners, scales_1, scales_2);
+      output, input, output_size, align_corners, scales_h, scales_w);
   return output;
 }
 
@@ -308,10 +308,10 @@ Tensor& upsample_bilinear2d_backward_out_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   upsample_bilinear2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
 
@@ -320,11 +320,11 @@ Tensor upsample_bilinear2d_backward_cpu(
     IntArrayRef output_size,
     IntArrayRef input_size,
     bool align_corners,
-    double scales_1,
-    double scales_2) {
+    c10::optional<double> scales_h,
+    c10::optional<double> scales_w) {
   auto grad_input = at::zeros(input_size, grad_output.options());
   upsample_bilinear2d_backward_out_cpu_template(
-      grad_input, grad_output, output_size, input_size, align_corners, scales_1, scales_2);
+      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
   return grad_input;
 }
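The bilinear kernels consume the per-dimension scales through the same source-index mapping and then blend the two nearest input rows (and, symmetrically, columns). A minimal sketch of how the fractional source index h1r from the context lines above is typically split into an integer base index and two blending weights; the struct and function names here are illustrative, not the file's:

#include <cstdint>

struct LinearWeights {
  int64_t lo;      // lower input index
  int64_t offset;  // 1 if an upper neighbour exists, 0 at the edge
  double w_hi;     // weight of the upper neighbour (fractional part)
  double w_lo;     // weight of the lower neighbour
};

// src_idx is the (non-negative) fractional coordinate from the mapping above.
LinearWeights linear_weights_sketch(double src_idx, int64_t input_size) {
  LinearWeights w;
  w.lo = static_cast<int64_t>(src_idx);
  w.offset = (w.lo < input_size - 1) ? 1 : 0;
  w.w_hi = src_idx - static_cast<double>(w.lo);
  w.w_lo = 1.0 - w.w_hi;
  return w;
}

// A 2d output sample then combines four neighbours, with wy/wx the row and
// column weights:
//   out = wy.w_lo * (wx.w_lo * in[y][x]             + wx.w_hi * in[y][x + wx.offset])
//       + wy.w_hi * (wx.w_lo * in[y + wy.offset][x] + wx.w_hi * in[y + wy.offset][x + wx.offset]);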

0 commit comments
