@@ -314,7 +314,6 @@ std::tuple<Tensor &,Tensor &> mode_out(Tensor& values, Tensor& indices,
 }
 
 std::tuple<Tensor, Tensor> max(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   Tensor max_indices = at::empty({0}, self.options().dtype(kLong));
   if (self.is_quantized()) {
     Tensor max = at::empty({0}, self.options().dtype(toUnderlying(self.scalar_type())));
@@ -329,7 +328,6 @@ std::tuple<Tensor, Tensor> max(const Tensor& self, int64_t dim, bool keepdim) {
 
 static std::tuple<Tensor &,Tensor &> max_out_impl(Tensor& max, Tensor& max_indices,
                                                   const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
               "max only supports CPU AND CUDA device type, got: ", self.device().type());
   TORCH_CHECK(self.layout() == Layout::Strided,
@@ -342,6 +340,7 @@ static std::tuple<Tensor &,Tensor &> max_out_impl(Tensor& max, Tensor& max_indic
               max_indices.device(), " for indices output");
   dim = maybe_wrap_dim(dim, self.dim());
   if (_dimreduce_return_trivial_no_ident(max, self, dim, keepdim, "max")) {
+    TORCH_CHECK(!self.is_complex(), "max does not support complex inputs.");
     AT_ASSERT(max.dim() == 0);
     max_indices.resize_({}).fill_(0);
     return std::forward_as_tuple(max, max_indices);
@@ -353,7 +352,6 @@ static std::tuple<Tensor &,Tensor &> max_out_impl(Tensor& max, Tensor& max_indic
 
 std::tuple<Tensor&,Tensor&> max_out(Tensor& max, Tensor& max_indices,
                                     const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   auto result = [&]() {
     NoNamesGuard guard;
     return max_out_impl(max, max_indices, self, dim, keepdim);
@@ -364,7 +362,6 @@ std::tuple<Tensor&,Tensor&> max_out(Tensor& max, Tensor& max_indices,
 }
 
 std::tuple<Tensor, Tensor> min(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min is not yet implemented for complex tensors.");
   Tensor min_indices = at::empty({0}, self.options().dtype(kLong));
   if (self.is_quantized()) {
     Tensor min = at::empty({0}, self.options().dtype(toUnderlying(self.scalar_type())));
@@ -378,7 +375,6 @@ std::tuple<Tensor, Tensor> min(const Tensor& self, int64_t dim, bool keepdim) {
 
 static std::tuple<Tensor &, Tensor &> _aminmax_out_impl(Tensor& min, Tensor& max,
                                                         const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
               "min_max_val only supports CPU AND CUDA device type, got: ", self.device().type());
   TORCH_CHECK(self.layout() == Layout::Strided,
@@ -392,6 +388,7 @@ static std::tuple<Tensor &, Tensor &> _aminmax_out_impl(Tensor& min, Tensor& max
   dim = maybe_wrap_dim(dim, self.dim());
   if (_dimreduce_return_trivial_no_ident(min, self, dim, keepdim, "min") &&
       _dimreduce_return_trivial_no_ident(max, self, dim, keepdim, "max")) {
+    TORCH_CHECK(!self.is_complex(), "min_max does not support complex inputs.");
     return std::forward_as_tuple(min, max);
   } else {
     _aminmax_stub(self.device().type(), min, max, self, dim, keepdim);
@@ -400,7 +397,6 @@ static std::tuple<Tensor &, Tensor &> _aminmax_out_impl(Tensor& min, Tensor& max
 }
 
 std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min_max is not yet implemented for complex tensors.");
   TORCH_CHECK(!self.is_quantized(), "min is not yet implemented for quantized tensors.");
 
   Tensor min = at::empty({0}, self.options());
@@ -412,7 +408,6 @@ std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdi
 
 static std::tuple<Tensor &,Tensor &> min_out_impl(Tensor& min, Tensor& min_indices,
                                                   const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min is not yet implemented for complex tensors.");
   TORCH_CHECK(self.device().type() == DeviceType::CPU || self.device().type() == DeviceType::CUDA,
               "min only supports CPU AND CUDA device type, got: ", self.device().type());
   TORCH_CHECK(self.layout() == Layout::Strided,
@@ -425,6 +420,7 @@ static std::tuple<Tensor &,Tensor &> min_out_impl(Tensor& min, Tensor& min_indic
               min_indices.device(), " for indices output");
   dim = maybe_wrap_dim(dim, self.dim());
   if (_dimreduce_return_trivial_no_ident(min, self, dim, keepdim, "min")) {
+    TORCH_CHECK(!self.is_complex(), "min does not support complex inputs.");
     AT_ASSERT(min.dim() == 0);
     min_indices.resize_({}).fill_(0);
     return std::forward_as_tuple(min, min_indices);
@@ -436,7 +432,6 @@ static std::tuple<Tensor &,Tensor &> min_out_impl(Tensor& min, Tensor& min_indic
 
 std::tuple<Tensor&,Tensor&> min_out(Tensor& min, Tensor& min_indices,
                                     const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min is not yet implemented for complex tensors.");
   auto result = [&]() {
     NoNamesGuard guard;
     return min_out_impl(min, min_indices, self, dim, keepdim);
@@ -450,21 +445,17 @@ std::tuple<Tensor&,Tensor&> min_out(Tensor& min, Tensor& min_indices,
 // Named tensor overloads
 
 std::tuple<Tensor, Tensor> min(const Tensor& self, Dimname dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min is not yet implemented for complex tensors.");
   return at::min(self, dimname_to_position(self, dim), keepdim);
 }
 std::tuple<Tensor &,Tensor &> min_out(Tensor& min, Tensor& min_indices,
                                       const Tensor& self, Dimname dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "min is not yet implemented for complex tensors.");
   return at::min_out(min, min_indices, self, dimname_to_position(self, dim), keepdim);
 }
 std::tuple<Tensor, Tensor> max(const Tensor& self, Dimname dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   return at::max(self, dimname_to_position(self, dim), keepdim);
 }
 std::tuple<Tensor &,Tensor &> max_out(Tensor& max, Tensor& max_indices,
                                       const Tensor& self, Dimname dim, bool keepdim) {
-  TORCH_CHECK(!self.is_complex(), "max is not yet implemented for complex tensors.");
   return at::max_out(max, max_indices, self, dimname_to_position(self, dim), keepdim);
 }
 Tensor argmax(const Tensor& self, Dimname dim, bool keepdim) {