@@ -427,93 +427,21 @@ def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                 # input data is not going to match the size on the screen so we
                 # have to resample to the correct number of pixels

-                # TODO slice input array first
-                a_min = A.min()
-                a_max = A.max()
-                if a_min is np.ma.masked:  # All masked; values don't matter.
-                    a_min, a_max = np.int32(0), np.int32(1)
                 if A.dtype.kind == 'f':  # Float dtype: scale to same dtype.
-                    scaled_dtype = np.dtype(
-                        np.float64 if A.dtype.itemsize > 4 else np.float32)
+                    scaled_dtype = np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
                     if scaled_dtype.itemsize < A.dtype.itemsize:
                         _api.warn_external(f"Casting input data from {A.dtype}"
                                            f" to {scaled_dtype} for imshow.")
                 else:  # Int dtype, likely.
+                    # TODO slice input array first
                     # Scale to appropriately sized float: use float32 if the
                     # dynamic range is small, to limit the memory footprint.
-                    da = a_max.astype(np.float64) - a_min.astype(np.float64)
-                    scaled_dtype = np.float64 if da > 1e8 else np.float32
-
-                # Scale the input data to [.1, .9]. The Agg interpolators clip
-                # to [0, 1] internally, and we use a smaller input scale to
-                # identify the interpolated points that need to be flagged as
-                # over/under. This may introduce numeric instabilities in very
-                # broadly scaled data.
-
-                # Always copy, and don't allow array subtypes.
-                A_scaled = np.array(A, dtype=scaled_dtype)
-                # Clip scaled data around norm if necessary. This is necessary
-                # for big numbers at the edge of float64's ability to represent
-                # changes. Applying a norm first would be good, but ruins the
-                # interpolation of over numbers.
-                self.norm.autoscale_None(A)
-                dv = np.float64(self.norm.vmax) - np.float64(self.norm.vmin)
-                vmid = np.float64(self.norm.vmin) + dv / 2
-                fact = 1e7 if scaled_dtype == np.float64 else 1e4
-                newmin = vmid - dv * fact
-                if newmin < a_min:
-                    newmin = None
-                else:
-                    a_min = np.float64(newmin)
-                newmax = vmid + dv * fact
-                if newmax > a_max:
-                    newmax = None
-                else:
-                    a_max = np.float64(newmax)
-                if newmax is not None or newmin is not None:
-                    np.clip(A_scaled, newmin, newmax, out=A_scaled)
-
-                # Rescale the raw data to [offset, 1-offset] so that the
-                # resampling code will run cleanly. Using dyadic numbers here
-                # could reduce the error, but would not fully eliminate it and
-                # breaks a number of tests (due to the slightly different
-                # error bouncing some pixels across a boundary in the (very
-                # quantized) colormapping step).
-                offset = .1
-                frac = .8
-                # Run vmin/vmax through the same rescaling as the raw data;
-                # otherwise, data values close or equal to the boundaries can
-                # end up on the wrong side due to floating point error.
-                vmin, vmax = self.norm.vmin, self.norm.vmax
-                if vmin is np.ma.masked:
-                    vmin, vmax = a_min, a_max
-                vrange = np.array([vmin, vmax], dtype=scaled_dtype)
-
-                A_scaled -= a_min
-                vrange -= a_min
-                # .item() handles a_min/a_max being ndarray subclasses.
-                a_min = a_min.astype(scaled_dtype).item()
-                a_max = a_max.astype(scaled_dtype).item()
-
-                if a_min != a_max:
-                    A_scaled /= ((a_max - a_min) / frac)
-                    vrange /= ((a_max - a_min) / frac)
-                A_scaled += offset
-                vrange += offset
+                    da = A.max().astype("f8") - A.min().astype("f8")
+                    scaled_dtype = "f8" if da > 1e8 else "f4"
+
                 # resample the input data to the correct resolution and shape
-                A_resampled = _resample(self, A_scaled, out_shape, t)
-                del A_scaled  # Make sure we don't use A_scaled anymore!
-                # Un-scale the resampled data to approximately the original
-                # range. Things that interpolated to outside the original range
-                # will still be outside, but possibly clipped in the case of
-                # higher order interpolation + drastically changing data.
-                A_resampled -= offset
-                vrange -= offset
-                if a_min != a_max:
-                    A_resampled *= ((a_max - a_min) / frac)
-                    vrange *= ((a_max - a_min) / frac)
-                A_resampled += a_min
-                vrange += a_min
+                A_resampled = _resample(self, A.astype(scaled_dtype), out_shape, t)
+
                 # if using NoNorm, cast back to the original datatype
                 if isinstance(self.norm, mcolors.NoNorm):
                     A_resampled = A_resampled.astype(A.dtype)
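
The first hunk removes the whole rescale-to-[.1, .9] round trip: the old code shifted the input into that band (because the Agg interpolators clip to [0, 1] internally), clipped it around the norm, resampled, and then unscaled the result, while the new code only picks a working float dtype and resamples the data as-is. The dtype choice is small enough to show standalone; the sketch below mirrors it outside the class (the helper name and the example arrays are ours, not part of the diff):

import numpy as np

def pick_resample_dtype(A):
    # Mirror of the dtype selection in the hunk above (helper name is ours):
    # floats are resampled at (at most) their own precision, integer-like
    # inputs get a float type wide enough for their dynamic range.
    if A.dtype.kind == 'f':
        return np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
    # Use float32 when the value range is small, to limit the memory
    # footprint of the temporary float copy.
    da = float(A.max()) - float(A.min())
    return np.dtype("f8" if da > 1e8 else "f4")

print(pick_resample_dtype(np.zeros(4, np.float16)))        # float32
print(pick_resample_dtype(np.zeros(4, np.float64)))        # float64
print(pick_resample_dtype(np.arange(10, dtype=np.uint8)))  # float32
print(pick_resample_dtype(np.array([0, 2_000_000_000])))   # float64
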
@@ -536,21 +464,10 @@ def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                 # Apply the pixel-by-pixel alpha values if present
                 alpha = self.get_alpha()
                 if alpha is not None and np.ndim(alpha) > 0:
-                    out_alpha *= _resample(self, alpha, out_shape,
-                                           t, resample=True)
+                    out_alpha *= _resample(self, alpha, out_shape, t, resample=True)
                 # mask and run through the norm
                 resampled_masked = np.ma.masked_array(A_resampled, out_mask)
-                # we have re-set the vmin/vmax to account for small errors
-                # that may have moved input values in/out of range
-                s_vmin, s_vmax = vrange
-                if isinstance(self.norm, mcolors.LogNorm) and s_vmin <= 0:
-                    # Don't give 0 or negative values to LogNorm
-                    s_vmin = np.finfo(scaled_dtype).eps
-                # Block the norm from sending an update signal during the
-                # temporary vmin/vmax change
-                with self.norm.callbacks.blocked(), \
-                        cbook._setattr_cm(self.norm, vmin=s_vmin, vmax=s_vmax):
-                    output = self.norm(resampled_masked)
+                output = self.norm(resampled_masked)
             else:
                 if A.ndim == 2:  # _interpolation_stage == 'rgba'
                     self.norm.autoscale_None(A)
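
The second hunk is the matching simplification on the norm side: with no rescaled vrange left to restore, the temporary vmin/vmax override (and the LogNorm epsilon guard) disappears and the norm is applied directly to the resampled masked array. A small illustration of that direct call, using only public matplotlib API and made-up values:

import numpy as np
import matplotlib.colors as mcolors

# Illustrative values only: the resampled data keeps its original range,
# so it can be fed straight to the norm, masked entries included.
norm = mcolors.Normalize(vmin=0.0, vmax=100.0)
resampled_masked = np.ma.masked_array([0.0, 25.0, 100.0, 50.0],
                                      mask=[False, False, False, True])
output = norm(resampled_masked)
print(output)  # values scaled to [0, 1]; the masked entry stays masked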