
Commit a5abb46

Autoinfer norm bounds.
Instead of special-casing LogNorm to autoscale only from positive values, perform autoscaling from the values that map to finite transformed values. This ensures e.g. that make_norm_from_scale(LogitScale) automatically does the right thing (autoscaling from values in (0, 1)).

This makes autoscale() and autoscale_None() slightly more expensive (because the transform needs to be applied), so skip the call to autoscale_None() when vmin and vmax are already set. However, these should typically only be called once per norm anyway, so hopefully this isn't a bottleneck.

(Another idea would be to use trf.inverse().transform([-np.inf, np.inf]) as clipping bounds, but there are some tests using x -> x**2 / x -> sqrt(x) as a FuncNorm, which 1. doesn't go all the way to -inf, and 2. isn't even increasing for negative x's, so that idea doesn't work.)
1 parent 092f833 commit a5abb46
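
A minimal standalone sketch of the mechanism described above, outside matplotlib: the logit helper below is hypothetical, standing in for a scale's transform (it is not matplotlib API). Values whose transformed image is non-finite are dropped before computing vmin/vmax; the same mechanism reproduces LogNorm's old behavior, since log maps non-positive values to nan or -inf.

import numpy as np

def logit(x):
    # Maps (0, 1) -> (-inf, inf); 0 and 1 map to -inf/+inf, and values
    # outside [0, 1] map to nan, so none of those count as finite.
    with np.errstate(divide="ignore", invalid="ignore"):
        return np.log(x / (1 - x))

A = np.array([-1.0, 0.0, 0.25, 0.5, 1.0, 2.0])
# Autoscale only from values that transform to something finite.
in_domain = np.extract(np.isfinite(logit(A)), A)
print(in_domain)                         # [0.25 0.5]
print(in_domain.min(), in_domain.max())  # vmin=0.25, vmax=0.5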

File tree

1 file changed: 13 additions, 10 deletions

lib/matplotlib/colors.py

Lines changed: 13 additions & 10 deletions
@@ -1229,7 +1229,8 @@ def __call__(self, value, clip=None):
 
         result, is_scalar = self.process_value(value)
 
-        self.autoscale_None(result)
+        if self.vmin is None or self.vmax is None:
+            self.autoscale_None(result)
         # Convert at least to float, without losing precision.
         (vmin,), _ = self.process_value(self.vmin)
         (vmax,), _ = self.process_value(self.vmax)
@@ -1520,7 +1521,8 @@ def __init__(self, *args, **kwargs):
 
         def __call__(self, value, clip=None):
             value, is_scalar = self.process_value(value)
-            self.autoscale_None(value)
+            if self.vmin is None or self.vmax is None:
+                self.autoscale_None(value)
             if self.vmin > self.vmax:
                 raise ValueError("vmin must be less or equal to vmax")
             if self.vmin == self.vmax:
@@ -1555,6 +1557,15 @@ def inverse(self, value):
                      .reshape(np.shape(value)))
             return value[0] if is_scalar else value
 
+        def autoscale(self, A):
+            # i.e. A[np.isfinite(...)], but also for non-array A's
+            in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A)
+            return super().autoscale(in_trf_domain)
+
+        def autoscale_None(self, A):
+            in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A)
+            return super().autoscale_None(in_trf_domain)
+
     Norm.__name__ = (f"{scale_cls.__name__}Norm" if base_norm_cls is Normalize
                      else base_norm_cls.__name__)
     Norm.__qualname__ = base_norm_cls.__qualname__
@@ -1603,14 +1614,6 @@ def forward(values: array-like) -> array-like
 class LogNorm(Normalize):
     """Normalize a given value to the 0-1 range on a log scale."""
 
-    def autoscale(self, A):
-        # docstring inherited.
-        super().autoscale(np.ma.array(A, mask=(A <= 0)))
-
-    def autoscale_None(self, A):
-        # docstring inherited.
-        super().autoscale_None(np.ma.array(A, mask=(A <= 0)))
-
 
 @make_norm_from_scale(
     scale.SymmetricalLogScale,
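
As the in-diff comment notes, the new autoscale methods use np.extract rather than plain boolean indexing because indexing only works on ndarrays, while np.extract also accepts plain Python sequences. A quick illustration (plain NumPy, independent of the patch):

import numpy as np

data = [1.0, float("nan"), 3.0]             # a plain list, not an ndarray
# data[np.isfinite(data)] would raise TypeError on a list;
# np.extract(condition, arr) handles any array-like.
print(np.extract(np.isfinite(data), data))  # [1. 3.]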
