Commit 7fe02bc

phobson authored and jklymak committed
ENH: Add PiecewiseLinearNorm and tests
Borrows heavily from @Tillsen's solution found on StackOverflow here: http://goo.gl/RPXMYB. Used with his permission, as discussed on GitHub here: https://github.com/matplotlib/matplotlib/pull/3858
1 parent 9869fd7 commit 7fe02bc
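For context, a minimal usage sketch of the API this commit adds (assuming DivergingNorm is importable from matplotlib.colors once the commit is applied); it mirrors the image-comparison test added to test_colors.py below:

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.colors as mcolors

    # Data spanning -2 to 7 that should be centred on 0 in a diverging colormap.
    x = np.linspace(-2, 7)
    y = np.linspace(-np.pi, np.pi)
    X, Y = np.meshgrid(x, y)
    Z = X * np.sin(Y) ** 2

    norm = mcolors.DivergingNorm(vmin=-2, vcenter=0, vmax=7)

    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(Z, cmap=plt.cm.coolwarm, norm=norm)
    fig.colorbar(mesh, ax=ax)
    plt.show()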

File tree

4 files changed: +361, -2 lines


doc/users/whats_new.rst

Lines changed: 0 additions & 1 deletion
@@ -10,7 +10,6 @@ revision, see the :ref:`github-stats`.
 .. contents:: Table of Contents
    :depth: 4
 
-
 ..
    For a release, add a new section after this, then comment out the include
    and toctree below by indenting them. Uncomment them after the release.

lib/matplotlib/colors.py

Lines changed: 159 additions & 0 deletions
@@ -62,6 +62,7 @@
 
 from collections.abc import Sized
 import itertools
+import operator
 import re
 
 import numpy as np
@@ -992,6 +993,164 @@ def scaled(self):
         return self.vmin is not None and self.vmax is not None
 
 
+class PiecewiseLinearNorm(Normalize):
+    """
+    A subclass of matplotlib.colors.Normalize.
+
+    Normalizes data into the ``[0.0, 1.0]`` interval.
+    """
+    # TODO rewrite the internals of this class once we support OrderedDicts,
+    # i.e. after we drop support for python 2.6
+    def __init__(self, stops=None):
+        """Normalize data linearly between the defined stop points.
+
+        Use this as a more generic form of ``DivergingNorm``.
+
+        Parameters
+        ----------
+        stops : dict-like, optional
+            A dictionary, or anything that can be converted to a dictionary,
+            that maps points in the [0.0, 1.0] interval to data values,
+            i.e. key-value pairs.
+
+        Examples
+        --------
+        Note this example is equivalent to the DivergingNorm example.
+
+        >>> import matplotlib.colors as mcolors
+        >>> offset = mcolors.PiecewiseLinearNorm({0.: -2., 0.5: 0., 1.: 4.})
+        >>> data = [-2., -1., 0., 1., 2., 3., 4.]
+        >>> offset(data)
+        array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
+
+        """
+        self._set_stops(stops)
+
+    @property
+    def vmin(self):
+        try:
+            if self._stops[0][0] == 0:
+                return self._stops[0][1]
+        except IndexError:
+            return None
+
+    @vmin.setter
+    def vmin(self, vmin):
+        try:
+            if self._stops[0][0] == 0:
+                self._stops[0] = (self._stops[0][0], vmin)
+                return
+        except IndexError:
+            pass
+        self.append_stop(0., vmin)
+
+    @property
+    def vmax(self):
+        try:
+            if self._stops[-1][0] == 1:
+                return self._stops[-1][1]
+        except IndexError:
+            return None
+
+    @vmax.setter
+    def vmax(self, vmax):
+        try:
+            if self._stops[-1][0] == 1:
+                self._stops[-1] = (self._stops[-1][0], vmax)
+                return
+        except IndexError:
+            pass
+        self.append_stop(1., vmax)
+
+    # TODO Change this to a property when we drop 2.6 and use OrderedDicts
+    def _set_stops(self, stops):
+        if not stops:
+            self._stops = []
+            return
+
+        stops = dict(stops)
+        self._stops = sorted(stops.items(), key=operator.itemgetter(0))
+        map_points, data_points = zip(*self._stops)
+        if not np.all(np.diff(data_points) > 0):
+            raise ValueError("stops must increase monotonically")
+
+    def append_stop(self, cmap_fraction, data_value):
+        i = -1
+        for i, (map_point, data_point) in enumerate(self._stops):
+            if map_point >= cmap_fraction:
+                d1 = data_point  # data value at the current index
+                break
+        else:
+            i += 1  # the index to insert before
+            d1 = np.inf
+
+        if i > 0:
+            d0 = self._stops[i - 1][1]
+        else:
+            d0 = -np.inf
+
+        if not (d0 < data_value < d1):
+            raise ValueError(('Stops must increase monotonically; given the '
+                              'stops already set, the cmap_fraction specified '
+                              '(%f) means that the data_value must lie '
+                              'between %f and %f, but %f was given') %
+                             (cmap_fraction, d0, d1, data_value))
+
+        self._stops.insert(i, (cmap_fraction, data_value))
+
+    def __call__(self, value, clip=None):
+        """Map value to the interval [0, 1]. The clip argument is unused."""
+
+        result, is_scalar = self.process_value(value)
+        self.autoscale_None(result)
+
+        map_points, data_points = zip(*self._stops)
+        result = np.ma.masked_array(np.interp(result, data_points, map_points),
+                                    mask=np.ma.getmask(result))
+        if is_scalar:
+            result = np.atleast_1d(result)[0]
+        return result
+
+    def autoscale_None(self, A):
+        """Ensure the upper and lower bounds are set, using the data A."""
+        if len(self._stops) == 0 or self._stops[0][0] != 0:
+            self.append_stop(0., ma.min(A))
+        if self._stops[-1][0] != 1:
+            self.append_stop(1., ma.max(A))
+
+
+class DivergingNorm(PiecewiseLinearNorm):
+    def __init__(self, vmin=None, vcenter=None, vmax=None):
+        """Normalize data with an offset midpoint.
+
+        Useful when mapping data unequally centered around a conceptual
+        center, e.g., data that range from -2 to 4, with 0 as the midpoint.
+
+        Parameters
+        ----------
+        vmin : float, optional
+            The data value that defines ``0.0`` in the normalized data.
+            Defaults to the min value of the dataset.
+        vcenter : float, optional
+            The data value that defines ``0.5`` in the normalized data.
+            Defaults to halfway between *vmin* and *vmax*.
+        vmax : float, optional
+            The data value that defines ``1.0`` in the normalized data.
+            Defaults to the max value of the dataset.
+
+        Examples
+        --------
+        >>> import matplotlib.colors as mcolors
+        >>> offset = mcolors.DivergingNorm(vmin=-2., vcenter=0., vmax=4.)
+        >>> data = [-2., -1., 0., 1., 2., 3., 4.]
+        >>> offset(data)
+        array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
+
+        """
+        stops = {}
+        if vmin is not None:
+            stops[0.] = vmin
+        if vcenter is not None:
+            stops[0.5] = vcenter
+        if vmax is not None:
+            stops[1.] = vmax
+        super(DivergingNorm, self).__init__(stops)
+
+
 class LogNorm(Normalize):
     """Normalize a given value to the 0-1 range on a log scale."""
 
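As a cross-check of the docstring example above: the core of PiecewiseLinearNorm.__call__ is a single np.interp over the sorted stops. A standalone sketch in plain NumPy (the stop values come from the docstring; the variable names here are illustrative only):

    import numpy as np

    # Stops {0.0: -2.0, 0.5: 0.0, 1.0: 4.0} sorted and unzipped into parallel
    # tuples, exactly as _set_stops / __call__ do above.
    map_points = (0.0, 0.5, 1.0)      # positions in the [0, 1] colormap interval
    data_points = (-2.0, 0.0, 4.0)    # corresponding data values

    data = [-2., -1., 0., 1., 2., 3., 4.]
    print(np.interp(data, data_points, map_points))
    # -> [0.    0.25  0.5   0.625 0.75  0.875 1.   ]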
lib/matplotlib/tests/test_colors.py

Lines changed: 202 additions & 1 deletion
@@ -1,6 +1,5 @@
 import copy
 import itertools
-
 import numpy as np
 import pytest
 
@@ -221,6 +220,208 @@ def test_Normalize():
     assert 0 < norm(1 + 50 * eps) < 1
 
 
+class BaseNormMixin(object):
+    def test_call(self):
+        normed_vals = self.norm(self.vals)
+        assert_array_almost_equal(normed_vals, self.expected)
+
+    def test_inverse(self):
+        if self.test_inverse:
+            _inverse_tester(self.norm, self.vals)
+        else:
+            pass
+
+    def test_scalar(self):
+        _scalar_tester(self.norm, self.vals)
+
+    def test_mask(self):
+        _mask_tester(self.norm, self.vals)
+
+    def test_autoscale(self):
+        norm = self.normclass()
+        norm.autoscale([10, 20, 30, 40])
+        assert_equal(norm.vmin, 10.)
+        assert_equal(norm.vmax, 40.)
+
+    def test_autoscale_None_vmin(self):
+        norm = self.normclass(vmin=0, vmax=None)
+        norm.autoscale_None([1, 2, 3, 4, 5])
+        assert_equal(norm.vmin, 0.)
+        assert_equal(norm.vmax, 5.)
+
+    def test_autoscale_None_vmax(self):
+        norm = self.normclass(vmin=None, vmax=10)
+        norm.autoscale_None([1, 2, 3, 4, 5])
+        assert_equal(norm.vmin, 1.)
+        assert_equal(norm.vmax, 10.)
+
+    def test_scale(self):
+        norm = self.normclass()
+        assert_false(norm.scaled())
+
+        norm([1, 2, 3, 4])
+        assert_true(norm.scaled())
+
+    def test_process_value_scalar(self):
+        res, is_scalar = mcolors.Normalize.process_value(5)
+        assert_true(is_scalar)
+        assert_array_equal(res, np.array([5.]))
+
+    def test_process_value_list(self):
+        res, is_scalar = mcolors.Normalize.process_value([5, 10])
+        assert_false(is_scalar)
+        assert_array_equal(res, np.array([5., 10.]))
+
+    def test_process_value_tuple(self):
+        res, is_scalar = mcolors.Normalize.process_value((5, 10))
+        assert_false(is_scalar)
+        assert_array_equal(res, np.array([5., 10.]))
+
+    def test_process_value_array(self):
+        res, is_scalar = mcolors.Normalize.process_value(np.array([5, 10]))
+        assert_false(is_scalar)
+        assert_array_equal(res, np.array([5., 10.]))
+
+
+class BaseDivergingNorm(BaseNormMixin):
+    normclass = mcolors.DivergingNorm
+    test_inverse = False
+
+
+class test_DivergingNorm_Even(BaseDivergingNorm):
+    def setup(self):
+        self.norm = self.normclass(vmin=-1, vcenter=0, vmax=4)
+        self.vals = np.array([-1.0, -0.5, 0.0, 1.0, 2.0, 3.0, 4.0])
+        self.expected = np.array([0.0, 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
+
+
+class test_DivergingNorm_Odd(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=-2, vcenter=0, vmax=5)
+        self.vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
+        self.expected = np.array([0.0, 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
+
+
+class test_DivergingNorm_AllNegative(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=-10, vcenter=-8, vmax=-2)
+        self.vals = np.array([-10., -9., -8., -6., -4., -2.])
+        self.expected = np.array([0.0, 0.25, 0.5, 0.666667, 0.833333, 1.0])
+
+
+class test_DivergingNorm_AllPositive(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=0, vcenter=3, vmax=9)
+        self.vals = np.array([0., 1.5, 3., 4.5, 6.0, 7.5, 9.])
+        self.expected = np.array([0.0, 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
+
+
+class test_DivergingNorm_NoVs(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=None, vcenter=None, vmax=None)
+        self.vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
+        self.expected = np.array([0., 0.16666667, 0.33333333,
+                                  0.5, 0.66666667, 0.83333333, 1.0])
+        self.expected_vmin = -2
+        self.expected_vcenter = 1
+        self.expected_vmax = 4
+
+    def test_vmin(self):
+        assert_true(self.norm.vmin is None)
+        self.norm(self.vals)
+        assert_equal(self.norm.vmin, self.expected_vmin)
+
+    def test_vcenter(self):
+        assert_true(self.norm.vcenter is None)
+        self.norm(self.vals)
+        assert_equal(self.norm.vcenter, self.expected_vcenter)
+
+    def test_vmax(self):
+        assert_true(self.norm.vmax is None)
+        self.norm(self.vals)
+        assert_equal(self.norm.vmax, self.expected_vmax)
+
+
+class test_DivergingNorm_VminEqualsVcenter(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=-2, vcenter=-2, vmax=2)
+        self.vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
+        self.expected = np.array([0.5, 0.625, 0.75, 0.875, 1.0])
+
+
+class test_DivergingNorm_VmaxEqualsVcenter(BaseDivergingNorm):
+    def setup(self):
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=-2, vcenter=2, vmax=2)
+        self.vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
+        self.expected = np.array([0.0, 0.125, 0.25, 0.375, 0.5])
+
+
+class test_DivergingNorm_VsAllEqual(BaseDivergingNorm):
+    def setup(self):
+        self.v = 10
+        self.normclass = mcolors.DivergingNorm
+        self.norm = self.normclass(vmin=self.v, vcenter=self.v, vmax=self.v)
+        self.vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
+        self.expected = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
+        self.expected_inv = self.expected + self.v
+
+    def test_inverse(self):
+        assert_array_almost_equal(
+            self.norm.inverse(self.norm(self.vals)),
+            self.expected_inv
+        )
+
+
+class test_DivergingNorm_Errors(object):
+    def setup(self):
+        self.vals = np.arange(50)
+
+    def test_VminGTVcenter(self):
+        with pytest.raises(ValueError):
+            norm = mcolors.DivergingNorm(vmin=10, vcenter=0, vmax=20)
+            norm(self.vals)
+
+    def test_VminGTVmax(self):
+        with pytest.raises(ValueError):
+            norm = mcolors.DivergingNorm(vmin=10, vcenter=0, vmax=5)
+            norm(self.vals)
+
+    def test_VcenterGTVmax(self):
+        with pytest.raises(ValueError):
+            norm = mcolors.DivergingNorm(vmin=10, vcenter=25, vmax=20)
+            norm(self.vals)
+
+    def test_premature_scaling(self):
+        with pytest.raises(ValueError):
+            norm = mcolors.DivergingNorm()
+            norm.inverse(np.array([0.1, 0.5, 0.9]))
+
+
+@image_comparison(baseline_images=['test_offset_norm'], extensions=['png'],
+                  style='mpl20')
+def test_offset_norm_img():
+    x = np.linspace(-2, 7)
+    y = np.linspace(-1*np.pi, np.pi)
+    X, Y = np.meshgrid(x, y)
+    Z = x * np.sin(Y)**2
+
+    fig, (ax1, ax2) = plt.subplots(ncols=2)
+    cmap = plt.cm.coolwarm
+    norm = mcolors.DivergingNorm(vmin=-2, vcenter=0, vmax=7)
+
+    img1 = ax1.pcolormesh(Z, cmap=cmap, norm=None)
+    cbar1 = fig.colorbar(img1, ax=ax1)
+
+    img2 = ax2.pcolormesh(Z, cmap=cmap, norm=norm)
+    cbar2 = fig.colorbar(img2, ax=ax2)
+
+
 def test_SymLogNorm():
     """
     Test SymLogNorm behavior
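The error-path tests above (test_DivergingNorm_Errors) exercise the monotonicity check in _set_stops; a small sketch of the behaviour they assert, again assuming the classes added by this commit are available in matplotlib.colors:

    import numpy as np
    import matplotlib.colors as mcolors

    # vmin > vcenter, so the implied stops {0.0: 10, 0.5: 0, 1.0: 20} do not
    # increase monotonically; _set_stops raises ValueError at construction.
    try:
        norm = mcolors.DivergingNorm(vmin=10, vcenter=0, vmax=20)
        norm(np.arange(50))
    except ValueError as err:
        print(err)  # "stops must increase monotonically"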
