diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2b6ef7c642f4..bd139de68890 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -141,7 +141,7 @@ commands:
[ "$CIRCLE_PR_NUMBER" = "" ]; then
export RELEASE_TAG='-t release'
fi
- make html O="-T $RELEASE_TAG -j4"
+ make html O="-T $RELEASE_TAG -j4 -w /tmp/sphinxerrorswarnings.log"
rm -r build/html/_sources
working_directory: doc
- save_cache:
@@ -149,6 +149,24 @@ commands:
paths:
- doc/build/doctrees
+ doc-show-errors-warnings:
+ steps:
+ - run:
+ name: Extract possible build errors and warnings
+ command: |
+ (grep "WARNING\|ERROR" /tmp/sphinxerrorswarnings.log ||
+ echo "No errors or warnings")
+
+ doc-show-deprecations:
+ steps:
+ - run:
+ name: Extract possible deprecation warnings in examples and tutorials
+ command: |
+ (grep DeprecationWarning -r -l doc/build/html/gallery ||
+ echo "No deprecation warnings in gallery")
+ (grep DeprecationWarning -r -l doc/build/html/tutorials ||
+ echo "No deprecation warnings in tutorials")
+
doc-bundle:
steps:
- run:
@@ -186,6 +204,8 @@ jobs:
- doc-deps-install
- doc-build
+ - doc-show-errors-warnings
+ - doc-show-deprecations
- doc-bundle
diff --git a/.flake8 b/.flake8
index 4cbcd7e2881f..65fc90cdb1f6 100644
--- a/.flake8
+++ b/.flake8
@@ -60,7 +60,7 @@ per-file-ignores =
lib/matplotlib/pyplot.py: F401, F811
lib/matplotlib/tests/test_mathtext.py: E501
lib/matplotlib/transforms.py: E201, E202, E203
- lib/matplotlib/tri/triinterpolate.py: E201, E221
+ lib/matplotlib/tri/_triinterpolate.py: E201, E221
lib/mpl_toolkits/axes_grid1/axes_size.py: E272
lib/mpl_toolkits/axisartist/__init__.py: F401
lib/mpl_toolkits/axisartist/angle_helper.py: E221
@@ -113,3 +113,4 @@ per-file-ignores =
examples/user_interfaces/pylab_with_gtk4_sgskip.py: E402
examples/user_interfaces/toolmanager_sgskip.py: E402
examples/userdemo/pgf_preamble_sgskip.py: E402
+force-check = True
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 6ebdb4532dde..356d46b075ef 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -8,10 +8,13 @@
- [ ] Is [Flake 8](https://flake8.pycqa.org/en/latest/) compliant (install `flake8-docstrings` and run `flake8 --docstring-convention=all`).
**Documentation**
-- [ ] New features are documented, with examples if plot related.
-- [ ] New features have an entry in `doc/users/next_whats_new/` (follow instructions in README.rst there).
-- [ ] API changes documented in `doc/api/next_api_changes/` (follow instructions in README.rst there).
- [ ] Documentation is sphinx and numpydoc compliant (the docs should [build](https://matplotlib.org/devel/documenting_mpl.html#building-the-docs) without error).
+- [ ] New plotting related features are documented with examples.
+
+**Release Notes**
+- [ ] New features are marked with a `.. versionadded::` directive in the docstring and documented in `doc/users/next_whats_new/`
+- [ ] API changes are marked with a `.. versionchanged::` directive in the docstring and documented in `doc/api/next_api_changes/`
+- [ ] Release notes conform with instructions in `next_whats_new/README.rst` or `next_api_changes/README.rst`
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_backend_ps/scatter.eps b/lib/matplotlib/tests/baseline_images/test_backend_ps/scatter.eps
new file mode 100644
index 000000000000..b21ff4234af4
--- /dev/null
+++ b/lib/matplotlib/tests/baseline_images/test_backend_ps/scatter.eps
@@ -0,0 +1,306 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Title: scatter.eps
+%%Creator: Matplotlib v3.6.0.dev2701+g27bf604984.d20220719, https://matplotlib.org/
+%%CreationDate: Tue Jul 19 12:36:23 2022
+%%Orientation: portrait
+%%BoundingBox: 18 180 594 612
+%%HiResBoundingBox: 18.000000 180.000000 594.000000 612.000000
+%%EndComments
+%%BeginProlog
+/mpldict 10 dict def
+mpldict begin
+/_d { bind def } bind def
+/m { moveto } _d
+/l { lineto } _d
+/r { rlineto } _d
+/c { curveto } _d
+/cl { closepath } _d
+/ce { closepath eofill } _d
+/box {
+ m
+ 1 index 0 r
+ 0 exch r
+ neg 0 r
+ cl
+ } _d
+/clipbox {
+ box
+ clip
+ newpath
+ } _d
+/sc { setcachedevice } _d
+end
+%%EndProlog
+mpldict begin
+18 180 translate
+576 432 0 0 clipbox
+gsave
+0 0 m
+576 0 l
+576 432 l
+0 432 l
+cl
+1.000 setgray
+fill
+grestore
+/p0_0 {
+newpath
+translate
+72 141.529351 m
+17.327389 80.435325 l
+126.672611 80.435325 l
+cl
+
+} bind def
+/p0_1 {
+newpath
+translate
+72 158.4 m
+-17.28 100.8 l
+72 43.2 l
+161.28 100.8 l
+cl
+
+} bind def
+/p0_2 {
+newpath
+translate
+72 141.529351 m
+11.959333 113.386062 l
+34.892827 67.849263 l
+109.107173 67.849263 l
+132.040667 113.386062 l
+cl
+
+} bind def
+/p0_3 {
+newpath
+translate
+72 158.4 m
+-5.318748 129.6 l
+-5.318748 72 l
+72 43.2 l
+149.318748 72 l
+149.318748 129.6 l
+cl
+
+} bind def
+1.000 setlinewidth
+1 setlinejoin
+0 setlinecap
+[] 0 setdash
+0.000 setgray
+gsave
+446.4 345.6 72 43.2 clipbox
+96.7145 132.649 p0_0
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+166.544 15.5782 p0_1
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+149.874 179.799 p0_2
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+34.7409 104.813 p0_3
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+145.839 37.968 p0_0
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+147.462 82.9425 p0_1
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+147.29 120.393 p0_2
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+151.565 52.8617 p0_3
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+165.375 85.5808 p0_0
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+12.8578 119.079 p0_1
+gsave
+1.000 1.000 0.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+0.900 0.200 0.100 setrgbcolor
+gsave
+446.4 345.6 72 43.2 clipbox
+326.215567 311.071597 m
+334.595085 306.881838 l
+334.595085 315.261356 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+184.274432 293.965646 m
+190.679432 290.763146 l
+190.679432 297.168146 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+276.081223 354.823805 m
+283.311607 351.208613 l
+283.311607 358.438997 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+411.593191 219.187935 m
+420.363106 214.802977 l
+420.363106 223.572893 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+141.383198 139.751386 m
+149.294063 135.795953 l
+149.294063 143.706818 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+154.058079 131.129187 m
+160.028366 128.144043 l
+160.028366 134.114331 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+247.767539 370.319257 m
+255.714503 366.345775 l
+255.714503 374.292739 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+410.16817 374.136435 m
+419.852735 369.294152 l
+419.852735 378.978717 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+450.836918 106.524611 m
+457.983473 102.951334 l
+457.983473 110.097888 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+gsave
+446.4 345.6 72 43.2 clipbox
+397.084416 298.708741 m
+402.739273 295.881312 l
+402.739273 301.53617 l
+cl
+gsave
+0.000 0.000 1.000 setrgbcolor
+fill
+grestore
+stroke
+grestore
+
+end
+showpage
diff --git a/lib/matplotlib/tests/baseline_images/test_bbox_tight/bbox_inches_fixed_aspect.png b/lib/matplotlib/tests/baseline_images/test_bbox_tight/bbox_inches_fixed_aspect.png
new file mode 100644
index 000000000000..0fd7a35e3303
Binary files /dev/null and b/lib/matplotlib/tests/baseline_images/test_bbox_tight/bbox_inches_fixed_aspect.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_colorbar/contourf_extend_patches.png b/lib/matplotlib/tests/baseline_images/test_colorbar/contourf_extend_patches.png
new file mode 100644
index 000000000000..0e5ef52cf549
Binary files /dev/null and b/lib/matplotlib/tests/baseline_images/test_colorbar/contourf_extend_patches.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.pdf b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.pdf
index 94cdddaf6dcf..c463850d8d0f 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.pdf and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.pdf differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.png b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.png
index ae23ecedc0ce..2cf6829952b2 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.png and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.svg b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.svg
index 1d225fbb273a..ea21bf1f35e2 100644
--- a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.svg
+++ b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_cm_68.svg
@@ -1,160 +1,216 @@
-
-
-
-
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.pdf b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.pdf
index 3a95452967ac..a33f6b7d8385 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.pdf and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.pdf differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.png b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.png
index d655592d81c5..5cc8c6ef7b31 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.png and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.svg b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.svg
index bdb560153175..94bd4b633188 100644
--- a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.svg
+++ b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavusans_68.svg
@@ -1,128 +1,184 @@
-
-
-
-
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.pdf b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.pdf
index dc4c1802c743..64e9b2fe1a97 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.pdf and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.pdf differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.png b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.png
index 35caf2d2b6ad..9336942936a6 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.png and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.svg b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.svg
index 0284835785a3..f8773bd214fc 100644
--- a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.svg
+++ b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_dejavuserif_68.svg
@@ -1,117 +1,173 @@
-
-
-
-
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.pdf b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.pdf
index 2aca1bfd4a52..c6cf56ddba12 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.pdf and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.pdf differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.png b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.png
index f51fc6032d83..0aa0ac62b063 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.png and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.svg b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.svg
index be7cdc9f2da8..078cb0fdb8c4 100644
--- a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.svg
+++ b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stix_68.svg
@@ -1,127 +1,183 @@
-
-
-
-
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.pdf b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.pdf
index 323afa6acf0d..e277346b7f1e 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.pdf and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.pdf differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.png b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.png
index a8ea567f675e..1ea8e26f8d17 100644
Binary files a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.png and b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.png differ
diff --git a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.svg b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.svg
index 7fdb080a70a9..0a7baa3550a4 100644
--- a/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.svg
+++ b/lib/matplotlib/tests/baseline_images/test_mathtext/mathtext_stixsans_68.svg
@@ -1,111 +1,167 @@
-
-
-
-
+
+
+
diff --git a/lib/matplotlib/tests/baseline_images/test_usetex/eqnarray.png b/lib/matplotlib/tests/baseline_images/test_usetex/eqnarray.png
new file mode 100644
index 000000000000..249f15d238dd
Binary files /dev/null and b/lib/matplotlib/tests/baseline_images/test_usetex/eqnarray.png differ
diff --git a/lib/matplotlib/tests/test_axes.py b/lib/matplotlib/tests/test_axes.py
index 628f9542aa42..edc101c9876f 100644
--- a/lib/matplotlib/tests/test_axes.py
+++ b/lib/matplotlib/tests/test_axes.py
@@ -66,7 +66,7 @@ def test_repr():
ax.set_xlabel('x')
ax.set_ylabel('y')
assert repr(ax) == (
- "")
@@ -491,13 +491,15 @@ def test_subclass_clear_cla():
# Note, we cannot use mocking here as we want to be sure that the
# superclass fallback does not recurse.
- with pytest.warns(match='Overriding `Axes.cla`'):
+ with pytest.warns(PendingDeprecationWarning,
+ match='Overriding `Axes.cla`'):
class ClaAxes(Axes):
def cla(self):
nonlocal called
called = True
- with pytest.warns(match='Overriding `Axes.cla`'):
+ with pytest.warns(PendingDeprecationWarning,
+ match='Overriding `Axes.cla`'):
class ClaSuperAxes(Axes):
def cla(self):
nonlocal called
@@ -894,11 +896,15 @@ def test_hexbin_extent():
ax.hexbin("x", "y", extent=[.1, .3, .6, .7], data=data)
-@image_comparison(['hexbin_empty.png'], remove_text=True)
+@image_comparison(['hexbin_empty.png', 'hexbin_empty.png'], remove_text=True)
def test_hexbin_empty():
# From #3886: creating hexbin from empty dataset raises ValueError
- ax = plt.gca()
+ fig, ax = plt.subplots()
ax.hexbin([], [])
+ fig, ax = plt.subplots()
+ # From #23922: creating hexbin with log scaling from empty
+ # dataset raises ValueError
+ ax.hexbin([], [], bins='log')
def test_hexbin_pickable():
@@ -1493,7 +1499,7 @@ def test_arc_ellipse():
[np.cos(rtheta), -np.sin(rtheta)],
[np.sin(rtheta), np.cos(rtheta)]])
- x, y = np.dot(R, np.array([x, y]))
+ x, y = np.dot(R, [x, y])
x += xcenter
y += ycenter
@@ -2091,7 +2097,7 @@ def test_hist_datetime_datasets():
@pytest.mark.parametrize("bins_preprocess",
[mpl.dates.date2num,
lambda bins: bins,
- lambda bins: np.asarray(bins).astype('datetime64')],
+ lambda bins: np.asarray(bins, 'datetime64')],
ids=['date2num', 'datetime.datetime',
'np.datetime64'])
def test_hist_datetime_datasets_bins(bins_preprocess):
@@ -2763,7 +2769,7 @@ def _as_mpl_axes(self):
# testing axes creation with subplot
ax = plt.subplot(121, projection=prj)
- assert type(ax) == mpl.axes._subplots.subplot_class_factory(PolarAxes)
+ assert type(ax) == PolarAxes
plt.close()
@@ -2851,10 +2857,11 @@ def test_stackplot():
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
- # Reuse testcase from above for a labeled data test
+ # Reuse testcase from above for a test with labeled data and with colours
+ # from the Axes property cycle.
data = {"x": x, "y1": y1, "y2": y2, "y3": y3}
fig, ax = plt.subplots()
- ax.stackplot("x", "y1", "y2", "y3", data=data)
+ ax.stackplot("x", "y1", "y2", "y3", data=data, colors=["C0", "C1", "C2"])
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
@@ -3680,6 +3687,41 @@ def test_errorbar():
ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
+@image_comparison(['mixed_errorbar_polar_caps'], extensions=['png'],
+ remove_text=True)
+def test_mixed_errorbar_polar_caps():
+ """
+ Mix several polar errorbar use cases in a single test figure.
+
+ It is advisable to position individual points off the grid. If there are
+ problems with reproducibility of this test, consider removing grid.
+ """
+ fig = plt.figure()
+ ax = plt.subplot(111, projection='polar')
+
+ # symmetric errorbars
+ th_sym = [1, 2, 3]
+ r_sym = [0.9]*3
+ ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt="o")
+
+ # long errorbars
+ th_long = [np.pi/2 + .1, np.pi + .1]
+ r_long = [1.8, 2.2]
+ ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt="o")
+
+ # asymmetric errorbars
+ th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1]
+ r_asym = [1.1]*3
+ xerr = [[.3, .3, .2], [.2, .3, .3]]
+ yerr = [[.35, .5, .5], [.5, .35, .5]]
+ ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt="o")
+
+ # overlapping errorbar
+ th_over = [2.1]
+ r_over = [3.1]
+ ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt="o")
+
+
def test_errorbar_colorcycle():
f, ax = plt.subplots()
@@ -6367,7 +6409,7 @@ def test_pandas_pcolormesh(pd):
def test_pandas_indexing_dates(pd):
dates = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
- values = np.sin(np.array(range(len(dates))))
+ values = np.sin(range(len(dates)))
df = pd.DataFrame({'dates': dates, 'values': values})
ax = plt.gca()
@@ -6410,24 +6452,6 @@ def test_pandas_bar_align_center(pd):
fig.canvas.draw()
-def test_tick_apply_tickdir_deprecation():
- # Remove this test when the deprecation expires.
- import matplotlib.axis as maxis
- ax = plt.axes()
-
- tick = maxis.XTick(ax, 0)
- with pytest.warns(MatplotlibDeprecationWarning,
- match="The apply_tickdir function was deprecated in "
- "Matplotlib 3.5"):
- tick.apply_tickdir('out')
-
- tick = maxis.YTick(ax, 0)
- with pytest.warns(MatplotlibDeprecationWarning,
- match="The apply_tickdir function was deprecated in "
- "Matplotlib 3.5"):
- tick.apply_tickdir('out')
-
-
def test_axis_set_tick_params_labelsize_labelcolor():
# Tests fix for issue 4346
axis_1 = plt.subplot()
@@ -7527,6 +7551,18 @@ def test_bbox_aspect_axes_init():
assert_allclose(sizes, sizes[0])
+def test_set_aspect_negative():
+ fig, ax = plt.subplots()
+ with pytest.raises(ValueError, match="must be finite and positive"):
+ ax.set_aspect(-1)
+ with pytest.raises(ValueError, match="must be finite and positive"):
+ ax.set_aspect(0)
+ with pytest.raises(ValueError, match="must be finite and positive"):
+ ax.set_aspect(np.inf)
+ with pytest.raises(ValueError, match="must be finite and positive"):
+ ax.set_aspect(-np.inf)
+
+
def test_redraw_in_frame():
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3])
@@ -7792,14 +7828,24 @@ def test_bar_label_location_center():
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects, label_type='center')
- assert labels[0].xy == (widths[0] / 2, ys[0])
+ assert labels[0].xy == (0.5, 0.5)
assert labels[0].get_ha() == 'center'
assert labels[0].get_va() == 'center'
- assert labels[1].xy == (widths[1] / 2, ys[1])
+ assert labels[1].xy == (0.5, 0.5)
assert labels[1].get_ha() == 'center'
assert labels[1].get_va() == 'center'
+@image_comparison(['test_centered_bar_label_nonlinear.svg'])
+def test_centered_bar_label_nonlinear():
+ _, ax = plt.subplots()
+ bar_container = ax.barh(['c', 'b', 'a'], [1_000, 5_000, 7_000])
+ ax.set_xscale('log')
+ ax.set_xlim(1, None)
+ ax.bar_label(bar_container, label_type='center')
+ ax.set_axis_off()
+
+
def test_bar_label_location_errorbars():
ax = plt.gca()
xs, heights = [1, 2], [3, -4]
@@ -7813,14 +7859,24 @@ def test_bar_label_location_errorbars():
assert labels[1].get_va() == 'top'
-def test_bar_label_fmt():
+@pytest.mark.parametrize('fmt', [
+ '%.2f', '{:.2f}', '{:.2f}'.format
+])
+def test_bar_label_fmt(fmt):
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
- labels = ax.bar_label(rects, fmt='%.2f')
+ labels = ax.bar_label(rects, fmt=fmt)
assert labels[0].get_text() == '3.00'
assert labels[1].get_text() == '-4.00'
+def test_bar_label_fmt_error():
+ ax = plt.gca()
+ rects = ax.bar([1, 2], [3, -4])
+ with pytest.raises(TypeError, match='str or callable'):
+ _ = ax.bar_label(rects, fmt=10)
+
+
def test_bar_label_labels():
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
@@ -8103,6 +8159,58 @@ def test_bezier_autoscale():
assert ax.get_ylim()[0] == -0.5
+def test_small_autoscale():
+ # Check that paths with small values autoscale correctly #24097.
+ verts = np.array([
+ [-5.45, 0.00], [-5.45, 0.00], [-5.29, 0.00], [-5.29, 0.00],
+ [-5.13, 0.00], [-5.13, 0.00], [-4.97, 0.00], [-4.97, 0.00],
+ [-4.81, 0.00], [-4.81, 0.00], [-4.65, 0.00], [-4.65, 0.00],
+ [-4.49, 0.00], [-4.49, 0.00], [-4.33, 0.00], [-4.33, 0.00],
+ [-4.17, 0.00], [-4.17, 0.00], [-4.01, 0.00], [-4.01, 0.00],
+ [-3.85, 0.00], [-3.85, 0.00], [-3.69, 0.00], [-3.69, 0.00],
+ [-3.53, 0.00], [-3.53, 0.00], [-3.37, 0.00], [-3.37, 0.00],
+ [-3.21, 0.00], [-3.21, 0.01], [-3.05, 0.01], [-3.05, 0.01],
+ [-2.89, 0.01], [-2.89, 0.01], [-2.73, 0.01], [-2.73, 0.02],
+ [-2.57, 0.02], [-2.57, 0.04], [-2.41, 0.04], [-2.41, 0.04],
+ [-2.25, 0.04], [-2.25, 0.06], [-2.09, 0.06], [-2.09, 0.08],
+ [-1.93, 0.08], [-1.93, 0.10], [-1.77, 0.10], [-1.77, 0.12],
+ [-1.61, 0.12], [-1.61, 0.14], [-1.45, 0.14], [-1.45, 0.17],
+ [-1.30, 0.17], [-1.30, 0.19], [-1.14, 0.19], [-1.14, 0.22],
+ [-0.98, 0.22], [-0.98, 0.25], [-0.82, 0.25], [-0.82, 0.27],
+ [-0.66, 0.27], [-0.66, 0.29], [-0.50, 0.29], [-0.50, 0.30],
+ [-0.34, 0.30], [-0.34, 0.32], [-0.18, 0.32], [-0.18, 0.33],
+ [-0.02, 0.33], [-0.02, 0.32], [0.13, 0.32], [0.13, 0.33], [0.29, 0.33],
+ [0.29, 0.31], [0.45, 0.31], [0.45, 0.30], [0.61, 0.30], [0.61, 0.28],
+ [0.77, 0.28], [0.77, 0.25], [0.93, 0.25], [0.93, 0.22], [1.09, 0.22],
+ [1.09, 0.19], [1.25, 0.19], [1.25, 0.17], [1.41, 0.17], [1.41, 0.15],
+ [1.57, 0.15], [1.57, 0.12], [1.73, 0.12], [1.73, 0.10], [1.89, 0.10],
+ [1.89, 0.08], [2.05, 0.08], [2.05, 0.07], [2.21, 0.07], [2.21, 0.05],
+ [2.37, 0.05], [2.37, 0.04], [2.53, 0.04], [2.53, 0.02], [2.69, 0.02],
+ [2.69, 0.02], [2.85, 0.02], [2.85, 0.01], [3.01, 0.01], [3.01, 0.01],
+ [3.17, 0.01], [3.17, 0.00], [3.33, 0.00], [3.33, 0.00], [3.49, 0.00],
+ [3.49, 0.00], [3.65, 0.00], [3.65, 0.00], [3.81, 0.00], [3.81, 0.00],
+ [3.97, 0.00], [3.97, 0.00], [4.13, 0.00], [4.13, 0.00], [4.29, 0.00],
+ [4.29, 0.00], [4.45, 0.00], [4.45, 0.00], [4.61, 0.00], [4.61, 0.00],
+ [4.77, 0.00], [4.77, 0.00], [4.93, 0.00], [4.93, 0.00],
+ ])
+
+ minx = np.min(verts[:, 0])
+ miny = np.min(verts[:, 1])
+ maxx = np.max(verts[:, 0])
+ maxy = np.max(verts[:, 1])
+
+ p = mpath.Path(verts)
+
+ fig, ax = plt.subplots()
+ ax.add_patch(mpatches.PathPatch(p))
+ ax.autoscale()
+
+ assert ax.get_xlim()[0] <= minx
+ assert ax.get_xlim()[1] >= maxx
+ assert ax.get_ylim()[0] <= miny
+ assert ax.get_ylim()[1] >= maxy
+
+
def test_get_xticklabel():
fig, ax = plt.subplots()
ax.plot(np.arange(10))
@@ -8133,3 +8241,16 @@ def test_bar_leading_nan():
for b in rest:
assert np.isfinite(b.xy).all()
assert np.isfinite(b.get_width())
+
+
+@check_figures_equal(extensions=["png"])
+def test_bar_all_nan(fig_test, fig_ref):
+ mpl.style.use("mpl20")
+ ax_test = fig_test.subplots()
+ ax_ref = fig_ref.subplots()
+
+ ax_test.bar([np.nan], [np.nan])
+ ax_test.bar([1], [1])
+
+ ax_ref.bar([1], [1]).remove()
+ ax_ref.bar([1], [1])
diff --git a/lib/matplotlib/tests/test_backend_bases.py b/lib/matplotlib/tests/test_backend_bases.py
index 231a3e044705..4cbd1bc98b67 100644
--- a/lib/matplotlib/tests/test_backend_bases.py
+++ b/lib/matplotlib/tests/test_backend_bases.py
@@ -2,8 +2,9 @@
from matplotlib import path, transforms
from matplotlib.backend_bases import (
- FigureCanvasBase, LocationEvent, MouseButton, MouseEvent,
+ FigureCanvasBase, KeyEvent, LocationEvent, MouseButton, MouseEvent,
NavigationToolbar2, RendererBase)
+from matplotlib.backend_tools import RubberbandBase
from matplotlib.figure import Figure
from matplotlib.testing._markers import needs_pgf_xelatex
import matplotlib.pyplot as plt
@@ -12,6 +13,12 @@
import pytest
+_EXPECTED_WARNING_TOOLMANAGER = (
+ r"Treat the new Tool classes introduced in "
+ r"v[0-9]*.[0-9]* as experimental for now; "
+ "the API and rcParam may change in future versions.")
+
+
def test_uses_per_path():
id = transforms.Affine2D()
paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
@@ -117,12 +124,18 @@ def test_pick():
fig = plt.figure()
fig.text(.5, .5, "hello", ha="center", va="center", picker=True)
fig.canvas.draw()
+
picks = []
- fig.canvas.mpl_connect("pick_event", lambda event: picks.append(event))
- start_event = MouseEvent(
- "button_press_event", fig.canvas, *fig.transFigure.transform((.5, .5)),
- MouseButton.LEFT)
- fig.canvas.callbacks.process(start_event.name, start_event)
+ def handle_pick(event):
+ assert event.mouseevent.key == "a"
+ picks.append(event)
+ fig.canvas.mpl_connect("pick_event", handle_pick)
+
+ KeyEvent("key_press_event", fig.canvas, "a")._process()
+ MouseEvent("button_press_event", fig.canvas,
+ *fig.transFigure.transform((.5, .5)),
+ MouseButton.LEFT)._process()
+ KeyEvent("key_release_event", fig.canvas, "a")._process()
assert len(picks) == 1
@@ -247,11 +260,7 @@ def test_interactive_colorbar(plot_func, orientation, tool, button, expected):
def test_toolbar_zoompan():
- expected_warning_regex = (
- r"Treat the new Tool classes introduced in "
- r"v[0-9]*.[0-9]* as experimental for now; "
- "the API and rcParam may change in future versions.")
- with pytest.warns(UserWarning, match=expected_warning_regex):
+ with pytest.warns(UserWarning, match=_EXPECTED_WARNING_TOOLMANAGER):
plt.rcParams['toolbar'] = 'toolmanager'
ax = plt.gca()
assert ax.get_navigate_mode() is None
@@ -349,3 +358,44 @@ def test_interactive_pan(key, mouseend, expectedxlim, expectedylim):
# Should be close, but won't be exact due to screen integer resolution
assert tuple(ax.get_xlim()) == pytest.approx(expectedxlim, abs=0.02)
assert tuple(ax.get_ylim()) == pytest.approx(expectedylim, abs=0.02)
+
+
+def test_toolmanager_remove():
+ with pytest.warns(UserWarning, match=_EXPECTED_WARNING_TOOLMANAGER):
+ plt.rcParams['toolbar'] = 'toolmanager'
+ fig = plt.gcf()
+ initial_len = len(fig.canvas.manager.toolmanager.tools)
+ assert 'forward' in fig.canvas.manager.toolmanager.tools
+ fig.canvas.manager.toolmanager.remove_tool('forward')
+ assert len(fig.canvas.manager.toolmanager.tools) == initial_len - 1
+ assert 'forward' not in fig.canvas.manager.toolmanager.tools
+
+
+def test_toolmanager_get_tool():
+ with pytest.warns(UserWarning, match=_EXPECTED_WARNING_TOOLMANAGER):
+ plt.rcParams['toolbar'] = 'toolmanager'
+ fig = plt.gcf()
+ rubberband = fig.canvas.manager.toolmanager.get_tool('rubberband')
+ assert isinstance(rubberband, RubberbandBase)
+ assert fig.canvas.manager.toolmanager.get_tool(rubberband) is rubberband
+ with pytest.warns(UserWarning,
+ match="ToolManager does not control tool 'foo'"):
+ assert fig.canvas.manager.toolmanager.get_tool('foo') is None
+ assert fig.canvas.manager.toolmanager.get_tool('foo', warn=False) is None
+
+ with pytest.warns(UserWarning,
+ match="ToolManager does not control tool 'foo'"):
+ assert fig.canvas.manager.toolmanager.trigger_tool('foo') is None
+
+
+def test_toolmanager_update_keymap():
+ with pytest.warns(UserWarning, match=_EXPECTED_WARNING_TOOLMANAGER):
+ plt.rcParams['toolbar'] = 'toolmanager'
+ fig = plt.gcf()
+ assert 'v' in fig.canvas.manager.toolmanager.get_tool_keymap('forward')
+ with pytest.warns(UserWarning,
+ match="Key c changed from back to forward"):
+ fig.canvas.manager.toolmanager.update_keymap('forward', 'c')
+ assert fig.canvas.manager.toolmanager.get_tool_keymap('forward') == ['c']
+ with pytest.raises(KeyError, match="'foo' not in Tools"):
+ fig.canvas.manager.toolmanager.update_keymap('foo', 'c')
diff --git a/lib/matplotlib/tests/test_backend_ps.py b/lib/matplotlib/tests/test_backend_ps.py
index a3a5a52e7977..b3e933a89a25 100644
--- a/lib/matplotlib/tests/test_backend_ps.py
+++ b/lib/matplotlib/tests/test_backend_ps.py
@@ -4,15 +4,17 @@
import re
import tempfile
+import numpy as np
import pytest
-from matplotlib import cbook, patheffects, font_manager as fm
+from matplotlib import cbook, path, patheffects, font_manager as fm
from matplotlib._api import MatplotlibDeprecationWarning
from matplotlib.figure import Figure
from matplotlib.patches import Ellipse
from matplotlib.testing._markers import needs_ghostscript, needs_usetex
from matplotlib.testing.decorators import check_figures_equal, image_comparison
import matplotlib as mpl
+import matplotlib.collections as mcollections
import matplotlib.pyplot as plt
@@ -254,6 +256,15 @@ def test_linedash():
assert buf.tell() > 0
+def test_empty_line():
+ # Smoke-test for gh#23954
+ figure = Figure()
+ figure.text(0.5, 0.5, "\nfoo\n\n")
+ buf = io.BytesIO()
+ figure.savefig(buf, format='eps')
+ figure.savefig(buf, format='ps')
+
+
def test_no_duplicate_definition():
fig = Figure()
@@ -298,3 +309,21 @@ def test_multi_font_type42():
fig = plt.figure()
fig.text(0.15, 0.475, "There are 几个汉字 in between!")
+
+
+@image_comparison(["scatter.eps"])
+def test_path_collection():
+ rng = np.random.default_rng(19680801)
+ xvals = rng.uniform(0, 1, 10)
+ yvals = rng.uniform(0, 1, 10)
+ sizes = rng.uniform(30, 100, 10)
+ fig, ax = plt.subplots()
+ ax.scatter(xvals, yvals, sizes, edgecolor=[0.9, 0.2, 0.1], marker='<')
+ ax.set_axis_off()
+ paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
+ offsets = rng.uniform(0, 200, 20).reshape(10, 2)
+ sizes = [0.02, 0.04]
+ pc = mcollections.PathCollection(paths, sizes, zorder=-1,
+ facecolors='yellow', offsets=offsets)
+ ax.add_collection(pc)
+ ax.set_xlim(0, 1)
diff --git a/lib/matplotlib/tests/test_backend_svg.py b/lib/matplotlib/tests/test_backend_svg.py
index 7ea81730f20d..680efd67379b 100644
--- a/lib/matplotlib/tests/test_backend_svg.py
+++ b/lib/matplotlib/tests/test_backend_svg.py
@@ -527,3 +527,64 @@ def test_svg_escape():
fig.savefig(fd, format='svg')
buf = fd.getvalue().decode()
assert '<'"&>"' in buf
+
+
+@pytest.mark.parametrize("font_str", [
+ "'DejaVu Sans', 'WenQuanYi Zen Hei', 'Arial', sans-serif",
+ "'DejaVu Serif', 'WenQuanYi Zen Hei', 'Times New Roman', serif",
+ "'Arial', 'WenQuanYi Zen Hei', cursive",
+ "'Impact', 'WenQuanYi Zen Hei', fantasy",
+ "'DejaVu Sans Mono', 'WenQuanYi Zen Hei', 'Courier New', monospace",
+ # These do not work because the logic to get the font metrics will not find
+ # WenQuanYi as the fallback logic stops with the first fallback font:
+ # "'DejaVu Sans Mono', 'Courier New', 'WenQuanYi Zen Hei', monospace",
+ # "'DejaVu Sans', 'Arial', 'WenQuanYi Zen Hei', sans-serif",
+ # "'DejaVu Serif', 'Times New Roman', 'WenQuanYi Zen Hei', serif",
+])
+@pytest.mark.parametrize("include_generic", [True, False])
+def test_svg_font_string(font_str, include_generic):
+ fp = fm.FontProperties(family=["WenQuanYi Zen Hei"])
+ if Path(fm.findfont(fp)).name != "wqy-zenhei.ttc":
+ pytest.skip("Font may be missing")
+
+ explicit, *rest, generic = map(
+ lambda x: x.strip("'"), font_str.split(", ")
+ )
+ size = len(generic)
+ if include_generic:
+ rest = rest + [generic]
+ plt.rcParams[f"font.{generic}"] = rest
+ plt.rcParams["font.size"] = size
+ plt.rcParams["svg.fonttype"] = "none"
+
+ fig, ax = plt.subplots()
+ if generic == "sans-serif":
+ generic_options = ["sans", "sans-serif", "sans serif"]
+ else:
+ generic_options = [generic]
+
+ for generic_name in generic_options:
+ # test that fallback works
+ ax.text(0.5, 0.5, "There are 几个汉字 in between!",
+ family=[explicit, generic_name], ha="center")
+ # test deduplication works
+ ax.text(0.5, 0.1, "There are 几个汉字 in between!",
+ family=[explicit, *rest, generic_name], ha="center")
+ ax.axis("off")
+
+ with BytesIO() as fd:
+ fig.savefig(fd, format="svg")
+ buf = fd.getvalue()
+
+ tree = xml.etree.ElementTree.fromstring(buf)
+ ns = "http://www.w3.org/2000/svg"
+ text_count = 0
+ for text_element in tree.findall(f".//{{{ns}}}text"):
+ text_count += 1
+ font_info = dict(
+ map(lambda x: x.strip(), _.strip().split(":"))
+ for _ in dict(text_element.items())["style"].split(";")
+ )["font"]
+
+ assert font_info == f"{size}px {font_str}"
+ assert text_count == len(ax.texts)
diff --git a/lib/matplotlib/tests/test_backend_tk.py b/lib/matplotlib/tests/test_backend_tk.py
index 5ad3e3ff28b9..4d43e27aa4a4 100644
--- a/lib/matplotlib/tests/test_backend_tk.py
+++ b/lib/matplotlib/tests/test_backend_tk.py
@@ -53,11 +53,14 @@ def test_func():
+ str(e.stderr))
else:
# macOS may actually emit irrelevant errors about Accelerated
- # OpenGL vs. software OpenGL, so suppress them.
+ # OpenGL vs. software OpenGL, or some permission error on Azure, so
+ # suppress them.
# Asserting stderr first (and printing it on failure) should be
# more helpful for debugging that printing a failed success count.
+ ignored_lines = ["OpenGL", "CFMessagePort: bootstrap_register",
+ "/usr/include/servers/bootstrap_defs.h"]
assert not [line for line in proc.stderr.splitlines()
- if "OpenGL" not in line]
+ if all(msg not in line for msg in ignored_lines)]
assert proc.stdout.count("success") == success_count
return test_func
diff --git a/lib/matplotlib/tests/test_bbox_tight.py b/lib/matplotlib/tests/test_bbox_tight.py
index 1a7ba6b7456c..91ff7fe20963 100644
--- a/lib/matplotlib/tests/test_bbox_tight.py
+++ b/lib/matplotlib/tests/test_bbox_tight.py
@@ -146,3 +146,13 @@ def test_noop_tight_bbox():
assert (im[:, :, 3] == 255).all()
assert not (im[:, :, :3] == 255).all()
assert im.shape == (7, 10, 4)
+
+
+@image_comparison(['bbox_inches_fixed_aspect'], extensions=['png'],
+ remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
+def test_bbox_inches_fixed_aspect():
+ with plt.rc_context({'figure.constrained_layout.use': True}):
+ fig, ax = plt.subplots()
+ ax.plot([0, 1])
+ ax.set_xlim(0, 1)
+ ax.set_aspect('equal')
diff --git a/lib/matplotlib/tests/test_cbook.py b/lib/matplotlib/tests/test_cbook.py
index 26748d1a5798..aa5c999b7079 100644
--- a/lib/matplotlib/tests/test_cbook.py
+++ b/lib/matplotlib/tests/test_cbook.py
@@ -51,7 +51,7 @@ def test_rgba(self):
class Test_boxplot_stats:
- def setup(self):
+ def setup_method(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
@@ -177,7 +177,7 @@ def test_boxplot_stats_autorange_false(self):
class Test_callback_registry:
- def setup(self):
+ def setup_method(self):
self.signal = 'test'
self.callbacks = cbook.CallbackRegistry()
@@ -895,3 +895,19 @@ def test_safe_first_element_with_none():
datetime_lst[0] = None
actual = cbook._safe_first_finite(datetime_lst)
assert actual is not None and actual == datetime_lst[1]
+
+
+@pytest.mark.parametrize('fmt, value, result', [
+ ('%.2f m', 0.2, '0.20 m'),
+ ('{:.2f} m', 0.2, '0.20 m'),
+ ('{} m', 0.2, '0.2 m'),
+ ('const', 0.2, 'const'),
+ ('%d or {}', 0.2, '0 or {}'),
+ ('{{{:,.0f}}}', 2e5, '{200,000}'),
+ ('{:.2%}', 2/3, '66.67%'),
+ ('$%g', 2.54, '$2.54'),
+])
+def test_auto_format_str(fmt, value, result):
+ """Apply *value* to the format string *fmt*."""
+ assert cbook._auto_format_str(fmt, value) == result
+ assert cbook._auto_format_str(fmt, np.float64(value)) == result
diff --git a/lib/matplotlib/tests/test_collections.py b/lib/matplotlib/tests/test_collections.py
index 3647e561d2d6..b624a9093641 100644
--- a/lib/matplotlib/tests/test_collections.py
+++ b/lib/matplotlib/tests/test_collections.py
@@ -403,7 +403,7 @@ def test_polycollection_close():
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
- ax = fig.add_axes(Axes3D(fig, auto_add_to_figure=False))
+ ax = fig.add_axes(Axes3D(fig))
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
@@ -696,7 +696,7 @@ def test_pathcollection_legend_elements():
h, l = sc.legend_elements(fmt="{x:g}")
assert len(h) == 5
- assert_array_equal(np.array(l).astype(float), np.arange(5))
+ assert l == ["0", "1", "2", "3", "4"]
colors = np.array([line.get_color() for line in h])
colors2 = sc.cmap(np.arange(5)/4)
assert_array_equal(colors, colors2)
@@ -707,16 +707,14 @@ def test_pathcollection_legend_elements():
l2 = ax.legend(h2, lab2, loc=2)
h, l = sc.legend_elements(prop="sizes", alpha=0.5, color="red")
- alpha = np.array([line.get_alpha() for line in h])
- assert_array_equal(alpha, 0.5)
- color = np.array([line.get_markerfacecolor() for line in h])
- assert_array_equal(color, "red")
+ assert all(line.get_alpha() == 0.5 for line in h)
+ assert all(line.get_markerfacecolor() == "red" for line in h)
l3 = ax.legend(h, l, loc=4)
h, l = sc.legend_elements(prop="sizes", num=4, fmt="{x:.2f}",
func=lambda x: 2*x)
actsizes = [line.get_markersize() for line in h]
- labeledsizes = np.sqrt(np.array(l).astype(float)/2)
+ labeledsizes = np.sqrt(np.array(l, float) / 2)
assert_array_almost_equal(actsizes, labeledsizes)
l4 = ax.legend(h, l, loc=3)
@@ -727,7 +725,7 @@ def test_pathcollection_legend_elements():
levels = [-1, 0, 55.4, 260]
h6, lab6 = sc.legend_elements(num=levels, prop="sizes", fmt="{x:g}")
- assert_array_equal(np.array(lab6).astype(float), levels[2:])
+ assert [float(l) for l in lab6] == levels[2:]
for l in [l1, l2, l3, l4]:
ax.add_artist(l)
diff --git a/lib/matplotlib/tests/test_colorbar.py b/lib/matplotlib/tests/test_colorbar.py
index 149ed4c3d22e..4336b761f698 100644
--- a/lib/matplotlib/tests/test_colorbar.py
+++ b/lib/matplotlib/tests/test_colorbar.py
@@ -1,10 +1,12 @@
import numpy as np
import pytest
+from matplotlib import _api
from matplotlib import cm
import matplotlib.colors as mcolors
import matplotlib as mpl
+
from matplotlib import rc_context
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
@@ -319,7 +321,8 @@ def test_parentless_mappable():
pc = mpl.collections.PatchCollection([], cmap=plt.get_cmap('viridis'))
pc.set_array([])
- with pytest.raises(ValueError, match='Unable to determine Axes to steal'):
+ with pytest.warns(_api.MatplotlibDeprecationWarning,
+ match='Unable to determine Axes to steal'):
plt.colorbar(pc)
@@ -998,6 +1001,36 @@ def test_colorbar_extend_drawedges():
np.testing.assert_array_equal(cbar.dividers.get_segments(), res)
+@image_comparison(['contourf_extend_patches.png'], remove_text=True,
+ style='mpl20')
+def test_colorbar_contourf_extend_patches():
+ params = [
+ ('both', 5, ['\\', '//']),
+ ('min', 7, ['+']),
+ ('max', 2, ['|', '-', '/', '\\', '//']),
+ ('neither', 10, ['//', '\\', '||']),
+ ]
+
+ plt.rcParams['axes.linewidth'] = 2
+
+ fig = plt.figure(figsize=(10, 4))
+ subfigs = fig.subfigures(1, 2)
+ fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95)
+
+ x = np.linspace(-2, 3, 50)
+ y = np.linspace(-2, 3, 30)
+ z = np.cos(x[np.newaxis, :]) + np.sin(y[:, np.newaxis])
+
+ cmap = mpl.colormaps["viridis"]
+ for orientation, subfig in zip(['horizontal', 'vertical'], subfigs):
+ axs = subfig.subplots(2, 2).ravel()
+ for ax, (extend, levels, hatches) in zip(axs, params):
+ cs = ax.contourf(x, y, z, levels, hatches=hatches,
+ cmap=cmap, extend=extend)
+ subfig.colorbar(cs, ax=ax, orientation=orientation, fraction=0.4,
+ extendfrac=0.2, aspect=5)
+
+
def test_negative_boundarynorm():
fig, ax = plt.subplots(figsize=(1, 3))
cmap = mpl.colormaps["viridis"]
diff --git a/lib/matplotlib/tests/test_colors.py b/lib/matplotlib/tests/test_colors.py
index f0c23038e11a..4e6ad9cadcf4 100644
--- a/lib/matplotlib/tests/test_colors.py
+++ b/lib/matplotlib/tests/test_colors.py
@@ -109,6 +109,26 @@ def test_register_cmap():
cm.register_cmap('nome', cmap='not a cmap')
+def test_colormaps_get_cmap():
+ cr = mpl.colormaps
+
+ # check str, and Colormap pass
+ assert cr.get_cmap('plasma') == cr["plasma"]
+ assert cr.get_cmap(cr["magma"]) == cr["magma"]
+
+ # check default
+ assert cr.get_cmap(None) == cr[mpl.rcParams['image.cmap']]
+
+ # check ValueError on bad name
+ bad_cmap = 'AardvarksAreAwkward'
+ with pytest.raises(ValueError, match=bad_cmap):
+ cr.get_cmap(bad_cmap)
+
+ # check TypeError on bad type
+ with pytest.raises(TypeError, match='object'):
+ cr.get_cmap(object())
+
+
def test_double_register_builtin_cmap():
name = "viridis"
match = f"Re-registering the builtin cmap {name!r}."
@@ -117,7 +137,7 @@ def test_double_register_builtin_cmap():
mpl.colormaps[name], name=name, force=True
)
with pytest.raises(ValueError, match='A colormap named "viridis"'):
- with pytest.warns():
+ with pytest.warns(PendingDeprecationWarning):
cm.register_cmap(name, mpl.colormaps[name])
with pytest.warns(UserWarning):
# TODO is warning more than once!
@@ -128,7 +148,7 @@ def test_unregister_builtin_cmap():
name = "viridis"
match = f'cannot unregister {name!r} which is a builtin colormap.'
with pytest.raises(ValueError, match=match):
- with pytest.warns():
+ with pytest.warns(PendingDeprecationWarning):
cm.unregister_cmap(name)
@@ -223,7 +243,7 @@ def test_colormap_invalid():
# Test scalar representations
assert_array_equal(cmap(-np.inf), cmap(0))
assert_array_equal(cmap(np.inf), cmap(1.0))
- assert_array_equal(cmap(np.nan), np.array([0., 0., 0., 0.]))
+ assert_array_equal(cmap(np.nan), [0., 0., 0., 0.])
def test_colormap_return_types():
@@ -554,7 +574,7 @@ def test_Normalize():
# i.e. 127-(-128) here).
vals = np.array([-128, 127], dtype=np.int8)
norm = mcolors.Normalize(vals.min(), vals.max())
- assert_array_equal(np.asarray(norm(vals)), [0, 1])
+ assert_array_equal(norm(vals), [0, 1])
# Don't lose precision on longdoubles (float128 on Linux):
# for array inputs...
diff --git a/lib/matplotlib/tests/test_constrainedlayout.py b/lib/matplotlib/tests/test_constrainedlayout.py
index 35eb850fcd70..64906b74c3ff 100644
--- a/lib/matplotlib/tests/test_constrainedlayout.py
+++ b/lib/matplotlib/tests/test_constrainedlayout.py
@@ -656,3 +656,14 @@ def test_compressed1():
pos = axs[1, 2].get_position()
np.testing.assert_allclose(pos.x1, 0.8618, atol=1e-3)
np.testing.assert_allclose(pos.y0, 0.1934, atol=1e-3)
+
+
+@pytest.mark.parametrize('arg, state', [
+ (True, True),
+ (False, False),
+ ({}, True),
+ ({'rect': None}, True)
+])
+def test_set_constrained_layout(arg, state):
+ fig, ax = plt.subplots(constrained_layout=arg)
+ assert fig.get_constrained_layout() is state
diff --git a/lib/matplotlib/tests/test_contour.py b/lib/matplotlib/tests/test_contour.py
index 2c76f34cb180..e42206b8cb79 100644
--- a/lib/matplotlib/tests/test_contour.py
+++ b/lib/matplotlib/tests/test_contour.py
@@ -4,7 +4,8 @@
import contourpy
import numpy as np
-from numpy.testing import assert_array_almost_equal
+from numpy.testing import (
+ assert_array_almost_equal, assert_array_almost_equal_nulp)
import matplotlib as mpl
from matplotlib.testing.decorators import image_comparison
from matplotlib import pyplot as plt, rc_context
@@ -313,7 +314,7 @@ def test_contourf_log_extension():
cb = plt.colorbar(c1, ax=ax1)
assert cb.ax.get_ylim() == (1e-8, 1e10)
cb = plt.colorbar(c2, ax=ax2)
- assert cb.ax.get_ylim() == (1e-4, 1e6)
+ assert_array_almost_equal_nulp(cb.ax.get_ylim(), np.array((1e-4, 1e6)))
cb = plt.colorbar(c3, ax=ax3)
@@ -682,3 +683,13 @@ def test_negative_linestyles(style):
ax4.clabel(CS4, fontsize=9, inline=True)
ax4.set_title(f'Single color - negative contours {style}')
assert CS4.negative_linestyles == style
+
+
+def test_contour_remove():
+ ax = plt.figure().add_subplot()
+ orig_children = ax.get_children()
+ cs = ax.contour(np.arange(16).reshape((4, 4)))
+ cs.clabel()
+ assert ax.get_children() != orig_children
+ cs.remove()
+ assert ax.get_children() == orig_children
diff --git a/lib/matplotlib/tests/test_dates.py b/lib/matplotlib/tests/test_dates.py
index 57a131cec4a0..43813a161db0 100644
--- a/lib/matplotlib/tests/test_dates.py
+++ b/lib/matplotlib/tests/test_dates.py
@@ -322,13 +322,13 @@ def callable_formatting_function(dates, _):
@pytest.mark.parametrize('delta, expected', [
(datetime.timedelta(weeks=52 * 200),
- range(1990, 2171, 20)),
+ [r'$\mathdefault{%d}$' % year for year in range(1990, 2171, 20)]),
(datetime.timedelta(days=30),
- ['1990-01-%02d' % day for day in range(1, 32, 3)]),
+ [r'$\mathdefault{1990{-}01{-}%02d}$' % day for day in range(1, 32, 3)]),
(datetime.timedelta(hours=20),
- ['01-01 %02d' % hour for hour in range(0, 21, 2)]),
+ [r'$\mathdefault{01{-}01\;%02d}$' % hour for hour in range(0, 21, 2)]),
(datetime.timedelta(minutes=10),
- ['01 00:%02d' % minu for minu in range(0, 11)]),
+ [r'$\mathdefault{01\;00{:}%02d}$' % minu for minu in range(0, 11)]),
])
def test_date_formatter_usetex(delta, expected):
style.use("default")
@@ -341,8 +341,7 @@ def test_date_formatter_usetex(delta, expected):
locator.axis.set_view_interval(mdates.date2num(d1), mdates.date2num(d2))
formatter = mdates.AutoDateFormatter(locator, usetex=True)
- assert [formatter(loc) for loc in locator()] == [
- r'{\fontfamily{\familydefault}\selectfont %s}' % s for s in expected]
+ assert [formatter(loc) for loc in locator()] == expected
def test_drange():
@@ -645,14 +644,24 @@ def test_offset_changes():
@pytest.mark.parametrize('t_delta, expected', [
(datetime.timedelta(weeks=52 * 200),
- range(1980, 2201, 20)),
+ ['$\\mathdefault{%d}$' % (t, ) for t in range(1980, 2201, 20)]),
(datetime.timedelta(days=40),
- ['Jan', '05', '09', '13', '17', '21', '25', '29', 'Feb', '05', '09']),
+ ['Jan', '$\\mathdefault{05}$', '$\\mathdefault{09}$',
+ '$\\mathdefault{13}$', '$\\mathdefault{17}$', '$\\mathdefault{21}$',
+ '$\\mathdefault{25}$', '$\\mathdefault{29}$', 'Feb',
+ '$\\mathdefault{05}$', '$\\mathdefault{09}$']),
(datetime.timedelta(hours=40),
- ['Jan-01', '04:00', '08:00', '12:00', '16:00', '20:00',
- 'Jan-02', '04:00', '08:00', '12:00', '16:00']),
+ ['Jan$\\mathdefault{{-}01}$', '$\\mathdefault{04{:}00}$',
+ '$\\mathdefault{08{:}00}$', '$\\mathdefault{12{:}00}$',
+ '$\\mathdefault{16{:}00}$', '$\\mathdefault{20{:}00}$',
+ 'Jan$\\mathdefault{{-}02}$', '$\\mathdefault{04{:}00}$',
+ '$\\mathdefault{08{:}00}$', '$\\mathdefault{12{:}00}$',
+ '$\\mathdefault{16{:}00}$']),
(datetime.timedelta(seconds=2),
- ['59.5', '00:00', '00.5', '01.0', '01.5', '02.0', '02.5']),
+ ['$\\mathdefault{59.5}$', '$\\mathdefault{00{:}00}$',
+ '$\\mathdefault{00.5}$', '$\\mathdefault{01.0}$',
+ '$\\mathdefault{01.5}$', '$\\mathdefault{02.0}$',
+ '$\\mathdefault{02.5}$']),
])
def test_concise_formatter_usetex(t_delta, expected):
d1 = datetime.datetime(1997, 1, 1)
@@ -663,8 +672,7 @@ def test_concise_formatter_usetex(t_delta, expected):
locator.axis.set_view_interval(mdates.date2num(d1), mdates.date2num(d2))
formatter = mdates.ConciseDateFormatter(locator, usetex=True)
- assert formatter.format_ticks(locator()) == [
- r'{\fontfamily{\familydefault}\selectfont %s}' % s for s in expected]
+ assert formatter.format_ticks(locator()) == expected
def test_concise_formatter_formats():
@@ -1232,31 +1240,19 @@ def test_change_interval_multiples():
assert ax.get_xticklabels()[1].get_text() == 'Feb 01 2020'
-def test_epoch2num():
- with _api.suppress_matplotlib_deprecation_warning():
+def test_julian2num():
+ with pytest.warns(_api.MatplotlibDeprecationWarning):
mdates._reset_epoch_test_example()
mdates.set_epoch('0000-12-31')
- assert mdates.epoch2num(86400) == 719164.0
- assert mdates.num2epoch(719165.0) == 86400 * 2
+ # 2440587.5 is julian date for 1970-01-01T00:00:00
+ # https://en.wikipedia.org/wiki/Julian_day
+ assert mdates.julian2num(2440588.5) == 719164.0
+ assert mdates.num2julian(719165.0) == 2440589.5
# set back to the default
mdates._reset_epoch_test_example()
mdates.set_epoch('1970-01-01T00:00:00')
- assert mdates.epoch2num(86400) == 1.0
- assert mdates.num2epoch(2.0) == 86400 * 2
-
-
-def test_julian2num():
- mdates._reset_epoch_test_example()
- mdates.set_epoch('0000-12-31')
- # 2440587.5 is julian date for 1970-01-01T00:00:00
- # https://en.wikipedia.org/wiki/Julian_day
- assert mdates.julian2num(2440588.5) == 719164.0
- assert mdates.num2julian(719165.0) == 2440589.5
- # set back to the default
- mdates._reset_epoch_test_example()
- mdates.set_epoch('1970-01-01T00:00:00')
- assert mdates.julian2num(2440588.5) == 1.0
- assert mdates.num2julian(2.0) == 2440589.5
+ assert mdates.julian2num(2440588.5) == 1.0
+ assert mdates.num2julian(2.0) == 2440589.5
def test_DateLocator():
@@ -1347,12 +1343,6 @@ def test_date_ticker_factory(span, expected_locator):
assert isinstance(locator, expected_locator)
-def test_usetex_newline():
- fig, ax = plt.subplots()
- ax.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m\n%Y'))
- fig.canvas.draw()
-
-
def test_datetime_masked():
# make sure that all-masked data falls back to the viewlim
# set in convert.axisinfo....
diff --git a/lib/matplotlib/tests/test_figure.py b/lib/matplotlib/tests/test_figure.py
index 48b4a880e089..e38a772fe81e 100644
--- a/lib/matplotlib/tests/test_figure.py
+++ b/lib/matplotlib/tests/test_figure.py
@@ -262,7 +262,7 @@ def test_add_subplot_invalid():
fig.add_subplot(2, 2.0, 1)
_, ax = plt.subplots()
with pytest.raises(ValueError,
- match='The Subplot must have been created in the '
+ match='The Axes must have been created in the '
'present figure'):
fig.add_subplot(ax)
@@ -922,6 +922,26 @@ def test_nested_tuple(self, fig_test, fig_ref):
fig_ref.subplot_mosaic([["F"], [x]])
fig_test.subplot_mosaic([["F"], [xt]])
+ def test_nested_width_ratios(self):
+ x = [["A", [["B"],
+ ["C"]]]]
+ width_ratios = [2, 1]
+
+ fig, axd = plt.subplot_mosaic(x, width_ratios=width_ratios)
+
+ assert axd["A"].get_gridspec().get_width_ratios() == width_ratios
+ assert axd["B"].get_gridspec().get_width_ratios() != width_ratios
+
+ def test_nested_height_ratios(self):
+ x = [["A", [["B"],
+ ["C"]]], ["D", "D"]]
+ height_ratios = [1, 2]
+
+ fig, axd = plt.subplot_mosaic(x, height_ratios=height_ratios)
+
+ assert axd["D"].get_gridspec().get_height_ratios() == height_ratios
+ assert axd["B"].get_gridspec().get_height_ratios() != height_ratios
+
@check_figures_equal(extensions=["png"])
@pytest.mark.parametrize(
"x, empty_sentinel",
@@ -1412,3 +1432,11 @@ def test_unpickle_with_device_pixel_ratio():
assert fig.dpi == 42*7
fig2 = pickle.loads(pickle.dumps(fig))
assert fig2.dpi == 42
+
+
+def test_gridspec_no_mutate_input():
+ gs = {'left': .1}
+ gs_orig = dict(gs)
+ plt.subplots(1, 2, width_ratios=[1, 2], gridspec_kw=gs)
+ assert gs == gs_orig
+ plt.subplot_mosaic('AB', width_ratios=[1, 2], gridspec_kw=gs)
diff --git a/lib/matplotlib/tests/test_font_manager.py b/lib/matplotlib/tests/test_font_manager.py
index 2a34122d48e1..b1b5b37f039a 100644
--- a/lib/matplotlib/tests/test_font_manager.py
+++ b/lib/matplotlib/tests/test_font_manager.py
@@ -150,7 +150,8 @@ def test_find_invalid(tmpdir):
FT2Font(StringIO())
-@pytest.mark.skipif(sys.platform != 'linux', reason='Linux only')
+@pytest.mark.skipif(sys.platform != 'linux' or not has_fclist,
+ reason='only Linux with fontconfig installed')
def test_user_fonts_linux(tmpdir, monkeypatch):
font_test_file = 'mpltest.ttf'
diff --git a/lib/matplotlib/tests/test_fontconfig_pattern.py b/lib/matplotlib/tests/test_fontconfig_pattern.py
index 65eba804eebd..792a8ed517c2 100644
--- a/lib/matplotlib/tests/test_fontconfig_pattern.py
+++ b/lib/matplotlib/tests/test_fontconfig_pattern.py
@@ -1,3 +1,5 @@
+import pytest
+
from matplotlib.font_manager import FontProperties
@@ -60,7 +62,7 @@ def test_fontconfig_str():
assert getattr(font, k)() == getattr(right, k)(), test + k
test = "full "
- s = ("serif:size=24:style=oblique:variant=small-caps:weight=bold"
+ s = ("serif-24:style=oblique:variant=small-caps:weight=bold"
":stretch=expanded")
font = FontProperties(s)
right = FontProperties(family="serif", size=24, weight="bold",
@@ -68,3 +70,8 @@ def test_fontconfig_str():
stretch="expanded")
for k in keys:
assert getattr(font, k)() == getattr(right, k)(), test + k
+
+
+def test_fontconfig_unknown_constant():
+ with pytest.warns(DeprecationWarning):
+ FontProperties(":unknown")
diff --git a/lib/matplotlib/tests/test_legend.py b/lib/matplotlib/tests/test_legend.py
index 16847e0be6e2..6660b91ecdd9 100644
--- a/lib/matplotlib/tests/test_legend.py
+++ b/lib/matplotlib/tests/test_legend.py
@@ -671,6 +671,41 @@ def test_legend_labelcolor_linecolor():
assert mpl.colors.same_color(text.get_color(), color)
+def test_legend_pathcollection_labelcolor_linecolor():
+ # test the labelcolor for labelcolor='linecolor' on PathCollection
+ fig, ax = plt.subplots()
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', c='r')
+ ax.scatter(np.arange(10), np.arange(10)*2, label='#2', c='g')
+ ax.scatter(np.arange(10), np.arange(10)*3, label='#3', c='b')
+
+ leg = ax.legend(labelcolor='linecolor')
+ for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
+def test_legend_pathcollection_labelcolor_linecolor_iterable():
+ # test the labelcolor for labelcolor='linecolor' on PathCollection
+ # with iterable colors
+ fig, ax = plt.subplots()
+ colors = np.random.default_rng().choice(['r', 'g', 'b'], 10)
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', c=colors)
+
+ leg = ax.legend(labelcolor='linecolor')
+ text, = leg.get_texts()
+ assert mpl.colors.same_color(text.get_color(), 'black')
+
+
+def test_legend_pathcollection_labelcolor_linecolor_cmap():
+ # test the labelcolor for labelcolor='linecolor' on PathCollection
+ # with a colormap
+ fig, ax = plt.subplots()
+ ax.scatter(np.arange(10), np.arange(10), c=np.arange(10), label='#1')
+
+ leg = ax.legend(labelcolor='linecolor')
+ text, = leg.get_texts()
+ assert mpl.colors.same_color(text.get_color(), 'black')
+
+
def test_legend_labelcolor_markeredgecolor():
# test the labelcolor for labelcolor='markeredgecolor'
fig, ax = plt.subplots()
@@ -683,6 +718,49 @@ def test_legend_labelcolor_markeredgecolor():
assert mpl.colors.same_color(text.get_color(), color)
+def test_legend_pathcollection_labelcolor_markeredgecolor():
+ # test the labelcolor for labelcolor='markeredgecolor' on PathCollection
+ fig, ax = plt.subplots()
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', edgecolor='r')
+ ax.scatter(np.arange(10), np.arange(10)*2, label='#2', edgecolor='g')
+ ax.scatter(np.arange(10), np.arange(10)*3, label='#3', edgecolor='b')
+
+ leg = ax.legend(labelcolor='markeredgecolor')
+ for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
+def test_legend_pathcollection_labelcolor_markeredgecolor_iterable():
+ # test the labelcolor for labelcolor='markeredgecolor' on PathCollection
+ # with iterable colors
+ fig, ax = plt.subplots()
+ colors = np.random.default_rng().choice(['r', 'g', 'b'], 10)
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', edgecolor=colors)
+
+ leg = ax.legend(labelcolor='markeredgecolor')
+ for text, color in zip(leg.get_texts(), ['k']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
+def test_legend_pathcollection_labelcolor_markeredgecolor_cmap():
+ # test the labelcolor for labelcolor='markeredgecolor' on PathCollection
+ # with a colormap
+ fig, ax = plt.subplots()
+ edgecolors = mpl.cm.viridis(np.random.rand(10))
+ ax.scatter(
+ np.arange(10),
+ np.arange(10),
+ label='#1',
+ c=np.arange(10),
+ edgecolor=edgecolors,
+ cmap="Reds"
+ )
+
+ leg = ax.legend(labelcolor='markeredgecolor')
+ for text, color in zip(leg.get_texts(), ['k']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
def test_legend_labelcolor_markerfacecolor():
# test the labelcolor for labelcolor='markerfacecolor'
fig, ax = plt.subplots()
@@ -695,6 +773,48 @@ def test_legend_labelcolor_markerfacecolor():
assert mpl.colors.same_color(text.get_color(), color)
+def test_legend_pathcollection_labelcolor_markerfacecolor():
+ # test the labelcolor for labelcolor='markerfacecolor' on PathCollection
+ fig, ax = plt.subplots()
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', facecolor='r')
+ ax.scatter(np.arange(10), np.arange(10)*2, label='#2', facecolor='g')
+ ax.scatter(np.arange(10), np.arange(10)*3, label='#3', facecolor='b')
+
+ leg = ax.legend(labelcolor='markerfacecolor')
+ for text, color in zip(leg.get_texts(), ['r', 'g', 'b']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
+def test_legend_pathcollection_labelcolor_markerfacecolor_iterable():
+ # test the labelcolor for labelcolor='markerfacecolor' on PathCollection
+ # with iterable colors
+ fig, ax = plt.subplots()
+ colors = np.random.default_rng().choice(['r', 'g', 'b'], 10)
+ ax.scatter(np.arange(10), np.arange(10)*1, label='#1', facecolor=colors)
+
+ leg = ax.legend(labelcolor='markerfacecolor')
+ for text, color in zip(leg.get_texts(), ['k']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
+def test_legend_pathcollection_labelcolor_markfacecolor_cmap():
+ # test the labelcolor for labelcolor='markerfacecolor' on PathCollection
+ # with colormaps
+ fig, ax = plt.subplots()
+ facecolors = mpl.cm.viridis(np.random.rand(10))
+ ax.scatter(
+ np.arange(10),
+ np.arange(10),
+ label='#1',
+ c=np.arange(10),
+ facecolor=facecolors
+ )
+
+ leg = ax.legend(labelcolor='markerfacecolor')
+ for text, color in zip(leg.get_texts(), ['k']):
+ assert mpl.colors.same_color(text.get_color(), color)
+
+
@pytest.mark.parametrize('color', ('red', 'none', (.5, .5, .5)))
def test_legend_labelcolor_rcparam_single(color):
# test the rcParams legend.labelcolor for a single color
@@ -783,6 +903,14 @@ def test_get_set_draggable():
assert not legend.get_draggable()
+@pytest.mark.parametrize('draggable', (True, False))
+def test_legend_draggable(draggable):
+ fig, ax = plt.subplots()
+ ax.plot(range(10), label='shabnams')
+ leg = ax.legend(draggable=draggable)
+ assert leg.get_draggable() is draggable
+
+
def test_alpha_handles():
x, n, hh = plt.hist([1, 2, 3], alpha=0.25, label='data', color='red')
legend = plt.legend()
diff --git a/lib/matplotlib/tests/test_mathtext.py b/lib/matplotlib/tests/test_mathtext.py
index 7f8b06fa0e9b..fbb9e9cddf61 100644
--- a/lib/matplotlib/tests/test_mathtext.py
+++ b/lib/matplotlib/tests/test_mathtext.py
@@ -100,7 +100,7 @@
r"$? ! &$", # github issue #466
None,
None,
- r"$\left\Vert a \right\Vert \left\vert b \right\vert \left| a \right| \left\| b\right\| \Vert a \Vert \vert b \vert$",
+ r"$\left\Vert \frac{a}{b} \right\Vert \left\vert \frac{a}{b} \right\vert \left\| \frac{a}{b}\right\| \left| \frac{a}{b} \right| \Vert a \Vert \vert b \vert \| a \| | b |$",
r'$\mathring{A} \AA$',
r'$M \, M \thinspace M \/ M \> M \: M \; M \ M \enspace M \quad M \qquad M \! M$',
r'$\Cap$ $\Cup$ $\leftharpoonup$ $\barwedge$ $\rightharpoonup$',
diff --git a/lib/matplotlib/tests/test_mlab.py b/lib/matplotlib/tests/test_mlab.py
index 75ca0648a4e1..86beb5c8c803 100644
--- a/lib/matplotlib/tests/test_mlab.py
+++ b/lib/matplotlib/tests/test_mlab.py
@@ -97,7 +97,7 @@ def test_window():
class TestDetrend:
- def setup(self):
+ def setup_method(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
diff --git a/lib/matplotlib/tests/test_png.py b/lib/matplotlib/tests/test_png.py
index 1e8ad89c9511..646db60cd0ae 100644
--- a/lib/matplotlib/tests/test_png.py
+++ b/lib/matplotlib/tests/test_png.py
@@ -26,18 +26,17 @@ def test_pngsuite():
plt.gca().set_xlim(0, len(files))
-def test_truncated_file(tmpdir):
- d = tmpdir.mkdir('test')
- fname = str(d.join('test.png'))
- fname_t = str(d.join('test_truncated.png'))
- plt.savefig(fname)
- with open(fname, 'rb') as fin:
+def test_truncated_file(tmp_path):
+ path = tmp_path / 'test.png'
+ path_t = tmp_path / 'test_truncated.png'
+ plt.savefig(path)
+ with open(path, 'rb') as fin:
buf = fin.read()
- with open(fname_t, 'wb') as fout:
+ with open(path_t, 'wb') as fout:
fout.write(buf[:20])
with pytest.raises(Exception):
- plt.imread(fname_t)
+ plt.imread(path_t)
def test_truncated_buffer():
diff --git a/lib/matplotlib/tests/test_pyplot.py b/lib/matplotlib/tests/test_pyplot.py
index 2e51af54ea88..3b9632cf7795 100644
--- a/lib/matplotlib/tests/test_pyplot.py
+++ b/lib/matplotlib/tests/test_pyplot.py
@@ -1,5 +1,4 @@
import difflib
-import re
import numpy as np
import subprocess
@@ -367,10 +366,42 @@ def test_doc_pyplot_summary():
if not pyplot_docs.exists():
pytest.skip("Documentation sources not available")
- lines = pyplot_docs.read_text()
- m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL)
- doc_functions = set(line.strip() for line in m.group(1).split('\n'))
- plot_commands = set(plt.get_plot_commands())
+ def extract_documented_functions(lines):
+ """
+ Return a list of all the functions that are mentioned in the
+ autosummary blocks contained in *lines*.
+
+ An autosummary block looks like this::
+
+ .. autosummary::
+ :toctree: _as_gen
+ :template: autosummary.rst
+ :nosignatures:
+
+ plot
+ plot_date
+
+ """
+ functions = []
+ in_autosummary = False
+ for line in lines:
+ if not in_autosummary:
+ if line.startswith(".. autosummary::"):
+ in_autosummary = True
+ else:
+ if not line or line.startswith(" :"):
+ # empty line or autosummary parameter
+ continue
+ if not line[0].isspace():
+ # no more indentation: end of autosummary block
+ in_autosummary = False
+ continue
+ functions.append(line.strip())
+ return functions
+
+ lines = pyplot_docs.read_text().split("\n")
+ doc_functions = set(extract_documented_functions(lines))
+ plot_commands = set(plt._get_pyplot_commands())
missing = plot_commands.difference(doc_functions)
if missing:
raise AssertionError(
@@ -398,3 +429,14 @@ def test_minor_ticks():
tick_labels = ax.get_yticklabels(minor=True)
assert np.all(tick_pos == np.array([3.5, 6.5]))
assert [l.get_text() for l in tick_labels] == ['a', 'b']
+
+
+def test_switch_backend_no_close():
+ plt.switch_backend('agg')
+ fig = plt.figure()
+ fig = plt.figure()
+ assert len(plt.get_fignums()) == 2
+ plt.switch_backend('agg')
+ assert len(plt.get_fignums()) == 2
+ plt.switch_backend('svg')
+ assert len(plt.get_fignums()) == 0
diff --git a/lib/matplotlib/tests/test_sankey.py b/lib/matplotlib/tests/test_sankey.py
index 6cc036a9f1ce..cbb7f516a65c 100644
--- a/lib/matplotlib/tests/test_sankey.py
+++ b/lib/matplotlib/tests/test_sankey.py
@@ -1,5 +1,5 @@
import pytest
-import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
from matplotlib.sankey import Sankey
from matplotlib.testing.decorators import check_figures_equal
@@ -67,28 +67,28 @@ def test_sankey2():
s = Sankey(flows=[0.25, -0.25, 0.5, -0.5], labels=['Foo'],
orientations=[-1], unit='Bar')
sf = s.finish()
- assert np.all(np.equal(np.array((0.25, -0.25, 0.5, -0.5)), sf[0].flows))
+ assert_array_equal(sf[0].flows, [0.25, -0.25, 0.5, -0.5])
assert sf[0].angles == [1, 3, 1, 3]
assert all([text.get_text()[0:3] == 'Foo' for text in sf[0].texts])
assert all([text.get_text()[-3:] == 'Bar' for text in sf[0].texts])
assert sf[0].text.get_text() == ''
- assert np.allclose(np.array(((-1.375, -0.52011255),
- (1.375, -0.75506044),
- (-0.75, -0.41522509),
- (0.75, -0.8599479))),
- sf[0].tips)
+ assert_allclose(sf[0].tips,
+ [(-1.375, -0.52011255),
+ (1.375, -0.75506044),
+ (-0.75, -0.41522509),
+ (0.75, -0.8599479)])
s = Sankey(flows=[0.25, -0.25, 0, 0.5, -0.5], labels=['Foo'],
orientations=[-1], unit='Bar')
sf = s.finish()
- assert np.all(np.equal(np.array((0.25, -0.25, 0, 0.5, -0.5)), sf[0].flows))
+ assert_array_equal(sf[0].flows, [0.25, -0.25, 0, 0.5, -0.5])
assert sf[0].angles == [1, 3, None, 1, 3]
- assert np.allclose(np.array(((-1.375, -0.52011255),
- (1.375, -0.75506044),
- (0, 0),
- (-0.75, -0.41522509),
- (0.75, -0.8599479))),
- sf[0].tips)
+ assert_allclose(sf[0].tips,
+ [(-1.375, -0.52011255),
+ (1.375, -0.75506044),
+ (0, 0),
+ (-0.75, -0.41522509),
+ (0.75, -0.8599479)])
@check_figures_equal(extensions=['png'])
diff --git a/lib/matplotlib/tests/test_streamplot.py b/lib/matplotlib/tests/test_streamplot.py
index 5ee6df09e4b2..10a64f1d6968 100644
--- a/lib/matplotlib/tests/test_streamplot.py
+++ b/lib/matplotlib/tests/test_streamplot.py
@@ -34,7 +34,8 @@ def test_startpoints():
plt.plot(start_x, start_y, 'ok')
-@image_comparison(['streamplot_colormap'], remove_text=True, style='mpl20')
+@image_comparison(['streamplot_colormap'], remove_text=True, style='mpl20',
+ tol=0.022)
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
diff --git a/lib/matplotlib/tests/test_style.py b/lib/matplotlib/tests/test_style.py
index e1388819fff4..c788c45920ae 100644
--- a/lib/matplotlib/tests/test_style.py
+++ b/lib/matplotlib/tests/test_style.py
@@ -184,6 +184,8 @@ def test_deprecated_seaborn_styles():
with pytest.warns(mpl._api.MatplotlibDeprecationWarning):
mpl.style.use("seaborn-bright")
assert mpl.rcParams == seaborn_bright
+ with pytest.warns(mpl._api.MatplotlibDeprecationWarning):
+ mpl.style.library["seaborn-bright"]
def test_up_to_date_blacklist():
diff --git a/lib/matplotlib/tests/test_subplots.py b/lib/matplotlib/tests/test_subplots.py
index f299440ef53e..732418f19e2f 100644
--- a/lib/matplotlib/tests/test_subplots.py
+++ b/lib/matplotlib/tests/test_subplots.py
@@ -3,9 +3,9 @@
import numpy as np
import pytest
+from matplotlib.axes import Axes, SubplotBase
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import check_figures_equal, image_comparison
-import matplotlib.axes as maxes
def check_shared(axs, x_shared, y_shared):
@@ -122,6 +122,12 @@ def test_label_outer_span():
fig.axes, [False, True, False, True], [True, True, False, False])
+def test_label_outer_non_gridspec():
+ ax = plt.axes([0, 0, 1, 1])
+ ax.label_outer() # Does nothing.
+ check_visible([ax], [True], [True])
+
+
def test_shared_and_moved():
# test if sharey is on, but then tick_left is called that labels don't
# re-appear. Seaborn does this just to be sure yaxis is on left...
@@ -209,11 +215,6 @@ def test_dont_mutate_kwargs():
assert gridspec_kw == {'width_ratios': [1, 2]}
-def test_subplot_factory_reapplication():
- assert maxes.subplot_class_factory(maxes.Axes) is maxes.Subplot
- assert maxes.subplot_class_factory(maxes.Subplot) is maxes.Subplot
-
-
@pytest.mark.parametrize("width_ratios", [None, [1, 3, 2]])
@pytest.mark.parametrize("height_ratios", [None, [1, 2]])
@check_figures_equal(extensions=['png'])
@@ -251,3 +252,11 @@ def test_ratio_overlapping_kws(method, args):
with pytest.raises(ValueError, match='width_ratios'):
getattr(plt, method)(*args, width_ratios=[1, 2, 3],
gridspec_kw={'width_ratios': [1, 2, 3]})
+
+
+def test_old_subplot_compat():
+ fig = plt.figure()
+ assert isinstance(fig.add_subplot(), SubplotBase)
+ assert not isinstance(fig.add_axes(rect=[0, 0, 1, 1]), SubplotBase)
+ with pytest.raises(TypeError):
+ Axes(fig, [0, 0, 1, 1], rect=[0, 0, 1, 1])
diff --git a/lib/matplotlib/tests/test_texmanager.py b/lib/matplotlib/tests/test_texmanager.py
index 29ed9d86597f..fbff21144e60 100644
--- a/lib/matplotlib/tests/test_texmanager.py
+++ b/lib/matplotlib/tests/test_texmanager.py
@@ -1,5 +1,8 @@
+import os
from pathlib import Path
import re
+import subprocess
+import sys
import matplotlib.pyplot as plt
from matplotlib.texmanager import TexManager
@@ -57,3 +60,15 @@ def test_unicode_characters():
with pytest.raises(RuntimeError):
ax.set_title('\N{SNOWMAN}')
fig.canvas.draw()
+
+
+@needs_usetex
+def test_openin_any_paranoid():
+ completed = subprocess.run(
+ [sys.executable, "-c",
+ 'import matplotlib.pyplot as plt;'
+ 'plt.rcParams.update({"text.usetex": True});'
+ 'plt.title("paranoid");'
+ 'plt.show(block=False);'],
+ env={**os.environ, 'openin_any': 'p'}, check=True, capture_output=True)
+ assert completed.stderr == b""
diff --git a/lib/matplotlib/tests/test_text.py b/lib/matplotlib/tests/test_text.py
index b5c1bbff641b..f775efa01de4 100644
--- a/lib/matplotlib/tests/test_text.py
+++ b/lib/matplotlib/tests/test_text.py
@@ -339,6 +339,32 @@ def test_set_position():
assert a + shift_val == b
+def test_char_index_at():
+ fig = plt.figure()
+ text = fig.text(0.1, 0.9, "")
+
+ text.set_text("i")
+ bbox = text.get_window_extent()
+ size_i = bbox.x1 - bbox.x0
+
+ text.set_text("m")
+ bbox = text.get_window_extent()
+ size_m = bbox.x1 - bbox.x0
+
+ text.set_text("iiiimmmm")
+ bbox = text.get_window_extent()
+ origin = bbox.x0
+
+ assert text._char_index_at(origin - size_i) == 0 # left of first char
+ assert text._char_index_at(origin) == 0
+ assert text._char_index_at(origin + 0.499*size_i) == 0
+ assert text._char_index_at(origin + 0.501*size_i) == 1
+ assert text._char_index_at(origin + size_i*3) == 3
+ assert text._char_index_at(origin + size_i*4 + size_m*3) == 7
+ assert text._char_index_at(origin + size_i*4 + size_m*4) == 8
+ assert text._char_index_at(origin + size_i*4 + size_m*10) == 8
+
+
@pytest.mark.parametrize('text', ['', 'O'], ids=['empty', 'non-empty'])
def test_non_default_dpi(text):
fig, ax = plt.subplots()
@@ -806,11 +832,26 @@ def test_metrics_cache():
fig = plt.figure()
fig.text(.3, .5, "foo\nbar")
- fig.text(.5, .5, "foo\nbar")
fig.text(.3, .5, "foo\nbar", usetex=True)
fig.text(.5, .5, "foo\nbar", usetex=True)
fig.canvas.draw()
+ renderer = fig._get_renderer()
+ ys = {} # mapping of strings to where they were drawn in y with draw_tex.
+
+ def call(*args, **kwargs):
+ renderer, x, y, s, *_ = args
+ ys.setdefault(s, set()).add(y)
+
+ renderer.draw_tex = call
+ fig.canvas.draw()
+ assert [*ys] == ["foo", "bar"]
+ # Check that both TeX strings were drawn with the same y-position for both
+ # single-line substrings. Previously, there used to be an incorrect cache
+ # collision with the non-TeX string (drawn first here) whose metrics would
+ # get incorrectly reused by the first TeX string.
+ assert len(ys["foo"]) == len(ys["bar"]) == 1
info = mpl.text._get_text_metrics_with_cache_impl.cache_info()
- # Each string gets drawn twice, so the second draw results in a hit.
- assert info.hits == info.misses
+ # Every string gets a miss for the first layouting (extents), then a hit
+ # when drawing, but "foo\nbar" gets two hits as it's drawn twice.
+ assert info.hits > info.misses
diff --git a/lib/matplotlib/tests/test_ticker.py b/lib/matplotlib/tests/test_ticker.py
index e91d3236020d..b474dfdd5eaa 100644
--- a/lib/matplotlib/tests/test_ticker.py
+++ b/lib/matplotlib/tests/test_ticker.py
@@ -1,6 +1,7 @@
from contextlib import nullcontext
import itertools
import locale
+import logging
import re
import numpy as np
@@ -725,6 +726,24 @@ def test_mathtext_ticks(self):
ax.set_xticks([-1, 0, 1])
fig.canvas.draw()
+ def test_cmr10_substitutions(self, caplog):
+ mpl.rcParams.update({
+ 'font.family': 'cmr10',
+ 'mathtext.fontset': 'cm',
+ 'axes.formatter.use_mathtext': True,
+ })
+
+ # Test that it does not log a warning about missing glyphs.
+ with caplog.at_level(logging.WARNING, logger='matplotlib.mathtext'):
+ fig, ax = plt.subplots()
+ ax.plot([-0.03, 0.05], [40, 0.05])
+ ax.set_yscale('log')
+ yticks = [0.02, 0.3, 4, 50]
+ formatter = mticker.LogFormatterSciNotation()
+ ax.set_yticks(yticks, map(formatter, yticks))
+ fig.canvas.draw()
+ assert not caplog.text
+
def test_empty_locs(self):
sf = mticker.ScalarFormatter()
sf.set_locs([])
diff --git a/lib/matplotlib/tests/test_transforms.py b/lib/matplotlib/tests/test_transforms.py
index 55fcdd937656..6c5b3496bc7c 100644
--- a/lib/matplotlib/tests/test_transforms.py
+++ b/lib/matplotlib/tests/test_transforms.py
@@ -422,7 +422,7 @@ def test_pathc_extents_non_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtransforms.Affine2D().translate(10, 10))
- pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
+ pth = Path([[0, 0], [0, 10], [10, 10], [10, 0]])
patch = mpatches.PathPatch(pth,
transform=offset + na_offset + ax.transData)
ax.add_patch(patch)
@@ -432,7 +432,7 @@ def test_pathc_extents_non_affine(self):
def test_pathc_extents_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
- pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
+ pth = Path([[0, 0], [0, 10], [10, 10], [10, 0]])
patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
ax.add_patch(patch)
expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
@@ -510,7 +510,7 @@ def test_str_transform():
Affine2D().scale(1.0),
Affine2D().scale(1.0))),
PolarTransform(
- PolarAxesSubplot(0.125,0.1;0.775x0.8),
+ PolarAxes(0.125,0.1;0.775x0.8),
use_rmin=True,
_apply_theta_transforms=False)),
CompositeGenericTransform(
diff --git a/lib/matplotlib/tests/test_triangulation.py b/lib/matplotlib/tests/test_triangulation.py
index 75b2a51dfaf9..9c6dec71e09f 100644
--- a/lib/matplotlib/tests/test_triangulation.py
+++ b/lib/matplotlib/tests/test_triangulation.py
@@ -243,7 +243,7 @@ def test_tripcolor_color():
fig, ax = plt.subplots()
with pytest.raises(TypeError, match=r"tripcolor\(\) missing 1 required "):
ax.tripcolor(x, y)
- with pytest.raises(ValueError, match="The length of C must match either"):
+ with pytest.raises(ValueError, match="The length of c must match either"):
ax.tripcolor(x, y, [1, 2, 3])
with pytest.raises(ValueError,
match="length of facecolors must match .* triangles"):
@@ -255,7 +255,7 @@ def test_tripcolor_color():
match="'gouraud' .* at the points.* not at the faces"):
ax.tripcolor(x, y, [1, 2], shading='gouraud') # faces
with pytest.raises(TypeError,
- match="positional.*'C'.*keyword-only.*'facecolors'"):
+ match="positional.*'c'.*keyword-only.*'facecolors'"):
ax.tripcolor(x, y, C=[1, 2, 3, 4])
# smoke test for valid color specifications (via C or facecolors)
@@ -278,16 +278,16 @@ def test_tripcolor_clim():
def test_tripcolor_warnings():
x = [-1, 0, 1, 0]
y = [0, -1, 0, 1]
- C = [0.4, 0.5]
+ c = [0.4, 0.5]
fig, ax = plt.subplots()
# additional parameters
with pytest.warns(DeprecationWarning, match="Additional positional param"):
- ax.tripcolor(x, y, C, 'unused_positional')
- # facecolors takes precedence over C
- with pytest.warns(UserWarning, match="Positional parameter C .*no effect"):
- ax.tripcolor(x, y, C, facecolors=C)
- with pytest.warns(UserWarning, match="Positional parameter C .*no effect"):
- ax.tripcolor(x, y, 'interpreted as C', facecolors=C)
+ ax.tripcolor(x, y, c, 'unused_positional')
+ # facecolors takes precedence over c
+ with pytest.warns(UserWarning, match="Positional parameter c .*no effect"):
+ ax.tripcolor(x, y, c, facecolors=c)
+ with pytest.warns(UserWarning, match="Positional parameter c .*no effect"):
+ ax.tripcolor(x, y, 'interpreted as c', facecolors=c)
def test_no_modify():
@@ -614,15 +614,15 @@ def poisson_sparse_matrix(n, m):
# Instantiating a sparse Poisson matrix of size 48 x 48:
(n, m) = (12, 4)
- mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
+ mat = mtri._triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 48 basis vector
for itest in range(n*m):
b = np.zeros(n*m, dtype=np.float64)
b[itest] = 1.
- x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
- tol=1.e-10)
+ x, _ = mtri._triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
+ tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 2) Same matrix with inserting 2 rows - cols with null diag terms
@@ -635,16 +635,16 @@ def poisson_sparse_matrix(n, m):
rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
vals = np.concatenate([vals, [1., 1., 1., 1.]])
- mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
- (n*m + 2, n*m + 2))
+ mat = mtri._triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
+ (n*m + 2, n*m + 2))
mat.compress_csc()
mat_dense = mat.to_dense()
# Testing a sparse solve for all 50 basis vec
for itest in range(n*m + 2):
b = np.zeros(n*m + 2, dtype=np.float64)
b[itest] = 1.
- x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2),
- tol=1.e-10)
+ x, _ = mtri._triinterpolate._cg(A=mat, b=b, x0=np.ones(n * m + 2),
+ tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
# 3) Now a simple test that summation of duplicate (i.e. with same rows,
@@ -655,7 +655,7 @@ def poisson_sparse_matrix(n, m):
cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
dtype=np.int32)
dim = (3, 3)
- mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
+ mat = mtri._triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
mat.compress_csc()
mat_dense = mat.to_dense()
assert_array_almost_equal(mat_dense, np.array([
@@ -678,7 +678,7 @@ def test_triinterpcubic_geom_weights():
y_rot = -np.sin(theta)*x + np.cos(theta)*y
triang = mtri.Triangulation(x_rot, y_rot, triangles)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
- dof_estimator = mtri.triinterpolate._DOF_estimator_geom(cubic_geom)
+ dof_estimator = mtri._triinterpolate._DOF_estimator_geom(cubic_geom)
weights = dof_estimator.compute_geom_weights()
# Testing for the 4 possibilities...
sum_w[0, :] = np.sum(weights, 1) - 1
@@ -944,8 +944,7 @@ def test_tritools():
mask = np.array([False, False, True], dtype=bool)
triang = mtri.Triangulation(x, y, triangles, mask=mask)
analyser = mtri.TriAnalyzer(triang)
- assert_array_almost_equal(analyser.scale_factors,
- np.array([1., 1./(1.+0.5*np.sqrt(3.))]))
+ assert_array_almost_equal(analyser.scale_factors, [1, 1/(1+3**.5/2)])
assert_array_almost_equal(
analyser.circle_ratios(rescale=False),
np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask))
diff --git a/lib/matplotlib/tests/test_usetex.py b/lib/matplotlib/tests/test_usetex.py
index 22309afdaf97..0f01ebaffb56 100644
--- a/lib/matplotlib/tests/test_usetex.py
+++ b/lib/matplotlib/tests/test_usetex.py
@@ -64,6 +64,21 @@ def test_mathdefault():
fig.canvas.draw()
+@image_comparison(['eqnarray.png'])
+def test_multiline_eqnarray():
+ text = (
+ r'\begin{eqnarray*}'
+ r'foo\\'
+ r'bar\\'
+ r'baz\\'
+ r'\end{eqnarray*}'
+ )
+
+ fig = plt.figure(figsize=(1, 1))
+ fig.text(0.5, 0.5, text, usetex=True,
+ horizontalalignment='center', verticalalignment='center')
+
+
@pytest.mark.parametrize("fontsize", [8, 10, 12])
def test_minus_no_descent(fontsize):
# Test special-casing of minus descent in DviFont._height_depth_of, by
diff --git a/lib/matplotlib/texmanager.py b/lib/matplotlib/texmanager.py
index edac2fec9c79..4bd113767c23 100644
--- a/lib/matplotlib/texmanager.py
+++ b/lib/matplotlib/texmanager.py
@@ -100,11 +100,6 @@ class TexManager:
'computer modern typewriter': 'monospace',
}
- grey_arrayd = _api.deprecate_privatize_attribute("3.5")
- font_family = _api.deprecate_privatize_attribute("3.5")
- font_families = _api.deprecate_privatize_attribute("3.5")
- font_info = _api.deprecate_privatize_attribute("3.5")
-
@functools.lru_cache() # Always return the same instance.
def __new__(cls):
Path(cls.texcache).mkdir(parents=True, exist_ok=True)
@@ -230,8 +225,7 @@ def _get_tex_source(cls, tex, fontsize):
r"% last line's baseline.",
rf"\fontsize{{{fontsize}}}{{{baselineskip}}}%",
r"\ifdefined\psfrag\else\hbox{}\fi%",
- rf"{{\obeylines{fontcmd} {tex}}}%",
- r"\special{matplotlibbaselinemarker}%",
+ rf"{{{fontcmd} {tex}}}%",
r"\end{document}",
])
@@ -291,12 +285,16 @@ def make_dvi(cls, tex, fontsize):
# and thus replace() works atomically. It also allows referring to
# the texfile with a relative path (for pathological MPLCONFIGDIRs,
# the absolute path may contain characters (e.g. ~) that TeX does
- # not support.)
- with TemporaryDirectory(dir=Path(dvifile).parent) as tmpdir:
+ # not support; n.b. relative paths cannot traverse parents, or it
+ # will be blocked when `openin_any = p` in texmf.cnf).
+ cwd = Path(dvifile).parent
+ with TemporaryDirectory(dir=cwd) as tmpdir:
+ tmppath = Path(tmpdir)
cls._run_checked_subprocess(
["latex", "-interaction=nonstopmode", "--halt-on-error",
- f"../{texfile.name}"], tex, cwd=tmpdir)
- (Path(tmpdir) / Path(dvifile).name).replace(dvifile)
+ f"--output-directory={tmppath.name}",
+ f"{texfile.name}"], tex, cwd=cwd)
+ (tmppath / Path(dvifile).name).replace(dvifile)
return dvifile
@classmethod
diff --git a/lib/matplotlib/text.py b/lib/matplotlib/text.py
index 3c998d540c48..1ef91cc7e1dc 100644
--- a/lib/matplotlib/text.py
+++ b/lib/matplotlib/text.py
@@ -124,6 +124,7 @@ class Text(Artist):
"""Handle storing and drawing of text in window or data coordinates."""
zorder = 3
+ _charsize_cache = dict()
def __repr__(self):
return "Text(%s, %s, %s)" % (self._x, self._y, repr(self._text))
@@ -164,6 +165,39 @@ def __init__(self,
super().__init__()
self._x, self._y = x, y
self._text = ''
+ self._reset_visual_defaults(
+ text=text,
+ color=color,
+ fontproperties=fontproperties,
+ usetex=usetex,
+ parse_math=parse_math,
+ wrap=wrap,
+ verticalalignment=verticalalignment,
+ horizontalalignment=horizontalalignment,
+ multialignment=multialignment,
+ rotation=rotation,
+ transform_rotates_text=transform_rotates_text,
+ linespacing=linespacing,
+ rotation_mode=rotation_mode,
+ )
+ self.update(kwargs)
+
+ def _reset_visual_defaults(
+ self,
+ text='',
+ color=None,
+ fontproperties=None,
+ usetex=None,
+ parse_math=None,
+ wrap=False,
+ verticalalignment='baseline',
+ horizontalalignment='left',
+ multialignment=None,
+ rotation=None,
+ transform_rotates_text=False,
+ linespacing=None,
+ rotation_mode=None,
+ ):
self.set_text(text)
self.set_color(
color if color is not None else mpl.rcParams["text.color"])
@@ -183,7 +217,6 @@ def __init__(self,
linespacing = 1.2 # Maybe use rcParam later.
self.set_linespacing(linespacing)
self.set_rotation_mode(rotation_mode)
- self.update(kwargs)
def update(self, kwargs):
# docstring inherited
@@ -247,6 +280,38 @@ def _get_multialignment(self):
else:
return self._horizontalalignment
+ def _char_index_at(self, x):
+ """
+ Calculate the index closest to the coordinate x in display space.
+
+ The position of text[index] is assumed to be the sum of the widths
+ of all preceding characters text[:index].
+
+ This works only on single line texts.
+ """
+ if not self._text:
+ return 0
+
+ text = self._text
+
+ fontproperties = str(self._fontproperties)
+ if fontproperties not in Text._charsize_cache:
+ Text._charsize_cache[fontproperties] = dict()
+
+ charsize_cache = Text._charsize_cache[fontproperties]
+ for char in set(text):
+ if char not in charsize_cache:
+ self.set_text(char)
+ bb = self.get_window_extent()
+ charsize_cache[char] = bb.x1 - bb.x0
+
+ self.set_text(text)
+ bb = self.get_window_extent()
+
+ size_accum = np.cumsum([0] + [charsize_cache[x] for x in text])
+ std_x = x - bb.x0
+ return (np.abs(size_accum - std_x)).argmin()
+
def get_rotation(self):
"""Return the text angle in degrees between 0 and 360."""
if self.get_transform_rotates_text():
@@ -302,8 +367,7 @@ def _get_layout(self, renderer):
of a rotated text when necessary.
"""
thisx, thisy = 0.0, 0.0
- text = self.get_text()
- lines = [text] if self.get_usetex() else text.split("\n") # Not empty.
+ lines = self.get_text().split("\n") # Ensures lines is not empty.
ws = []
hs = []
@@ -840,27 +904,6 @@ def get_position(self):
# specified with 'set_x' and 'set_y'.
return self._x, self._y
- # When removing, also remove the hash(color) check in set_color()
- @_api.deprecated("3.5")
- def get_prop_tup(self, renderer=None):
- """
- Return a hashable tuple of properties.
-
- Not intended to be human readable, but useful for backends who
- want to cache derived information about text (e.g., layouts) and
- need to know if the text has changed.
- """
- x, y = self.get_unitless_position()
- renderer = renderer or self._renderer
- return (x, y, self.get_text(), self._color,
- self._verticalalignment, self._horizontalalignment,
- hash(self._fontproperties),
- self._rotation, self._rotation_mode,
- self._transform_rotates_text,
- self.figure.dpi, weakref.ref(renderer),
- self._linespacing
- )
-
def get_text(self):
"""Return the text string."""
return self._text
@@ -951,12 +994,6 @@ def set_color(self, color):
# out at draw time for simplicity.
if not cbook._str_equal(color, "auto"):
mpl.colors._check_color_like(color=color)
- # Make sure it is hashable, or get_prop_tup will fail (remove this once
- # get_prop_tup is removed).
- try:
- hash(color)
- except TypeError:
- color = tuple(color)
self._color = color
self.stale = True
diff --git a/lib/matplotlib/ticker.py b/lib/matplotlib/ticker.py
index dfacdf4aead9..df03aa09e335 100644
--- a/lib/matplotlib/ticker.py
+++ b/lib/matplotlib/ticker.py
@@ -196,21 +196,6 @@ def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
- @_api.deprecated("3.5", alternative="`.Axis.set_view_interval`")
- def set_view_interval(self, vmin, vmax):
- self.axis.set_view_interval(vmin, vmax)
-
- @_api.deprecated("3.5", alternative="`.Axis.set_data_interval`")
- def set_data_interval(self, vmin, vmax):
- self.axis.set_data_interval(vmin, vmax)
-
- @_api.deprecated(
- "3.5",
- alternative="`.Axis.set_view_interval` and `.Axis.set_data_interval`")
- def set_bounds(self, vmin, vmax):
- self.set_view_interval(vmin, vmax)
- self.set_data_interval(vmin, vmax)
-
class Formatter(TickHelper):
"""
@@ -819,7 +804,7 @@ def _set_format(self):
else:
break
sigfigs += 1
- self.format = '%1.' + str(sigfigs) + 'f'
+ self.format = f'%1.{sigfigs}f'
if self._usetex or self._useMathText:
self.format = r'$\mathdefault{%s}$' % self.format
@@ -1722,10 +1707,10 @@ def tick_values(self, vmin, vmax):
class FixedLocator(Locator):
"""
- Tick locations are fixed. If nbins is not None,
- the array of possible positions will be subsampled to
- keep the number of ticks <= nbins +1.
- The subsampling will be done so as to include the smallest
+ Tick locations are fixed at *locs*. If *nbins* is not None,
+ the *locs* array of possible positions will be subsampled to
+ keep the number of ticks <= *nbins* +1.
+ The subsampling will be done to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
@@ -1789,14 +1774,21 @@ class LinearLocator(Locator):
Determine the tick locations
The first time this function is called it will try to set the
- number of ticks to make a nice tick partitioning. Thereafter the
+ number of ticks to make a nice tick partitioning. Thereafter, the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
- Use presets to set locs based on lom. A dict mapping vmin, vmax->locs
+ Parameters
+ ----------
+ numticks : int or None, default None
+ Number of ticks. If None, *numticks* = 11.
+ presets : dict or None, default: None
+ Dictionary mapping ``(vmin, vmax)`` to an array of locations.
+ Overrides *numticks* if there is an entry for the current
+ ``(vmin, vmax)``.
"""
self.numticks = numticks
if presets is None:
@@ -1862,7 +1854,8 @@ def view_limits(self, vmin, vmax):
class MultipleLocator(Locator):
"""
- Set a tick on each integer multiple of a base within the view interval.
+ Set a tick on each integer multiple of the *base* within the view
+ interval.
"""
def __init__(self, base=1.0):
@@ -1889,7 +1882,7 @@ def tick_values(self, vmin, vmax):
def view_limits(self, dmin, dmax):
"""
- Set the view limits to the nearest multiples of base that
+ Set the view limits to the nearest multiples of *base* that
contain the data.
"""
if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
@@ -1918,16 +1911,20 @@ def scale_range(vmin, vmax, n=1, threshold=100):
class _Edge_integer:
"""
- Helper for MaxNLocator, MultipleLocator, etc.
+ Helper for `.MaxNLocator`, `.MultipleLocator`, etc.
- Take floating point precision limitations into account when calculating
+ Take floating-point precision limitations into account when calculating
tick locations as integer multiples of a step.
"""
def __init__(self, step, offset):
"""
- *step* is a positive floating-point interval between ticks.
- *offset* is the offset subtracted from the data limits
- prior to calculating tick locations.
+ Parameters
+ ----------
+ step : float > 0
+ Interval between ticks.
+ offset : float
+ Offset subtracted from the data limits prior to calculating tick
+ locations.
"""
if step <= 0:
raise ValueError("'step' must be positive")
@@ -1961,8 +1958,8 @@ def ge(self, x):
class MaxNLocator(Locator):
"""
- Find nice tick locations with no more than N being within the view limits.
- Locations beyond the limits are added to support autoscaling.
+ Find nice tick locations with no more than *nbins* + 1 being within the
+ view limits. Locations beyond the limits are added to support autoscaling.
"""
default_params = dict(nbins=10,
steps=None,
diff --git a/lib/matplotlib/transforms.py b/lib/matplotlib/transforms.py
index 1471d4fe672d..1f3a180855b3 100644
--- a/lib/matplotlib/transforms.py
+++ b/lib/matplotlib/transforms.py
@@ -925,7 +925,7 @@ def update_from_data_y(self, y, ignore=None):
- When ``False``, include the existing bounds of the `Bbox`.
- When ``None``, use the last value passed to :meth:`ignore`.
"""
- y = np.array(y).ravel()
+ y = np.ravel(y)
self.update_from_data_xy(np.column_stack([np.ones(y.size), y]),
ignore=ignore, updatex=False)
diff --git a/lib/matplotlib/tri/__init__.py b/lib/matplotlib/tri/__init__.py
index 4185452c0123..e000831d8a08 100644
--- a/lib/matplotlib/tri/__init__.py
+++ b/lib/matplotlib/tri/__init__.py
@@ -2,15 +2,15 @@
Unstructured triangular grid functions.
"""
-from .triangulation import Triangulation
-from .tricontour import TriContourSet, tricontour, tricontourf
-from .trifinder import TriFinder, TrapezoidMapTriFinder
-from .triinterpolate import (TriInterpolator, LinearTriInterpolator,
- CubicTriInterpolator)
-from .tripcolor import tripcolor
-from .triplot import triplot
-from .trirefine import TriRefiner, UniformTriRefiner
-from .tritools import TriAnalyzer
+from ._triangulation import Triangulation
+from ._tricontour import TriContourSet, tricontour, tricontourf
+from ._trifinder import TriFinder, TrapezoidMapTriFinder
+from ._triinterpolate import (TriInterpolator, LinearTriInterpolator,
+ CubicTriInterpolator)
+from ._tripcolor import tripcolor
+from ._triplot import triplot
+from ._trirefine import TriRefiner, UniformTriRefiner
+from ._tritools import TriAnalyzer
__all__ = ["Triangulation",
diff --git a/lib/matplotlib/tri/_triangulation.py b/lib/matplotlib/tri/_triangulation.py
new file mode 100644
index 000000000000..c123cca4c9e0
--- /dev/null
+++ b/lib/matplotlib/tri/_triangulation.py
@@ -0,0 +1,240 @@
+import numpy as np
+
+from matplotlib import _api
+
+
+class Triangulation:
+ """
+ An unstructured triangular grid consisting of npoints points and
+ ntri triangles. The triangles can either be specified by the user
+ or automatically generated using a Delaunay triangulation.
+
+ Parameters
+ ----------
+ x, y : (npoints,) array-like
+ Coordinates of grid points.
+ triangles : (ntri, 3) array-like of int, optional
+ For each triangle, the indices of the three points that make
+ up the triangle, ordered in an anticlockwise manner. If not
+ specified, the Delaunay triangulation is calculated.
+ mask : (ntri,) array-like of bool, optional
+ Which triangles are masked out.
+
+ Attributes
+ ----------
+ triangles : (ntri, 3) array of int
+ For each triangle, the indices of the three points that make
+ up the triangle, ordered in an anticlockwise manner. If you want to
+ take the *mask* into account, use `get_masked_triangles` instead.
+ mask : (ntri,) array of bool
+ Masked out triangles.
+ is_delaunay : bool
+ Whether the Triangulation is a calculated Delaunay
+ triangulation (where *triangles* was not specified) or not.
+
+ Notes
+ -----
+ For a Triangulation to be valid it must not have duplicate points,
+ triangles formed from colinear points, or overlapping triangles.
+ """
+ def __init__(self, x, y, triangles=None, mask=None):
+ from matplotlib import _qhull
+
+ self.x = np.asarray(x, dtype=np.float64)
+ self.y = np.asarray(y, dtype=np.float64)
+ if self.x.shape != self.y.shape or self.x.ndim != 1:
+ raise ValueError("x and y must be equal-length 1D arrays, but "
+ f"found shapes {self.x.shape!r} and "
+ f"{self.y.shape!r}")
+
+ self.mask = None
+ self._edges = None
+ self._neighbors = None
+ self.is_delaunay = False
+
+ if triangles is None:
+ # No triangulation specified, so use matplotlib._qhull to obtain
+ # Delaunay triangulation.
+ self.triangles, self._neighbors = _qhull.delaunay(x, y)
+ self.is_delaunay = True
+ else:
+ # Triangulation specified. Copy, since we may correct triangle
+ # orientation.
+ try:
+ self.triangles = np.array(triangles, dtype=np.int32, order='C')
+ except ValueError as e:
+ raise ValueError('triangles must be a (N, 3) int array, not '
+ f'{triangles!r}') from e
+ if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
+ raise ValueError(
+ 'triangles must be a (N, 3) int array, but found shape '
+ f'{self.triangles.shape!r}')
+ if self.triangles.max() >= len(self.x):
+ raise ValueError(
+ 'triangles are indices into the points and must be in the '
+ f'range 0 <= i < {len(self.x)} but found value '
+ f'{self.triangles.max()}')
+ if self.triangles.min() < 0:
+ raise ValueError(
+ 'triangles are indices into the points and must be in the '
+ f'range 0 <= i < {len(self.x)} but found value '
+ f'{self.triangles.min()}')
+
+ # Underlying C++ object is not created until first needed.
+ self._cpp_triangulation = None
+
+ # Default TriFinder not created until needed.
+ self._trifinder = None
+
+ self.set_mask(mask)
+
+ def calculate_plane_coefficients(self, z):
+ """
+ Calculate plane equation coefficients for all unmasked triangles from
+ the point (x, y) coordinates and specified z-array of shape (npoints).
+ The returned array has shape (ntri, 3) and allows z-value at (x, y)
+ position in triangle tri to be calculated using
+ ``z = array[tri, 0] * x + array[tri, 1] * y + array[tri, 2]``.
+ """
+ return self.get_cpp_triangulation().calculate_plane_coefficients(z)
+
+ @property
+ def edges(self):
+ """
+ Return integer array of shape (nedges, 2) containing all edges of
+ non-masked triangles.
+
+ Each row defines an edge by its start point index and end point
+ index. Each edge appears only once, i.e. for an edge between points
+ *i* and *j*, there will only be either *(i, j)* or *(j, i)*.
+ """
+ if self._edges is None:
+ self._edges = self.get_cpp_triangulation().get_edges()
+ return self._edges
+
+ def get_cpp_triangulation(self):
+ """
+ Return the underlying C++ Triangulation object, creating it
+ if necessary.
+ """
+ from matplotlib import _tri
+ if self._cpp_triangulation is None:
+ self._cpp_triangulation = _tri.Triangulation(
+ self.x, self.y, self.triangles, self.mask, self._edges,
+ self._neighbors, not self.is_delaunay)
+ return self._cpp_triangulation
+
+ def get_masked_triangles(self):
+ """
+ Return an array of triangles taking the mask into account.
+ """
+ if self.mask is not None:
+ return self.triangles[~self.mask]
+ else:
+ return self.triangles
+
+ @staticmethod
+ def get_from_args_and_kwargs(*args, **kwargs):
+ """
+ Return a Triangulation object from the args and kwargs, and
+ the remaining args and kwargs with the consumed values removed.
+
+ There are two alternatives: either the first argument is a
+ Triangulation object, in which case it is returned, or the args
+ and kwargs are sufficient to create a new Triangulation to
+ return. In the latter case, see Triangulation.__init__ for
+ the possible args and kwargs.
+ """
+ if isinstance(args[0], Triangulation):
+ triangulation, *args = args
+ if 'triangles' in kwargs:
+ _api.warn_external(
+ "Passing the keyword 'triangles' has no effect when also "
+ "passing a Triangulation")
+ if 'mask' in kwargs:
+ _api.warn_external(
+ "Passing the keyword 'mask' has no effect when also "
+ "passing a Triangulation")
+ else:
+ x, y, triangles, mask, args, kwargs = \
+ Triangulation._extract_triangulation_params(args, kwargs)
+ triangulation = Triangulation(x, y, triangles, mask)
+ return triangulation, args, kwargs
+
+ @staticmethod
+ def _extract_triangulation_params(args, kwargs):
+ x, y, *args = args
+ # Check triangles in kwargs then args.
+ triangles = kwargs.pop('triangles', None)
+ from_args = False
+ if triangles is None and args:
+ triangles = args[0]
+ from_args = True
+ if triangles is not None:
+ try:
+ triangles = np.asarray(triangles, dtype=np.int32)
+ except ValueError:
+ triangles = None
+ if triangles is not None and (triangles.ndim != 2 or
+ triangles.shape[1] != 3):
+ triangles = None
+ if triangles is not None and from_args:
+ args = args[1:] # Consumed first item in args.
+ # Check for mask in kwargs.
+ mask = kwargs.pop('mask', None)
+ return x, y, triangles, mask, args, kwargs
+
+ def get_trifinder(self):
+ """
+ Return the default `matplotlib.tri.TriFinder` of this
+ triangulation, creating it if necessary. This allows the same
+ TriFinder object to be easily shared.
+ """
+ if self._trifinder is None:
+ # Default TriFinder class.
+ from matplotlib.tri._trifinder import TrapezoidMapTriFinder
+ self._trifinder = TrapezoidMapTriFinder(self)
+ return self._trifinder
+
+ @property
+ def neighbors(self):
+ """
+ Return integer array of shape (ntri, 3) containing neighbor triangles.
+
+ For each triangle, the indices of the three triangles that
+ share the same edges, or -1 if there is no such neighboring
+ triangle. ``neighbors[i, j]`` is the triangle that is the neighbor
+ to the edge from point index ``triangles[i, j]`` to point index
+ ``triangles[i, (j+1)%3]``.
+ """
+ if self._neighbors is None:
+ self._neighbors = self.get_cpp_triangulation().get_neighbors()
+ return self._neighbors
+
+ def set_mask(self, mask):
+ """
+ Set or clear the mask array.
+
+ Parameters
+ ----------
+ mask : None or bool array of length ntri
+ """
+ if mask is None:
+ self.mask = None
+ else:
+ self.mask = np.asarray(mask, dtype=bool)
+ if self.mask.shape != (self.triangles.shape[0],):
+ raise ValueError('mask array must have same length as '
+ 'triangles array')
+
+ # Set mask in C++ Triangulation.
+ if self._cpp_triangulation is not None:
+ self._cpp_triangulation.set_mask(self.mask)
+
+ # Clear derived fields so they are recalculated when needed.
+ self._edges = None
+ self._neighbors = None
+
+ # Recalculate TriFinder if it exists.
+ if self._trifinder is not None:
+ self._trifinder._initialize()
diff --git a/lib/matplotlib/tri/_tricontour.py b/lib/matplotlib/tri/_tricontour.py
new file mode 100644
index 000000000000..7d128ce0a496
--- /dev/null
+++ b/lib/matplotlib/tri/_tricontour.py
@@ -0,0 +1,271 @@
+import numpy as np
+
+from matplotlib import _docstring
+from matplotlib.contour import ContourSet
+from matplotlib.tri._triangulation import Triangulation
+
+
+@_docstring.dedent_interpd
+class TriContourSet(ContourSet):
+ """
+ Create and store a set of contour lines or filled regions for
+ a triangular grid.
+
+ This class is typically not instantiated directly by the user but by
+ `~.Axes.tricontour` and `~.Axes.tricontourf`.
+
+ %(contour_set_attributes)s
+ """
+ def __init__(self, ax, *args, **kwargs):
+ """
+ Draw triangular grid contour lines or filled regions,
+ depending on whether keyword arg *filled* is False
+ (default) or True.
+
+ The first argument of the initializer must be an `~.axes.Axes`
+ object. The remaining arguments and keyword arguments
+ are described in the docstring of `~.Axes.tricontour`.
+ """
+ super().__init__(ax, *args, **kwargs)
+
+ def _process_args(self, *args, **kwargs):
+ """
+ Process args and kwargs.
+ """
+ if isinstance(args[0], TriContourSet):
+ C = args[0]._contour_generator
+ if self.levels is None:
+ self.levels = args[0].levels
+ self.zmin = args[0].zmin
+ self.zmax = args[0].zmax
+ self._mins = args[0]._mins
+ self._maxs = args[0]._maxs
+ else:
+ from matplotlib import _tri
+ tri, z = self._contour_args(args, kwargs)
+ C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
+ self._mins = [tri.x.min(), tri.y.min()]
+ self._maxs = [tri.x.max(), tri.y.max()]
+
+ self._contour_generator = C
+ return kwargs
+
+ def _contour_args(self, args, kwargs):
+ tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
+ **kwargs)
+ z = np.ma.asarray(args[0])
+ if z.shape != tri.x.shape:
+ raise ValueError('z array must have same length as triangulation x'
+ ' and y arrays')
+
+ # z values must be finite, only need to check points that are included
+ # in the triangulation.
+ z_check = z[np.unique(tri.get_masked_triangles())]
+ if np.ma.is_masked(z_check):
+ raise ValueError('z must not contain masked points within the '
+ 'triangulation')
+ if not np.isfinite(z_check).all():
+ raise ValueError('z array must not contain non-finite values '
+ 'within the triangulation')
+
+ z = np.ma.masked_invalid(z, copy=False)
+ self.zmax = float(z_check.max())
+ self.zmin = float(z_check.min())
+ if self.logscale and self.zmin <= 0:
+ func = 'contourf' if self.filled else 'contour'
+ raise ValueError(f'Cannot {func} log of negative values.')
+ self._process_contour_level_args(args[1:])
+ return (tri, z)
+
+
+_docstring.interpd.update(_tricontour_doc="""
+Draw contour %%(type)s on an unstructured triangular grid.
+
+Call signatures::
+
+ %%(func)s(triangulation, z, [levels], ...)
+ %%(func)s(x, y, z, [levels], *, [triangles=triangles], [mask=mask], ...)
+
+The triangular grid can be specified either by passing a `.Triangulation`
+object as the first parameter, or by passing the points *x*, *y* and
+optionally the *triangles* and a *mask*. See `.Triangulation` for an
+explanation of these parameters. If neither *triangulation* nor
+*triangles* is given, the triangulation is calculated on the fly.
+
+It is possible to pass *triangles* positionally, i.e.
+``%%(func)s(x, y, triangles, z, ...)``. However, this is discouraged. For more
+clarity, pass *triangles* via keyword argument.
+
+Parameters
+----------
+triangulation : `.Triangulation`, optional
+ An already created triangular grid.
+
+x, y, triangles, mask
+ Parameters defining the triangular grid. See `.Triangulation`.
+ This is mutually exclusive with specifying *triangulation*.
+
+z : array-like
+ The height values over which the contour is drawn. Color-mapping is
+ controlled by *cmap*, *norm*, *vmin*, and *vmax*.
+
+ .. note::
+ All values in *z* must be finite. Hence, nan and inf values must
+ either be removed or `~.Triangulation.set_mask` be used.
+
+levels : int or array-like, optional
+ Determines the number and positions of the contour lines / regions.
+
+ If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries to
+ automatically choose no more than *n+1* "nice" contour levels
+ between minimum and maximum numeric values of *Z*.
+
+ If array-like, draw contour lines at the specified levels. The values must
+ be in increasing order.
+
+Returns
+-------
+`~matplotlib.tri.TriContourSet`
+
+Other Parameters
+----------------
+colors : color string or sequence of colors, optional
+ The colors of the levels, i.e., the contour %%(type)s.
+
+ The sequence is cycled for the levels in ascending order. If the sequence
+ is shorter than the number of levels, it is repeated.
+
+ As a shortcut, single color strings may be used in place of one-element
+ lists, i.e. ``'red'`` instead of ``['red']`` to color all levels with the
+ same color. This shortcut does only work for color strings, not for other
+ ways of specifying colors.
+
+ By default (value *None*), the colormap specified by *cmap* will be used.
+
+alpha : float, default: 1
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+%(cmap_doc)s
+
+ This parameter is ignored if *colors* is set.
+
+%(norm_doc)s
+
+ This parameter is ignored if *colors* is set.
+
+%(vmin_vmax_doc)s
+
+ If *vmin* or *vmax* are not given, the default color scaling is based on
+ *levels*.
+
+ This parameter is ignored if *colors* is set.
+
+origin : {*None*, 'upper', 'lower', 'image'}, default: None
+ Determines the orientation and exact position of *z* by specifying the
+ position of ``z[0, 0]``. This is only relevant, if *X*, *Y* are not given.
+
+ - *None*: ``z[0, 0]`` is at X=0, Y=0 in the lower left corner.
+ - 'lower': ``z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.
+ - 'upper': ``z[0, 0]`` is at X=N+0.5, Y=0.5 in the upper left corner.
+ - 'image': Use the value from :rc:`image.origin`.
+
+extent : (x0, x1, y0, y1), optional
+ If *origin* is not *None*, then *extent* is interpreted as in `.imshow`: it
+ gives the outer pixel boundaries. In this case, the position of z[0, 0] is
+ the center of the pixel, not a corner. If *origin* is *None*, then
+ (*x0*, *y0*) is the position of z[0, 0], and (*x1*, *y1*) is the position
+ of z[-1, -1].
+
+ This argument is ignored if *X* and *Y* are specified in the call to
+ contour.
+
+locator : ticker.Locator subclass, optional
+ The locator is used to determine the contour levels if they are not given
+ explicitly via *levels*.
+ Defaults to `~.ticker.MaxNLocator`.
+
+extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
+ Determines the ``%%(func)s``-coloring of values that are outside the
+ *levels* range.
+
+ If 'neither', values outside the *levels* range are not colored. If 'min',
+ 'max' or 'both', color the values below, above or below and above the
+ *levels* range.
+
+ Values below ``min(levels)`` and above ``max(levels)`` are mapped to the
+ under/over values of the `.Colormap`. Note that most colormaps do not have
+ dedicated colors for these by default, so that the over and under values
+ are the edge values of the colormap. You may want to set these values
+ explicitly using `.Colormap.set_under` and `.Colormap.set_over`.
+
+ .. note::
+
+ An existing `.TriContourSet` does not get notified if properties of its
+ colormap are changed. Therefore, an explicit call to
+ `.ContourSet.changed()` is needed after modifying the colormap. The
+ explicit call can be left out, if a colorbar is assigned to the
+ `.TriContourSet` because it internally calls `.ContourSet.changed()`.
+
+xunits, yunits : registered units, optional
+ Override axis units by specifying an instance of a
+ :class:`matplotlib.units.ConversionInterface`.
+
+antialiased : bool, optional
+ Enable antialiasing, overriding the defaults. For
+ filled contours, the default is *True*. For line contours,
+ it is taken from :rc:`lines.antialiased`.""" % _docstring.interpd.params)
+
+
+@_docstring.Substitution(func='tricontour', type='lines')
+@_docstring.dedent_interpd
+def tricontour(ax, *args, **kwargs):
+ """
+ %(_tricontour_doc)s
+
+ linewidths : float or array-like, default: :rc:`contour.linewidth`
+ The line width of the contour lines.
+
+ If a number, all levels will be plotted with this linewidth.
+
+ If a sequence, the levels in ascending order will be plotted with
+ the linewidths in the order specified.
+
+ If None, this falls back to :rc:`lines.linewidth`.
+
+ linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional
+ If *linestyles* is *None*, the default is 'solid' unless the lines are
+ monochrome. In that case, negative contours will take their linestyle
+ from :rc:`contour.negative_linestyle` setting.
+
+ *linestyles* can also be an iterable of the above strings specifying a
+ set of linestyles to be used. If this iterable is shorter than the
+ number of contour levels it will be repeated as necessary.
+ """
+ kwargs['filled'] = False
+ return TriContourSet(ax, *args, **kwargs)
+
+
+@_docstring.Substitution(func='tricontourf', type='regions')
+@_docstring.dedent_interpd
+def tricontourf(ax, *args, **kwargs):
+ """
+ %(_tricontour_doc)s
+
+ hatches : list[str], optional
+ A list of cross hatch patterns to use on the filled areas.
+ If None, no hatching will be added to the contour.
+ Hatching is supported in the PostScript, PDF, SVG and Agg
+ backends only.
+
+ Notes
+ -----
+ `.tricontourf` fills intervals that are closed at the top; that is, for
+ boundaries *z1* and *z2*, the filled region is::
+
+ z1 < Z <= z2
+
+ except for the lowest interval, which is closed on both sides (i.e. it
+ includes the lowest value).
+ """
+ kwargs['filled'] = True
+ return TriContourSet(ax, *args, **kwargs)
diff --git a/lib/matplotlib/tri/_trifinder.py b/lib/matplotlib/tri/_trifinder.py
new file mode 100644
index 000000000000..e06b84c0d974
--- /dev/null
+++ b/lib/matplotlib/tri/_trifinder.py
@@ -0,0 +1,93 @@
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.tri import Triangulation
+
+
+class TriFinder:
+ """
+ Abstract base class for classes used to find the triangles of a
+ Triangulation in which (x, y) points lie.
+
+ Rather than instantiate an object of a class derived from TriFinder, it is
+ usually better to use the function `.Triangulation.get_trifinder`.
+
+ Derived classes implement __call__(x, y) where x and y are array-like point
+ coordinates of the same shape.
+ """
+
+ def __init__(self, triangulation):
+ _api.check_isinstance(Triangulation, triangulation=triangulation)
+ self._triangulation = triangulation
+
+
+class TrapezoidMapTriFinder(TriFinder):
+ """
+ `~matplotlib.tri.TriFinder` class implemented using the trapezoid
+ map algorithm from the book "Computational Geometry, Algorithms and
+ Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
+ and O. Schwarzkopf.
+
+ The triangulation must be valid, i.e. it must not have duplicate points,
+ triangles formed from colinear points, or overlapping triangles. The
+ algorithm has some tolerance to triangles formed from colinear points, but
+ this should not be relied upon.
+ """
+
+ def __init__(self, triangulation):
+ from matplotlib import _tri
+ super().__init__(triangulation)
+ self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
+ triangulation.get_cpp_triangulation())
+ self._initialize()
+
+ def __call__(self, x, y):
+ """
+ Return an array containing the indices of the triangles in which the
+ specified *x*, *y* points lie, or -1 for points that do not lie within
+ a triangle.
+
+ *x*, *y* are array-like x and y coordinates of the same shape and any
+ number of dimensions.
+
+ Returns integer array with the same shape as *x* and *y*.
+ """
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ if x.shape != y.shape:
+ raise ValueError("x and y must be array-like with the same shape")
+
+ # C++ does the heavy lifting, and expects 1D arrays.
+ indices = (self._cpp_trifinder.find_many(x.ravel(), y.ravel())
+ .reshape(x.shape))
+ return indices
+
+ def _get_tree_stats(self):
+ """
+ Return a python list containing the statistics about the node tree:
+ 0: number of nodes (tree size)
+ 1: number of unique nodes
+ 2: number of trapezoids (tree leaf nodes)
+ 3: number of unique trapezoids
+ 4: maximum parent count (max number of times a node is repeated in
+ tree)
+ 5: maximum depth of tree (one more than the maximum number of
+ comparisons needed to search through the tree)
+ 6: mean of all trapezoid depths (one more than the average number
+ of comparisons needed to search through the tree)
+ """
+ return self._cpp_trifinder.get_tree_stats()
+
+ def _initialize(self):
+ """
+ Initialize the underlying C++ object. Can be called multiple times if,
+ for example, the triangulation is modified.
+ """
+ self._cpp_trifinder.initialize()
+
+ def _print_tree(self):
+ """
+ Print a text representation of the node tree, which is useful for
+ debugging purposes.
+ """
+ self._cpp_trifinder.print_tree()
diff --git a/lib/matplotlib/tri/_triinterpolate.py b/lib/matplotlib/tri/_triinterpolate.py
new file mode 100644
index 000000000000..a4d294ad1719
--- /dev/null
+++ b/lib/matplotlib/tri/_triinterpolate.py
@@ -0,0 +1,1574 @@
+"""
+Interpolation inside triangular grids.
+"""
+
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.tri import Triangulation
+from matplotlib.tri._trifinder import TriFinder
+from matplotlib.tri._tritools import TriAnalyzer
+
+__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
+
+
+class TriInterpolator:
+ """
+ Abstract base class for classes used to interpolate on a triangular grid.
+
+ Derived classes implement the following methods:
+
+ - ``__call__(x, y)``,
+ where x, y are array-like point coordinates of the same shape, and
+ that returns a masked array of the same shape containing the
+ interpolated z-values.
+
+ - ``gradient(x, y)``,
+ where x, y are array-like point coordinates of the same
+ shape, and that returns a list of 2 masked arrays of the same shape
+ containing the 2 derivatives of the interpolator (derivatives of
+ interpolated z values with respect to x and y).
+ """
+
+ def __init__(self, triangulation, z, trifinder=None):
+ _api.check_isinstance(Triangulation, triangulation=triangulation)
+ self._triangulation = triangulation
+
+ self._z = np.asarray(z)
+ if self._z.shape != self._triangulation.x.shape:
+ raise ValueError("z array must have same length as triangulation x"
+ " and y arrays")
+
+ _api.check_isinstance((TriFinder, None), trifinder=trifinder)
+ self._trifinder = trifinder or self._triangulation.get_trifinder()
+
+ # Default scaling factors : 1.0 (= no scaling)
+ # Scaling may be used for interpolations for which the order of
+ # magnitude of x, y has an impact on the interpolant definition.
+ # Please refer to :meth:`_interpolate_multikeys` for details.
+ self._unit_x = 1.0
+ self._unit_y = 1.0
+
+ # Default triangle renumbering: None (= no renumbering)
+ # Renumbering may be used to avoid unnecessary computations
+ # if complex calculations are done inside the Interpolator.
+ # Please refer to :meth:`_interpolate_multikeys` for details.
+ self._tri_renum = None
+
+ # __call__ and gradient docstrings are shared by all subclasses
+ # (except, if needed, relevant additions).
+ # However these methods are only implemented in subclasses to avoid
+ # confusion in the documentation.
+ _docstring__call__ = """
+ Returns a masked array containing interpolated values at the specified
+ (x, y) points.
+
+ Parameters
+ ----------
+ x, y : array-like
+ x and y coordinates of the same shape and any number of
+ dimensions.
+
+ Returns
+ -------
+ np.ma.array
+ Masked array of the same shape as *x* and *y*; values corresponding
+ to (*x*, *y*) points outside of the triangulation are masked out.
+
+ """
+
+ _docstringgradient = r"""
+ Returns a list of 2 masked arrays containing interpolated derivatives
+ at the specified (x, y) points.
+
+ Parameters
+ ----------
+ x, y : array-like
+ x and y coordinates of the same shape and any number of
+ dimensions.
+
+ Returns
+ -------
+ dzdx, dzdy : np.ma.array
+ 2 masked arrays of the same shape as *x* and *y*; values
+ corresponding to (x, y) points outside of the triangulation
+ are masked out.
+ The first returned array contains the values of
+ :math:`\frac{\partial z}{\partial x}` and the second those of
+ :math:`\frac{\partial z}{\partial y}`.
+
+ """
+
+ def _interpolate_multikeys(self, x, y, tri_index=None,
+ return_keys=('z',)):
+ """
+ Versatile (private) method defined for all TriInterpolators.
+
+ :meth:`_interpolate_multikeys` is a wrapper around method
+ :meth:`_interpolate_single_key` (to be defined in the child
+ subclasses).
+ :meth:`_interpolate_single_key` actually performs the interpolation,
+ but only for 1-dimensional inputs and at valid locations (inside
+ unmasked triangles of the triangulation).
+
+ The purpose of :meth:`_interpolate_multikeys` is to implement the
+ following common tasks needed in all subclasses implementations:
+
+ - calculation of containing triangles
+ - dealing with more than one interpolation request at the same
+ location (e.g., if the 2 derivatives are requested, it is
+ unnecessary to compute the containing triangles twice)
+ - scaling according to self._unit_x, self._unit_y
+ - dealing with points outside of the grid (with fill value np.nan)
+ - dealing with multi-dimensional *x*, *y* arrays: flattening for
+ :meth:`_interpolate_params` call and final reshaping.
+
+ (Note that np.vectorize could do most of those things very well for
+ you, but it does it by function evaluations over successive tuples of
+ the input arrays. Therefore, this tends to be more time consuming than
+ using optimized numpy functions - e.g., np.dot - which can be used
+ easily on the flattened inputs, in the child-subclass methods
+ :meth:`_interpolate_single_key`.)
+
+ It is guaranteed that the calls to :meth:`_interpolate_single_key`
+ will be done with flattened (1-d) array-like input parameters *x*, *y*
+ and with flattened, valid `tri_index` arrays (no -1 index allowed).
+
+ Parameters
+ ----------
+ x, y : array-like
+ x and y coordinates where interpolated values are requested.
+ tri_index : array-like of int, optional
+ Array of the containing triangle indices, same shape as
+ *x* and *y*. Defaults to None. If None, these indices
+ will be computed by a TriFinder instance.
+ (Note: For point outside the grid, tri_index[ipt] shall be -1).
+ return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
+ Defines the interpolation arrays to return, and in which order.
+
+ Returns
+ -------
+ list of arrays
+ Each array-like contains the expected interpolated values in the
+ order defined by *return_keys* parameter.
+ """
+ # Flattening and rescaling inputs arrays x, y
+ # (initial shape is stored for output)
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ sh_ret = x.shape
+ if x.shape != y.shape:
+ raise ValueError("x and y shall have same shapes."
+ " Given: {0} and {1}".format(x.shape, y.shape))
+ x = np.ravel(x)
+ y = np.ravel(y)
+ x_scaled = x/self._unit_x
+ y_scaled = y/self._unit_y
+ size_ret = np.size(x_scaled)
+
+ # Computes & ravels the element indexes, extract the valid ones.
+ if tri_index is None:
+ tri_index = self._trifinder(x, y)
+ else:
+ if tri_index.shape != sh_ret:
+ raise ValueError(
+ "tri_index array is provided and shall"
+ " have same shape as x and y. Given: "
+ "{0} and {1}".format(tri_index.shape, sh_ret))
+ tri_index = np.ravel(tri_index)
+
+ mask_in = (tri_index != -1)
+ if self._tri_renum is None:
+ valid_tri_index = tri_index[mask_in]
+ else:
+ valid_tri_index = self._tri_renum[tri_index[mask_in]]
+ valid_x = x_scaled[mask_in]
+ valid_y = y_scaled[mask_in]
+
+ ret = []
+ for return_key in return_keys:
+ # Find the return index associated with the key.
+ try:
+ return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
+ except KeyError as err:
+ raise ValueError("return_keys items shall take values in"
+ " {'z', 'dzdx', 'dzdy'}") from err
+
+ # Sets the scale factor for f & df components
+ scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
+
+ # Computes the interpolation
+ ret_loc = np.empty(size_ret, dtype=np.float64)
+ ret_loc[~mask_in] = np.nan
+ ret_loc[mask_in] = self._interpolate_single_key(
+ return_key, valid_tri_index, valid_x, valid_y) * scale
+ ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
+
+ return ret
+
+ def _interpolate_single_key(self, return_key, tri_index, x, y):
+ """
+ Interpolate at points belonging to the triangulation
+ (inside an unmasked triangle).
+
+ Parameters
+ ----------
+ return_key : {'z', 'dzdx', 'dzdy'}
+ The requested values (z or its derivatives).
+ tri_index : 1D int array
+ Valid triangle index (cannot be -1).
+ x, y : 1D arrays, same shape as `tri_index`
+ Valid locations where interpolation is requested.
+
+ Returns
+ -------
+ 1-d array
+ Returned array of the same size as *tri_index*
+ """
+ raise NotImplementedError("TriInterpolator subclasses " +
+ "should implement _interpolate_single_key!")
+
+
+class LinearTriInterpolator(TriInterpolator):
+ """
+ Linear interpolator on a triangular grid.
+
+ Each triangle is represented by a plane so that an interpolated value at
+ point (x, y) lies on the plane of the triangle containing (x, y).
+ Interpolated values are therefore continuous across the triangulation, but
+ their first derivatives are discontinuous at edges between triangles.
+
+ Parameters
+ ----------
+ triangulation : `~matplotlib.tri.Triangulation`
+ The triangulation to interpolate over.
+ z : (npoints,) array-like
+ Array of values, defined at grid points, to interpolate between.
+ trifinder : `~matplotlib.tri.TriFinder`, optional
+ If this is not specified, the Triangulation's default TriFinder will
+ be used by calling `.Triangulation.get_trifinder`.
+
+ Methods
+ -------
+ `__call__` (x, y) : Returns interpolated values at (x, y) points.
+ `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
+
+ """
+ def __init__(self, triangulation, z, trifinder=None):
+ super().__init__(triangulation, z, trifinder)
+
+ # Store plane coefficients for fast interpolation calculations.
+ self._plane_coefficients = \
+ self._triangulation.calculate_plane_coefficients(self._z)
+
+ def __call__(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('z',))[0]
+ __call__.__doc__ = TriInterpolator._docstring__call__
+
+ def gradient(self, x, y):
+ return self._interpolate_multikeys(x, y, tri_index=None,
+ return_keys=('dzdx', 'dzdy'))
+ gradient.__doc__ = TriInterpolator._docstringgradient
+
+ def _interpolate_single_key(self, return_key, tri_index, x, y):
+ _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
+ if return_key == 'z':
+ return (self._plane_coefficients[tri_index, 0]*x +
+ self._plane_coefficients[tri_index, 1]*y +
+ self._plane_coefficients[tri_index, 2])
+ elif return_key == 'dzdx':
+ return self._plane_coefficients[tri_index, 0]
+ else: # 'dzdy'
+ return self._plane_coefficients[tri_index, 1]
+
+
+class CubicTriInterpolator(TriInterpolator):
+    r"""
+    Cubic interpolator on a triangular grid.
+
+    In one-dimension - on a segment - a cubic interpolating function is
+    defined by the values of the function and its derivative at both ends.
+    This is almost the same in 2D inside a triangle, except that the values
+    of the function and its 2 derivatives have to be defined at each triangle
+    node.
+
+    The CubicTriInterpolator takes the value of the function at each node -
+    provided by the user - and internally computes the value of the
+    derivatives, resulting in a smooth interpolation.
+    (As a special feature, the user can also impose the value of the
+    derivatives at each node, but this is not supposed to be the common
+    usage.)
+
+    Parameters
+    ----------
+    triangulation : `~matplotlib.tri.Triangulation`
+        The triangulation to interpolate over.
+    z : (npoints,) array-like
+        Array of values, defined at grid points, to interpolate between.
+    kind : {'min_E', 'geom', 'user'}, optional
+        Choice of the smoothing algorithm, in order to compute
+        the interpolant derivatives (defaults to 'min_E'):
+
+        - if 'min_E': (default) The derivatives at each node are computed
+          to minimize a bending energy.
+        - if 'geom': The derivatives at each node are computed as a
+          weighted average of relevant triangle normals. To be used for
+          speed optimization (large grids).
+        - if 'user': The user provides the argument *dz*, no computation
+          is hence needed.
+
+    trifinder : `~matplotlib.tri.TriFinder`, optional
+        If not specified, the Triangulation's default TriFinder will
+        be used by calling `.Triangulation.get_trifinder`.
+    dz : tuple of array-likes (dzdx, dzdy), optional
+        Used only if *kind* ='user'. In this case *dz* must be provided as
+        (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
+        are the interpolant first derivatives at the *triangulation* points.
+
+    Methods
+    -------
+    `__call__` (x, y) : Returns interpolated values at (x, y) points.
+    `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
+
+    Notes
+    -----
+    This note is a bit technical and details how the cubic interpolation is
+    computed.
+
+    The interpolation is based on a Clough-Tocher subdivision scheme of
+    the *triangulation* mesh (to make it clearer, each triangle of the
+    grid will be divided in 3 child-triangles, and on each child triangle
+    the interpolated function is a cubic polynomial of the 2 coordinates).
+    This technique originates from FEM (Finite Element Method) analysis;
+    the element used is a reduced Hsieh-Clough-Tocher (HCT)
+    element. Its shape functions are described in [1]_.
+    The assembled function is guaranteed to be C1-smooth, i.e. it is
+    continuous and its first derivatives are also continuous (this
+    is easy to show inside the triangles but is also true when crossing the
+    edges).
+
+    In the default case (*kind* ='min_E'), the interpolant minimizes a
+    curvature energy on the functional space generated by the HCT element
+    shape functions - with imposed values but arbitrary derivatives at each
+    node. The minimized functional is the integral of the so-called total
+    curvature (implementation based on an algorithm from [2]_ - PCG sparse
+    solver):
+
+    .. math::
+
+        E(z) = \frac{1}{2} \int_{\Omega} \left(
+            \left( \frac{\partial^2{z}}{\partial{x}^2} \right)^2 +
+            \left( \frac{\partial^2{z}}{\partial{y}^2} \right)^2 +
+            2\left( \frac{\partial^2{z}}{\partial{y}\partial{x}} \right)^2
+        \right) dx\,dy
+
+    If the case *kind* ='geom' is chosen by the user, a simple geometric
+    approximation is used (weighted average of the triangle normal
+    vectors), which could improve speed on very large grids.
+
+    References
+    ----------
+    .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
+        Hsieh-Clough-Tocher triangles, complete or reduced.",
+        International Journal for Numerical Methods in Engineering,
+        17(5):784 - 789, 1981.
+    .. [2] C.T. Kelley, "Iterative Methods for Optimization".
+
+    """
+    def __init__(self, triangulation, z, kind='min_E', trifinder=None,
+                 dz=None):
+        super().__init__(triangulation, z, trifinder)
+
+        # Loads the underlying C++ _triangulation.
+        # (During loading, reordering of triangulation._triangles may occur so
+        # that all final triangles are now anti-clockwise)
+        self._triangulation.get_cpp_triangulation()
+
+        # To build the stiffness matrix and avoid zero-energy spurious modes
+        # we will only store internally the valid (unmasked) triangles and
+        # the necessary (used) points coordinates.
+        # 2 renumbering tables need to be computed and stored:
+        # - a triangle renum table in order to translate the result from a
+        #   TriFinder instance into the internal stored triangle number.
+        # - a node renum table to overwrite the self._z values into the new
+        #   (used) node numbering.
+        tri_analyzer = TriAnalyzer(self._triangulation)
+        (compressed_triangles, compressed_x, compressed_y, tri_renum,
+         node_renum) = tri_analyzer._get_compressed_triangulation()
+        self._triangles = compressed_triangles
+        self._tri_renum = tri_renum
+        # Taking into account the node renumbering in self._z:
+        valid_node = (node_renum != -1)
+        self._z[node_renum[valid_node]] = self._z[valid_node]
+
+        # Computing scale factors
+        self._unit_x = np.ptp(compressed_x)
+        self._unit_y = np.ptp(compressed_y)
+        self._pts = np.column_stack([compressed_x / self._unit_x,
+                                     compressed_y / self._unit_y])
+        # Computing triangle points
+        self._tris_pts = self._pts[self._triangles]
+        # Computing eccentricities
+        self._eccs = self._compute_tri_eccentricities(self._tris_pts)
+        # Computing dof estimations for HCT triangle shape function
+        _api.check_in_list(['user', 'geom', 'min_E'], kind=kind)
+        self._dof = self._compute_dof(kind, dz=dz)
+        # Loading HCT element
+        self._ReferenceElement = _ReducedHCT_Element()
+
+    def __call__(self, x, y):
+        return self._interpolate_multikeys(x, y, tri_index=None,
+                                           return_keys=('z',))[0]
+    __call__.__doc__ = TriInterpolator._docstring__call__
+
+    def gradient(self, x, y):
+        return self._interpolate_multikeys(x, y, tri_index=None,
+                                           return_keys=('dzdx', 'dzdy'))
+    gradient.__doc__ = TriInterpolator._docstringgradient
+
+    def _interpolate_single_key(self, return_key, tri_index, x, y):
+        _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
+        tris_pts = self._tris_pts[tri_index]
+        alpha = self._get_alpha_vec(x, y, tris_pts)
+        ecc = self._eccs[tri_index]
+        dof = np.expand_dims(self._dof[tri_index], axis=1)
+        if return_key == 'z':
+            return self._ReferenceElement.get_function_values(
+                alpha, ecc, dof)
+        else:  # 'dzdx', 'dzdy'
+            J = self._get_jacobian(tris_pts)
+            dzdx = self._ReferenceElement.get_function_derivatives(
+                alpha, J, ecc, dof)
+            if return_key == 'dzdx':
+                return dzdx[:, 0, 0]
+            else:
+                return dzdx[:, 1, 0]
+
+    def _compute_dof(self, kind, dz=None):
+        """
+        Compute and return nodal dofs according to kind.
+
+        Parameters
+        ----------
+        kind : {'min_E', 'geom', 'user'}
+            Choice of the _DOF_estimator subclass to estimate the gradient.
+        dz : tuple of array-likes (dzdx, dzdy), optional
+            Used only if *kind*=user; in this case passed to the
+            :class:`_DOF_estimator_user`.
+
+        Returns
+        -------
+        array-like, shape (npts, 2)
+            Estimation of the gradient at triangulation nodes (stored as
+            degree of freedoms of reduced-HCT triangle elements).
+        """
+        if kind == 'user':
+            if dz is None:
+                raise ValueError("For a CubicTriInterpolator with "
+                                 "*kind*='user', a valid *dz* "
+                                 "argument is expected.")
+            TE = _DOF_estimator_user(self, dz=dz)
+        elif kind == 'geom':
+            TE = _DOF_estimator_geom(self)
+        else:  # 'min_E', checked in __init__
+            TE = _DOF_estimator_min_E(self)
+        return TE.compute_dof_from_df()
+
+    @staticmethod
+    def _get_alpha_vec(x, y, tris_pts):
+        """
+        Fast (vectorized) function to compute barycentric coordinates alpha.
+
+        Parameters
+        ----------
+        x, y : array-like of dim 1 (shape (nx,))
+            Coordinates of the points whose points barycentric coordinates are
+            requested.
+        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
+            Coordinates of the containing triangles apexes.
+
+        Returns
+        -------
+        array of dim 2 (shape (nx, 3))
+            Barycentric coordinates of the points inside the containing
+            triangles.
+        """
+        ndim = tris_pts.ndim-2
+
+        a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
+        b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
+        abT = np.stack([a, b], axis=-1)
+        ab = _transpose_vectorized(abT)
+        OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]
+
+        metric = ab @ abT
+        # Here we try to deal with the colinear cases.
+        # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
+        # meaning that we will still return a set of valid barycentric
+        # coordinates.
+        metric_inv = _pseudo_inv22sym_vectorized(metric)
+        Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim))
+        ksi = metric_inv @ Covar
+        alpha = _to_matrix_vectorized([
+            [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
+        return alpha
+
+    @staticmethod
+    def _get_jacobian(tris_pts):
+        """
+        Fast (vectorized) function to compute triangle jacobian matrix.
+
+        Parameters
+        ----------
+        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
+            Coordinates of the containing triangles apexes.
+
+        Returns
+        -------
+        array of dim 3 (shape (nx, 2, 2))
+            Barycentric coordinates of the points inside the containing
+            triangles.
+            J[itri, :, :] is the jacobian matrix at apex 0 of the triangle
+            itri, so that the following (matrix) relationship holds:
+               [dz/dksi] = [J] x [dz/dx]
+            with x: global coordinates
+                 ksi: element parametric coordinates in triangle first apex
+                 local basis.
+        """
+        a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
+        b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
+        J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
+                                   [b[:, 0], b[:, 1]]])
+        return J
+
+    @staticmethod
+    def _compute_tri_eccentricities(tris_pts):
+        """
+        Compute triangle eccentricities.
+
+        Parameters
+        ----------
+        tris_pts : array like of dim 3 (shape: (nx, 3, 2))
+            Coordinates of the triangles apexes.
+
+        Returns
+        -------
+        array like of dim 2 (shape: (nx, 3))
+            The so-called eccentricity parameters [1] needed for HCT triangular
+            element.
+        """
+        a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)
+        b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)
+        c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)
+        # Do not use np.squeeze, this is dangerous if only one triangle
+        # in the triangulation...
+        dot_a = (_transpose_vectorized(a) @ a)[:, 0, 0]
+        dot_b = (_transpose_vectorized(b) @ b)[:, 0, 0]
+        dot_c = (_transpose_vectorized(c) @ c)[:, 0, 0]
+        # Note that this line will raise a warning for dot_a, dot_b or dot_c
+        # zeros, but we choose not to support triangles with duplicate points.
+        return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
+                                      [(dot_a-dot_c) / dot_b],
+                                      [(dot_b-dot_a) / dot_c]])
+
+
+# FEM element used for interpolation and for solving minimisation
+# problem (Reduced HCT element)
+class _ReducedHCT_Element:
+    """
+    Implementation of reduced HCT triangular element with explicit shape
+    functions.
+
+    Computes z, dz, d2z and the element stiffness matrix for bending energy:
+    E(f) = integral((d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA)
+
+    *** Reference for the shape functions: ***
+    [1] Basis functions for general Hsieh-Clough-Tocher triangles, complete or
+        reduced.
+        Michel Bernadou, Kamal Hassan
+        International Journal for Numerical Methods in Engineering.
+        17(5):784 - 789, 1981
+
+    *** Element description: ***
+    9 dofs: z and dz given at 3 apex
+    C1 (conform)
+
+    """
+    # 1) Loads matrices to generate shape functions as a function of
+    #    triangle eccentricities - based on [1] p.11
+    M = np.array([
+        [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
+        [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
+        [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
+        [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
+        [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
+        [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
+    M0 = np.array([
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
+        [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
+    M1 = np.array([
+        [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
+    M2 = np.array([
+        [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
+        [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
+
+    # 2) Loads matrices to rotate components of gradient & Hessian
+    #    vectors in the reference basis of triangle first apex (a0)
+    rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
+                          [ 0., 1.], [-1., -1.],
+                          [-1., -1.], [ 1., 0.]])
+
+    rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
+                           [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
+                           [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
+
+    # 3) Loads Gauss points & weights on the 3 sub-triangles for P2
+    #    exact integral - 3 points on each subtriangles.
+    # NOTE: as the 2nd derivative is discontinuous, we really need those 9
+    #       points!
+    n_gauss = 9
+    gauss_pts = np.array([[13./18., 4./18., 1./18.],
+                          [ 4./18., 13./18., 1./18.],
+                          [ 7./18., 7./18., 4./18.],
+                          [ 1./18., 13./18., 4./18.],
+                          [ 1./18., 4./18., 13./18.],
+                          [ 4./18., 7./18., 7./18.],
+                          [ 4./18., 1./18., 13./18.],
+                          [13./18., 1./18., 4./18.],
+                          [ 7./18., 4./18., 7./18.]], dtype=np.float64)
+    gauss_w = np.ones([9], dtype=np.float64) / 9.
+
+    # 4) Stiffness matrix for curvature energy
+    E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
+
+    # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
+    J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
+    J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
+
+    def get_function_values(self, alpha, ecc, dofs):
+        """
+        Parameters
+        ----------
+        alpha : is a (N x 3 x 1) array (array of column-matrices) of
+        barycentric coordinates,
+        ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
+        eccentricities,
+        dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
+        degrees of freedom.
+
+        Returns
+        -------
+        Returns the N-array of interpolated function values.
+        """
+        subtri = np.argmin(alpha, axis=1)[:, 0]
+        ksi = _roll_vectorized(alpha, -subtri, axis=0)
+        E = _roll_vectorized(ecc, -subtri, axis=0)
+        x = ksi[:, 0, 0]
+        y = ksi[:, 1, 0]
+        z = ksi[:, 2, 0]
+        x_sq = x*x
+        y_sq = y*y
+        z_sq = z*z
+        V = _to_matrix_vectorized([
+            [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
+            [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
+        prod = self.M @ V
+        prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V)
+        prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V)
+        prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V)
+        s = _roll_vectorized(prod, 3*subtri, axis=0)
+        return (dofs @ s)[:, 0, 0]
+
+    def get_function_derivatives(self, alpha, J, ecc, dofs):
+        """
+        Parameters
+        ----------
+        *alpha* is a (N x 3 x 1) array (array of column-matrices of
+        barycentric coordinates)
+        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+        triangle first apex)
+        *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
+        eccentricities)
+        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
+        degrees of freedom.
+
+        Returns
+        -------
+        Returns the values of interpolated function derivatives [dz/dx, dz/dy]
+        in global coordinates at locations alpha, as a column-matrices of
+        shape (N x 2 x 1).
+        """
+        subtri = np.argmin(alpha, axis=1)[:, 0]
+        ksi = _roll_vectorized(alpha, -subtri, axis=0)
+        E = _roll_vectorized(ecc, -subtri, axis=0)
+        x = ksi[:, 0, 0]
+        y = ksi[:, 1, 0]
+        z = ksi[:, 2, 0]
+        x_sq = x*x
+        y_sq = y*y
+        z_sq = z*z
+        dV = _to_matrix_vectorized([
+            [ -3.*x_sq, -3.*x_sq],
+            [ 3.*y_sq, 0.],
+            [ 0., 3.*z_sq],
+            [ -2.*x*z, -2.*x*z+x_sq],
+            [-2.*x*y+x_sq, -2.*x*y],
+            [ 2.*x*y-y_sq, -y_sq],
+            [ 2.*y*z, y_sq],
+            [ z_sq, 2.*y*z],
+            [ -z_sq, 2.*x*z-z_sq],
+            [ x*z-y*z, x*y-y*z]])
+        # Puts back dV in first apex basis
+        dV = dV @ _extract_submatrices(
+            self.rotate_dV, subtri, block_size=2, axis=0)
+
+        prod = self.M @ dV
+        prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ dV)
+        prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ dV)
+        prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ dV)
+        dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
+        dfdksi = dofs @ dsdksi
+        # In global coordinates:
+        # Here we try to deal with the simplest colinear cases, returning a
+        # null matrix.
+        J_inv = _safe_inv22_vectorized(J)
+        dfdx = J_inv @ _transpose_vectorized(dfdksi)
+        return dfdx
+
+    def get_function_hessians(self, alpha, J, ecc, dofs):
+        """
+        Parameters
+        ----------
+        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
+        barycentric coordinates
+        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+        triangle first apex)
+        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+        eccentricities
+        *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
+        degrees of freedom.
+
+        Returns
+        -------
+        Returns the values of interpolated function 2nd-derivatives
+        [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
+        as a column-matrices of shape (N x 3 x 1).
+        """
+        d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
+        d2fdksi2 = dofs @ d2sdksi2
+        H_rot = self.get_Hrot_from_J(J)
+        d2fdx2 = d2fdksi2 @ H_rot
+        return _transpose_vectorized(d2fdx2)
+
+    def get_d2Sidksij2(self, alpha, ecc):
+        """
+        Parameters
+        ----------
+        *alpha* is a (N x 3 x 1) array (array of column-matrices) of
+        barycentric coordinates
+        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+        eccentricities
+
+        Returns
+        -------
+        Returns the arrays d2sdksi2 (N x 9 x 3) Hessian of shape functions
+        expressed in covariant coordinates in first apex basis.
+        """
+        subtri = np.argmin(alpha, axis=1)[:, 0]
+        ksi = _roll_vectorized(alpha, -subtri, axis=0)
+        E = _roll_vectorized(ecc, -subtri, axis=0)
+        x = ksi[:, 0, 0]
+        y = ksi[:, 1, 0]
+        z = ksi[:, 2, 0]
+        d2V = _to_matrix_vectorized([
+            [ 6.*x, 6.*x, 6.*x],
+            [ 6.*y, 0., 0.],
+            [ 0., 6.*z, 0.],
+            [ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
+            [2.*y-4.*x, 2.*y, 2.*y-2.*x],
+            [2.*x-4.*y, 0., -2.*y],
+            [ 2.*z, 0., 2.*y],
+            [ 0., 2.*y, 2.*z],
+            [ 0., 2.*x-4.*z, -2.*z],
+            [ -2.*z, -2.*y, x-y-z]])
+        # Puts back d2V in first apex basis
+        d2V = d2V @ _extract_submatrices(
+            self.rotate_d2V, subtri, block_size=3, axis=0)
+        prod = self.M @ d2V
+        prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ d2V)
+        prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ d2V)
+        prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ d2V)
+        d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
+        return d2sdksi2
+
+    def get_bending_matrices(self, J, ecc):
+        """
+        Parameters
+        ----------
+        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+        triangle first apex)
+        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+        eccentricities
+
+        Returns
+        -------
+        Returns the element K matrices for bending energy expressed in
+        GLOBAL nodal coordinates.
+        K_ij = integral [zi_xx*zj_xx + zi_yy*zj_yy + 2*zi_xy*zj_xy dA]
+        tri_J is needed to rotate dofs from local basis to global basis
+        """
+        n = np.size(ecc, 0)
+
+        # 1) matrix to rotate dofs in global coordinates
+        J1 = self.J0_to_J1 @ J
+        J2 = self.J0_to_J2 @ J
+        DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
+        DOF_rot[:, 0, 0] = 1
+        DOF_rot[:, 3, 3] = 1
+        DOF_rot[:, 6, 6] = 1
+        DOF_rot[:, 1:3, 1:3] = J
+        DOF_rot[:, 4:6, 4:6] = J1
+        DOF_rot[:, 7:9, 7:9] = J2
+
+        # 2) matrix to rotate Hessian in global coordinates.
+        H_rot, area = self.get_Hrot_from_J(J, return_area=True)
+
+        # 3) Computes stiffness matrix
+        # Gauss quadrature.
+        K = np.zeros([n, 9, 9], dtype=np.float64)
+        weights = self.gauss_w
+        pts = self.gauss_pts
+        for igauss in range(self.n_gauss):
+            alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
+            alpha = np.expand_dims(alpha, 2)
+            weight = weights[igauss]
+            d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
+            d2Skdx2 = d2Skdksi2 @ H_rot
+            K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2))
+
+        # 4) With nodal (not elem) dofs
+        K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot
+
+        # 5) Need the area to compute total element energy
+        return _scalar_vectorized(area, K)
+
+    def get_Hrot_from_J(self, J, return_area=False):
+        """
+        Parameters
+        ----------
+        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+        triangle first apex)
+
+        Returns
+        -------
+        Returns H_rot used to rotate Hessian from local basis of first apex,
+        to global coordinates.
+        if *return_area* is True, returns also the triangle area (0.5*det(J))
+        """
+        # Here we try to deal with the simplest colinear cases; a null
+        # energy and area is imposed.
+        J_inv = _safe_inv22_vectorized(J)
+        Ji00 = J_inv[:, 0, 0]
+        Ji11 = J_inv[:, 1, 1]
+        Ji10 = J_inv[:, 1, 0]
+        Ji01 = J_inv[:, 0, 1]
+        H_rot = _to_matrix_vectorized([
+            [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
+            [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
+            [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
+        if not return_area:
+            return H_rot
+        else:
+            area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
+            return H_rot, area
+
+    def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
+        """
+        Build K and F for the following elliptic formulation:
+        minimization of curvature energy with value of function at node
+        imposed and derivatives 'free'.
+
+        Build the global Kff matrix in coo format.
+        Build the full Ff vec Ff = - Kfc x Uc.
+
+        Parameters
+        ----------
+        *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
+        triangle first apex)
+        *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
+        eccentricities
+        *triangles* is a (N x 3) array of nodes indexes.
+        *Uc* is (N x 3) array of imposed displacements at nodes
+
+        Returns
+        -------
+        (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
+        (row, col) entries must be summed.
+        Ff: force vector - dim npts * 3
+        """
+        ntri = np.size(ecc, 0)
+        vec_range = np.arange(ntri, dtype=np.int32)
+        c_indices = np.full(ntri, -1, dtype=np.int32)  # for unused dofs, -1
+        f_dof = [1, 2, 4, 5, 7, 8]
+        c_dof = [0, 3, 6]
+
+        # vals, rows and cols indices in global dof numbering
+        f_dof_indices = _to_matrix_vectorized([[
+            c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
+            c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
+            c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
+
+        expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
+        f_row_indices = _transpose_vectorized(expand_indices @ f_dof_indices)
+        f_col_indices = expand_indices @ f_dof_indices
+        K_elem = self.get_bending_matrices(J, ecc)
+
+        # Extracting sub-matrices
+        # Explanation & notations:
+        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dy)
+        # * Subscript c denotes 'condensated' (imposed) degrees of freedom
+        #   (i.e. z at all nodes)
+        # * F = [Ff, Fc] is the force vector
+        # * U = [Uf, Uc] is the imposed dof vector
+        #        [ Kff Kfc ]
+        # * K =  [         ] is the laplacian stiffness matrix
+        #        [ Kcf Kcc ]
+        # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
+
+        # Computing Kff stiffness matrix in sparse coo format
+        Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
+        Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
+        Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
+
+        # Computing Ff force vector in sparse coo format
+        Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
+        Uc_elem = np.expand_dims(Uc, axis=2)
+        Ff_elem = -(Kfc_elem @ Uc_elem)[:, :, 0]
+        Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
+
+        # Extracting Ff force vector in dense format
+        # We have to sum duplicate indices - using bincount
+        Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
+        return Kff_rows, Kff_cols, Kff_vals, Ff
+
+
+# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
+# _DOF_estimator_min_E
+# Private classes used to compute the degree of freedom of each triangular
+# element for the TriCubicInterpolator.
+class _DOF_estimator:
+    """
+    Abstract base class for classes used to estimate a function's first
+    derivatives, and deduce the dofs for a CubicTriInterpolator using a
+    reduced HCT element formulation.
+
+    Derived classes implement ``compute_df(self, **kwargs)``, returning
+    ``np.vstack([dfx, dfy]).T`` where ``dfx, dfy`` are the estimation of the 2
+    gradient coordinates.
+    """
+    def __init__(self, interpolator, **kwargs):
+        _api.check_isinstance(CubicTriInterpolator, interpolator=interpolator)
+        self._pts = interpolator._pts
+        self._tris_pts = interpolator._tris_pts
+        self.z = interpolator._z
+        self._triangles = interpolator._triangles
+        (self._unit_x, self._unit_y) = (interpolator._unit_x,
+                                        interpolator._unit_y)
+        self.dz = self.compute_dz(**kwargs)  # subclass-specific estimation
+        self.compute_dof_from_df()  # NOTE(review): return value is discarded
+
+    def compute_dz(self, **kwargs):
+        # Subclasses must return the (npts, 2) gradient estimation.
+        raise NotImplementedError
+
+    def compute_dof_from_df(self):
+        """
+        Compute reduced-HCT elements degrees of freedom, from the gradient.
+        """
+        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+        tri_z = self.z[self._triangles]
+        tri_dz = self.dz[self._triangles]
+        tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
+        return tri_dof
+
+    @staticmethod
+    def get_dof_vec(tri_z, tri_dz, J):
+        """
+        Compute the dof vector of a triangle, from the value of f, df and
+        of the local Jacobian at each node.
+
+        Parameters
+        ----------
+        tri_z : shape (npt, 3) array
+            f nodal values.
+        tri_dz : shape (npt, 3, 2) array
+            df/dx, df/dy nodal values.
+        J
+            Jacobian matrix in local basis of apex 0.
+
+        Returns
+        -------
+        dof : shape (npt, 9) array
+            For each apex ``iapex``::
+
+                dof[iapex*3+0] = f(Ai)
+                dof[iapex*3+1] = df(Ai).(AiAi+)
+                dof[iapex*3+2] = df(Ai).(AiAi-)
+        """
+        npt = tri_z.shape[0]
+        dof = np.zeros([npt, 9], dtype=np.float64)
+        J1 = _ReducedHCT_Element.J0_to_J1 @ J
+        J2 = _ReducedHCT_Element.J0_to_J2 @ J
+
+        col0 = J @ np.expand_dims(tri_dz[:, 0, :], axis=2)
+        col1 = J1 @ np.expand_dims(tri_dz[:, 1, :], axis=2)
+        col2 = J2 @ np.expand_dims(tri_dz[:, 2, :], axis=2)
+
+        dfdksi = _to_matrix_vectorized([
+            [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
+            [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
+        dof[:, 0:7:3] = tri_z  # z values at the 3 apexes
+        dof[:, 1:8:3] = dfdksi[:, 0]
+        dof[:, 2:9:3] = dfdksi[:, 1]
+        return dof
+
+
+class _DOF_estimator_user(_DOF_estimator):
+    """dz is imposed by user; accounts for scaling if any."""
+
+    def compute_dz(self, dz):
+        (dzdx, dzdy) = dz
+        # Chain rule: internal coordinates are x/unit_x, y/unit_y, so the
+        # user-provided derivatives are multiplied by the unit sizes.
+        dzdx = dzdx * self._unit_x
+        dzdy = dzdy * self._unit_y
+        return np.vstack([dzdx, dzdy]).T
+
+
+class _DOF_estimator_geom(_DOF_estimator):
+    """Fast 'geometric' approximation, recommended for large arrays."""
+
+    def compute_dz(self):
+        """
+        self.df is computed as weighted average of triangles sharing a common
+        node. On each triangle itri f is first assumed linear (= ~f), which
+        allows to compute d~f[itri]
+        Then the following approximation of df nodal values is proposed:
+            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
+        The weighted coeff. w[itri] are proportional to the angle of the
+        triangle itri at apex ipt
+        """
+        el_geom_w = self.compute_geom_weights()
+        el_geom_grad = self.compute_geom_grads()
+
+        # Sum of weights coeffs
+        w_node_sum = np.bincount(np.ravel(self._triangles),
+                                 weights=np.ravel(el_geom_w))
+
+        # Sum of weighted df = (dfx, dfy)
+        dfx_el_w = np.empty_like(el_geom_w)
+        dfy_el_w = np.empty_like(el_geom_w)
+        for iapex in range(3):
+            dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
+            dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
+        dfx_node_sum = np.bincount(np.ravel(self._triangles),
+                                   weights=np.ravel(dfx_el_w))
+        dfy_node_sum = np.bincount(np.ravel(self._triangles),
+                                   weights=np.ravel(dfy_el_w))
+
+        # Estimation of df
+        dfx_estim = dfx_node_sum/w_node_sum
+        dfy_estim = dfy_node_sum/w_node_sum
+        return np.vstack([dfx_estim, dfy_estim]).T
+
+    def compute_geom_weights(self):
+        """
+        Build the (nelems, 3) weights coeffs of triangle angles, one weight
+        per apex; degenerate (0 or pi) angles get a null weight.
+        """
+        weights = np.zeros([np.size(self._triangles, 0), 3])
+        tris_pts = self._tris_pts
+        for ipt in range(3):
+            p0 = tris_pts[:, ipt % 3, :]
+            p1 = tris_pts[:, (ipt+1) % 3, :]
+            p2 = tris_pts[:, (ipt-1) % 3, :]
+            alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
+            alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
+            # In the below formula we could take modulo 2. but
+            # modulo 1. is safer regarding round-off errors (flat triangles).
+            angle = np.abs(((alpha2-alpha1) / np.pi) % 1)
+            # Weight proportional to angle up np.pi/2; null weight for
+            # degenerated cases 0 and np.pi (note that *angle* is normalized
+            # by np.pi).
+            weights[:, ipt] = 0.5 - np.abs(angle-0.5)
+        return weights
+
+    def compute_geom_grads(self):
+        """
+        Compute the (global) gradient component of f assumed linear (~f).
+        returns array df of shape (nelems, 2)
+        df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz
+        """
+        tris_pts = self._tris_pts
+        tris_f = self.z[self._triangles]
+
+        dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
+        dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
+        dM = np.dstack([dM1, dM2])
+        # Here we try to deal with the simplest colinear cases: a null
+        # gradient is assumed in this case.
+        dM_inv = _safe_inv22_vectorized(dM)
+
+        dZ1 = tris_f[:, 1] - tris_f[:, 0]
+        dZ2 = tris_f[:, 2] - tris_f[:, 0]
+        dZ = np.vstack([dZ1, dZ2]).T
+        df = np.empty_like(dZ)
+
+        # With np.einsum: could be ej,eji -> ej
+        df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
+        df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
+        return df
+
+
+class _DOF_estimator_min_E(_DOF_estimator_geom):
+    """
+    The 'smoothest' approximation, df is computed through global minimization
+    of the bending energy:
+    E(f) = integral[(d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA]
+    """
+    def __init__(self, Interpolator):
+        self._eccs = Interpolator._eccs
+        super().__init__(Interpolator)
+
+    def compute_dz(self):
+        """
+        Elliptic solver for bending energy minimization.
+        Uses a dedicated 'toy' sparse Jacobi PCG solver.
+        """
+        # Initial guess for iterative PCG solver.
+        dz_init = super().compute_dz()
+        Uf0 = np.ravel(dz_init)
+
+        reference_element = _ReducedHCT_Element()
+        J = CubicTriInterpolator._get_jacobian(self._tris_pts)
+        eccs = self._eccs
+        triangles = self._triangles
+        Uc = self.z[self._triangles]
+
+        # Building stiffness matrix and force vector in coo format
+        Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
+            J, eccs, triangles, Uc)
+
+        # Building sparse matrix and solving minimization problem
+        # We could use scipy.sparse direct solver; however to avoid this
+        # external dependency an implementation of a simple PCG solver with
+        # a simple diagonal Jacobi preconditioner is implemented.
+        tol = 1.e-10
+        n_dof = Ff.shape[0]
+        Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
+                                     shape=(n_dof, n_dof))
+        Kff_coo.compress_csc()
+        Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
+        # If the PCG did not converge, we return the best guess between Uf0
+        # and Uf.
+        err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
+        if err0 < err:
+            # Warn that minimization failed; fall back to the 'geom' initial
+            # guess, which is at least a valid approximation.
+            _api.warn_external("In TriCubicInterpolator initialization, "
+                               "PCG sparse solver did not converge after "
+                               "1000 iterations. `geom` approximation is "
+                               "used instead of `min_E`")
+            Uf = Uf0
+
+        # Building dz from Uf
+        dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
+        dz[:, 0] = Uf[::2]
+        dz[:, 1] = Uf[1::2]
+        return dz
+
+
+# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
+# a PCG sparse solver for (symmetric) elliptic problems.
+class _Sparse_Matrix_coo:
+ def __init__(self, vals, rows, cols, shape):
+ """
+ Create a sparse matrix in coo format.
+ *vals*: arrays of values of non-null entries of the matrix
+ *rows*: int arrays of rows of non-null entries of the matrix
+ *cols*: int arrays of cols of non-null entries of the matrix
+ *shape*: 2-tuple (n, m) of matrix shape
+ """
+ self.n, self.m = shape
+ self.vals = np.asarray(vals, dtype=np.float64)
+ self.rows = np.asarray(rows, dtype=np.int32)
+ self.cols = np.asarray(cols, dtype=np.int32)
+
+ def dot(self, V):
+ """
+ Dot product of self by a vector *V* in sparse-dense to dense format
+ *V* dense vector of shape (self.m,).
+ """
+ assert V.shape == (self.m,)
+ return np.bincount(self.rows,
+ weights=self.vals*V[self.cols],
+ minlength=self.m)
+
+ def compress_csc(self):
+ """
+ Compress rows, cols, vals / summing duplicates. Sort for csc format.
+ """
+ _, unique, indices = np.unique(
+ self.rows + self.n*self.cols,
+ return_index=True, return_inverse=True)
+ self.rows = self.rows[unique]
+ self.cols = self.cols[unique]
+ self.vals = np.bincount(indices, weights=self.vals)
+
+ def compress_csr(self):
+ """
+ Compress rows, cols, vals / summing duplicates. Sort for csr format.
+ """
+ _, unique, indices = np.unique(
+ self.m*self.rows + self.cols,
+ return_index=True, return_inverse=True)
+ self.rows = self.rows[unique]
+ self.cols = self.cols[unique]
+ self.vals = np.bincount(indices, weights=self.vals)
+
+ def to_dense(self):
+ """
+ Return a dense matrix representing self, mainly for debugging purposes.
+ """
+ ret = np.zeros([self.n, self.m], dtype=np.float64)
+ nvals = self.vals.size
+ for i in range(nvals):
+ ret[self.rows[i], self.cols[i]] += self.vals[i]
+ return ret
+
+ def __str__(self):
+ return self.to_dense().__str__()
+
+ @property
+ def diag(self):
+ """Return the (dense) vector of the diagonal elements."""
+ in_diag = (self.rows == self.cols)
+ diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
+ diag[self.rows[in_diag]] = self.vals[in_diag]
+ return diag
+
+
+def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
+ """
+ Use Preconditioned Conjugate Gradient iteration to solve A x = b
+ A simple Jacobi (diagonal) preconditioner is used.
+
+ Parameters
+ ----------
+ A : _Sparse_Matrix_coo
+ *A* must have been compressed before by compress_csc or
+ compress_csr method.
+ b : array
+ Right hand side of the linear system.
+ x0 : array, optional
+ Starting guess for the solution. Defaults to the zero vector.
+ tol : float, optional
+ Tolerance to achieve. The algorithm terminates when the relative
+ residual is below tol. Default is 1e-10.
+ maxiter : int, optional
+ Maximum number of iterations. Iteration will stop after *maxiter*
+ steps even if the specified tolerance has not been achieved. Defaults
+ to 1000.
+
+ Returns
+ -------
+ x : array
+ The converged solution.
+ err : float
+ The absolute error np.linalg.norm(A.dot(x) - b)
+ """
+ n = b.size
+ assert A.n == n
+ assert A.m == n
+ b_norm = np.linalg.norm(b)
+
+ # Jacobi pre-conditioner
+ kvec = A.diag
+ # For diag elem < 1e-6 we keep 1e-6.
+ kvec = np.maximum(kvec, 1e-6)
+
+ # Initial guess
+ if x0 is None:
+ x = np.zeros(n)
+ else:
+ x = x0
+
+ r = b - A.dot(x)
+ w = r/kvec
+
+ p = np.zeros(n)
+ beta = 0.0
+ rho = np.dot(r, w)
+ k = 0
+
+ # Following C. T. Kelley
+ while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
+ p = w + beta*p
+ z = A.dot(p)
+ alpha = rho/np.dot(p, z)
+ r = r - alpha*z
+ w = r/kvec
+ rhoold = rho
+ rho = np.dot(r, w)
+ x = x + alpha*p
+ beta = rho/rhoold
+ # err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
+ k += 1
+ err = np.linalg.norm(A.dot(x) - b)
+ return x, err
+
+
+# The following private functions:
+# :func:`_safe_inv22_vectorized`
+# :func:`_pseudo_inv22sym_vectorized`
+# :func:`_scalar_vectorized`
+# :func:`_transpose_vectorized`
+# :func:`_roll_vectorized`
+# :func:`_to_matrix_vectorized`
+# :func:`_extract_submatrices`
+# provide fast numpy implementation of some standard operations on arrays of
+# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
+
+# Development note: Dealing with pathologic 'flat' triangles in the
+# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions
+# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
+#
+# Goals:
+# 1) The CubicTriInterpolator should be able to handle flat or almost flat
+# triangles without raising an error,
+# 2) These degenerated triangles should have no impact on the automatic dof
+# calculation (associated with null weight for the _DOF_estimator_geom and
+# with null energy for the _DOF_estimator_min_E),
+# 3) Linear patch test should be passed exactly on degenerated meshes,
+# 4) Interpolation (with :meth:`_interpolate_single_key` or
+# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
+# the pathologic triangles, to interact correctly with a TriRefiner class.
+#
+# Difficulties:
+# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
+# *metric* (the metric tensor = J x J.T). Computation of the local
+# tangent plane is also problematic.
+#
+# Implementation:
+# Most of the time, when computing the inverse of a rank-deficient matrix it
+# is safe to simply return the null matrix (which is the implementation in
+# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
+# enforced by:
+# - null area hence null energy in :class:`_DOF_estimator_min_E`
+# - angles close or equal to 0 or np.pi hence null weight in
+# :class:`_DOF_estimator_geom`.
+# Note that the function angle -> weight is continuous and maximum for an
+# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
+# The exception is the computation of barycentric coordinates, which is done
+# by inversion of the *metric* matrix. In this case, we need to compute a set
+# of valid coordinates (1 among numerous possibilities), to ensure point 4).
+# We benefit here from the symmetry of metric = J x J.T, which makes it easier
+# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
    """
    Invert an array of (2, 2) matrices, mapping rank-deficient matrices to
    the null matrix.

    *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
    """
    _api.check_shape((None, 2, 2), M=M)
    prod1 = M[:, 0, 0]*M[:, 1, 1]
    delta = prod1 - M[:, 0, 1]*M[:, 1, 0]

    # Rank-deficient matrices get delta_inv = 0, hence a null inverse.
    rank2 = np.abs(delta) > 1e-8*np.abs(prod1)
    if rank2.all():
        # Fast path: every matrix is invertible.
        delta_inv = 1./delta
    else:
        delta_inv = np.zeros(M.shape[0])
        delta_inv[rank2] = 1./delta[rank2]

    # Closed-form (2, 2) inverse: adjugate scaled by 1/det.
    M_inv = np.empty_like(M)
    M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
    M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
    M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
    M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
    return M_inv
+
+
def _pseudo_inv22sym_vectorized(M):
    """
    Invert an array of (2, 2) SYMMETRIC matrices; return the (Moore-Penrose)
    pseudo-inverse for rank-deficient matrices.

    In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
    projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
    In case M is of rank 0, we return the null matrix.

    *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
    """
    _api.check_shape((None, 2, 2), M=M)
    M_inv = np.empty_like(M)
    prod1 = M[:, 0, 0]*M[:, 1, 1]
    delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
    rank2 = np.abs(delta) > 1e-8*np.abs(prod1)

    if rank2.all():
        # Fast path: every matrix has full rank.
        M_inv[:, 0, 0] = M[:, 1, 1] / delta
        M_inv[:, 0, 1] = -M[:, 0, 1] / delta
        M_inv[:, 1, 0] = -M[:, 1, 0] / delta
        M_inv[:, 1, 1] = M[:, 0, 0] / delta
        return M_inv

    # Mixed-rank ('pathologic') path.
    # 1) Full-rank subset: classical closed-form inverse.
    delta = delta[rank2]
    M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
    M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
    M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
    M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
    # 2) Rank 0/1 subset: pseudo-inverse M / trace(M)**2.  For a
    # (near-)zero trace the tr_zeros indicator both zeroes the numerator
    # factor and keeps the denominator non-null, yielding the null matrix.
    rank01 = ~rank2
    tr = M[rank01, 0, 0] + M[rank01, 1, 1]
    tr_zeros = (np.abs(tr) < 1.e-8)
    sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
    M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
    M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
    M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
    M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
    return M_inv
+
+
+def _scalar_vectorized(scalar, M):
+ """
+ Scalar product between scalars and matrices.
+ """
+ return scalar[:, np.newaxis, np.newaxis]*M
+
+
+def _transpose_vectorized(M):
+ """
+ Transposition of an array of matrices *M*.
+ """
+ return np.transpose(M, [0, 2, 1])
+
+
+def _roll_vectorized(M, roll_indices, axis):
+ """
+ Roll an array of matrices along *axis* (0: rows, 1: columns) according to
+ an array of indices *roll_indices*.
+ """
+ assert axis in [0, 1]
+ ndim = M.ndim
+ assert ndim == 3
+ ndim_roll = roll_indices.ndim
+ assert ndim_roll == 1
+ sh = M.shape
+ r, c = sh[-2:]
+ assert sh[0] == roll_indices.shape[0]
+ vec_indices = np.arange(sh[0], dtype=np.int32)
+
+ # Builds the rolled matrix
+ M_roll = np.empty_like(M)
+ if axis == 0:
+ for ir in range(r):
+ for ic in range(c):
+ M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
+ else: # 1
+ for ir in range(r):
+ for ic in range(c):
+ M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
+ return M_roll
+
+
+def _to_matrix_vectorized(M):
+ """
+ Build an array of matrices from individuals np.arrays of identical shapes.
+
+ Parameters
+ ----------
+ M
+ ncols-list of nrows-lists of shape sh.
+
+ Returns
+ -------
+ M_res : np.array of shape (sh, nrow, ncols)
+ *M_res* satisfies ``M_res[..., i, j] = M[i][j]``.
+ """
+ assert isinstance(M, (tuple, list))
+ assert all(isinstance(item, (tuple, list)) for item in M)
+ c_vec = np.asarray([len(item) for item in M])
+ assert np.all(c_vec-c_vec[0] == 0)
+ r = len(M)
+ c = c_vec[0]
+ M00 = np.asarray(M[0][0])
+ dt = M00.dtype
+ sh = [M00.shape[0], r, c]
+ M_ret = np.empty(sh, dtype=dt)
+ for irow in range(r):
+ for icol in range(c):
+ M_ret[:, irow, icol] = np.asarray(M[irow][icol])
+ return M_ret
+
+
+def _extract_submatrices(M, block_indices, block_size, axis):
+ """
+ Extract selected blocks of a matrices *M* depending on parameters
+ *block_indices* and *block_size*.
+
+ Returns the array of extracted matrices *Mres* so that ::
+
+ M_res[..., ir, :] = M[(block_indices*block_size+ir), :]
+ """
+ assert block_indices.ndim == 1
+ assert axis in [0, 1]
+
+ r, c = M.shape
+ if axis == 0:
+ sh = [block_indices.shape[0], block_size, c]
+ else: # 1
+ sh = [block_indices.shape[0], r, block_size]
+
+ dt = M.dtype
+ M_res = np.empty(sh, dtype=dt)
+ if axis == 0:
+ for ir in range(block_size):
+ M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
+ else: # 1
+ for ic in range(block_size):
+ M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
+
+ return M_res
diff --git a/lib/matplotlib/tri/_tripcolor.py b/lib/matplotlib/tri/_tripcolor.py
new file mode 100644
index 000000000000..3c252cdbc31b
--- /dev/null
+++ b/lib/matplotlib/tri/_tripcolor.py
@@ -0,0 +1,154 @@
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.collections import PolyCollection, TriMesh
+from matplotlib.colors import Normalize
+from matplotlib.tri._triangulation import Triangulation
+
+
def tripcolor(ax, *args, alpha=1.0, norm=None, cmap=None, vmin=None,
              vmax=None, shading='flat', facecolors=None, **kwargs):
    """
    Create a pseudocolor plot of an unstructured triangular grid.

    Call signatures::

        tripcolor(triangulation, c, *, ...)
        tripcolor(x, y, c, *, [triangles=triangles], [mask=mask], ...)

    The triangular grid can be specified either by passing a `.Triangulation`
    object as the first parameter, or by passing the points *x*, *y* and
    optionally the *triangles* and a *mask*. See `.Triangulation` for an
    explanation of these parameters.

    It is possible to pass the triangles positionally, i.e.
    ``tripcolor(x, y, triangles, c, ...)``. However, this is discouraged.
    For more clarity, pass *triangles* via keyword argument.

    If neither of *triangulation* or *triangles* are given, the triangulation
    is calculated on the fly. In this case, it does not make sense to provide
    colors at the triangle faces via *c* or *facecolors* because there are
    multiple possible triangulations for a group of points and you don't know
    which triangles will be constructed.

    Parameters
    ----------
    triangulation : `.Triangulation`
        An already created triangular grid.
    x, y, triangles, mask
        Parameters defining the triangular grid. See `.Triangulation`.
        This is mutually exclusive with specifying *triangulation*.
    c : array-like
        The color values, either for the points or for the triangles. Which one
        is automatically inferred from the length of *c*, i.e. does it match
        the number of points or the number of triangles. If there are the same
        number of points and triangles in the triangulation it is assumed that
        color values are defined at points; to force the use of color values at
        triangles use the keyword argument ``facecolors=c`` instead of just
        ``c``.
        This parameter is position-only.
    facecolors : array-like, optional
        Can be used alternatively to *c* to specify colors at the triangle
        faces. This parameter takes precedence over *c*.
    shading : {'flat', 'gouraud'}, default: 'flat'
        If 'flat' and the color values *c* are defined at points, the color
        values used for each triangle are from the mean c of the triangle's
        three points. If *shading* is 'gouraud' then color values must be
        defined at points.
    other_parameters
        All other parameters are the same as for `~.Axes.pcolor`.
    """
    _api.check_in_list(['flat', 'gouraud'], shading=shading)

    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)

    # Parse the color to be in one of (the other variable will be None):
    # - facecolors: if specified at the triangle faces
    # - point_colors: if specified at the points
    if facecolors is not None:
        if args:
            _api.warn_external(
                "Positional parameter c has no effect when the keyword "
                "facecolors is given")
        point_colors = None
        if len(facecolors) != len(tri.triangles):
            raise ValueError("The length of facecolors must match the number "
                             "of triangles")
    else:
        # Color from positional parameter c
        if not args:
            raise TypeError(
                "tripcolor() missing 1 required positional argument: 'c'; or "
                "1 required keyword-only argument: 'facecolors'")
        elif len(args) > 1:
            _api.warn_deprecated(
                "3.6", message=f"Additional positional parameters "
                f"{args[1:]!r} are ignored; support for them is deprecated "
                f"since %(since)s and will be removed %(removal)s")
        c = np.asarray(args[0])
        if len(c) == len(tri.x):
            # having this before the len(tri.triangles) comparison gives
            # precedence to nodes if there are as many nodes as triangles
            point_colors = c
            facecolors = None
        elif len(c) == len(tri.triangles):
            point_colors = None
            facecolors = c
        else:
            raise ValueError('The length of c must match either the number '
                             'of points or the number of triangles')

    # Handling of linewidths, shading, edgecolors and antialiased as
    # in Axes.pcolor
    linewidths = (0.25,)
    if 'linewidth' in kwargs:
        kwargs['linewidths'] = kwargs.pop('linewidth')
    kwargs.setdefault('linewidths', linewidths)

    edgecolors = 'none'
    if 'edgecolor' in kwargs:
        kwargs['edgecolors'] = kwargs.pop('edgecolor')
    ec = kwargs.setdefault('edgecolors', edgecolors)

    if 'antialiased' in kwargs:
        kwargs['antialiaseds'] = kwargs.pop('antialiased')
    # Only a *string* edgecolor "none" disables antialiasing by default;
    # the isinstance guard avoids an AttributeError when the user passes a
    # non-string edgecolor such as an RGBA tuple (which was previously a
    # crash: ec.lower() on a tuple).
    if 'antialiaseds' not in kwargs and (
            isinstance(ec, str) and ec.lower() == "none"):
        kwargs['antialiaseds'] = False

    _api.check_isinstance((Normalize, None), norm=norm)
    if shading == 'gouraud':
        if facecolors is not None:
            raise ValueError(
                "shading='gouraud' can only be used when the colors "
                "are specified at the points, not at the faces.")
        collection = TriMesh(tri, alpha=alpha, array=point_colors,
                             cmap=cmap, norm=norm, **kwargs)
    else:  # 'flat'
        # Vertices of triangles.
        maskedTris = tri.get_masked_triangles()
        verts = np.stack((tri.x[maskedTris], tri.y[maskedTris]), axis=-1)

        # Color values.
        if facecolors is None:
            # One color per triangle, the mean of the 3 vertex color values.
            colors = point_colors[maskedTris].mean(axis=1)
        elif tri.mask is not None:
            # Remove color values of masked triangles.
            colors = facecolors[~tri.mask]
        else:
            colors = facecolors
        collection = PolyCollection(verts, alpha=alpha, array=colors,
                                    cmap=cmap, norm=norm, **kwargs)

    collection._scale_norm(norm, vmin, vmax)
    ax.grid(False)

    # Update the data limits from the triangulation bounding box and
    # register the new artist.
    minx = tri.x.min()
    maxx = tri.x.max()
    miny = tri.y.min()
    maxy = tri.y.max()
    corners = (minx, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()
    ax.add_collection(collection)
    return collection
diff --git a/lib/matplotlib/tri/_triplot.py b/lib/matplotlib/tri/_triplot.py
new file mode 100644
index 000000000000..6168946b1531
--- /dev/null
+++ b/lib/matplotlib/tri/_triplot.py
@@ -0,0 +1,86 @@
+import numpy as np
+from matplotlib.tri._triangulation import Triangulation
+import matplotlib.cbook as cbook
+import matplotlib.lines as mlines
+
+
def triplot(ax, *args, **kwargs):
    """
    Draw an unstructured triangular grid as lines and/or markers.

    Call signatures::

        triplot(triangulation, ...)
        triplot(x, y, [triangles], *, [mask=mask], ...)

    The triangular grid can be specified either by passing a `.Triangulation`
    object as the first parameter, or by passing the points *x*, *y* and
    optionally the *triangles* and a *mask*. If neither of *triangulation* or
    *triangles* are given, the triangulation is calculated on the fly.

    Parameters
    ----------
    triangulation : `.Triangulation`
        An already created triangular grid.
    x, y, triangles, mask
        Parameters defining the triangular grid. See `.Triangulation`.
        This is mutually exclusive with specifying *triangulation*.
    other_parameters
        All other args and kwargs are forwarded to `~.Axes.plot`.

    Returns
    -------
    lines : `~matplotlib.lines.Line2D`
        The drawn triangles edges.
    markers : `~matplotlib.lines.Line2D`
        The drawn marker nodes.
    """
    # Deferred import -- presumably to avoid a circular import at module
    # load time (TODO confirm).
    import matplotlib.axes

    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
    x, y, edges = (tri.x, tri.y, tri.edges)

    # Decode plot format string, e.g., 'ro-'
    fmt = args[0] if args else ""
    linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)

    # Insert plot format string into a copy of kwargs (kwargs values prevail).
    kw = cbook.normalize_kwargs(kwargs, mlines.Line2D)
    for key, val in zip(('linestyle', 'marker', 'color'),
                        (linestyle, marker, color)):
        if val is not None:
            kw.setdefault(key, val)

    # Draw lines without markers.
    # Note 1: If we drew markers here, most markers would be drawn more than
    # once as they belong to several edges.
    # Note 2: We insert nan values in the flattened edges arrays rather than
    # plotting directly (triang.x[edges].T, triang.y[edges].T)
    # as it considerably speeds-up code execution.
    linestyle = kw['linestyle']
    kw_lines = {
        **kw,
        'marker': 'None',  # No marker to draw.
        'zorder': kw.get('zorder', 1),  # Path default zorder is used.
    }
    if linestyle not in [None, 'None', '', ' ']:
        # Each edge contributes its 2 endpoints plus a nan separator, so the
        # whole mesh is a single Line2D.
        tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
        tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
        tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
                            **kw_lines)
    else:
        # No line style requested: plot an empty artist to keep the return
        # structure (lines + markers) uniform.
        tri_lines = ax.plot([], [], **kw_lines)

    # Draw markers separately.
    marker = kw['marker']
    kw_markers = {
        **kw,
        'linestyle': 'None',  # No line to draw.
    }
    # Drop the label on the marker artist so only the lines artist carries
    # it (a single legend entry per triplot call).
    kw_markers.pop('label', None)
    if marker not in [None, 'None', '', ' ']:
        tri_markers = ax.plot(x, y, **kw_markers)
    else:
        tri_markers = ax.plot([], [], **kw_markers)

    return tri_lines + tri_markers
diff --git a/lib/matplotlib/tri/_trirefine.py b/lib/matplotlib/tri/_trirefine.py
new file mode 100644
index 000000000000..80e41458d4fe
--- /dev/null
+++ b/lib/matplotlib/tri/_trirefine.py
@@ -0,0 +1,307 @@
+"""
+Mesh refinement for triangular grids.
+"""
+
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.tri._triangulation import Triangulation
+import matplotlib.tri._triinterpolate
+
+
class TriRefiner:
    """
    Abstract base class for classes implementing mesh refinement.

    A TriRefiner encapsulates a Triangulation object and provides tools for
    mesh refinement and interpolation.

    Derived classes must implement:

    - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
      the optional keyword arguments *kwargs* are defined in each
      TriRefiner concrete implementation, and which returns:

      - a refined triangulation,
      - optionally (depending on *return_tri_index*), for each
        point of the refined triangulation: the index of
        the initial triangulation triangle to which it belongs.

    - ``refine_field(z, triinterpolator=None, **kwargs)``, where:

      - *z* array of field values (to refine) defined at the base
        triangulation nodes,
      - *triinterpolator* is an optional `~matplotlib.tri.TriInterpolator`,
      - the other optional keyword arguments *kwargs* are defined in
        each TriRefiner concrete implementation;

      and which returns (as a tuple) a refined triangular mesh and the
      interpolated values of the field at the refined triangulation nodes.
    """

    def __init__(self, triangulation):
        # Validate up front so subclasses can rely on _triangulation being a
        # Triangulation instance.
        _api.check_isinstance(Triangulation, triangulation=triangulation)
        self._triangulation = triangulation
+
+
class UniformTriRefiner(TriRefiner):
    """
    Uniform mesh refinement by recursive subdivisions.

    Parameters
    ----------
    triangulation : `~matplotlib.tri.Triangulation`
        The encapsulated triangulation (to be refined)
    """
    def __init__(self, triangulation):
        super().__init__(triangulation)

    def refine_triangulation(self, return_tri_index=False, subdiv=3):
        """
        Compute an uniformly refined triangulation *refi_triangulation* of
        the encapsulated :attr:`triangulation`.

        This function refines the encapsulated triangulation by splitting each
        father triangle into 4 child sub-triangles built on the edges midside
        nodes, recursing *subdiv* times. In the end, each triangle is hence
        divided into ``4**subdiv`` child triangles.

        Parameters
        ----------
        return_tri_index : bool, default: False
            Whether an index table indicating the father triangle index of each
            point is returned.
        subdiv : int, default: 3
            Recursion level for the subdivision.
            Each triangle is divided into ``4**subdiv`` child triangles;
            hence, the default results in 64 refined subtriangles for each
            triangle of the initial triangulation.

        Returns
        -------
        refi_triangulation : `~matplotlib.tri.Triangulation`
            The refined triangulation.
        found_index : int array
            Index of the initial triangulation containing triangle, for each
            point of *refi_triangulation*.
            Returned only if *return_tri_index* is set to True.
        """
        refi_triangulation = self._triangulation
        ntri = refi_triangulation.triangles.shape[0]

        # Computes the triangulation ancestors numbers in the reference
        # triangulation.
        ancestors = np.arange(ntri, dtype=np.int32)
        # Apply one 4-way subdivision per recursion level; ancestors is
        # carried along so each child keeps its original-triangle index.
        for _ in range(subdiv):
            refi_triangulation, ancestors = self._refine_triangulation_once(
                refi_triangulation, ancestors)
        refi_npts = refi_triangulation.x.shape[0]
        refi_triangles = refi_triangulation.triangles

        # Now we compute found_index table if needed
        if return_tri_index:
            # We have to initialize found_index with -1 because some nodes
            # may very well belong to no triangle at all, e.g., in case of
            # Delaunay Triangulation with DuplicatePointWarning.
            found_index = np.full(refi_npts, -1, dtype=np.int32)
            tri_mask = self._triangulation.mask
            if tri_mask is None:
                found_index[refi_triangles] = np.repeat(ancestors,
                                                        3).reshape(-1, 3)
            else:
                # There is a subtlety here: we want to avoid whenever possible
                # that refined points container is a masked triangle (which
                # would result in artifacts in plots).
                # So we impose the numbering from masked ancestors first,
                # then overwrite it with unmasked ancestor numbers.
                ancestor_mask = tri_mask[ancestors]
                found_index[refi_triangles[ancestor_mask, :]
                            ] = np.repeat(ancestors[ancestor_mask],
                                          3).reshape(-1, 3)
                found_index[refi_triangles[~ancestor_mask, :]
                            ] = np.repeat(ancestors[~ancestor_mask],
                                          3).reshape(-1, 3)
            return refi_triangulation, found_index
        else:
            return refi_triangulation

    def refine_field(self, z, triinterpolator=None, subdiv=3):
        """
        Refine a field defined on the encapsulated triangulation.

        Parameters
        ----------
        z : (npoints,) array-like
            Values of the field to refine, defined at the nodes of the
            encapsulated triangulation. (``n_points`` is the number of points
            in the initial triangulation)
        triinterpolator : `~matplotlib.tri.TriInterpolator`, optional
            Interpolator used for field interpolation. If not specified,
            a `~matplotlib.tri.CubicTriInterpolator` will be used.
        subdiv : int, default: 3
            Recursion level for the subdivision.
            Each triangle is divided into ``4**subdiv`` child triangles.

        Returns
        -------
        refi_tri : `~matplotlib.tri.Triangulation`
            The returned refined triangulation.
        refi_z : 1D array of length: *refi_tri* node count.
            The returned interpolated field (at *refi_tri* nodes).
        """
        # Default to a cubic interpolator when the caller does not supply one.
        if triinterpolator is None:
            interp = matplotlib.tri.CubicTriInterpolator(
                self._triangulation, z)
        else:
            _api.check_isinstance(matplotlib.tri.TriInterpolator,
                                  triinterpolator=triinterpolator)
            interp = triinterpolator

        # Refine the mesh, then evaluate the field at the new nodes; the
        # per-node father-triangle index speeds up the interpolation.
        refi_tri, found_index = self.refine_triangulation(
            subdiv=subdiv, return_tri_index=True)
        refi_z = interp._interpolate_multikeys(
            refi_tri.x, refi_tri.y, tri_index=found_index)[0]
        return refi_tri, refi_z

    @staticmethod
    def _refine_triangulation_once(triangulation, ancestors=None):
        """
        Refine a `.Triangulation` by splitting each triangle into 4
        child-masked_triangles built on the edges midside nodes.

        Masked triangles, if present, are also split, but their children
        returned masked.

        If *ancestors* is not provided, returns only a new triangulation:
        child_triangulation.

        If the array-like key table *ancestor* is given, it shall be of shape
        (ntri,) where ntri is the number of *triangulation* masked_triangles.
        In this case, the function returns
        (child_triangulation, child_ancestors)
        child_ancestors is defined so that the 4 child masked_triangles share
        the same index as their father: child_ancestors.shape = (4 * ntri,).
        """

        x = triangulation.x
        y = triangulation.y

        # According to tri.triangulation doc:
        #         neighbors[i, j] is the triangle that is the neighbor
        #         to the edge from point index masked_triangles[i, j] to point
        #         index masked_triangles[i, (j+1)%3].
        neighbors = triangulation.neighbors
        triangles = triangulation.triangles
        npts = np.shape(x)[0]
        ntri = np.shape(triangles)[0]
        if ancestors is not None:
            ancestors = np.asarray(ancestors)
            if np.shape(ancestors) != (ntri,):
                raise ValueError(
                    "Incompatible shapes provide for triangulation"
                    ".masked_triangles and ancestors: {0} and {1}".format(
                        np.shape(triangles), np.shape(ancestors)))

        # Initiating tables refi_x and refi_y of the refined triangulation
        # points
        # hint: each apex is shared by 2 masked_triangles except the borders.
        borders = np.sum(neighbors == -1)
        added_pts = (3*ntri + borders) // 2
        refi_npts = npts + added_pts
        refi_x = np.zeros(refi_npts)
        refi_y = np.zeros(refi_npts)

        # First part of refi_x, refi_y is just the initial points
        refi_x[:npts] = x
        refi_y[:npts] = y

        # Second part contains the edge midside nodes.
        # Each edge belongs to 1 triangle (if border edge) or is shared by 2
        # masked_triangles (interior edge).
        # We first build 2 * ntri arrays of edge starting nodes (edge_elems,
        # edge_apexes); we then extract only the masters to avoid overlaps.
        # The so-called 'master' is the triangle with biggest index
        # The 'slave' is the triangle with lower index
        # (can be -1 if border edge)
        # For slave and master we will identify the apex pointing to the edge
        # start
        edge_elems = np.tile(np.arange(ntri, dtype=np.int32), 3)
        edge_apexes = np.repeat(np.arange(3, dtype=np.int32), ntri)
        edge_neighbors = neighbors[edge_elems, edge_apexes]
        mask_masters = (edge_elems > edge_neighbors)

        # Identifying the "masters" and adding to refi_x, refi_y vec
        masters = edge_elems[mask_masters]
        apex_masters = edge_apexes[mask_masters]
        # Midside node = average of the edge's two endpoints.
        x_add = (x[triangles[masters, apex_masters]] +
                 x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        y_add = (y[triangles[masters, apex_masters]] +
                 y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        refi_x[npts:] = x_add
        refi_y[npts:] = y_add

        # Building the new masked_triangles; each old masked_triangles hosts
        # 4 new masked_triangles
        # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
        # 3 new_pt_midside
        new_pt_corner = triangles

        # What is the index in refi_x, refi_y of point at middle of apex iapex
        #  of elem ielem ?
        # If ielem is the apex master: simple count, given the way refi_x was
        #  built.
        # If ielem is the apex slave: yet we do not know; but we will soon
        # using the neighbors table.
        new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
        cum_sum = npts
        for imid in range(3):
            mask_st_loc = (imid == apex_masters)
            n_masters_loc = np.sum(mask_st_loc)
            elem_masters_loc = masters[mask_st_loc]
            new_pt_midside[:, imid][elem_masters_loc] = np.arange(
                n_masters_loc, dtype=np.int32) + cum_sum
            cum_sum += n_masters_loc

        # Now dealing with slave elems.
        # for each slave element we identify the master and then the inode
        # once slave_masters is identified, slave_masters_apex is such that:
        # neighbors[slaves_masters, slave_masters_apex] == slaves
        mask_slaves = np.logical_not(mask_masters)
        slaves = edge_elems[mask_slaves]
        slaves_masters = edge_neighbors[mask_slaves]
        # The shared edge is found where neighbors[master] == slave, i.e.
        # where the difference below vanishes (argmin picks that apex).
        diff_table = np.abs(neighbors[slaves_masters, :] -
                            np.outer(slaves, np.ones(3, dtype=np.int32)))
        slave_masters_apex = np.argmin(diff_table, axis=1)
        slaves_apex = edge_apexes[mask_slaves]
        new_pt_midside[slaves, slaves_apex] = new_pt_midside[
            slaves_masters, slave_masters_apex]

        # Builds the 4 child masked_triangles
        child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
        child_triangles[0::4, :] = np.vstack([
            new_pt_corner[:, 0], new_pt_midside[:, 0],
            new_pt_midside[:, 2]]).T
        child_triangles[1::4, :] = np.vstack([
            new_pt_corner[:, 1], new_pt_midside[:, 1],
            new_pt_midside[:, 0]]).T
        child_triangles[2::4, :] = np.vstack([
            new_pt_corner[:, 2], new_pt_midside[:, 2],
            new_pt_midside[:, 1]]).T
        child_triangles[3::4, :] = np.vstack([
            new_pt_midside[:, 0], new_pt_midside[:, 1],
            new_pt_midside[:, 2]]).T
        child_triangulation = Triangulation(refi_x, refi_y, child_triangles)

        # Builds the child mask
        if triangulation.mask is not None:
            child_triangulation.set_mask(np.repeat(triangulation.mask, 4))

        if ancestors is None:
            return child_triangulation
        else:
            return child_triangulation, np.repeat(ancestors, 4)
diff --git a/lib/matplotlib/tri/_tritools.py b/lib/matplotlib/tri/_tritools.py
new file mode 100644
index 000000000000..11b500fcdd8f
--- /dev/null
+++ b/lib/matplotlib/tri/_tritools.py
@@ -0,0 +1,263 @@
+"""
+Tools for triangular grids.
+"""
+
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.tri import Triangulation
+
+
+class TriAnalyzer:
+ """
+ Define basic tools for triangular mesh analysis and improvement.
+
+ A TriAnalyzer encapsulates a `.Triangulation` object and provides basic
+ tools for mesh analysis and mesh improvement.
+
+ Attributes
+ ----------
+ scale_factors
+
+ Parameters
+ ----------
+ triangulation : `~matplotlib.tri.Triangulation`
+ The encapsulated triangulation to analyze.
+ """
+
+ def __init__(self, triangulation):
+ _api.check_isinstance(Triangulation, triangulation=triangulation)
+ self._triangulation = triangulation
+
+ @property
+ def scale_factors(self):
+ """
+ Factors to rescale the triangulation into a unit square.
+
+ Returns
+ -------
+ (float, float)
+ Scaling factors (kx, ky) so that the triangulation
+ ``[triangulation.x * kx, triangulation.y * ky]``
+ fits exactly inside a unit square.
+ """
+ compressed_triangles = self._triangulation.get_masked_triangles()
+ node_used = (np.bincount(np.ravel(compressed_triangles),
+ minlength=self._triangulation.x.size) != 0)
+ return (1 / np.ptp(self._triangulation.x[node_used]),
+ 1 / np.ptp(self._triangulation.y[node_used]))
+
+ def circle_ratios(self, rescale=True):
+ """
+        Return a measure of the flatness of the triangulation's triangles.
+
+        The ratio of the incircle radius over the circumcircle radius is a
+        widely used indicator of a triangle's flatness.
+ It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
+ triangles. Circle ratios below 0.01 denote very flat triangles.
+
+        To avoid unduly low values due to a difference of scale between the 2
+        axes, the triangular mesh can first be rescaled to fit inside a unit
+ square with `scale_factors` (Only if *rescale* is True, which is
+ its default value).
+
+ Parameters
+ ----------
+ rescale : bool, default: True
+ If True, internally rescale (based on `scale_factors`), so that the
+ (unmasked) triangles fit exactly inside a unit square mesh.
+
+ Returns
+ -------
+ masked array
+ Ratio of the incircle radius over the circumcircle radius, for
+ each 'rescaled' triangle of the encapsulated triangulation.
+ Values corresponding to masked triangles are masked out.
+
+ """
+ # Coords rescaling
+ if rescale:
+ (kx, ky) = self.scale_factors
+ else:
+ (kx, ky) = (1.0, 1.0)
+ pts = np.vstack([self._triangulation.x*kx,
+ self._triangulation.y*ky]).T
+ tri_pts = pts[self._triangulation.triangles]
+ # Computes the 3 side lengths
+ a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
+ b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
+ c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
+ a = np.hypot(a[:, 0], a[:, 1])
+ b = np.hypot(b[:, 0], b[:, 1])
+ c = np.hypot(c[:, 0], c[:, 1])
+ # circumcircle and incircle radii
+ s = (a+b+c)*0.5
+ prod = s*(a+b-s)*(a+c-s)*(b+c-s)
+ # We have to deal with flat triangles with infinite circum_radius
+ bool_flat = (prod == 0.)
+ if np.any(bool_flat):
+ # Pathologic flow
+ ntri = tri_pts.shape[0]
+ circum_radius = np.empty(ntri, dtype=np.float64)
+ circum_radius[bool_flat] = np.inf
+ abc = a*b*c
+ circum_radius[~bool_flat] = abc[~bool_flat] / (
+ 4.0*np.sqrt(prod[~bool_flat]))
+ else:
+ # Normal optimized flow
+ circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
+ in_radius = (a*b*c) / (4.0*circum_radius*s)
+ circle_ratio = in_radius/circum_radius
+ mask = self._triangulation.mask
+ if mask is None:
+ return circle_ratio
+ else:
+ return np.ma.array(circle_ratio, mask=mask)
+
+ def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
+ """
+ Eliminate excessively flat border triangles from the triangulation.
+
+        Returns a mask *new_mask* which allows cleaning the encapsulated
+        triangulation of its border-located flat triangles
+ (according to their :meth:`circle_ratios`).
+ This mask is meant to be subsequently applied to the triangulation
+ using `.Triangulation.set_mask`.
+ *new_mask* is an extension of the initial triangulation mask
+ in the sense that an initially masked triangle will remain masked.
+
+ The *new_mask* array is computed recursively; at each step flat
+ triangles are removed only if they share a side with the current mesh
+ border. Thus no new holes in the triangulated domain will be created.
+
+ Parameters
+ ----------
+ min_circle_ratio : float, default: 0.01
+ Border triangles with incircle/circumcircle radii ratio r/R will
+ be removed if r/R < *min_circle_ratio*.
+ rescale : bool, default: True
+ If True, first, internally rescale (based on `scale_factors`) so
+ that the (unmasked) triangles fit exactly inside a unit square
+            mesh. This rescaling accounts for the difference of scale which
+            might exist between the 2 axes.
+
+ Returns
+ -------
+ array of bool
+ Mask to apply to encapsulated triangulation.
+ All the initially masked triangles remain masked in the
+ *new_mask*.
+
+ Notes
+ -----
+ The rationale behind this function is that a Delaunay
+ triangulation - of an unstructured set of points - sometimes contains
+ almost flat triangles at its border, leading to artifacts in plots
+ (especially for high-resolution contouring).
+ Masked with computed *new_mask*, the encapsulated
+ triangulation would contain no more unmasked border triangles
+ with a circle ratio below *min_circle_ratio*, thus improving the
+ mesh quality for subsequent plots or interpolation.
+ """
+ # Recursively computes the mask_current_borders, true if a triangle is
+ # at the border of the mesh OR touching the border through a chain of
+ # invalid aspect ratio masked_triangles.
+ ntri = self._triangulation.triangles.shape[0]
+ mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
+
+ current_mask = self._triangulation.mask
+ if current_mask is None:
+ current_mask = np.zeros(ntri, dtype=bool)
+ valid_neighbors = np.copy(self._triangulation.neighbors)
+ renum_neighbors = np.arange(ntri, dtype=np.int32)
+ nadd = -1
+ while nadd != 0:
+            # The active wavefront is the triangles from the border (unmasked
+            # but with at least 1 neighbor equal to -1)
+ wavefront = (np.min(valid_neighbors, axis=1) == -1) & ~current_mask
+ # The element from the active wavefront will be masked if their
+ # circle ratio is bad.
+ added_mask = wavefront & mask_bad_ratio
+ current_mask = added_mask | current_mask
+ nadd = np.sum(added_mask)
+
+ # now we have to update the tables valid_neighbors
+ valid_neighbors[added_mask, :] = -1
+ renum_neighbors[added_mask] = -1
+ valid_neighbors = np.where(valid_neighbors == -1, -1,
+ renum_neighbors[valid_neighbors])
+
+ return np.ma.filled(current_mask, True)
+
+ def _get_compressed_triangulation(self):
+ """
+ Compress (if masked) the encapsulated triangulation.
+
+ Returns minimal-length triangles array (*compressed_triangles*) and
+ coordinates arrays (*compressed_x*, *compressed_y*) that can still
+ describe the unmasked triangles of the encapsulated triangulation.
+
+ Returns
+ -------
+ compressed_triangles : array-like
+ the returned compressed triangulation triangles
+ compressed_x : array-like
+ the returned compressed triangulation 1st coordinate
+ compressed_y : array-like
+ the returned compressed triangulation 2nd coordinate
+ tri_renum : int array
+ renumbering table to translate the triangle numbers from the
+ encapsulated triangulation into the new (compressed) renumbering.
+ -1 for masked triangles (deleted from *compressed_triangles*).
+ node_renum : int array
+ renumbering table to translate the point numbers from the
+ encapsulated triangulation into the new (compressed) renumbering.
+ -1 for unused points (i.e. those deleted from *compressed_x* and
+ *compressed_y*).
+
+ """
+ # Valid triangles and renumbering
+ tri_mask = self._triangulation.mask
+ compressed_triangles = self._triangulation.get_masked_triangles()
+ ntri = self._triangulation.triangles.shape[0]
+ if tri_mask is not None:
+ tri_renum = self._total_to_compress_renum(~tri_mask)
+ else:
+ tri_renum = np.arange(ntri, dtype=np.int32)
+
+ # Valid nodes and renumbering
+ valid_node = (np.bincount(np.ravel(compressed_triangles),
+ minlength=self._triangulation.x.size) != 0)
+ compressed_x = self._triangulation.x[valid_node]
+ compressed_y = self._triangulation.y[valid_node]
+ node_renum = self._total_to_compress_renum(valid_node)
+
+ # Now renumbering the valid triangles nodes
+ compressed_triangles = node_renum[compressed_triangles]
+
+ return (compressed_triangles, compressed_x, compressed_y, tri_renum,
+ node_renum)
+
+ @staticmethod
+ def _total_to_compress_renum(valid):
+ """
+ Parameters
+ ----------
+ valid : 1D bool array
+ Validity mask.
+
+ Returns
+ -------
+ int array
+ Array so that (`valid_array` being a compressed array
+ based on a `masked_array` with mask ~*valid*):
+
+ - For all i with valid[i] = True:
+ valid_array[renum[i]] = masked_array[i]
+ - For all i with valid[i] = False:
+ renum[i] = -1 (invalid value)
+ """
+ renum = np.full(np.size(valid), -1, dtype=np.int32)
+ n_valid = np.sum(valid)
+ renum[valid] = np.arange(n_valid, dtype=np.int32)
+ return renum
diff --git a/lib/matplotlib/tri/triangulation.py b/lib/matplotlib/tri/triangulation.py
index 00d8da4f4d28..c48b09b280ff 100644
--- a/lib/matplotlib/tri/triangulation.py
+++ b/lib/matplotlib/tri/triangulation.py
@@ -1,240 +1,9 @@
-import numpy as np
-
+from ._triangulation import * # noqa: F401, F403
from matplotlib import _api
-class Triangulation:
- """
- An unstructured triangular grid consisting of npoints points and
- ntri triangles. The triangles can either be specified by the user
- or automatically generated using a Delaunay triangulation.
-
- Parameters
- ----------
- x, y : (npoints,) array-like
- Coordinates of grid points.
- triangles : (ntri, 3) array-like of int, optional
- For each triangle, the indices of the three points that make
- up the triangle, ordered in an anticlockwise manner. If not
- specified, the Delaunay triangulation is calculated.
- mask : (ntri,) array-like of bool, optional
- Which triangles are masked out.
-
- Attributes
- ----------
- triangles : (ntri, 3) array of int
- For each triangle, the indices of the three points that make
- up the triangle, ordered in an anticlockwise manner. If you want to
- take the *mask* into account, use `get_masked_triangles` instead.
- mask : (ntri, 3) array of bool
- Masked out triangles.
- is_delaunay : bool
- Whether the Triangulation is a calculated Delaunay
- triangulation (where *triangles* was not specified) or not.
-
- Notes
- -----
- For a Triangulation to be valid it must not have duplicate points,
- triangles formed from colinear points, or overlapping triangles.
- """
- def __init__(self, x, y, triangles=None, mask=None):
- from matplotlib import _qhull
-
- self.x = np.asarray(x, dtype=np.float64)
- self.y = np.asarray(y, dtype=np.float64)
- if self.x.shape != self.y.shape or self.x.ndim != 1:
- raise ValueError("x and y must be equal-length 1D arrays, but "
- f"found shapes {self.x.shape!r} and "
- f"{self.y.shape!r}")
-
- self.mask = None
- self._edges = None
- self._neighbors = None
- self.is_delaunay = False
-
- if triangles is None:
- # No triangulation specified, so use matplotlib._qhull to obtain
- # Delaunay triangulation.
- self.triangles, self._neighbors = _qhull.delaunay(x, y)
- self.is_delaunay = True
- else:
- # Triangulation specified. Copy, since we may correct triangle
- # orientation.
- try:
- self.triangles = np.array(triangles, dtype=np.int32, order='C')
- except ValueError as e:
- raise ValueError('triangles must be a (N, 3) int array, not '
- f'{triangles!r}') from e
- if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
- raise ValueError(
- 'triangles must be a (N, 3) int array, but found shape '
- f'{self.triangles.shape!r}')
- if self.triangles.max() >= len(self.x):
- raise ValueError(
- 'triangles are indices into the points and must be in the '
- f'range 0 <= i < {len(self.x)} but found value '
- f'{self.triangles.max()}')
- if self.triangles.min() < 0:
- raise ValueError(
- 'triangles are indices into the points and must be in the '
- f'range 0 <= i < {len(self.x)} but found value '
- f'{self.triangles.min()}')
-
- # Underlying C++ object is not created until first needed.
- self._cpp_triangulation = None
-
- # Default TriFinder not created until needed.
- self._trifinder = None
-
- self.set_mask(mask)
-
- def calculate_plane_coefficients(self, z):
- """
- Calculate plane equation coefficients for all unmasked triangles from
- the point (x, y) coordinates and specified z-array of shape (npoints).
- The returned array has shape (npoints, 3) and allows z-value at (x, y)
- position in triangle tri to be calculated using
- ``z = array[tri, 0] * x + array[tri, 1] * y + array[tri, 2]``.
- """
- return self.get_cpp_triangulation().calculate_plane_coefficients(z)
-
- @property
- def edges(self):
- """
- Return integer array of shape (nedges, 2) containing all edges of
- non-masked triangles.
-
- Each row defines an edge by its start point index and end point
- index. Each edge appears only once, i.e. for an edge between points
- *i* and *j*, there will only be either *(i, j)* or *(j, i)*.
- """
- if self._edges is None:
- self._edges = self.get_cpp_triangulation().get_edges()
- return self._edges
-
- def get_cpp_triangulation(self):
- """
- Return the underlying C++ Triangulation object, creating it
- if necessary.
- """
- from matplotlib import _tri
- if self._cpp_triangulation is None:
- self._cpp_triangulation = _tri.Triangulation(
- self.x, self.y, self.triangles, self.mask, self._edges,
- self._neighbors, not self.is_delaunay)
- return self._cpp_triangulation
-
- def get_masked_triangles(self):
- """
- Return an array of triangles taking the mask into account.
- """
- if self.mask is not None:
- return self.triangles[~self.mask]
- else:
- return self.triangles
-
- @staticmethod
- def get_from_args_and_kwargs(*args, **kwargs):
- """
- Return a Triangulation object from the args and kwargs, and
- the remaining args and kwargs with the consumed values removed.
-
- There are two alternatives: either the first argument is a
- Triangulation object, in which case it is returned, or the args
- and kwargs are sufficient to create a new Triangulation to
- return. In the latter case, see Triangulation.__init__ for
- the possible args and kwargs.
- """
- if isinstance(args[0], Triangulation):
- triangulation, *args = args
- if 'triangles' in kwargs:
- _api.warn_external(
- "Passing the keyword 'triangles' has no effect when also "
- "passing a Triangulation")
- if 'mask' in kwargs:
- _api.warn_external(
- "Passing the keyword 'mask' has no effect when also "
- "passing a Triangulation")
- else:
- x, y, triangles, mask, args, kwargs = \
- Triangulation._extract_triangulation_params(args, kwargs)
- triangulation = Triangulation(x, y, triangles, mask)
- return triangulation, args, kwargs
-
- @staticmethod
- def _extract_triangulation_params(args, kwargs):
- x, y, *args = args
- # Check triangles in kwargs then args.
- triangles = kwargs.pop('triangles', None)
- from_args = False
- if triangles is None and args:
- triangles = args[0]
- from_args = True
- if triangles is not None:
- try:
- triangles = np.asarray(triangles, dtype=np.int32)
- except ValueError:
- triangles = None
- if triangles is not None and (triangles.ndim != 2 or
- triangles.shape[1] != 3):
- triangles = None
- if triangles is not None and from_args:
- args = args[1:] # Consumed first item in args.
- # Check for mask in kwargs.
- mask = kwargs.pop('mask', None)
- return x, y, triangles, mask, args, kwargs
-
- def get_trifinder(self):
- """
- Return the default `matplotlib.tri.TriFinder` of this
- triangulation, creating it if necessary. This allows the same
- TriFinder object to be easily shared.
- """
- if self._trifinder is None:
- # Default TriFinder class.
- from matplotlib.tri.trifinder import TrapezoidMapTriFinder
- self._trifinder = TrapezoidMapTriFinder(self)
- return self._trifinder
-
- @property
- def neighbors(self):
- """
- Return integer array of shape (ntri, 3) containing neighbor triangles.
-
- For each triangle, the indices of the three triangles that
- share the same edges, or -1 if there is no such neighboring
- triangle. ``neighbors[i, j]`` is the triangle that is the neighbor
- to the edge from point index ``triangles[i, j]`` to point index
- ``triangles[i, (j+1)%3]``.
- """
- if self._neighbors is None:
- self._neighbors = self.get_cpp_triangulation().get_neighbors()
- return self._neighbors
-
- def set_mask(self, mask):
- """
- Set or clear the mask array.
-
- Parameters
- ----------
- mask : None or bool array of length ntri
- """
- if mask is None:
- self.mask = None
- else:
- self.mask = np.asarray(mask, dtype=bool)
- if self.mask.shape != (self.triangles.shape[0],):
- raise ValueError('mask array must have same length as '
- 'triangles array')
-
- # Set mask in C++ Triangulation.
- if self._cpp_triangulation is not None:
- self._cpp_triangulation.set_mask(self.mask)
-
- # Clear derived fields so they are recalculated when needed.
- self._edges = None
- self._neighbors = None
-
- # Recalculate TriFinder if it exists.
- if self._trifinder is not None:
- self._trifinder._initialize()
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/tricontour.py b/lib/matplotlib/tri/tricontour.py
index df3b44d941ef..37406451d376 100644
--- a/lib/matplotlib/tri/tricontour.py
+++ b/lib/matplotlib/tri/tricontour.py
@@ -1,267 +1,9 @@
-import numpy as np
+from ._tricontour import * # noqa: F401, F403
+from matplotlib import _api
-from matplotlib import _docstring
-from matplotlib.contour import ContourSet
-from matplotlib.tri.triangulation import Triangulation
-
-@_docstring.dedent_interpd
-class TriContourSet(ContourSet):
- """
- Create and store a set of contour lines or filled regions for
- a triangular grid.
-
- This class is typically not instantiated directly by the user but by
- `~.Axes.tricontour` and `~.Axes.tricontourf`.
-
- %(contour_set_attributes)s
- """
- def __init__(self, ax, *args, **kwargs):
- """
- Draw triangular grid contour lines or filled regions,
- depending on whether keyword arg *filled* is False
- (default) or True.
-
- The first argument of the initializer must be an `~.axes.Axes`
- object. The remaining arguments and keyword arguments
- are described in the docstring of `~.Axes.tricontour`.
- """
- super().__init__(ax, *args, **kwargs)
-
- def _process_args(self, *args, **kwargs):
- """
- Process args and kwargs.
- """
- if isinstance(args[0], TriContourSet):
- C = args[0]._contour_generator
- if self.levels is None:
- self.levels = args[0].levels
- self.zmin = args[0].zmin
- self.zmax = args[0].zmax
- self._mins = args[0]._mins
- self._maxs = args[0]._maxs
- else:
- from matplotlib import _tri
- tri, z = self._contour_args(args, kwargs)
- C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
- self._mins = [tri.x.min(), tri.y.min()]
- self._maxs = [tri.x.max(), tri.y.max()]
-
- self._contour_generator = C
- return kwargs
-
- def _contour_args(self, args, kwargs):
- tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
- **kwargs)
- z = np.ma.asarray(args[0])
- if z.shape != tri.x.shape:
- raise ValueError('z array must have same length as triangulation x'
- ' and y arrays')
-
- # z values must be finite, only need to check points that are included
- # in the triangulation.
- z_check = z[np.unique(tri.get_masked_triangles())]
- if np.ma.is_masked(z_check):
- raise ValueError('z must not contain masked points within the '
- 'triangulation')
- if not np.isfinite(z_check).all():
- raise ValueError('z array must not contain non-finite values '
- 'within the triangulation')
-
- z = np.ma.masked_invalid(z, copy=False)
- self.zmax = float(z_check.max())
- self.zmin = float(z_check.min())
- if self.logscale and self.zmin <= 0:
- func = 'contourf' if self.filled else 'contour'
- raise ValueError(f'Cannot {func} log of negative values.')
- self._process_contour_level_args(args[1:])
- return (tri, z)
-
-
-_docstring.interpd.update(_tricontour_doc="""
-Draw contour %%(type)s on an unstructured triangular grid.
-
-Call signatures::
-
- %%(func)s(triangulation, Z, [levels], ...)
- %%(func)s(x, y, Z, [levels], *, [triangles=triangles], [mask=mask], ...)
-
-The triangular grid can be specified either by passing a `.Triangulation`
-object as the first parameter, or by passing the points *x*, *y* and
-optionally the *triangles* and a *mask*. See `.Triangulation` for an
-explanation of these parameters. If neither of *triangulation* or
-*triangles* are given, the triangulation is calculated on the fly.
-
-It is possible to pass *triangles* positionally, i.e.
-``%%(func)s(x, y, triangles, Z, ...)``. However, this is discouraged. For more
-clarity, pass *triangles* via keyword argument.
-
-Parameters
-----------
-triangulation : `.Triangulation`, optional
- An already created triangular grid.
-
-x, y, triangles, mask
- Parameters defining the triangular grid. See `.Triangulation`.
- This is mutually exclusive with specifying *triangulation*.
-
-Z : array-like
- The height values over which the contour is drawn. Color-mapping is
- controlled by *cmap*, *norm*, *vmin*, and *vmax*.
-
-levels : int or array-like, optional
- Determines the number and positions of the contour lines / regions.
-
- If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries to
- automatically choose no more than *n+1* "nice" contour levels between
- *vmin* and *vmax*.
-
- If array-like, draw contour lines at the specified levels. The values must
- be in increasing order.
-
-Returns
--------
-`~matplotlib.tri.TriContourSet`
-
-Other Parameters
-----------------
-colors : color string or sequence of colors, optional
- The colors of the levels, i.e., the contour %%(type)s.
-
- The sequence is cycled for the levels in ascending order. If the sequence
- is shorter than the number of levels, it is repeated.
-
- As a shortcut, single color strings may be used in place of one-element
- lists, i.e. ``'red'`` instead of ``['red']`` to color all levels with the
- same color. This shortcut does only work for color strings, not for other
- ways of specifying colors.
-
- By default (value *None*), the colormap specified by *cmap* will be used.
-
-alpha : float, default: 1
- The alpha blending value, between 0 (transparent) and 1 (opaque).
-
-%(cmap_doc)s
-
- This parameter is ignored if *colors* is set.
-
-%(norm_doc)s
-
- This parameter is ignored if *colors* is set.
-
-%(vmin_vmax_doc)s
-
- If *vmin* or *vmax* are not given, the default color scaling is based on
- *levels*.
-
- This parameter is ignored if *colors* is set.
-
-origin : {*None*, 'upper', 'lower', 'image'}, default: None
- Determines the orientation and exact position of *Z* by specifying the
- position of ``Z[0, 0]``. This is only relevant, if *X*, *Y* are not given.
-
- - *None*: ``Z[0, 0]`` is at X=0, Y=0 in the lower left corner.
- - 'lower': ``Z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.
- - 'upper': ``Z[0, 0]`` is at X=N+0.5, Y=0.5 in the upper left corner.
- - 'image': Use the value from :rc:`image.origin`.
-
-extent : (x0, x1, y0, y1), optional
- If *origin* is not *None*, then *extent* is interpreted as in `.imshow`: it
- gives the outer pixel boundaries. In this case, the position of Z[0, 0] is
- the center of the pixel, not a corner. If *origin* is *None*, then
- (*x0*, *y0*) is the position of Z[0, 0], and (*x1*, *y1*) is the position
- of Z[-1, -1].
-
- This argument is ignored if *X* and *Y* are specified in the call to
- contour.
-
-locator : ticker.Locator subclass, optional
- The locator is used to determine the contour levels if they are not given
- explicitly via *levels*.
- Defaults to `~.ticker.MaxNLocator`.
-
-extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
- Determines the ``%%(func)s``-coloring of values that are outside the
- *levels* range.
-
- If 'neither', values outside the *levels* range are not colored. If 'min',
- 'max' or 'both', color the values below, above or below and above the
- *levels* range.
-
- Values below ``min(levels)`` and above ``max(levels)`` are mapped to the
- under/over values of the `.Colormap`. Note that most colormaps do not have
- dedicated colors for these by default, so that the over and under values
- are the edge values of the colormap. You may want to set these values
- explicitly using `.Colormap.set_under` and `.Colormap.set_over`.
-
- .. note::
-
- An existing `.TriContourSet` does not get notified if properties of its
- colormap are changed. Therefore, an explicit call to
- `.ContourSet.changed()` is needed after modifying the colormap. The
- explicit call can be left out, if a colorbar is assigned to the
- `.TriContourSet` because it internally calls `.ContourSet.changed()`.
-
-xunits, yunits : registered units, optional
- Override axis units by specifying an instance of a
- :class:`matplotlib.units.ConversionInterface`.
-
-antialiased : bool, optional
- Enable antialiasing, overriding the defaults. For
- filled contours, the default is *True*. For line contours,
- it is taken from :rc:`lines.antialiased`.""" % _docstring.interpd.params)
-
-
-@_docstring.Substitution(func='tricontour', type='lines')
-@_docstring.dedent_interpd
-def tricontour(ax, *args, **kwargs):
- """
- %(_tricontour_doc)s
-
- linewidths : float or array-like, default: :rc:`contour.linewidth`
- The line width of the contour lines.
-
- If a number, all levels will be plotted with this linewidth.
-
- If a sequence, the levels in ascending order will be plotted with
- the linewidths in the order specified.
-
- If None, this falls back to :rc:`lines.linewidth`.
-
- linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional
- If *linestyles* is *None*, the default is 'solid' unless the lines are
- monochrome. In that case, negative contours will take their linestyle
- from :rc:`contour.negative_linestyle` setting.
-
- *linestyles* can also be an iterable of the above strings specifying a
- set of linestyles to be used. If this iterable is shorter than the
- number of contour levels it will be repeated as necessary.
- """
- kwargs['filled'] = False
- return TriContourSet(ax, *args, **kwargs)
-
-
-@_docstring.Substitution(func='tricontourf', type='regions')
-@_docstring.dedent_interpd
-def tricontourf(ax, *args, **kwargs):
- """
- %(_tricontour_doc)s
-
- hatches : list[str], optional
- A list of cross hatch patterns to use on the filled areas.
- If None, no hatching will be added to the contour.
- Hatching is supported in the PostScript, PDF, SVG and Agg
- backends only.
-
- Notes
- -----
- `.tricontourf` fills intervals that are closed at the top; that is, for
- boundaries *z1* and *z2*, the filled region is::
-
- z1 < Z <= z2
-
- except for the lowest interval, which is closed on both sides (i.e. it
- includes the lowest value).
- """
- kwargs['filled'] = True
- return TriContourSet(ax, *args, **kwargs)
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/trifinder.py b/lib/matplotlib/tri/trifinder.py
index e06b84c0d974..1aff5c9d3280 100644
--- a/lib/matplotlib/tri/trifinder.py
+++ b/lib/matplotlib/tri/trifinder.py
@@ -1,93 +1,9 @@
-import numpy as np
-
+from ._trifinder import * # noqa: F401, F403
from matplotlib import _api
-from matplotlib.tri import Triangulation
-
-
-class TriFinder:
- """
- Abstract base class for classes used to find the triangles of a
- Triangulation in which (x, y) points lie.
-
- Rather than instantiate an object of a class derived from TriFinder, it is
- usually better to use the function `.Triangulation.get_trifinder`.
-
- Derived classes implement __call__(x, y) where x and y are array-like point
- coordinates of the same shape.
- """
-
- def __init__(self, triangulation):
- _api.check_isinstance(Triangulation, triangulation=triangulation)
- self._triangulation = triangulation
-
-
-class TrapezoidMapTriFinder(TriFinder):
- """
- `~matplotlib.tri.TriFinder` class implemented using the trapezoid
- map algorithm from the book "Computational Geometry, Algorithms and
- Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
- and O. Schwarzkopf.
-
- The triangulation must be valid, i.e. it must not have duplicate points,
- triangles formed from colinear points, or overlapping triangles. The
- algorithm has some tolerance to triangles formed from colinear points, but
- this should not be relied upon.
- """
-
- def __init__(self, triangulation):
- from matplotlib import _tri
- super().__init__(triangulation)
- self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
- triangulation.get_cpp_triangulation())
- self._initialize()
-
- def __call__(self, x, y):
- """
- Return an array containing the indices of the triangles in which the
- specified *x*, *y* points lie, or -1 for points that do not lie within
- a triangle.
-
- *x*, *y* are array-like x and y coordinates of the same shape and any
- number of dimensions.
-
- Returns integer array with the same shape and *x* and *y*.
- """
- x = np.asarray(x, dtype=np.float64)
- y = np.asarray(y, dtype=np.float64)
- if x.shape != y.shape:
- raise ValueError("x and y must be array-like with the same shape")
-
- # C++ does the heavy lifting, and expects 1D arrays.
- indices = (self._cpp_trifinder.find_many(x.ravel(), y.ravel())
- .reshape(x.shape))
- return indices
-
- def _get_tree_stats(self):
- """
- Return a python list containing the statistics about the node tree:
- 0: number of nodes (tree size)
- 1: number of unique nodes
- 2: number of trapezoids (tree leaf nodes)
- 3: number of unique trapezoids
- 4: maximum parent count (max number of times a node is repeated in
- tree)
- 5: maximum depth of tree (one more than the maximum number of
- comparisons needed to search through the tree)
- 6: mean of all trapezoid depths (one more than the average number
- of comparisons needed to search through the tree)
- """
- return self._cpp_trifinder.get_tree_stats()
- def _initialize(self):
- """
- Initialize the underlying C++ object. Can be called multiple times if,
- for example, the triangulation is modified.
- """
- self._cpp_trifinder.initialize()
- def _print_tree(self):
- """
- Print a text representation of the node tree, which is useful for
- debugging purposes.
- """
- self._cpp_trifinder.print_tree()
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/triinterpolate.py b/lib/matplotlib/tri/triinterpolate.py
index 48643880e713..3112bd38e6c6 100644
--- a/lib/matplotlib/tri/triinterpolate.py
+++ b/lib/matplotlib/tri/triinterpolate.py
@@ -1,1574 +1,9 @@
-"""
-Interpolation inside triangular grids.
-"""
-
-import numpy as np
-
+from ._triinterpolate import * # noqa: F401, F403
from matplotlib import _api
-from matplotlib.tri import Triangulation
-from matplotlib.tri.trifinder import TriFinder
-from matplotlib.tri.tritools import TriAnalyzer
-
-__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
-
-
-class TriInterpolator:
- """
- Abstract base class for classes used to interpolate on a triangular grid.
-
- Derived classes implement the following methods:
-
- - ``__call__(x, y)``,
- where x, y are array-like point coordinates of the same shape, and
- that returns a masked array of the same shape containing the
- interpolated z-values.
-
- - ``gradient(x, y)``,
- where x, y are array-like point coordinates of the same
- shape, and that returns a list of 2 masked arrays of the same shape
- containing the 2 derivatives of the interpolator (derivatives of
- interpolated z values with respect to x and y).
- """
-
- def __init__(self, triangulation, z, trifinder=None):
- _api.check_isinstance(Triangulation, triangulation=triangulation)
- self._triangulation = triangulation
-
- self._z = np.asarray(z)
- if self._z.shape != self._triangulation.x.shape:
- raise ValueError("z array must have same length as triangulation x"
- " and y arrays")
-
- _api.check_isinstance((TriFinder, None), trifinder=trifinder)
- self._trifinder = trifinder or self._triangulation.get_trifinder()
-
- # Default scaling factors : 1.0 (= no scaling)
- # Scaling may be used for interpolations for which the order of
- # magnitude of x, y has an impact on the interpolant definition.
- # Please refer to :meth:`_interpolate_multikeys` for details.
- self._unit_x = 1.0
- self._unit_y = 1.0
-
- # Default triangle renumbering: None (= no renumbering)
- # Renumbering may be used to avoid unnecessary computations
- # if complex calculations are done inside the Interpolator.
- # Please refer to :meth:`_interpolate_multikeys` for details.
- self._tri_renum = None
-
- # __call__ and gradient docstrings are shared by all subclasses
- # (except, if needed, relevant additions).
- # However these methods are only implemented in subclasses to avoid
- # confusion in the documentation.
- _docstring__call__ = """
- Returns a masked array containing interpolated values at the specified
- (x, y) points.
-
- Parameters
- ----------
- x, y : array-like
- x and y coordinates of the same shape and any number of
- dimensions.
-
- Returns
- -------
- np.ma.array
- Masked array of the same shape as *x* and *y*; values corresponding
- to (*x*, *y*) points outside of the triangulation are masked out.
-
- """
-
- _docstringgradient = r"""
- Returns a list of 2 masked arrays containing interpolated derivatives
- at the specified (x, y) points.
-
- Parameters
- ----------
- x, y : array-like
- x and y coordinates of the same shape and any number of
- dimensions.
-
- Returns
- -------
- dzdx, dzdy : np.ma.array
- 2 masked arrays of the same shape as *x* and *y*; values
- corresponding to (x, y) points outside of the triangulation
- are masked out.
- The first returned array contains the values of
- :math:`\frac{\partial z}{\partial x}` and the second those of
- :math:`\frac{\partial z}{\partial y}`.
-
- """
-
- def _interpolate_multikeys(self, x, y, tri_index=None,
- return_keys=('z',)):
- """
- Versatile (private) method defined for all TriInterpolators.
-
- :meth:`_interpolate_multikeys` is a wrapper around method
- :meth:`_interpolate_single_key` (to be defined in the child
- subclasses).
- :meth:`_interpolate_single_key actually performs the interpolation,
- but only for 1-dimensional inputs and at valid locations (inside
- unmasked triangles of the triangulation).
-
- The purpose of :meth:`_interpolate_multikeys` is to implement the
- following common tasks needed in all subclasses implementations:
-
- - calculation of containing triangles
- - dealing with more than one interpolation request at the same
- location (e.g., if the 2 derivatives are requested, it is
- unnecessary to compute the containing triangles twice)
- - scaling according to self._unit_x, self._unit_y
- - dealing with points outside of the grid (with fill value np.nan)
- - dealing with multi-dimensional *x*, *y* arrays: flattening for
- :meth:`_interpolate_params` call and final reshaping.
-
- (Note that np.vectorize could do most of those things very well for
- you, but it does it by function evaluations over successive tuples of
- the input arrays. Therefore, this tends to be more time consuming than
- using optimized numpy functions - e.g., np.dot - which can be used
- easily on the flattened inputs, in the child-subclass methods
- :meth:`_interpolate_single_key`.)
-
- It is guaranteed that the calls to :meth:`_interpolate_single_key`
- will be done with flattened (1-d) array-like input parameters *x*, *y*
- and with flattened, valid `tri_index` arrays (no -1 index allowed).
-
- Parameters
- ----------
- x, y : array-like
- x and y coordinates where interpolated values are requested.
- tri_index : array-like of int, optional
- Array of the containing triangle indices, same shape as
- *x* and *y*. Defaults to None. If None, these indices
- will be computed by a TriFinder instance.
- (Note: For point outside the grid, tri_index[ipt] shall be -1).
- return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
- Defines the interpolation arrays to return, and in which order.
-
- Returns
- -------
- list of arrays
- Each array-like contains the expected interpolated values in the
- order defined by *return_keys* parameter.
- """
- # Flattening and rescaling inputs arrays x, y
- # (initial shape is stored for output)
- x = np.asarray(x, dtype=np.float64)
- y = np.asarray(y, dtype=np.float64)
- sh_ret = x.shape
- if x.shape != y.shape:
- raise ValueError("x and y shall have same shapes."
- " Given: {0} and {1}".format(x.shape, y.shape))
- x = np.ravel(x)
- y = np.ravel(y)
- x_scaled = x/self._unit_x
- y_scaled = y/self._unit_y
- size_ret = np.size(x_scaled)
-
- # Computes & ravels the element indexes, extract the valid ones.
- if tri_index is None:
- tri_index = self._trifinder(x, y)
- else:
- if tri_index.shape != sh_ret:
- raise ValueError(
- "tri_index array is provided and shall"
- " have same shape as x and y. Given: "
- "{0} and {1}".format(tri_index.shape, sh_ret))
- tri_index = np.ravel(tri_index)
-
- mask_in = (tri_index != -1)
- if self._tri_renum is None:
- valid_tri_index = tri_index[mask_in]
- else:
- valid_tri_index = self._tri_renum[tri_index[mask_in]]
- valid_x = x_scaled[mask_in]
- valid_y = y_scaled[mask_in]
-
- ret = []
- for return_key in return_keys:
- # Find the return index associated with the key.
- try:
- return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
- except KeyError as err:
- raise ValueError("return_keys items shall take values in"
- " {'z', 'dzdx', 'dzdy'}") from err
-
- # Sets the scale factor for f & df components
- scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
-
- # Computes the interpolation
- ret_loc = np.empty(size_ret, dtype=np.float64)
- ret_loc[~mask_in] = np.nan
- ret_loc[mask_in] = self._interpolate_single_key(
- return_key, valid_tri_index, valid_x, valid_y) * scale
- ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
-
- return ret
-
- def _interpolate_single_key(self, return_key, tri_index, x, y):
- """
- Interpolate at points belonging to the triangulation
- (inside an unmasked triangles).
-
- Parameters
- ----------
- return_key : {'z', 'dzdx', 'dzdy'}
- The requested values (z or its derivatives).
- tri_index : 1D int array
- Valid triangle index (cannot be -1).
- x, y : 1D arrays, same shape as `tri_index`
- Valid locations where interpolation is requested.
-
- Returns
- -------
- 1-d array
- Returned array of the same size as *tri_index*
- """
- raise NotImplementedError("TriInterpolator subclasses" +
- "should implement _interpolate_single_key!")
-
-
-class LinearTriInterpolator(TriInterpolator):
- """
- Linear interpolator on a triangular grid.
-
- Each triangle is represented by a plane so that an interpolated value at
- point (x, y) lies on the plane of the triangle containing (x, y).
- Interpolated values are therefore continuous across the triangulation, but
- their first derivatives are discontinuous at edges between triangles.
-
- Parameters
- ----------
- triangulation : `~matplotlib.tri.Triangulation`
- The triangulation to interpolate over.
- z : (npoints,) array-like
- Array of values, defined at grid points, to interpolate between.
- trifinder : `~matplotlib.tri.TriFinder`, optional
- If this is not specified, the Triangulation's default TriFinder will
- be used by calling `.Triangulation.get_trifinder`.
-
- Methods
- -------
- `__call__` (x, y) : Returns interpolated values at (x, y) points.
- `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
-
- """
- def __init__(self, triangulation, z, trifinder=None):
- super().__init__(triangulation, z, trifinder)
-
- # Store plane coefficients for fast interpolation calculations.
- self._plane_coefficients = \
- self._triangulation.calculate_plane_coefficients(self._z)
-
- def __call__(self, x, y):
- return self._interpolate_multikeys(x, y, tri_index=None,
- return_keys=('z',))[0]
- __call__.__doc__ = TriInterpolator._docstring__call__
-
- def gradient(self, x, y):
- return self._interpolate_multikeys(x, y, tri_index=None,
- return_keys=('dzdx', 'dzdy'))
- gradient.__doc__ = TriInterpolator._docstringgradient
-
- def _interpolate_single_key(self, return_key, tri_index, x, y):
- _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
- if return_key == 'z':
- return (self._plane_coefficients[tri_index, 0]*x +
- self._plane_coefficients[tri_index, 1]*y +
- self._plane_coefficients[tri_index, 2])
- elif return_key == 'dzdx':
- return self._plane_coefficients[tri_index, 0]
- else: # 'dzdy'
- return self._plane_coefficients[tri_index, 1]
-
-
-class CubicTriInterpolator(TriInterpolator):
- r"""
- Cubic interpolator on a triangular grid.
-
- In one-dimension - on a segment - a cubic interpolating function is
- defined by the values of the function and its derivative at both ends.
- This is almost the same in 2D inside a triangle, except that the values
- of the function and its 2 derivatives have to be defined at each triangle
- node.
-
- The CubicTriInterpolator takes the value of the function at each node -
- provided by the user - and internally computes the value of the
- derivatives, resulting in a smooth interpolation.
- (As a special feature, the user can also impose the value of the
- derivatives at each node, but this is not supposed to be the common
- usage.)
-
- Parameters
- ----------
- triangulation : `~matplotlib.tri.Triangulation`
- The triangulation to interpolate over.
- z : (npoints,) array-like
- Array of values, defined at grid points, to interpolate between.
- kind : {'min_E', 'geom', 'user'}, optional
- Choice of the smoothing algorithm, in order to compute
- the interpolant derivatives (defaults to 'min_E'):
-
- - if 'min_E': (default) The derivatives at each node is computed
- to minimize a bending energy.
- - if 'geom': The derivatives at each node is computed as a
- weighted average of relevant triangle normals. To be used for
- speed optimization (large grids).
- - if 'user': The user provides the argument *dz*, no computation
- is hence needed.
-
- trifinder : `~matplotlib.tri.TriFinder`, optional
- If not specified, the Triangulation's default TriFinder will
- be used by calling `.Triangulation.get_trifinder`.
- dz : tuple of array-likes (dzdx, dzdy), optional
- Used only if *kind* ='user'. In this case *dz* must be provided as
- (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
- are the interpolant first derivatives at the *triangulation* points.
-
- Methods
- -------
- `__call__` (x, y) : Returns interpolated values at (x, y) points.
- `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
-
- Notes
- -----
- This note is a bit technical and details how the cubic interpolation is
- computed.
-
- The interpolation is based on a Clough-Tocher subdivision scheme of
- the *triangulation* mesh (to make it clearer, each triangle of the
- grid will be divided in 3 child-triangles, and on each child triangle
- the interpolated function is a cubic polynomial of the 2 coordinates).
- This technique originates from FEM (Finite Element Method) analysis;
- the element used is a reduced Hsieh-Clough-Tocher (HCT)
- element. Its shape functions are described in [1]_.
- The assembled function is guaranteed to be C1-smooth, i.e. it is
- continuous and its first derivatives are also continuous (this
- is easy to show inside the triangles but is also true when crossing the
- edges).
-
- In the default case (*kind* ='min_E'), the interpolant minimizes a
- curvature energy on the functional space generated by the HCT element
- shape functions - with imposed values but arbitrary derivatives at each
- node. The minimized functional is the integral of the so-called total
- curvature (implementation based on an algorithm from [2]_ - PCG sparse
- solver):
-
- .. math::
-
- E(z) = \frac{1}{2} \int_{\Omega} \left(
- \left( \frac{\partial^2{z}}{\partial{x}^2} \right)^2 +
- \left( \frac{\partial^2{z}}{\partial{y}^2} \right)^2 +
- 2\left( \frac{\partial^2{z}}{\partial{y}\partial{x}} \right)^2
- \right) dx\,dy
-
- If the case *kind* ='geom' is chosen by the user, a simple geometric
- approximation is used (weighted average of the triangle normal
- vectors), which could improve speed on very large grids.
-
- References
- ----------
- .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
- Hsieh-Clough-Tocher triangles, complete or reduced.",
- International Journal for Numerical Methods in Engineering,
- 17(5):784 - 789. 2.01.
- .. [2] C.T. Kelley, "Iterative Methods for Optimization".
-
- """
- def __init__(self, triangulation, z, kind='min_E', trifinder=None,
- dz=None):
- super().__init__(triangulation, z, trifinder)
-
- # Loads the underlying c++ _triangulation.
- # (During loading, reordering of triangulation._triangles may occur so
- # that all final triangles are now anti-clockwise)
- self._triangulation.get_cpp_triangulation()
-
- # To build the stiffness matrix and avoid zero-energy spurious modes
- # we will only store internally the valid (unmasked) triangles and
- # the necessary (used) points coordinates.
- # 2 renumbering tables need to be computed and stored:
- # - a triangle renum table in order to translate the result from a
- # TriFinder instance into the internal stored triangle number.
- # - a node renum table to overwrite the self._z values into the new
- # (used) node numbering.
- tri_analyzer = TriAnalyzer(self._triangulation)
- (compressed_triangles, compressed_x, compressed_y, tri_renum,
- node_renum) = tri_analyzer._get_compressed_triangulation()
- self._triangles = compressed_triangles
- self._tri_renum = tri_renum
- # Taking into account the node renumbering in self._z:
- valid_node = (node_renum != -1)
- self._z[node_renum[valid_node]] = self._z[valid_node]
-
- # Computing scale factors
- self._unit_x = np.ptp(compressed_x)
- self._unit_y = np.ptp(compressed_y)
- self._pts = np.column_stack([compressed_x / self._unit_x,
- compressed_y / self._unit_y])
- # Computing triangle points
- self._tris_pts = self._pts[self._triangles]
- # Computing eccentricities
- self._eccs = self._compute_tri_eccentricities(self._tris_pts)
- # Computing dof estimations for HCT triangle shape function
- _api.check_in_list(['user', 'geom', 'min_E'], kind=kind)
- self._dof = self._compute_dof(kind, dz=dz)
- # Loading HCT element
- self._ReferenceElement = _ReducedHCT_Element()
-
- def __call__(self, x, y):
- return self._interpolate_multikeys(x, y, tri_index=None,
- return_keys=('z',))[0]
- __call__.__doc__ = TriInterpolator._docstring__call__
-
- def gradient(self, x, y):
- return self._interpolate_multikeys(x, y, tri_index=None,
- return_keys=('dzdx', 'dzdy'))
- gradient.__doc__ = TriInterpolator._docstringgradient
-
- def _interpolate_single_key(self, return_key, tri_index, x, y):
- _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
- tris_pts = self._tris_pts[tri_index]
- alpha = self._get_alpha_vec(x, y, tris_pts)
- ecc = self._eccs[tri_index]
- dof = np.expand_dims(self._dof[tri_index], axis=1)
- if return_key == 'z':
- return self._ReferenceElement.get_function_values(
- alpha, ecc, dof)
- else: # 'dzdx', 'dzdy'
- J = self._get_jacobian(tris_pts)
- dzdx = self._ReferenceElement.get_function_derivatives(
- alpha, J, ecc, dof)
- if return_key == 'dzdx':
- return dzdx[:, 0, 0]
- else:
- return dzdx[:, 1, 0]
-
- def _compute_dof(self, kind, dz=None):
- """
- Compute and return nodal dofs according to kind.
-
- Parameters
- ----------
- kind : {'min_E', 'geom', 'user'}
- Choice of the _DOF_estimator subclass to estimate the gradient.
- dz : tuple of array-likes (dzdx, dzdy), optional
- Used only if *kind*=user; in this case passed to the
- :class:`_DOF_estimator_user`.
-
- Returns
- -------
- array-like, shape (npts, 2)
- Estimation of the gradient at triangulation nodes (stored as
- degree of freedoms of reduced-HCT triangle elements).
- """
- if kind == 'user':
- if dz is None:
- raise ValueError("For a CubicTriInterpolator with "
- "*kind*='user', a valid *dz* "
- "argument is expected.")
- TE = _DOF_estimator_user(self, dz=dz)
- elif kind == 'geom':
- TE = _DOF_estimator_geom(self)
- else: # 'min_E', checked in __init__
- TE = _DOF_estimator_min_E(self)
- return TE.compute_dof_from_df()
-
- @staticmethod
- def _get_alpha_vec(x, y, tris_pts):
- """
- Fast (vectorized) function to compute barycentric coordinates alpha.
-
- Parameters
- ----------
- x, y : array-like of dim 1 (shape (nx,))
- Coordinates of the points whose points barycentric coordinates are
- requested.
- tris_pts : array like of dim 3 (shape: (nx, 3, 2))
- Coordinates of the containing triangles apexes.
-
- Returns
- -------
- array of dim 2 (shape (nx, 3))
- Barycentric coordinates of the points inside the containing
- triangles.
- """
- ndim = tris_pts.ndim-2
-
- a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
- b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
- abT = np.stack([a, b], axis=-1)
- ab = _transpose_vectorized(abT)
- OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]
-
- metric = ab @ abT
- # Here we try to deal with the colinear cases.
- # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
- # meaning that we will still return a set of valid barycentric
- # coordinates.
- metric_inv = _pseudo_inv22sym_vectorized(metric)
- Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim))
- ksi = metric_inv @ Covar
- alpha = _to_matrix_vectorized([
- [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
- return alpha
-
- @staticmethod
- def _get_jacobian(tris_pts):
- """
- Fast (vectorized) function to compute triangle jacobian matrix.
-
- Parameters
- ----------
- tris_pts : array like of dim 3 (shape: (nx, 3, 2))
- Coordinates of the containing triangles apexes.
-
- Returns
- -------
- array of dim 3 (shape (nx, 2, 2))
- Barycentric coordinates of the points inside the containing
- triangles.
- J[itri, :, :] is the jacobian matrix at apex 0 of the triangle
- itri, so that the following (matrix) relationship holds:
- [dz/dksi] = [J] x [dz/dx]
- with x: global coordinates
- ksi: element parametric coordinates in triangle first apex
- local basis.
- """
- a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
- b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
- J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
- [b[:, 0], b[:, 1]]])
- return J
-
- @staticmethod
- def _compute_tri_eccentricities(tris_pts):
- """
- Compute triangle eccentricities.
-
- Parameters
- ----------
- tris_pts : array like of dim 3 (shape: (nx, 3, 2))
- Coordinates of the triangles apexes.
-
- Returns
- -------
- array like of dim 2 (shape: (nx, 3))
- The so-called eccentricity parameters [1] needed for HCT triangular
- element.
- """
- a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)
- b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)
- c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)
- # Do not use np.squeeze, this is dangerous if only one triangle
- # in the triangulation...
- dot_a = (_transpose_vectorized(a) @ a)[:, 0, 0]
- dot_b = (_transpose_vectorized(b) @ b)[:, 0, 0]
- dot_c = (_transpose_vectorized(c) @ c)[:, 0, 0]
- # Note that this line will raise a warning for dot_a, dot_b or dot_c
- # zeros, but we choose not to support triangles with duplicate points.
- return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
- [(dot_a-dot_c) / dot_b],
- [(dot_b-dot_a) / dot_c]])
-
-
-# FEM element used for interpolation and for solving minimisation
-# problem (Reduced HCT element)
-class _ReducedHCT_Element:
- """
- Implementation of reduced HCT triangular element with explicit shape
- functions.
-
- Computes z, dz, d2z and the element stiffness matrix for bending energy:
- E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)
-
- *** Reference for the shape functions: ***
- [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or
- reduced.
- Michel Bernadou, Kamal Hassan
- International Journal for Numerical Methods in Engineering.
- 17(5):784 - 789. 2.01
-
- *** Element description: ***
- 9 dofs: z and dz given at 3 apex
- C1 (conform)
-
- """
- # 1) Loads matrices to generate shape functions as a function of
- # triangle eccentricities - based on [1] p.11 '''
- M = np.array([
- [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
- [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
- [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
- [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
- [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
- [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
- M0 = np.array([
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
- [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
- M1 = np.array([
- [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
- M2 = np.array([
- [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
- [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
-
- # 2) Loads matrices to rotate components of gradient & Hessian
- # vectors in the reference basis of triangle first apex (a0)
- rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
- [ 0., 1.], [-1., -1.],
- [-1., -1.], [ 1., 0.]])
-
- rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
- [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
- [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
-
- # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2
- # exact integral - 3 points on each subtriangles.
- # NOTE: as the 2nd derivative is discontinuous , we really need those 9
- # points!
- n_gauss = 9
- gauss_pts = np.array([[13./18., 4./18., 1./18.],
- [ 4./18., 13./18., 1./18.],
- [ 7./18., 7./18., 4./18.],
- [ 1./18., 13./18., 4./18.],
- [ 1./18., 4./18., 13./18.],
- [ 4./18., 7./18., 7./18.],
- [ 4./18., 1./18., 13./18.],
- [13./18., 1./18., 4./18.],
- [ 7./18., 4./18., 7./18.]], dtype=np.float64)
- gauss_w = np.ones([9], dtype=np.float64) / 9.
-
- # 4) Stiffness matrix for curvature energy
- E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
-
- # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
- J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
- J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
-
- def get_function_values(self, alpha, ecc, dofs):
- """
- Parameters
- ----------
- alpha : is a (N x 3 x 1) array (array of column-matrices) of
- barycentric coordinates,
- ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
- eccentricities,
- dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
- degrees of freedom.
-
- Returns
- -------
- Returns the N-array of interpolated function values.
- """
- subtri = np.argmin(alpha, axis=1)[:, 0]
- ksi = _roll_vectorized(alpha, -subtri, axis=0)
- E = _roll_vectorized(ecc, -subtri, axis=0)
- x = ksi[:, 0, 0]
- y = ksi[:, 1, 0]
- z = ksi[:, 2, 0]
- x_sq = x*x
- y_sq = y*y
- z_sq = z*z
- V = _to_matrix_vectorized([
- [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
- [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
- prod = self.M @ V
- prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V)
- prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V)
- prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V)
- s = _roll_vectorized(prod, 3*subtri, axis=0)
- return (dofs @ s)[:, 0, 0]
-
- def get_function_derivatives(self, alpha, J, ecc, dofs):
- """
- Parameters
- ----------
- *alpha* is a (N x 3 x 1) array (array of column-matrices of
- barycentric coordinates)
- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
- triangle first apex)
- *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
- eccentricities)
- *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
- degrees of freedom.
-
- Returns
- -------
- Returns the values of interpolated function derivatives [dz/dx, dz/dy]
- in global coordinates at locations alpha, as a column-matrices of
- shape (N x 2 x 1).
- """
- subtri = np.argmin(alpha, axis=1)[:, 0]
- ksi = _roll_vectorized(alpha, -subtri, axis=0)
- E = _roll_vectorized(ecc, -subtri, axis=0)
- x = ksi[:, 0, 0]
- y = ksi[:, 1, 0]
- z = ksi[:, 2, 0]
- x_sq = x*x
- y_sq = y*y
- z_sq = z*z
- dV = _to_matrix_vectorized([
- [ -3.*x_sq, -3.*x_sq],
- [ 3.*y_sq, 0.],
- [ 0., 3.*z_sq],
- [ -2.*x*z, -2.*x*z+x_sq],
- [-2.*x*y+x_sq, -2.*x*y],
- [ 2.*x*y-y_sq, -y_sq],
- [ 2.*y*z, y_sq],
- [ z_sq, 2.*y*z],
- [ -z_sq, 2.*x*z-z_sq],
- [ x*z-y*z, x*y-y*z]])
- # Puts back dV in first apex basis
- dV = dV @ _extract_submatrices(
- self.rotate_dV, subtri, block_size=2, axis=0)
-
- prod = self.M @ dV
- prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ dV)
- prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ dV)
- prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ dV)
- dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
- dfdksi = dofs @ dsdksi
- # In global coordinates:
- # Here we try to deal with the simplest colinear cases, returning a
- # null matrix.
- J_inv = _safe_inv22_vectorized(J)
- dfdx = J_inv @ _transpose_vectorized(dfdksi)
- return dfdx
-
- def get_function_hessians(self, alpha, J, ecc, dofs):
- """
- Parameters
- ----------
- *alpha* is a (N x 3 x 1) array (array of column-matrices) of
- barycentric coordinates
- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
- triangle first apex)
- *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
- eccentricities
- *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
- degrees of freedom.
-
- Returns
- -------
- Returns the values of interpolated function 2nd-derivatives
- [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
- as a column-matrices of shape (N x 3 x 1).
- """
- d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
- d2fdksi2 = dofs @ d2sdksi2
- H_rot = self.get_Hrot_from_J(J)
- d2fdx2 = d2fdksi2 @ H_rot
- return _transpose_vectorized(d2fdx2)
-
- def get_d2Sidksij2(self, alpha, ecc):
- """
- Parameters
- ----------
- *alpha* is a (N x 3 x 1) array (array of column-matrices) of
- barycentric coordinates
- *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
- eccentricities
-
- Returns
- -------
- Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions
- expressed in covariant coordinates in first apex basis.
- """
- subtri = np.argmin(alpha, axis=1)[:, 0]
- ksi = _roll_vectorized(alpha, -subtri, axis=0)
- E = _roll_vectorized(ecc, -subtri, axis=0)
- x = ksi[:, 0, 0]
- y = ksi[:, 1, 0]
- z = ksi[:, 2, 0]
- d2V = _to_matrix_vectorized([
- [ 6.*x, 6.*x, 6.*x],
- [ 6.*y, 0., 0.],
- [ 0., 6.*z, 0.],
- [ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
- [2.*y-4.*x, 2.*y, 2.*y-2.*x],
- [2.*x-4.*y, 0., -2.*y],
- [ 2.*z, 0., 2.*y],
- [ 0., 2.*y, 2.*z],
- [ 0., 2.*x-4.*z, -2.*z],
- [ -2.*z, -2.*y, x-y-z]])
- # Puts back d2V in first apex basis
- d2V = d2V @ _extract_submatrices(
- self.rotate_d2V, subtri, block_size=3, axis=0)
- prod = self.M @ d2V
- prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ d2V)
- prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ d2V)
- prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ d2V)
- d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
- return d2sdksi2
-
- def get_bending_matrices(self, J, ecc):
- """
- Parameters
- ----------
- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
- triangle first apex)
- *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
- eccentricities
-
- Returns
- -------
- Returns the element K matrices for bending energy expressed in
- GLOBAL nodal coordinates.
- K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
- tri_J is needed to rotate dofs from local basis to global basis
- """
- n = np.size(ecc, 0)
-
- # 1) matrix to rotate dofs in global coordinates
- J1 = self.J0_to_J1 @ J
- J2 = self.J0_to_J2 @ J
- DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
- DOF_rot[:, 0, 0] = 1
- DOF_rot[:, 3, 3] = 1
- DOF_rot[:, 6, 6] = 1
- DOF_rot[:, 1:3, 1:3] = J
- DOF_rot[:, 4:6, 4:6] = J1
- DOF_rot[:, 7:9, 7:9] = J2
-
- # 2) matrix to rotate Hessian in global coordinates.
- H_rot, area = self.get_Hrot_from_J(J, return_area=True)
-
- # 3) Computes stiffness matrix
- # Gauss quadrature.
- K = np.zeros([n, 9, 9], dtype=np.float64)
- weights = self.gauss_w
- pts = self.gauss_pts
- for igauss in range(self.n_gauss):
- alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
- alpha = np.expand_dims(alpha, 2)
- weight = weights[igauss]
- d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
- d2Skdx2 = d2Skdksi2 @ H_rot
- K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2))
-
- # 4) With nodal (not elem) dofs
- K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot
-
- # 5) Need the area to compute total element energy
- return _scalar_vectorized(area, K)
-
- def get_Hrot_from_J(self, J, return_area=False):
- """
- Parameters
- ----------
- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
- triangle first apex)
-
- Returns
- -------
- Returns H_rot used to rotate Hessian from local basis of first apex,
- to global coordinates.
- if *return_area* is True, returns also the triangle area (0.5*det(J))
- """
- # Here we try to deal with the simplest colinear cases; a null
- # energy and area is imposed.
- J_inv = _safe_inv22_vectorized(J)
- Ji00 = J_inv[:, 0, 0]
- Ji11 = J_inv[:, 1, 1]
- Ji10 = J_inv[:, 1, 0]
- Ji01 = J_inv[:, 0, 1]
- H_rot = _to_matrix_vectorized([
- [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
- [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
- [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
- if not return_area:
- return H_rot
- else:
- area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
- return H_rot, area
-
- def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
- """
- Build K and F for the following elliptic formulation:
- minimization of curvature energy with value of function at node
- imposed and derivatives 'free'.
-
- Build the global Kff matrix in cco format.
- Build the full Ff vec Ff = - Kfc x Uc.
-
- Parameters
- ----------
- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
- triangle first apex)
- *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
- eccentricities
- *triangles* is a (N x 3) array of nodes indexes.
- *Uc* is (N x 3) array of imposed displacements at nodes
-
- Returns
- -------
- (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
- (row, col) entries must be summed.
- Ff: force vector - dim npts * 3
- """
- ntri = np.size(ecc, 0)
- vec_range = np.arange(ntri, dtype=np.int32)
- c_indices = np.full(ntri, -1, dtype=np.int32) # for unused dofs, -1
- f_dof = [1, 2, 4, 5, 7, 8]
- c_dof = [0, 3, 6]
-
- # vals, rows and cols indices in global dof numbering
- f_dof_indices = _to_matrix_vectorized([[
- c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
- c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
- c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
-
- expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
- f_row_indices = _transpose_vectorized(expand_indices @ f_dof_indices)
- f_col_indices = expand_indices @ f_dof_indices
- K_elem = self.get_bending_matrices(J, ecc)
-
- # Extracting sub-matrices
- # Explanation & notations:
- # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx)
- # * Subscript c denotes 'condensated' (imposed) degrees of freedom
- # (i.e. z at all nodes)
- # * F = [Ff, Fc] is the force vector
- # * U = [Uf, Uc] is the imposed dof vector
- # [ Kff Kfc ]
- # * K = [ ] is the laplacian stiffness matrix
- # [ Kcf Kff ]
- # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
-
- # Computing Kff stiffness matrix in sparse coo format
- Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
- Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
- Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
-
- # Computing Ff force vector in sparse coo format
- Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
- Uc_elem = np.expand_dims(Uc, axis=2)
- Ff_elem = -(Kfc_elem @ Uc_elem)[:, :, 0]
- Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
-
- # Extracting Ff force vector in dense format
- # We have to sum duplicate indices - using bincount
- Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
- return Kff_rows, Kff_cols, Kff_vals, Ff
-
-
-# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
-# _DOF_estimator_min_E
-# Private classes used to compute the degree of freedom of each triangular
-# element for the TriCubicInterpolator.
-class _DOF_estimator:
- """
- Abstract base class for classes used to estimate a function's first
- derivatives, and deduce the dofs for a CubicTriInterpolator using a
- reduced HCT element formulation.
-
- Derived classes implement ``compute_df(self, **kwargs)``, returning
- ``np.vstack([dfx, dfy]).T`` where ``dfx, dfy`` are the estimation of the 2
- gradient coordinates.
- """
- def __init__(self, interpolator, **kwargs):
- _api.check_isinstance(CubicTriInterpolator, interpolator=interpolator)
- self._pts = interpolator._pts
- self._tris_pts = interpolator._tris_pts
- self.z = interpolator._z
- self._triangles = interpolator._triangles
- (self._unit_x, self._unit_y) = (interpolator._unit_x,
- interpolator._unit_y)
- self.dz = self.compute_dz(**kwargs)
- self.compute_dof_from_df()
-
- def compute_dz(self, **kwargs):
- raise NotImplementedError
-
- def compute_dof_from_df(self):
- """
- Compute reduced-HCT elements degrees of freedom, from the gradient.
- """
- J = CubicTriInterpolator._get_jacobian(self._tris_pts)
- tri_z = self.z[self._triangles]
- tri_dz = self.dz[self._triangles]
- tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
- return tri_dof
-
- @staticmethod
- def get_dof_vec(tri_z, tri_dz, J):
- """
- Compute the dof vector of a triangle, from the value of f, df and
- of the local Jacobian at each node.
-
- Parameters
- ----------
- tri_z : shape (3,) array
- f nodal values.
- tri_dz : shape (3, 2) array
- df/dx, df/dy nodal values.
- J
- Jacobian matrix in local basis of apex 0.
-
- Returns
- -------
- dof : shape (9,) array
- For each apex ``iapex``::
-
- dof[iapex*3+0] = f(Ai)
- dof[iapex*3+1] = df(Ai).(AiAi+)
- dof[iapex*3+2] = df(Ai).(AiAi-)
- """
- npt = tri_z.shape[0]
- dof = np.zeros([npt, 9], dtype=np.float64)
- J1 = _ReducedHCT_Element.J0_to_J1 @ J
- J2 = _ReducedHCT_Element.J0_to_J2 @ J
-
- col0 = J @ np.expand_dims(tri_dz[:, 0, :], axis=2)
- col1 = J1 @ np.expand_dims(tri_dz[:, 1, :], axis=2)
- col2 = J2 @ np.expand_dims(tri_dz[:, 2, :], axis=2)
-
- dfdksi = _to_matrix_vectorized([
- [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
- [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
- dof[:, 0:7:3] = tri_z
- dof[:, 1:8:3] = dfdksi[:, 0]
- dof[:, 2:9:3] = dfdksi[:, 1]
- return dof
-
-
-class _DOF_estimator_user(_DOF_estimator):
- """dz is imposed by user; accounts for scaling if any."""
-
- def compute_dz(self, dz):
- (dzdx, dzdy) = dz
- dzdx = dzdx * self._unit_x
- dzdy = dzdy * self._unit_y
- return np.vstack([dzdx, dzdy]).T
-
-
-class _DOF_estimator_geom(_DOF_estimator):
- """Fast 'geometric' approximation, recommended for large arrays."""
-
- def compute_dz(self):
- """
- self.df is computed as weighted average of _triangles sharing a common
- node. On each triangle itri f is first assumed linear (= ~f), which
- allows to compute d~f[itri]
- Then the following approximation of df nodal values is then proposed:
- f[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
- The weighted coeff. w[itri] are proportional to the angle of the
- triangle itri at apex ipt
- """
- el_geom_w = self.compute_geom_weights()
- el_geom_grad = self.compute_geom_grads()
-
- # Sum of weights coeffs
- w_node_sum = np.bincount(np.ravel(self._triangles),
- weights=np.ravel(el_geom_w))
-
- # Sum of weighted df = (dfx, dfy)
- dfx_el_w = np.empty_like(el_geom_w)
- dfy_el_w = np.empty_like(el_geom_w)
- for iapex in range(3):
- dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
- dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
- dfx_node_sum = np.bincount(np.ravel(self._triangles),
- weights=np.ravel(dfx_el_w))
- dfy_node_sum = np.bincount(np.ravel(self._triangles),
- weights=np.ravel(dfy_el_w))
-
- # Estimation of df
- dfx_estim = dfx_node_sum/w_node_sum
- dfy_estim = dfy_node_sum/w_node_sum
- return np.vstack([dfx_estim, dfy_estim]).T
-
- def compute_geom_weights(self):
- """
- Build the (nelems, 3) weights coeffs of _triangles angles,
- renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
- """
- weights = np.zeros([np.size(self._triangles, 0), 3])
- tris_pts = self._tris_pts
- for ipt in range(3):
- p0 = tris_pts[:, ipt % 3, :]
- p1 = tris_pts[:, (ipt+1) % 3, :]
- p2 = tris_pts[:, (ipt-1) % 3, :]
- alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
- alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
- # In the below formula we could take modulo 2. but
- # modulo 1. is safer regarding round-off errors (flat triangles).
- angle = np.abs(((alpha2-alpha1) / np.pi) % 1)
- # Weight proportional to angle up np.pi/2; null weight for
- # degenerated cases 0 and np.pi (note that *angle* is normalized
- # by np.pi).
- weights[:, ipt] = 0.5 - np.abs(angle-0.5)
- return weights
-
- def compute_geom_grads(self):
- """
- Compute the (global) gradient component of f assumed linear (~f).
- returns array df of shape (nelems, 2)
- df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM = dM.T^-1 x dz
- """
- tris_pts = self._tris_pts
- tris_f = self.z[self._triangles]
-
- dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
- dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
- dM = np.dstack([dM1, dM2])
- # Here we try to deal with the simplest colinear cases: a null
- # gradient is assumed in this case.
- dM_inv = _safe_inv22_vectorized(dM)
-
- dZ1 = tris_f[:, 1] - tris_f[:, 0]
- dZ2 = tris_f[:, 2] - tris_f[:, 0]
- dZ = np.vstack([dZ1, dZ2]).T
- df = np.empty_like(dZ)
-
- # With np.einsum: could be ej,eji -> ej
- df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
- df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
- return df
-
-
-class _DOF_estimator_min_E(_DOF_estimator_geom):
- """
- The 'smoothest' approximation, df is computed through global minimization
- of the bending energy:
- E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
- """
- def __init__(self, Interpolator):
- self._eccs = Interpolator._eccs
- super().__init__(Interpolator)
-
- def compute_dz(self):
- """
- Elliptic solver for bending energy minimization.
- Uses a dedicated 'toy' sparse Jacobi PCG solver.
- """
- # Initial guess for iterative PCG solver.
- dz_init = super().compute_dz()
- Uf0 = np.ravel(dz_init)
-
- reference_element = _ReducedHCT_Element()
- J = CubicTriInterpolator._get_jacobian(self._tris_pts)
- eccs = self._eccs
- triangles = self._triangles
- Uc = self.z[self._triangles]
-
- # Building stiffness matrix and force vector in coo format
- Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
- J, eccs, triangles, Uc)
-
- # Building sparse matrix and solving minimization problem
- # We could use scipy.sparse direct solver; however to avoid this
- # external dependency an implementation of a simple PCG solver with
- # a simple diagonal Jacobi preconditioner is implemented.
- tol = 1.e-10
- n_dof = Ff.shape[0]
- Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
- shape=(n_dof, n_dof))
- Kff_coo.compress_csc()
- Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
- # If the PCG did not converge, we return the best guess between Uf0
- # and Uf.
- err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
- if err0 < err:
- # Maybe a good occasion to raise a warning here ?
- _api.warn_external("In TriCubicInterpolator initialization, "
- "PCG sparse solver did not converge after "
- "1000 iterations. `geom` approximation is "
- "used instead of `min_E`")
- Uf = Uf0
-
- # Building dz from Uf
- dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
- dz[:, 0] = Uf[::2]
- dz[:, 1] = Uf[1::2]
- return dz
-
-
-# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
-# a PCG sparse solver for (symmetric) elliptic problems.
-class _Sparse_Matrix_coo:
- def __init__(self, vals, rows, cols, shape):
- """
- Create a sparse matrix in coo format.
- *vals*: arrays of values of non-null entries of the matrix
- *rows*: int arrays of rows of non-null entries of the matrix
- *cols*: int arrays of cols of non-null entries of the matrix
- *shape*: 2-tuple (n, m) of matrix shape
- """
- self.n, self.m = shape
- self.vals = np.asarray(vals, dtype=np.float64)
- self.rows = np.asarray(rows, dtype=np.int32)
- self.cols = np.asarray(cols, dtype=np.int32)
-
- def dot(self, V):
- """
- Dot product of self by a vector *V* in sparse-dense to dense format
- *V* dense vector of shape (self.m,).
- """
- assert V.shape == (self.m,)
- return np.bincount(self.rows,
- weights=self.vals*V[self.cols],
- minlength=self.m)
-
- def compress_csc(self):
- """
- Compress rows, cols, vals / summing duplicates. Sort for csc format.
- """
- _, unique, indices = np.unique(
- self.rows + self.n*self.cols,
- return_index=True, return_inverse=True)
- self.rows = self.rows[unique]
- self.cols = self.cols[unique]
- self.vals = np.bincount(indices, weights=self.vals)
-
- def compress_csr(self):
- """
- Compress rows, cols, vals / summing duplicates. Sort for csr format.
- """
- _, unique, indices = np.unique(
- self.m*self.rows + self.cols,
- return_index=True, return_inverse=True)
- self.rows = self.rows[unique]
- self.cols = self.cols[unique]
- self.vals = np.bincount(indices, weights=self.vals)
-
- def to_dense(self):
- """
- Return a dense matrix representing self, mainly for debugging purposes.
- """
- ret = np.zeros([self.n, self.m], dtype=np.float64)
- nvals = self.vals.size
- for i in range(nvals):
- ret[self.rows[i], self.cols[i]] += self.vals[i]
- return ret
-
- def __str__(self):
- return self.to_dense().__str__()
-
- @property
- def diag(self):
- """Return the (dense) vector of the diagonal elements."""
- in_diag = (self.rows == self.cols)
- diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
- diag[self.rows[in_diag]] = self.vals[in_diag]
- return diag
-
-
-def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
- """
- Use Preconditioned Conjugate Gradient iteration to solve A x = b
- A simple Jacobi (diagonal) preconditioner is used.
-
- Parameters
- ----------
- A : _Sparse_Matrix_coo
- *A* must have been compressed before by compress_csc or
- compress_csr method.
- b : array
- Right hand side of the linear system.
- x0 : array, optional
- Starting guess for the solution. Defaults to the zero vector.
- tol : float, optional
- Tolerance to achieve. The algorithm terminates when the relative
- residual is below tol. Default is 1e-10.
- maxiter : int, optional
- Maximum number of iterations. Iteration will stop after *maxiter*
- steps even if the specified tolerance has not been achieved. Defaults
- to 1000.
-
- Returns
- -------
- x : array
- The converged solution.
- err : float
- The absolute error np.linalg.norm(A.dot(x) - b)
- """
- n = b.size
- assert A.n == n
- assert A.m == n
- b_norm = np.linalg.norm(b)
-
- # Jacobi pre-conditioner
- kvec = A.diag
- # For diag elem < 1e-6 we keep 1e-6.
- kvec = np.maximum(kvec, 1e-6)
-
- # Initial guess
- if x0 is None:
- x = np.zeros(n)
- else:
- x = x0
-
- r = b - A.dot(x)
- w = r/kvec
-
- p = np.zeros(n)
- beta = 0.0
- rho = np.dot(r, w)
- k = 0
-
- # Following C. T. Kelley
- while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
- p = w + beta*p
- z = A.dot(p)
- alpha = rho/np.dot(p, z)
- r = r - alpha*z
- w = r/kvec
- rhoold = rho
- rho = np.dot(r, w)
- x = x + alpha*p
- beta = rho/rhoold
- # err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
- k += 1
- err = np.linalg.norm(A.dot(x) - b)
- return x, err
-
-
-# The following private functions:
-# :func:`_safe_inv22_vectorized`
-# :func:`_pseudo_inv22sym_vectorized`
-# :func:`_scalar_vectorized`
-# :func:`_transpose_vectorized`
-# :func:`_roll_vectorized`
-# :func:`_to_matrix_vectorized`
-# :func:`_extract_submatrices`
-# provide fast numpy implementation of some standard operations on arrays of
-# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
-
-# Development note: Dealing with pathologic 'flat' triangles in the
-# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions
-# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
-#
-# Goals:
-# 1) The CubicTriInterpolator should be able to handle flat or almost flat
-# triangles without raising an error,
-# 2) These degenerated triangles should have no impact on the automatic dof
-# calculation (associated with null weight for the _DOF_estimator_geom and
-# with null energy for the _DOF_estimator_min_E),
-# 3) Linear patch test should be passed exactly on degenerated meshes,
-# 4) Interpolation (with :meth:`_interpolate_single_key` or
-# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
-# the pathologic triangles, to interact correctly with a TriRefiner class.
-#
-# Difficulties:
-# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
-# *metric* (the metric tensor = J x J.T). Computation of the local
-# tangent plane is also problematic.
-#
-# Implementation:
-# Most of the time, when computing the inverse of a rank-deficient matrix it
-# is safe to simply return the null matrix (which is the implementation in
-# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
-# enforced by:
-# - null area hence null energy in :class:`_DOF_estimator_min_E`
-# - angles close or equal to 0 or np.pi hence null weight in
-# :class:`_DOF_estimator_geom`.
-# Note that the function angle -> weight is continuous and maximum for an
-# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
-# The exception is the computation of barycentric coordinates, which is done
-# by inversion of the *metric* matrix. In this case, we need to compute a set
-# of valid coordinates (1 among numerous possibilities), to ensure point 4).
-# We benefit here from the symmetry of metric = J x J.T, which makes it easier
-# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
-def _safe_inv22_vectorized(M):
- """
- Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient
- matrices.
-
- *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
- """
- _api.check_shape((None, 2, 2), M=M)
- M_inv = np.empty_like(M)
- prod1 = M[:, 0, 0]*M[:, 1, 1]
- delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
-
- # We set delta_inv to 0. in case of a rank deficient matrix; a
- # rank-deficient input matrix *M* will lead to a null matrix in output
- rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
- if np.all(rank2):
- # Normal 'optimized' flow.
- delta_inv = 1./delta
- else:
- # 'Pathologic' flow.
- delta_inv = np.zeros(M.shape[0])
- delta_inv[rank2] = 1./delta[rank2]
-
- M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
- M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
- M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
- M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
- return M_inv
-
-
-def _pseudo_inv22sym_vectorized(M):
- """
- Inversion of arrays of (2, 2) SYMMETRIC matrices; returns the
- (Moore-Penrose) pseudo-inverse for rank-deficient matrices.
-
- In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
- projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
- In case M is of rank 0, we return the null matrix.
-
- *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
- """
- _api.check_shape((None, 2, 2), M=M)
- M_inv = np.empty_like(M)
- prod1 = M[:, 0, 0]*M[:, 1, 1]
- delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
- rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
-
- if np.all(rank2):
- # Normal 'optimized' flow.
- M_inv[:, 0, 0] = M[:, 1, 1] / delta
- M_inv[:, 0, 1] = -M[:, 0, 1] / delta
- M_inv[:, 1, 0] = -M[:, 1, 0] / delta
- M_inv[:, 1, 1] = M[:, 0, 0] / delta
- else:
- # 'Pathologic' flow.
- # Here we have to deal with 2 sub-cases
- # 1) First sub-case: matrices of rank 2:
- delta = delta[rank2]
- M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
- M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
- M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
- M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
- # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
- rank01 = ~rank2
- tr = M[rank01, 0, 0] + M[rank01, 1, 1]
- tr_zeros = (np.abs(tr) < 1.e-8)
- sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
- # sq_tr_inv = 1. / tr**2
- M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
- M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
- M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
- M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
-
- return M_inv
-
-
-def _scalar_vectorized(scalar, M):
- """
- Scalar product between scalars and matrices.
- """
- return scalar[:, np.newaxis, np.newaxis]*M
-
-
-def _transpose_vectorized(M):
- """
- Transposition of an array of matrices *M*.
- """
- return np.transpose(M, [0, 2, 1])
-
-
-def _roll_vectorized(M, roll_indices, axis):
- """
- Roll an array of matrices along *axis* (0: rows, 1: columns) according to
- an array of indices *roll_indices*.
- """
- assert axis in [0, 1]
- ndim = M.ndim
- assert ndim == 3
- ndim_roll = roll_indices.ndim
- assert ndim_roll == 1
- sh = M.shape
- r, c = sh[-2:]
- assert sh[0] == roll_indices.shape[0]
- vec_indices = np.arange(sh[0], dtype=np.int32)
-
- # Builds the rolled matrix
- M_roll = np.empty_like(M)
- if axis == 0:
- for ir in range(r):
- for ic in range(c):
- M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
- else: # 1
- for ir in range(r):
- for ic in range(c):
- M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
- return M_roll
-
-
-def _to_matrix_vectorized(M):
- """
- Build an array of matrices from individuals np.arrays of identical shapes.
-
- Parameters
- ----------
- M
- ncols-list of nrows-lists of shape sh.
-
- Returns
- -------
- M_res : np.array of shape (sh, nrow, ncols)
- *M_res* satisfies ``M_res[..., i, j] = M[i][j]``.
- """
- assert isinstance(M, (tuple, list))
- assert all(isinstance(item, (tuple, list)) for item in M)
- c_vec = np.asarray([len(item) for item in M])
- assert np.all(c_vec-c_vec[0] == 0)
- r = len(M)
- c = c_vec[0]
- M00 = np.asarray(M[0][0])
- dt = M00.dtype
- sh = [M00.shape[0], r, c]
- M_ret = np.empty(sh, dtype=dt)
- for irow in range(r):
- for icol in range(c):
- M_ret[:, irow, icol] = np.asarray(M[irow][icol])
- return M_ret
-
-
-def _extract_submatrices(M, block_indices, block_size, axis):
- """
- Extract selected blocks of a matrices *M* depending on parameters
- *block_indices* and *block_size*.
-
- Returns the array of extracted matrices *Mres* so that ::
-
- M_res[..., ir, :] = M[(block_indices*block_size+ir), :]
- """
- assert block_indices.ndim == 1
- assert axis in [0, 1]
-
- r, c = M.shape
- if axis == 0:
- sh = [block_indices.shape[0], block_size, c]
- else: # 1
- sh = [block_indices.shape[0], r, block_size]
- dt = M.dtype
- M_res = np.empty(sh, dtype=dt)
- if axis == 0:
- for ir in range(block_size):
- M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
- else: # 1
- for ic in range(block_size):
- M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
- return M_res
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/tripcolor.py b/lib/matplotlib/tri/tripcolor.py
index f0155d2a29e8..0da87891810d 100644
--- a/lib/matplotlib/tri/tripcolor.py
+++ b/lib/matplotlib/tri/tripcolor.py
@@ -1,154 +1,9 @@
-import numpy as np
-
+from ._tripcolor import * # noqa: F401, F403
from matplotlib import _api
-from matplotlib.collections import PolyCollection, TriMesh
-from matplotlib.colors import Normalize
-from matplotlib.tri.triangulation import Triangulation
-
-
-def tripcolor(ax, *args, alpha=1.0, norm=None, cmap=None, vmin=None,
- vmax=None, shading='flat', facecolors=None, **kwargs):
- """
- Create a pseudocolor plot of an unstructured triangular grid.
-
- Call signatures::
-
- tripcolor(triangulation, C, *, ...)
- tripcolor(x, y, C, *, [triangles=triangles], [mask=mask], ...)
-
- The triangular grid can be specified either by passing a `.Triangulation`
- object as the first parameter, or by passing the points *x*, *y* and
- optionally the *triangles* and a *mask*. See `.Triangulation` for an
- explanation of these parameters.
-
- It is possible to pass the triangles positionally, i.e.
- ``tripcolor(x, y, triangles, C, ...)``. However, this is discouraged.
- For more clarity, pass *triangles* via keyword argument.
-
- If neither of *triangulation* or *triangles* are given, the triangulation
- is calculated on the fly. In this case, it does not make sense to provide
- colors at the triangle faces via *C* or *facecolors* because there are
- multiple possible triangulations for a group of points and you don't know
- which triangles will be constructed.
-
- Parameters
- ----------
- triangulation : `.Triangulation`
- An already created triangular grid.
- x, y, triangles, mask
- Parameters defining the triangular grid. See `.Triangulation`.
- This is mutually exclusive with specifying *triangulation*.
- C : array-like
- The color values, either for the points or for the triangles. Which one
- is automatically inferred from the length of *C*, i.e. does it match
- the number of points or the number of triangles. If there are the same
- number of points and triangles in the triangulation it is assumed that
- color values are defined at points; to force the use of color values at
- triangles use the keyword argument ``facecolors=C`` instead of just
- ``C``.
- This parameter is position-only.
- facecolors : array-like, optional
- Can be used alternatively to *C* to specify colors at the triangle
- faces. This parameter takes precedence over *C*.
- shading : {'flat', 'gouraud'}, default: 'flat'
- If 'flat' and the color values *C* are defined at points, the color
- values used for each triangle are from the mean C of the triangle's
- three points. If *shading* is 'gouraud' then color values must be
- defined at points.
- other_parameters
- All other parameters are the same as for `~.Axes.pcolor`.
- """
- _api.check_in_list(['flat', 'gouraud'], shading=shading)
-
- tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
-
- # Parse the color to be in one of (the other variable will be None):
- # - facecolors: if specified at the triangle faces
- # - point_colors: if specified at the points
- if facecolors is not None:
- if args:
- _api.warn_external(
- "Positional parameter C has no effect when the keyword "
- "facecolors is given")
- point_colors = None
- if len(facecolors) != len(tri.triangles):
- raise ValueError("The length of facecolors must match the number "
- "of triangles")
- else:
- # Color from positional parameter C
- if not args:
- raise TypeError(
- "tripcolor() missing 1 required positional argument: 'C'; or "
- "1 required keyword-only argument: 'facecolors'")
- elif len(args) > 1:
- _api.warn_deprecated(
- "3.6", message=f"Additional positional parameters "
- f"{args[1:]!r} are ignored; support for them is deprecated "
- f"since %(since)s and will be removed %(removal)s")
- C = np.asarray(args[0])
- if len(C) == len(tri.x):
- # having this before the len(tri.triangles) comparison gives
- # precedence to nodes if there are as many nodes as triangles
- point_colors = C
- facecolors = None
- elif len(C) == len(tri.triangles):
- point_colors = None
- facecolors = C
- else:
- raise ValueError('The length of C must match either the number '
- 'of points or the number of triangles')
-
- # Handling of linewidths, shading, edgecolors and antialiased as
- # in Axes.pcolor
- linewidths = (0.25,)
- if 'linewidth' in kwargs:
- kwargs['linewidths'] = kwargs.pop('linewidth')
- kwargs.setdefault('linewidths', linewidths)
-
- edgecolors = 'none'
- if 'edgecolor' in kwargs:
- kwargs['edgecolors'] = kwargs.pop('edgecolor')
- ec = kwargs.setdefault('edgecolors', edgecolors)
-
- if 'antialiased' in kwargs:
- kwargs['antialiaseds'] = kwargs.pop('antialiased')
- if 'antialiaseds' not in kwargs and ec.lower() == "none":
- kwargs['antialiaseds'] = False
-
- _api.check_isinstance((Normalize, None), norm=norm)
- if shading == 'gouraud':
- if facecolors is not None:
- raise ValueError(
- "shading='gouraud' can only be used when the colors "
- "are specified at the points, not at the faces.")
- collection = TriMesh(tri, alpha=alpha, array=point_colors,
- cmap=cmap, norm=norm, **kwargs)
- else: # 'flat'
- # Vertices of triangles.
- maskedTris = tri.get_masked_triangles()
- verts = np.stack((tri.x[maskedTris], tri.y[maskedTris]), axis=-1)
-
- # Color values.
- if facecolors is None:
- # One color per triangle, the mean of the 3 vertex color values.
- colors = point_colors[maskedTris].mean(axis=1)
- elif tri.mask is not None:
- # Remove color values of masked triangles.
- colors = facecolors[~tri.mask]
- else:
- colors = facecolors
- collection = PolyCollection(verts, alpha=alpha, array=colors,
- cmap=cmap, norm=norm, **kwargs)
- collection._scale_norm(norm, vmin, vmax)
- ax.grid(False)
- minx = tri.x.min()
- maxx = tri.x.max()
- miny = tri.y.min()
- maxy = tri.y.max()
- corners = (minx, miny), (maxx, maxy)
- ax.update_datalim(corners)
- ax.autoscale_view()
- ax.add_collection(collection)
- return collection
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/triplot.py b/lib/matplotlib/tri/triplot.py
index 553b4b4482bf..7c012b1a59e7 100644
--- a/lib/matplotlib/tri/triplot.py
+++ b/lib/matplotlib/tri/triplot.py
@@ -1,86 +1,9 @@
-import numpy as np
-from matplotlib.tri.triangulation import Triangulation
-import matplotlib.cbook as cbook
-import matplotlib.lines as mlines
+from ._triplot import * # noqa: F401, F403
+from matplotlib import _api
-def triplot(ax, *args, **kwargs):
- """
- Draw an unstructured triangular grid as lines and/or markers.
-
- Call signatures::
-
- triplot(triangulation, ...)
- triplot(x, y, [triangles], *, [mask=mask], ...)
-
- The triangular grid can be specified either by passing a `.Triangulation`
- object as the first parameter, or by passing the points *x*, *y* and
- optionally the *triangles* and a *mask*. If neither of *triangulation* or
- *triangles* are given, the triangulation is calculated on the fly.
-
- Parameters
- ----------
- triangulation : `.Triangulation`
- An already created triangular grid.
- x, y, triangles, mask
- Parameters defining the triangular grid. See `.Triangulation`.
- This is mutually exclusive with specifying *triangulation*.
- other_parameters
- All other args and kwargs are forwarded to `~.Axes.plot`.
-
- Returns
- -------
- lines : `~matplotlib.lines.Line2D`
- The drawn triangles edges.
- markers : `~matplotlib.lines.Line2D`
- The drawn marker nodes.
- """
- import matplotlib.axes
-
- tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
- x, y, edges = (tri.x, tri.y, tri.edges)
-
- # Decode plot format string, e.g., 'ro-'
- fmt = args[0] if args else ""
- linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
-
- # Insert plot format string into a copy of kwargs (kwargs values prevail).
- kw = cbook.normalize_kwargs(kwargs, mlines.Line2D)
- for key, val in zip(('linestyle', 'marker', 'color'),
- (linestyle, marker, color)):
- if val is not None:
- kw.setdefault(key, val)
-
- # Draw lines without markers.
- # Note 1: If we drew markers here, most markers would be drawn more than
- # once as they belong to several edges.
- # Note 2: We insert nan values in the flattened edges arrays rather than
- # plotting directly (triang.x[edges].T, triang.y[edges].T)
- # as it considerably speeds-up code execution.
- linestyle = kw['linestyle']
- kw_lines = {
- **kw,
- 'marker': 'None', # No marker to draw.
- 'zorder': kw.get('zorder', 1), # Path default zorder is used.
- }
- if linestyle not in [None, 'None', '', ' ']:
- tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
- tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
- tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
- **kw_lines)
- else:
- tri_lines = ax.plot([], [], **kw_lines)
-
- # Draw markers separately.
- marker = kw['marker']
- kw_markers = {
- **kw,
- 'linestyle': 'None', # No line to draw.
- }
- kw_markers.pop('label', None)
- if marker not in [None, 'None', '', ' ']:
- tri_markers = ax.plot(x, y, **kw_markers)
- else:
- tri_markers = ax.plot([], [], **kw_markers)
-
- return tri_lines + tri_markers
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/trirefine.py b/lib/matplotlib/tri/trirefine.py
index 674ee211cf46..6f22f9e8d203 100644
--- a/lib/matplotlib/tri/trirefine.py
+++ b/lib/matplotlib/tri/trirefine.py
@@ -1,307 +1,9 @@
-"""
-Mesh refinement for triangular grids.
-"""
-
-import numpy as np
-
+from ._trirefine import * # noqa: F401, F403
from matplotlib import _api
-from matplotlib.tri.triangulation import Triangulation
-import matplotlib.tri.triinterpolate
-
-
-class TriRefiner:
- """
- Abstract base class for classes implementing mesh refinement.
-
- A TriRefiner encapsulates a Triangulation object and provides tools for
- mesh refinement and interpolation.
-
- Derived classes must implement:
-
- - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
- the optional keyword arguments *kwargs* are defined in each
- TriRefiner concrete implementation, and which returns:
-
- - a refined triangulation,
- - optionally (depending on *return_tri_index*), for each
- point of the refined triangulation: the index of
- the initial triangulation triangle to which it belongs.
-
- - ``refine_field(z, triinterpolator=None, **kwargs)``, where:
-
- - *z* array of field values (to refine) defined at the base
- triangulation nodes,
- - *triinterpolator* is an optional `~matplotlib.tri.TriInterpolator`,
- - the other optional keyword arguments *kwargs* are defined in
- each TriRefiner concrete implementation;
-
- and which returns (as a tuple) a refined triangular mesh and the
- interpolated values of the field at the refined triangulation nodes.
- """
-
- def __init__(self, triangulation):
- _api.check_isinstance(Triangulation, triangulation=triangulation)
- self._triangulation = triangulation
-
-
-class UniformTriRefiner(TriRefiner):
- """
- Uniform mesh refinement by recursive subdivisions.
-
- Parameters
- ----------
- triangulation : `~matplotlib.tri.Triangulation`
- The encapsulated triangulation (to be refined)
- """
-# See Also
-# --------
-# :class:`~matplotlib.tri.CubicTriInterpolator` and
-# :class:`~matplotlib.tri.TriAnalyzer`.
-# """
- def __init__(self, triangulation):
- super().__init__(triangulation)
-
- def refine_triangulation(self, return_tri_index=False, subdiv=3):
- """
- Compute an uniformly refined triangulation *refi_triangulation* of
- the encapsulated :attr:`triangulation`.
-
- This function refines the encapsulated triangulation by splitting each
- father triangle into 4 child sub-triangles built on the edges midside
- nodes, recursing *subdiv* times. In the end, each triangle is hence
- divided into ``4**subdiv`` child triangles.
-
- Parameters
- ----------
- return_tri_index : bool, default: False
- Whether an index table indicating the father triangle index of each
- point is returned.
- subdiv : int, default: 3
- Recursion level for the subdivision.
- Each triangle is divided into ``4**subdiv`` child triangles;
- hence, the default results in 64 refined subtriangles for each
- triangle of the initial triangulation.
-
- Returns
- -------
- refi_triangulation : `~matplotlib.tri.Triangulation`
- The refined triangulation.
- found_index : int array
- Index of the initial triangulation containing triangle, for each
- point of *refi_triangulation*.
- Returned only if *return_tri_index* is set to True.
- """
- refi_triangulation = self._triangulation
- ntri = refi_triangulation.triangles.shape[0]
-
- # Computes the triangulation ancestors numbers in the reference
- # triangulation.
- ancestors = np.arange(ntri, dtype=np.int32)
- for _ in range(subdiv):
- refi_triangulation, ancestors = self._refine_triangulation_once(
- refi_triangulation, ancestors)
- refi_npts = refi_triangulation.x.shape[0]
- refi_triangles = refi_triangulation.triangles
-
- # Now we compute found_index table if needed
- if return_tri_index:
- # We have to initialize found_index with -1 because some nodes
- # may very well belong to no triangle at all, e.g., in case of
- # Delaunay Triangulation with DuplicatePointWarning.
- found_index = np.full(refi_npts, -1, dtype=np.int32)
- tri_mask = self._triangulation.mask
- if tri_mask is None:
- found_index[refi_triangles] = np.repeat(ancestors,
- 3).reshape(-1, 3)
- else:
- # There is a subtlety here: we want to avoid whenever possible
- # that refined points container is a masked triangle (which
- # would result in artifacts in plots).
- # So we impose the numbering from masked ancestors first,
- # then overwrite it with unmasked ancestor numbers.
- ancestor_mask = tri_mask[ancestors]
- found_index[refi_triangles[ancestor_mask, :]
- ] = np.repeat(ancestors[ancestor_mask],
- 3).reshape(-1, 3)
- found_index[refi_triangles[~ancestor_mask, :]
- ] = np.repeat(ancestors[~ancestor_mask],
- 3).reshape(-1, 3)
- return refi_triangulation, found_index
- else:
- return refi_triangulation
-
- def refine_field(self, z, triinterpolator=None, subdiv=3):
- """
- Refine a field defined on the encapsulated triangulation.
-
- Parameters
- ----------
- z : (npoints,) array-like
- Values of the field to refine, defined at the nodes of the
- encapsulated triangulation. (``n_points`` is the number of points
- in the initial triangulation)
- triinterpolator : `~matplotlib.tri.TriInterpolator`, optional
- Interpolator used for field interpolation. If not specified,
- a `~matplotlib.tri.CubicTriInterpolator` will be used.
- subdiv : int, default: 3
- Recursion level for the subdivision.
- Each triangle is divided into ``4**subdiv`` child triangles.
-
- Returns
- -------
- refi_tri : `~matplotlib.tri.Triangulation`
- The returned refined triangulation.
- refi_z : 1D array of length: *refi_tri* node count.
- The returned interpolated field (at *refi_tri* nodes).
- """
- if triinterpolator is None:
- interp = matplotlib.tri.CubicTriInterpolator(
- self._triangulation, z)
- else:
- _api.check_isinstance(matplotlib.tri.TriInterpolator,
- triinterpolator=triinterpolator)
- interp = triinterpolator
-
- refi_tri, found_index = self.refine_triangulation(
- subdiv=subdiv, return_tri_index=True)
- refi_z = interp._interpolate_multikeys(
- refi_tri.x, refi_tri.y, tri_index=found_index)[0]
- return refi_tri, refi_z
-
- @staticmethod
- def _refine_triangulation_once(triangulation, ancestors=None):
- """
- Refine a `.Triangulation` by splitting each triangle into 4
- child-masked_triangles built on the edges midside nodes.
-
- Masked triangles, if present, are also split, but their children
- returned masked.
-
- If *ancestors* is not provided, returns only a new triangulation:
- child_triangulation.
-
- If the array-like key table *ancestor* is given, it shall be of shape
- (ntri,) where ntri is the number of *triangulation* masked_triangles.
- In this case, the function returns
- (child_triangulation, child_ancestors)
- child_ancestors is defined so that the 4 child masked_triangles share
- the same index as their father: child_ancestors.shape = (4 * ntri,).
- """
-
- x = triangulation.x
- y = triangulation.y
-
- # According to tri.triangulation doc:
- # neighbors[i, j] is the triangle that is the neighbor
- # to the edge from point index masked_triangles[i, j] to point
- # index masked_triangles[i, (j+1)%3].
- neighbors = triangulation.neighbors
- triangles = triangulation.triangles
- npts = np.shape(x)[0]
- ntri = np.shape(triangles)[0]
- if ancestors is not None:
- ancestors = np.asarray(ancestors)
- if np.shape(ancestors) != (ntri,):
- raise ValueError(
- "Incompatible shapes provide for triangulation"
- ".masked_triangles and ancestors: {0} and {1}".format(
- np.shape(triangles), np.shape(ancestors)))
-
- # Initiating tables refi_x and refi_y of the refined triangulation
- # points
- # hint: each apex is shared by 2 masked_triangles except the borders.
- borders = np.sum(neighbors == -1)
- added_pts = (3*ntri + borders) // 2
- refi_npts = npts + added_pts
- refi_x = np.zeros(refi_npts)
- refi_y = np.zeros(refi_npts)
-
- # First part of refi_x, refi_y is just the initial points
- refi_x[:npts] = x
- refi_y[:npts] = y
-
- # Second part contains the edge midside nodes.
- # Each edge belongs to 1 triangle (if border edge) or is shared by 2
- # masked_triangles (interior edge).
- # We first build 2 * ntri arrays of edge starting nodes (edge_elems,
- # edge_apexes); we then extract only the masters to avoid overlaps.
- # The so-called 'master' is the triangle with biggest index
- # The 'slave' is the triangle with lower index
- # (can be -1 if border edge)
- # For slave and master we will identify the apex pointing to the edge
- # start
- edge_elems = np.tile(np.arange(ntri, dtype=np.int32), 3)
- edge_apexes = np.repeat(np.arange(3, dtype=np.int32), ntri)
- edge_neighbors = neighbors[edge_elems, edge_apexes]
- mask_masters = (edge_elems > edge_neighbors)
-
- # Identifying the "masters" and adding to refi_x, refi_y vec
- masters = edge_elems[mask_masters]
- apex_masters = edge_apexes[mask_masters]
- x_add = (x[triangles[masters, apex_masters]] +
- x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
- y_add = (y[triangles[masters, apex_masters]] +
- y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
- refi_x[npts:] = x_add
- refi_y[npts:] = y_add
-
- # Building the new masked_triangles; each old masked_triangles hosts
- # 4 new masked_triangles
- # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
- # 3 new_pt_midside
- new_pt_corner = triangles
-
- # What is the index in refi_x, refi_y of point at middle of apex iapex
- # of elem ielem ?
- # If ielem is the apex master: simple count, given the way refi_x was
- # built.
- # If ielem is the apex slave: yet we do not know; but we will soon
- # using the neighbors table.
- new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
- cum_sum = npts
- for imid in range(3):
- mask_st_loc = (imid == apex_masters)
- n_masters_loc = np.sum(mask_st_loc)
- elem_masters_loc = masters[mask_st_loc]
- new_pt_midside[:, imid][elem_masters_loc] = np.arange(
- n_masters_loc, dtype=np.int32) + cum_sum
- cum_sum += n_masters_loc
-
- # Now dealing with slave elems.
- # for each slave element we identify the master and then the inode
- # once slave_masters is identified, slave_masters_apex is such that:
- # neighbors[slaves_masters, slave_masters_apex] == slaves
- mask_slaves = np.logical_not(mask_masters)
- slaves = edge_elems[mask_slaves]
- slaves_masters = edge_neighbors[mask_slaves]
- diff_table = np.abs(neighbors[slaves_masters, :] -
- np.outer(slaves, np.ones(3, dtype=np.int32)))
- slave_masters_apex = np.argmin(diff_table, axis=1)
- slaves_apex = edge_apexes[mask_slaves]
- new_pt_midside[slaves, slaves_apex] = new_pt_midside[
- slaves_masters, slave_masters_apex]
-
- # Builds the 4 child masked_triangles
- child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
- child_triangles[0::4, :] = np.vstack([
- new_pt_corner[:, 0], new_pt_midside[:, 0],
- new_pt_midside[:, 2]]).T
- child_triangles[1::4, :] = np.vstack([
- new_pt_corner[:, 1], new_pt_midside[:, 1],
- new_pt_midside[:, 0]]).T
- child_triangles[2::4, :] = np.vstack([
- new_pt_corner[:, 2], new_pt_midside[:, 2],
- new_pt_midside[:, 1]]).T
- child_triangles[3::4, :] = np.vstack([
- new_pt_midside[:, 0], new_pt_midside[:, 1],
- new_pt_midside[:, 2]]).T
- child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
- # Builds the child mask
- if triangulation.mask is not None:
- child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
- if ancestors is None:
- return child_triangulation
- else:
- return child_triangulation, np.repeat(ancestors, 4)
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/tri/tritools.py b/lib/matplotlib/tri/tritools.py
index 11b500fcdd8f..9c6839ca2049 100644
--- a/lib/matplotlib/tri/tritools.py
+++ b/lib/matplotlib/tri/tritools.py
@@ -1,263 +1,9 @@
-"""
-Tools for triangular grids.
-"""
-
-import numpy as np
-
+from ._tritools import * # noqa: F401, F403
from matplotlib import _api
-from matplotlib.tri import Triangulation
-
-
-class TriAnalyzer:
- """
- Define basic tools for triangular mesh analysis and improvement.
-
- A TriAnalyzer encapsulates a `.Triangulation` object and provides basic
- tools for mesh analysis and mesh improvement.
-
- Attributes
- ----------
- scale_factors
-
- Parameters
- ----------
- triangulation : `~matplotlib.tri.Triangulation`
- The encapsulated triangulation to analyze.
- """
-
- def __init__(self, triangulation):
- _api.check_isinstance(Triangulation, triangulation=triangulation)
- self._triangulation = triangulation
-
- @property
- def scale_factors(self):
- """
- Factors to rescale the triangulation into a unit square.
-
- Returns
- -------
- (float, float)
- Scaling factors (kx, ky) so that the triangulation
- ``[triangulation.x * kx, triangulation.y * ky]``
- fits exactly inside a unit square.
- """
- compressed_triangles = self._triangulation.get_masked_triangles()
- node_used = (np.bincount(np.ravel(compressed_triangles),
- minlength=self._triangulation.x.size) != 0)
- return (1 / np.ptp(self._triangulation.x[node_used]),
- 1 / np.ptp(self._triangulation.y[node_used]))
-
- def circle_ratios(self, rescale=True):
- """
- Return a measure of the triangulation triangles flatness.
-
- The ratio of the incircle radius over the circumcircle radius is a
- widely used indicator of a triangle flatness.
- It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
- triangles. Circle ratios below 0.01 denote very flat triangles.
-
- To avoid unduly low values due to a difference of scale between the 2
- axis, the triangular mesh can first be rescaled to fit inside a unit
- square with `scale_factors` (Only if *rescale* is True, which is
- its default value).
-
- Parameters
- ----------
- rescale : bool, default: True
- If True, internally rescale (based on `scale_factors`), so that the
- (unmasked) triangles fit exactly inside a unit square mesh.
-
- Returns
- -------
- masked array
- Ratio of the incircle radius over the circumcircle radius, for
- each 'rescaled' triangle of the encapsulated triangulation.
- Values corresponding to masked triangles are masked out.
-
- """
- # Coords rescaling
- if rescale:
- (kx, ky) = self.scale_factors
- else:
- (kx, ky) = (1.0, 1.0)
- pts = np.vstack([self._triangulation.x*kx,
- self._triangulation.y*ky]).T
- tri_pts = pts[self._triangulation.triangles]
- # Computes the 3 side lengths
- a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
- b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
- c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
- a = np.hypot(a[:, 0], a[:, 1])
- b = np.hypot(b[:, 0], b[:, 1])
- c = np.hypot(c[:, 0], c[:, 1])
- # circumcircle and incircle radii
- s = (a+b+c)*0.5
- prod = s*(a+b-s)*(a+c-s)*(b+c-s)
- # We have to deal with flat triangles with infinite circum_radius
- bool_flat = (prod == 0.)
- if np.any(bool_flat):
- # Pathologic flow
- ntri = tri_pts.shape[0]
- circum_radius = np.empty(ntri, dtype=np.float64)
- circum_radius[bool_flat] = np.inf
- abc = a*b*c
- circum_radius[~bool_flat] = abc[~bool_flat] / (
- 4.0*np.sqrt(prod[~bool_flat]))
- else:
- # Normal optimized flow
- circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
- in_radius = (a*b*c) / (4.0*circum_radius*s)
- circle_ratio = in_radius/circum_radius
- mask = self._triangulation.mask
- if mask is None:
- return circle_ratio
- else:
- return np.ma.array(circle_ratio, mask=mask)
-
- def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
- """
- Eliminate excessively flat border triangles from the triangulation.
-
- Returns a mask *new_mask* which allows to clean the encapsulated
- triangulation from its border-located flat triangles
- (according to their :meth:`circle_ratios`).
- This mask is meant to be subsequently applied to the triangulation
- using `.Triangulation.set_mask`.
- *new_mask* is an extension of the initial triangulation mask
- in the sense that an initially masked triangle will remain masked.
-
- The *new_mask* array is computed recursively; at each step flat
- triangles are removed only if they share a side with the current mesh
- border. Thus no new holes in the triangulated domain will be created.
-
- Parameters
- ----------
- min_circle_ratio : float, default: 0.01
- Border triangles with incircle/circumcircle radii ratio r/R will
- be removed if r/R < *min_circle_ratio*.
- rescale : bool, default: True
- If True, first, internally rescale (based on `scale_factors`) so
- that the (unmasked) triangles fit exactly inside a unit square
- mesh. This rescaling accounts for the difference of scale which
- might exist between the 2 axis.
-
- Returns
- -------
- array of bool
- Mask to apply to encapsulated triangulation.
- All the initially masked triangles remain masked in the
- *new_mask*.
-
- Notes
- -----
- The rationale behind this function is that a Delaunay
- triangulation - of an unstructured set of points - sometimes contains
- almost flat triangles at its border, leading to artifacts in plots
- (especially for high-resolution contouring).
- Masked with computed *new_mask*, the encapsulated
- triangulation would contain no more unmasked border triangles
- with a circle ratio below *min_circle_ratio*, thus improving the
- mesh quality for subsequent plots or interpolation.
- """
- # Recursively computes the mask_current_borders, true if a triangle is
- # at the border of the mesh OR touching the border through a chain of
- # invalid aspect ratio masked_triangles.
- ntri = self._triangulation.triangles.shape[0]
- mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
-
- current_mask = self._triangulation.mask
- if current_mask is None:
- current_mask = np.zeros(ntri, dtype=bool)
- valid_neighbors = np.copy(self._triangulation.neighbors)
- renum_neighbors = np.arange(ntri, dtype=np.int32)
- nadd = -1
- while nadd != 0:
- # The active wavefront is the triangles from the border (unmasked
- # but with a least 1 neighbor equal to -1
- wavefront = (np.min(valid_neighbors, axis=1) == -1) & ~current_mask
- # The element from the active wavefront will be masked if their
- # circle ratio is bad.
- added_mask = wavefront & mask_bad_ratio
- current_mask = added_mask | current_mask
- nadd = np.sum(added_mask)
-
- # now we have to update the tables valid_neighbors
- valid_neighbors[added_mask, :] = -1
- renum_neighbors[added_mask] = -1
- valid_neighbors = np.where(valid_neighbors == -1, -1,
- renum_neighbors[valid_neighbors])
-
- return np.ma.filled(current_mask, True)
-
- def _get_compressed_triangulation(self):
- """
- Compress (if masked) the encapsulated triangulation.
-
- Returns minimal-length triangles array (*compressed_triangles*) and
- coordinates arrays (*compressed_x*, *compressed_y*) that can still
- describe the unmasked triangles of the encapsulated triangulation.
-
- Returns
- -------
- compressed_triangles : array-like
- the returned compressed triangulation triangles
- compressed_x : array-like
- the returned compressed triangulation 1st coordinate
- compressed_y : array-like
- the returned compressed triangulation 2nd coordinate
- tri_renum : int array
- renumbering table to translate the triangle numbers from the
- encapsulated triangulation into the new (compressed) renumbering.
- -1 for masked triangles (deleted from *compressed_triangles*).
- node_renum : int array
- renumbering table to translate the point numbers from the
- encapsulated triangulation into the new (compressed) renumbering.
- -1 for unused points (i.e. those deleted from *compressed_x* and
- *compressed_y*).
-
- """
- # Valid triangles and renumbering
- tri_mask = self._triangulation.mask
- compressed_triangles = self._triangulation.get_masked_triangles()
- ntri = self._triangulation.triangles.shape[0]
- if tri_mask is not None:
- tri_renum = self._total_to_compress_renum(~tri_mask)
- else:
- tri_renum = np.arange(ntri, dtype=np.int32)
-
- # Valid nodes and renumbering
- valid_node = (np.bincount(np.ravel(compressed_triangles),
- minlength=self._triangulation.x.size) != 0)
- compressed_x = self._triangulation.x[valid_node]
- compressed_y = self._triangulation.y[valid_node]
- node_renum = self._total_to_compress_renum(valid_node)
-
- # Now renumbering the valid triangles nodes
- compressed_triangles = node_renum[compressed_triangles]
-
- return (compressed_triangles, compressed_x, compressed_y, tri_renum,
- node_renum)
-
- @staticmethod
- def _total_to_compress_renum(valid):
- """
- Parameters
- ----------
- valid : 1D bool array
- Validity mask.
- Returns
- -------
- int array
- Array so that (`valid_array` being a compressed array
- based on a `masked_array` with mask ~*valid*):
- - For all i with valid[i] = True:
- valid_array[renum[i]] = masked_array[i]
- - For all i with valid[i] = False:
- renum[i] = -1 (invalid value)
- """
- renum = np.full(np.size(valid), -1, dtype=np.int32)
- n_valid = np.sum(valid)
- renum[valid] = np.arange(n_valid, dtype=np.int32)
- return renum
+_api.warn_deprecated(
+ "3.7",
+ message=f"Importing {__name__} was deprecated in Matplotlib 3.7 and will "
+ f"be removed two minor releases later. All functionality is "
+ f"available via the top-level module matplotlib.tri")
diff --git a/lib/matplotlib/widgets.py b/lib/matplotlib/widgets.py
index c42caa649935..807e9d360071 100644
--- a/lib/matplotlib/widgets.py
+++ b/lib/matplotlib/widgets.py
@@ -1315,17 +1315,6 @@ def stop_typing(self):
# call it once we've already done our cleanup.
self._observers.process('submit', self.text)
- def position_cursor(self, x):
- # now, we have to figure out where the cursor goes.
- # approximate it based on assuming all characters the same length
- if len(self.text) == 0:
- self.cursor_index = 0
- else:
- bb = self.text_disp.get_window_extent()
- ratio = np.clip((x - bb.x0) / bb.width, 0, 1)
- self.cursor_index = int(len(self.text) * ratio)
- self._rendercursor()
-
def _click(self, event):
if self.ignore(event):
return
@@ -1338,7 +1327,8 @@ def _click(self, event):
event.canvas.grab_mouse(self.ax)
if not self.capturekeystrokes:
self.begin_typing(event.x)
- self.position_cursor(event.x)
+ self.cursor_index = self.text_disp._char_index_at(event.x)
+ self._rendercursor()
def _resize(self, event):
self.stop_typing()
@@ -2918,10 +2908,9 @@ class RectangleSelector(_SelectorWidget):
... print(erelease.xdata, erelease.ydata)
>>> props = dict(facecolor='blue', alpha=0.5)
>>> rect = mwidgets.RectangleSelector(ax, onselect, interactive=True,
- props=props)
+ ... props=props)
>>> fig.show()
-
- >>> selector.add_state('square')
+ >>> rect.add_state('square')
See also: :doc:`/gallery/widgets/rectangle_selector`
"""
diff --git a/lib/mpl_toolkits/axes_grid1/anchored_artists.py b/lib/mpl_toolkits/axes_grid1/anchored_artists.py
index 7e15879289ee..b5746ca3df8a 100644
--- a/lib/mpl_toolkits/axes_grid1/anchored_artists.py
+++ b/lib/mpl_toolkits/axes_grid1/anchored_artists.py
@@ -14,7 +14,7 @@ def __init__(self, width, height, xdescent, ydescent,
loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
**kwargs):
"""
- An anchored container with a fixed size and fillable DrawingArea.
+ An anchored container with a fixed size and fillable `.DrawingArea`.
Artists added to the *drawing_area* will have their coordinates
interpreted as pixels. Any transformations set on the artists will be
@@ -37,16 +37,16 @@ def __init__(self, width, height, xdescent, ydescent,
Padding around the child objects, in fraction of the font size.
borderpad : float, default: 0.5
Border padding, in fraction of the font size.
- prop : `matplotlib.font_manager.FontProperties`, optional
+ prop : `~matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, default: True
- If True, draw a box around this artists.
+ If True, draw a box around this artist.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
- drawing_area : `matplotlib.offsetbox.DrawingArea`
+ drawing_area : `~matplotlib.offsetbox.DrawingArea`
A container for artists to display.
Examples
@@ -81,7 +81,7 @@ def __init__(self, transform, loc,
Parameters
----------
- transform : `matplotlib.transforms.Transform`
+ transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : str
@@ -95,16 +95,16 @@ def __init__(self, transform, loc,
Padding around the child objects, in fraction of the font size.
borderpad : float, default: 0.5
Border padding, in fraction of the font size.
- prop : `matplotlib.font_manager.FontProperties`, optional
+ prop : `~matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, default: True
- If True, draw a box around this artists.
+ If True, draw a box around this artist.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
- drawing_area : `matplotlib.offsetbox.AuxTransformBox`
+ drawing_area : `~matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
@@ -132,7 +132,7 @@ def __init__(self, transform, width, height, angle, loc,
Parameters
----------
- transform : `matplotlib.transforms.Transform`
+ transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
width, height : float
@@ -153,14 +153,14 @@ def __init__(self, transform, width, height, angle, loc,
Border padding, in fraction of the font size.
frameon : bool, default: True
If True, draw a box around the ellipse.
- prop : `matplotlib.font_manager.FontProperties`, optional
+ prop : `~matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
- ellipse : `matplotlib.patches.Ellipse`
+ ellipse : `~matplotlib.patches.Ellipse`
Ellipse patch drawn.
"""
self._box = AuxTransformBox(transform)
@@ -182,7 +182,7 @@ def __init__(self, transform, size, label, loc,
Parameters
----------
- transform : `matplotlib.transforms.Transform`
+ transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
size : float
@@ -213,7 +213,7 @@ def __init__(self, transform, size, label, loc,
Color for the size bar and label.
label_top : bool, default: False
If True, the label will be over the size bar.
- fontproperties : `matplotlib.font_manager.FontProperties`, optional
+ fontproperties : `~matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
fill_bar : bool, optional
If True and if *size_vertical* is nonzero, the size bar will
@@ -225,15 +225,15 @@ def __init__(self, transform, size, label, loc,
Attributes
----------
- size_bar : `matplotlib.offsetbox.AuxTransformBox`
+ size_bar : `~matplotlib.offsetbox.AuxTransformBox`
Container for the size bar.
- txt_label : `matplotlib.offsetbox.TextArea`
+ txt_label : `~matplotlib.offsetbox.TextArea`
Container for the label of the size bar.
Notes
-----
If *prop* is passed as a keyword argument, but *fontproperties* is
- not, then *prop* is be assumed to be the intended *fontproperties*.
+ not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
@@ -301,7 +301,7 @@ def __init__(self, transform, label_x, label_y, length=0.15,
Parameters
----------
- transform : `matplotlib.transforms.Transform`
+ transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transAxes`.
label_x, label_y : str
@@ -335,7 +335,7 @@ def __init__(self, transform, label_x, label_y, length=0.15,
sep_x, sep_y : float, default: 0.01 and 0 respectively
Separation between the arrows and labels in coordinates of
*transform*.
- fontproperties : `matplotlib.font_manager.FontProperties`, optional
+ fontproperties : `~matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
back_length : float, default: 0.15
Fraction of the arrow behind the arrow crossing.
@@ -347,25 +347,25 @@ def __init__(self, transform, label_x, label_y, length=0.15,
Width of arrow tail, sent to ArrowStyle.
text_props, arrow_props : dict
Properties of the text and arrows, passed to
- `.textpath.TextPath` and `.patches.FancyArrowPatch`.
+ `~.textpath.TextPath` and `~.patches.FancyArrowPatch`.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
- arrow_x, arrow_y : `matplotlib.patches.FancyArrowPatch`
+ arrow_x, arrow_y : `~matplotlib.patches.FancyArrowPatch`
Arrow x and y
- text_path_x, text_path_y : `matplotlib.textpath.TextPath`
+ text_path_x, text_path_y : `~matplotlib.textpath.TextPath`
Path for arrow labels
- p_x, p_y : `matplotlib.patches.PathPatch`
+ p_x, p_y : `~matplotlib.patches.PathPatch`
Patch for arrow labels
- box : `matplotlib.offsetbox.AuxTransformBox`
+ box : `~matplotlib.offsetbox.AuxTransformBox`
Container for the arrows and labels.
Notes
-----
If *prop* is passed as a keyword argument, but *fontproperties* is
- not, then *prop* is be assumed to be the intended *fontproperties*.
+ not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
diff --git a/lib/mpl_toolkits/axes_grid1/axes_divider.py b/lib/mpl_toolkits/axes_grid1/axes_divider.py
index 251fa44b14bf..2d092674623d 100644
--- a/lib/mpl_toolkits/axes_grid1/axes_divider.py
+++ b/lib/mpl_toolkits/axes_grid1/axes_divider.py
@@ -6,7 +6,6 @@
import matplotlib as mpl
from matplotlib import _api
-from matplotlib.axes import SubplotBase
from matplotlib.gridspec import SubplotSpec
import matplotlib.transforms as mtransforms
from . import axes_size as Size
@@ -61,12 +60,6 @@ def get_horizontal_sizes(self, renderer):
def get_vertical_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_vertical()]
- @_api.deprecated("3.5")
- def get_vsize_hsize(self):
- vsize = Size.AddList(self.get_vertical())
- hsize = Size.AddList(self.get_horizontal())
- return vsize, hsize
-
@staticmethod
def _calc_k(l, total_size):
@@ -237,14 +230,14 @@ def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
def new_locator(self, nx, ny, nx1=None, ny1=None):
"""
- Return a new `AxesLocator` for the specified cell.
+ Return a new `.AxesLocator` for the specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
- specified. Otherwise location of columns spanning between *nx*
+ specified. Otherwise, location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
@@ -275,7 +268,7 @@ def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None):
Parameters
----------
- use_axes : `~.axes.Axes` or list of `~.axes.Axes`
+ use_axes : `~matplotlib.axes.Axes` or list of `~matplotlib.axes.Axes`
The Axes whose decorations are taken into account.
pad : float, optional
Additional padding in inches.
@@ -291,18 +284,18 @@ def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None):
class AxesLocator:
"""
A callable object which returns the position and size of a given
- AxesDivider cell.
+ `.AxesDivider` cell.
"""
def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
"""
Parameters
----------
- axes_divider : AxesDivider
+ axes_divider : `~mpl_toolkits.axes_grid1.axes_divider.AxesDivider`
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
- specified. Otherwise location of columns spanning between *nx*
+ specified. Otherwise, location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
@@ -343,10 +336,7 @@ def __call__(self, axes, renderer):
renderer)
def get_subplotspec(self):
- if hasattr(self._axes_divider, "get_subplotspec"):
- return self._axes_divider.get_subplotspec()
- else:
- return None
+ return self._axes_divider.get_subplotspec()
class SubplotDivider(Divider):
@@ -359,7 +349,7 @@ def __init__(self, fig, *args, horizontal=None, vertical=None,
"""
Parameters
----------
- fig : `matplotlib.figure.Figure`
+ fig : `~matplotlib.figure.Figure`
*args : tuple (*nrows*, *ncols*, *index*) or int
The array of subplots in the figure has dimensions ``(nrows,
@@ -421,10 +411,7 @@ def __init__(self, axes, xref=None, yref=None):
def _get_new_axes(self, *, axes_class=None, **kwargs):
axes = self._axes
if axes_class is None:
- if isinstance(axes, SubplotBase):
- axes_class = axes._axes_class
- else:
- axes_class = type(axes)
+ axes_class = type(axes)
return axes_class(axes.get_figure(), axes.get_position(original=True),
**kwargs)
@@ -492,9 +479,8 @@ def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
ax.set_axes_locator(locator)
return ax
- @_api.delete_parameter("3.5", "add_to_figure", alternative="ax.remove()")
- def append_axes(self, position, size, pad=None, add_to_figure=True, *,
- axes_class=None, **kwargs):
+ def append_axes(self, position, size, pad=None, *, axes_class=None,
+ **kwargs):
"""
Add a new axes on a given side of the main axes.
@@ -524,8 +510,7 @@ def append_axes(self, position, size, pad=None, add_to_figure=True, *,
}, position=position)
ax = create_axes(
size, pad, pack_start=pack_start, axes_class=axes_class, **kwargs)
- if add_to_figure:
- self._fig.add_axes(ax)
+ self._fig.add_axes(ax)
return ax
def get_aspect(self):
@@ -552,10 +537,7 @@ def get_anchor(self):
return self._anchor
def get_subplotspec(self):
- if hasattr(self._axes, "get_subplotspec"):
- return self._axes.get_subplotspec()
- else:
- return None
+ return self._axes.get_subplotspec()
# Helper for HBoxDivider/VBoxDivider.
@@ -611,7 +593,7 @@ def _locate(x, y, w, h, summed_widths, equal_heights, fig_w, fig_h, anchor):
class HBoxDivider(SubplotDivider):
"""
- A `SubplotDivider` for laying out axes horizontally, while ensuring that
+ A `.SubplotDivider` for laying out axes horizontally, while ensuring that
they have equal heights.
Examples
@@ -621,14 +603,14 @@ class HBoxDivider(SubplotDivider):
def new_locator(self, nx, nx1=None):
"""
- Create a new `AxesLocator` for the specified cell.
+ Create a new `.AxesLocator` for the specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
- specified. Otherwise location of columns spanning between *nx*
+ specified. Otherwise, location of columns spanning between *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
"""
return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1)
@@ -654,20 +636,20 @@ def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
class VBoxDivider(SubplotDivider):
"""
- A `SubplotDivider` for laying out axes vertically, while ensuring that they
- have equal widths.
+ A `.SubplotDivider` for laying out axes vertically, while ensuring that
+ they have equal widths.
"""
def new_locator(self, ny, ny1=None):
"""
- Create a new `AxesLocator` for the specified cell.
+ Create a new `.AxesLocator` for the specified cell.
Parameters
----------
ny, ny1 : int
Integers specifying the row-position of the
cell. When *ny1* is None, a single *ny*-th row is
- specified. Otherwise location of rows spanning between *ny*
+ specified. Otherwise, location of rows spanning between *ny*
to *ny1* (but excluding *ny1*-th row) is specified.
"""
return AxesLocator(self, 0, ny, 1, ny1 if ny1 is not None else ny + 1)
@@ -704,7 +686,7 @@ def make_axes_area_auto_adjustable(
"""
Add auto-adjustable padding around *ax* to take its decorations (title,
labels, ticks, ticklabels) into account during layout, using
- `Divider.add_auto_adjustable_area`.
+ `.Divider.add_auto_adjustable_area`.
By default, padding is determined from the decorations of *ax*.
Pass *use_axes* to consider the decorations of other Axes instead.
diff --git a/lib/mpl_toolkits/axes_grid1/axes_grid.py b/lib/mpl_toolkits/axes_grid1/axes_grid.py
index 2435f2258e25..ff5bc1c617c6 100644
--- a/lib/mpl_toolkits/axes_grid1/axes_grid.py
+++ b/lib/mpl_toolkits/axes_grid1/axes_grid.py
@@ -41,11 +41,6 @@ def cla(self):
self.orientation = orientation
-@_api.deprecated("3.5")
-class CbarAxes(CbarAxesBase, Axes):
- pass
-
-
_cbaraxes_class_factory = cbook._make_class_factory(CbarAxesBase, "Cbar{}")
@@ -81,9 +76,11 @@ def __init__(self, fig,
----------
fig : `.Figure`
The parent figure.
- rect : (float, float, float, float) or int
- The axes position, as a ``(left, bottom, width, height)`` tuple or
- as a three-digit subplot position code (e.g., "121").
+ rect : (float, float, float, float), (int, int, int), int, or \
+ `~.SubplotSpec`
+ The axes position, as a ``(left, bottom, width, height)`` tuple,
+ as a three-digit subplot position code (e.g., ``(1, 2, 1)`` or
+ ``121``), or as a `~.SubplotSpec`.
nrows_ncols : (int, int)
Number of rows and columns in the grid.
ngrids : int or None, default: None
@@ -139,7 +136,7 @@ def __init__(self, fig,
axes_class = functools.partial(cls, **kwargs)
kw = dict(horizontal=[], vertical=[], aspect=aspect)
- if isinstance(rect, (str, Number, SubplotSpec)):
+ if isinstance(rect, (Number, SubplotSpec)):
self._divider = SubplotDivider(fig, rect, **kw)
elif len(rect) == 3:
self._divider = SubplotDivider(fig, *rect, **kw)
@@ -269,7 +266,6 @@ def set_label_mode(self, mode):
- "1": Only the bottom left axes is labelled.
- "all": all axes are labelled.
"""
- _api.check_in_list(["all", "L", "1"], mode=mode)
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
@@ -290,7 +286,7 @@ def set_label_mode(self, mode):
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
- else: # "1"
+ elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
@@ -306,10 +302,6 @@ def set_axes_locator(self, locator):
def get_axes_locator(self):
return self._divider.get_locator()
- @_api.deprecated("3.5")
- def get_vsize_hsize(self):
- return self._divider.get_vsize_hsize()
-
class ImageGrid(Grid):
# docstring inherited
diff --git a/lib/mpl_toolkits/axes_grid1/axes_rgb.py b/lib/mpl_toolkits/axes_grid1/axes_rgb.py
index c94f443a8f83..003efd68c7fd 100644
--- a/lib/mpl_toolkits/axes_grid1/axes_rgb.py
+++ b/lib/mpl_toolkits/axes_grid1/axes_rgb.py
@@ -8,8 +8,15 @@ def make_rgb_axes(ax, pad=0.01, axes_class=None, **kwargs):
"""
Parameters
----------
- pad : float
- Fraction of the axes height.
+ ax : `~matplotlib.axes.Axes`
+ Axes instance to create the RGB Axes in.
+ pad : float, optional
+ Fraction of the Axes height to pad.
+ axes_class : `matplotlib.axes.Axes` or None, optional
+ Axes class to use for the R, G, and B Axes. If None, use
+ the same class as *ax*.
+ **kwargs :
+ Forwarded to *axes_class* init for the R, G, and B Axes.
"""
divider = make_axes_locatable(ax)
@@ -26,10 +33,7 @@ def make_rgb_axes(ax, pad=0.01, axes_class=None, **kwargs):
ax_rgb = []
if axes_class is None:
- try:
- axes_class = ax._axes_class
- except AttributeError:
- axes_class = type(ax)
+ axes_class = type(ax)
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(), ax.get_position(original=True),
@@ -55,7 +59,7 @@ def make_rgb_axes(ax, pad=0.01, axes_class=None, **kwargs):
class RGBAxes:
"""
- 4-panel imshow (RGB, R, G, B).
+ 4-panel `~.Axes.imshow` (RGB, R, G, B).
Layout:
@@ -68,17 +72,18 @@ class RGBAxes:
+---------------+-----+
Subclasses can override the ``_defaultAxesClass`` attribute.
+ By default, RGBAxes uses `.mpl_axes.Axes`.
Attributes
----------
RGB : ``_defaultAxesClass``
- The axes object for the three-channel imshow.
+ The Axes object for the three-channel `~.Axes.imshow`.
R : ``_defaultAxesClass``
- The axes object for the red channel imshow.
+ The Axes object for the red channel `~.Axes.imshow`.
G : ``_defaultAxesClass``
- The axes object for the green channel imshow.
+ The Axes object for the green channel `~.Axes.imshow`.
B : ``_defaultAxesClass``
- The axes object for the blue channel imshow.
+ The Axes object for the blue channel `~.Axes.imshow`.
"""
_defaultAxesClass = Axes
@@ -88,13 +93,13 @@ def __init__(self, *args, pad=0, **kwargs):
Parameters
----------
pad : float, default: 0
- fraction of the axes height to put as padding.
- axes_class : matplotlib.axes.Axes
-
+ Fraction of the Axes height to put as padding.
+ axes_class : `~matplotlib.axes.Axes`
+ Axes class to use. If not provided, ``_defaultAxesClass`` is used.
*args
- Unpacked into axes_class() init for RGB
+ Forwarded to *axes_class* init for the RGB Axes
**kwargs
- Unpacked into axes_class() init for RGB, R, G, B axes
+ Forwarded to *axes_class* init for the RGB, R, G, and B Axes
"""
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
self.RGB = ax = axes_class(*args, **kwargs)
@@ -114,15 +119,15 @@ def imshow_rgb(self, r, g, b, **kwargs):
----------
r, g, b : array-like
The red, green, and blue arrays.
- kwargs : imshow kwargs
- kwargs get unpacked into the imshow calls for the four images.
+ **kwargs :
+ Forwarded to `~.Axes.imshow` calls for the four images.
Returns
-------
- rgb : matplotlib.image.AxesImage
- r : matplotlib.image.AxesImage
- g : matplotlib.image.AxesImage
- b : matplotlib.image.AxesImage
+ rgb : `~matplotlib.image.AxesImage`
+ r : `~matplotlib.image.AxesImage`
+ g : `~matplotlib.image.AxesImage`
+ b : `~matplotlib.image.AxesImage`
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
diff --git a/lib/mpl_toolkits/axes_grid1/inset_locator.py b/lib/mpl_toolkits/axes_grid1/inset_locator.py
index 01c14e9baa4a..52722520f3f6 100644
--- a/lib/mpl_toolkits/axes_grid1/inset_locator.py
+++ b/lib/mpl_toolkits/axes_grid1/inset_locator.py
@@ -24,7 +24,7 @@ def __init__(self, parent, lbwh):
Parameters
----------
- parent : `matplotlib.axes.Axes`
+ parent : `~matplotlib.axes.Axes`
Axes to use for normalizing coordinates.
lbwh : iterable of four floats
@@ -38,12 +38,12 @@ def __init__(self, parent, lbwh):
Examples
--------
The following bounds the inset axes to a box with 20%% of the parent
- axes's height and 40%% of the width. The size of the axes specified
+ axes height and 40%% of the width. The size of the axes specified
([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
>>> parent_axes = plt.gca()
>>> ax_ins = plt.axes([0, 0, 1, 1])
- >>> ip = InsetPosition(ax, [0.5, 0.1, 0.4, 0.2])
+ >>> ip = InsetPosition(parent_axes, [0.5, 0.1, 0.4, 0.2])
>>> ax_ins.set_axes_locator(ip)
"""
self.parent = parent
@@ -70,18 +70,11 @@ def draw(self, renderer):
def __call__(self, ax, renderer):
self.axes = ax
-
- fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
- self._update_offset_func(renderer, fontsize)
-
- width, height, xdescent, ydescent = self.get_extent(renderer)
-
- px, py = self.get_offset(width, height, 0, 0, renderer)
- bbox_canvas = Bbox.from_bounds(px, py, width, height)
+ bbox = self.get_window_extent(renderer)
+ px, py = self.get_offset(bbox.width, bbox.height, 0, 0, renderer)
+ bbox_canvas = Bbox.from_bounds(px, py, bbox.width, bbox.height)
tr = ax.figure.transFigure.inverted()
- bb = TransformedBbox(bbox_canvas, tr)
-
- return bb
+ return TransformedBbox(bbox_canvas, tr)
class AnchoredSizeLocator(AnchoredLocatorBase):
@@ -142,7 +135,7 @@ def __init__(self, bbox, **kwargs):
Parameters
----------
- bbox : `matplotlib.transforms.Bbox`
+ bbox : `~matplotlib.transforms.Bbox`
Bbox to use for the extents of this patch.
**kwargs
@@ -204,7 +197,7 @@ def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
Parameters
----------
- bbox1, bbox2 : `matplotlib.transforms.Bbox`
+ bbox1, bbox2 : `~matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1, loc2 : {1, 2, 3, 4}
@@ -255,7 +248,7 @@ def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
Parameters
----------
- bbox1, bbox2 : `matplotlib.transforms.Bbox`
+ bbox1, bbox2 : `~matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1a, loc2a, loc1b, loc2b : {1, 2, 3, 4}
@@ -342,7 +335,7 @@ def inset_axes(parent_axes, width, height, loc='upper right',
the size in inches, e.g. *width=1.3*. If a string is provided, it is
the size in relative units, e.g. *width='40%%'*. By default, i.e. if
neither *bbox_to_anchor* nor *bbox_transform* are specified, those
- are relative to the parent_axes. Otherwise they are to be understood
+ are relative to the parent_axes. Otherwise, they are to be understood
relative to the bounding box provided via *bbox_to_anchor*.
loc : str, default: 'upper right'
@@ -353,7 +346,7 @@ def inset_axes(parent_axes, width, height, loc='upper right',
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
- bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
+ bbox_to_anchor : tuple or `~matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
a tuple of (0, 0, 1, 1) is used if *bbox_transform* is set
to *parent_axes.transAxes* or *parent_axes.figure.transFigure*.
@@ -367,7 +360,7 @@ def inset_axes(parent_axes, width, height, loc='upper right',
a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
- bbox_transform : `matplotlib.transforms.Transform`, optional
+ bbox_transform : `~matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used. The value
of *bbox_to_anchor* (or the return value of its get_points method)
@@ -376,7 +369,7 @@ def inset_axes(parent_axes, width, height, loc='upper right',
You may provide *bbox_to_anchor* in some normalized coordinate,
and give an appropriate transform (e.g., *parent_axes.transAxes*).
- axes_class : `matplotlib.axes.Axes` type, default: `.HostAxes`
+ axes_class : `~matplotlib.axes.Axes` type, default: `.HostAxes`
The type of the newly created inset axes.
axes_kwargs : dict, optional
@@ -445,7 +438,7 @@ def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
Parameters
----------
- parent_axes : `matplotlib.axes.Axes`
+ parent_axes : `~matplotlib.axes.Axes`
Axes to place the inset axes.
zoom : float
@@ -461,7 +454,7 @@ def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
- bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
+ bbox_to_anchor : tuple or `~matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
*parent_axes.bbox* is used. If a tuple, can be either
[left, bottom, width, height], or [left, bottom].
@@ -472,7 +465,7 @@ def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
also specify a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
- bbox_transform : `matplotlib.transforms.Transform`, optional
+ bbox_transform : `~matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used (i.e. pixel
coordinates). This is useful when not providing any argument to
@@ -483,7 +476,7 @@ def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
*bbox_to_anchor* will use *parent_axes.bbox*, the units of which are
in display (pixel) coordinates.
- axes_class : `matplotlib.axes.Axes` type, default: `.HostAxes`
+ axes_class : `~matplotlib.axes.Axes` type, default: `.HostAxes`
The type of the newly created inset axes.
axes_kwargs : dict, optional
@@ -548,10 +541,10 @@ def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
Parameters
----------
- parent_axes : `matplotlib.axes.Axes`
+ parent_axes : `~matplotlib.axes.Axes`
Axes which contains the area of the inset axes.
- inset_axes : `matplotlib.axes.Axes`
+ inset_axes : `~matplotlib.axes.Axes`
The inset axes.
loc1, loc2 : {1, 2, 3, 4}
@@ -565,10 +558,10 @@ def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
Returns
-------
- pp : `matplotlib.patches.Patch`
+ pp : `~matplotlib.patches.Patch`
The patch drawn to represent the area of the inset axes.
- p1, p2 : `matplotlib.patches.Patch`
+ p1, p2 : `~matplotlib.patches.Patch`
The patches connecting two corners of the inset axes and its area.
"""
rect = _TransformedBboxWithCallback(
diff --git a/lib/mpl_toolkits/axes_grid1/parasite_axes.py b/lib/mpl_toolkits/axes_grid1/parasite_axes.py
index b959d1f48a49..b37dcfc888fd 100644
--- a/lib/mpl_toolkits/axes_grid1/parasite_axes.py
+++ b/lib/mpl_toolkits/axes_grid1/parasite_axes.py
@@ -1,8 +1,6 @@
from matplotlib import _api, cbook
import matplotlib.artist as martist
-import matplotlib.image as mimage
import matplotlib.transforms as mtransforms
-from matplotlib.axes import subplot_class_factory
from matplotlib.transforms import Bbox
from .mpl_axes import Axes
@@ -22,21 +20,6 @@ def clear(self):
martist.setp(self.get_children(), visible=False)
self._get_lines = self._parent_axes._get_lines
- @_api.deprecated("3.5")
- def get_images_artists(self):
- artists = []
- images = []
-
- for a in self.get_children():
- if not a.get_visible():
- continue
- if isinstance(a, mimage.AxesImage):
- images.append(a)
- else:
- artists.append(a)
-
- return images, artists
-
def pick(self, mouseevent):
# This most likely goes to Artist.pick (depending on axes_class given
# to the factory), which only handles pick events registered on the
@@ -99,20 +82,38 @@ def __init__(self, *args, **kwargs):
self.parasites = []
super().__init__(*args, **kwargs)
- def get_aux_axes(self, tr=None, viewlim_mode="equal", axes_class=Axes):
+ def get_aux_axes(
+ self, tr=None, viewlim_mode="equal", axes_class=None, **kwargs):
"""
Add a parasite axes to this host.
Despite this method's name, this should actually be thought of as an
``add_parasite_axes`` method.
- *tr* may be `.Transform`, in which case the following relation will
- hold: ``parasite.transData = tr + host.transData``. Alternatively, it
- may be None (the default), no special relationship will hold between
- the parasite's and the host's ``transData``.
+ .. versionchanged:: 3.7
+ Defaults to same base axes class as host axes.
+
+ Parameters
+ ----------
+ tr : `~matplotlib.transforms.Transform` or None, default: None
+ If a `.Transform`, the following relation will hold:
+ ``parasite.transData = tr + host.transData``.
+ If None, the parasite's and the host's ``transData`` are unrelated.
+ viewlim_mode : {"equal", "transform", None}, default: "equal"
+ How the parasite's view limits are set: directly equal to the
+ parent axes ("equal"), equal after application of *tr*
+ ("transform"), or independently (None).
+ axes_class : subclass type of `~matplotlib.axes.Axes`, optional
+ The `~.axes.Axes` subclass that is instantiated. If None, the base
+ class of the host axes is used.
+ kwargs
+ Other parameters are forwarded to the parasite axes constructor.
"""
+ if axes_class is None:
+ axes_class = self._base_axes_class
parasite_axes_class = parasite_axes_class_factory(axes_class)
- ax2 = parasite_axes_class(self, tr, viewlim_mode=viewlim_mode)
+ ax2 = parasite_axes_class(
+ self, tr, viewlim_mode=viewlim_mode, **kwargs)
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
self.parasites.append(ax2)
@@ -226,16 +227,9 @@ def get_tightbbox(self, renderer=None, call_axes_locator=True,
return Bbox.union([b for b in bbs if b.width != 0 or b.height != 0])
-host_axes_class_factory = cbook._make_class_factory(
- HostAxesBase, "{}HostAxes", "_base_axes_class")
-HostAxes = host_axes_class_factory(Axes)
-SubplotHost = subplot_class_factory(HostAxes)
-
-
-def host_subplot_class_factory(axes_class):
- host_axes_class = host_axes_class_factory(axes_class)
- subplot_host_class = subplot_class_factory(host_axes_class)
- return subplot_host_class
+host_axes_class_factory = host_subplot_class_factory = \
+ cbook._make_class_factory(HostAxesBase, "{}HostAxes", "_base_axes_class")
+HostAxes = SubplotHost = host_axes_class_factory(Axes)
def host_axes(*args, axes_class=Axes, figure=None, **kwargs):
@@ -244,12 +238,12 @@ def host_axes(*args, axes_class=Axes, figure=None, **kwargs):
Parameters
----------
- figure : `matplotlib.figure.Figure`
+ figure : `~matplotlib.figure.Figure`
Figure to which the axes will be added. Defaults to the current figure
`.pyplot.gcf()`.
*args, **kwargs
- Will be passed on to the underlying ``Axes`` object creation.
+ Will be passed on to the underlying `~.axes.Axes` object creation.
"""
import matplotlib.pyplot as plt
host_axes_class = host_axes_class_factory(axes_class)
@@ -260,23 +254,4 @@ def host_axes(*args, axes_class=Axes, figure=None, **kwargs):
return ax
-def host_subplot(*args, axes_class=Axes, figure=None, **kwargs):
- """
- Create a subplot that can act as a host to parasitic axes.
-
- Parameters
- ----------
- figure : `matplotlib.figure.Figure`
- Figure to which the subplot will be added. Defaults to the current
- figure `.pyplot.gcf()`.
-
- *args, **kwargs
- Will be passed on to the underlying ``Axes`` object creation.
- """
- import matplotlib.pyplot as plt
- host_subplot_class = host_subplot_class_factory(axes_class)
- if figure is None:
- figure = plt.gcf()
- ax = host_subplot_class(figure, *args, **kwargs)
- figure.add_subplot(ax)
- return ax
+host_subplot = host_axes
diff --git a/lib/mpl_toolkits/axisartist/__init__.py b/lib/mpl_toolkits/axisartist/__init__.py
index f6d4fe9252dd..47242cf7f0c5 100644
--- a/lib/mpl_toolkits/axisartist/__init__.py
+++ b/lib/mpl_toolkits/axisartist/__init__.py
@@ -5,10 +5,9 @@
from .grid_helper_curvelinear import GridHelperCurveLinear
from .floating_axes import FloatingAxes, FloatingSubplot
from mpl_toolkits.axes_grid1.parasite_axes import (
- host_axes_class_factory, parasite_axes_class_factory,
- subplot_class_factory)
+ host_axes_class_factory, parasite_axes_class_factory)
ParasiteAxes = parasite_axes_class_factory(Axes)
HostAxes = host_axes_class_factory(Axes)
-SubplotHost = subplot_class_factory(HostAxes)
+SubplotHost = HostAxes
diff --git a/lib/mpl_toolkits/axisartist/angle_helper.py b/lib/mpl_toolkits/axisartist/angle_helper.py
index 7f49a657ba52..1786cd70bcdb 100644
--- a/lib/mpl_toolkits/axisartist/angle_helper.py
+++ b/lib/mpl_toolkits/axisartist/angle_helper.py
@@ -284,7 +284,7 @@ def __call__(self, direction, factor, values):
return r
else: # factor > 3600.
- return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
+ return [r"$%s^{\circ}$" % v for v in ss*values]
class FormatterHMS(FormatterDMS):
diff --git a/lib/mpl_toolkits/axisartist/axes_grid.py b/lib/mpl_toolkits/axisartist/axes_grid.py
index d90097228329..27877a238b7d 100644
--- a/lib/mpl_toolkits/axisartist/axes_grid.py
+++ b/lib/mpl_toolkits/axisartist/axes_grid.py
@@ -1,13 +1,7 @@
-from matplotlib import _api
import mpl_toolkits.axes_grid1.axes_grid as axes_grid_orig
from .axislines import Axes
-@_api.deprecated("3.5")
-class CbarAxes(axes_grid_orig.CbarAxesBase, Axes):
- pass
-
-
class Grid(axes_grid_orig.Grid):
_defaultAxesClass = Axes
diff --git a/lib/mpl_toolkits/axisartist/axes_rgb.py b/lib/mpl_toolkits/axisartist/axes_rgb.py
index 22d778654b25..20c1f7fd233b 100644
--- a/lib/mpl_toolkits/axisartist/axes_rgb.py
+++ b/lib/mpl_toolkits/axisartist/axes_rgb.py
@@ -4,4 +4,8 @@
class RGBAxes(_RGBAxes):
+ """
+ Subclass of `~.axes_grid1.axes_rgb.RGBAxes` with
+ ``_defaultAxesClass`` = `.axislines.Axes`.
+ """
_defaultAxesClass = Axes
diff --git a/lib/mpl_toolkits/axisartist/axis_artist.py b/lib/mpl_toolkits/axisartist/axis_artist.py
index 986a1c0cca0e..08bb73b08e11 100644
--- a/lib/mpl_toolkits/axisartist/axis_artist.py
+++ b/lib/mpl_toolkits/axisartist/axis_artist.py
@@ -43,11 +43,11 @@
ticklabel), which gives 0 for bottom axis.
=================== ====== ======== ====== ========
-Parameter left bottom right top
+Property left bottom right top
=================== ====== ======== ====== ========
-ticklabels location left right right left
+ticklabel location left right right left
axislabel location left right right left
-ticklabels angle 90 0 -90 180
+ticklabel angle 90 0 -90 180
axislabel angle 180 0 0 180
ticklabel va center baseline center baseline
axislabel va center top center bottom
@@ -106,13 +106,13 @@ def get_attribute_from_ref_artist(self, attr_name):
class Ticks(AttributeCopier, Line2D):
"""
- Ticks are derived from Line2D, and note that ticks themselves
+ Ticks are derived from `.Line2D`, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
To change the tick size (length), you need to use
- set_ticksize. To change the direction of the ticks (ticks are
+ `set_ticksize`. To change the direction of the ticks (ticks are
in opposite direction of ticklabels by default), use
- set_tick_out(False).
+ ``set_tick_out(False)``.
"""
def __init__(self, ticksize, tick_out=False, *, axis=None, **kwargs):
@@ -202,8 +202,8 @@ def draw(self, renderer):
class LabelBase(mtext.Text):
"""
- A base class for AxisLabel and TickLabels. The position and angle
- of the text are calculated by to offset_ref_angle,
+ A base class for `.AxisLabel` and `.TickLabels`. The position and
+ angle of the text are calculated by the offset_ref_angle,
text_ref_angle, and offset_radius attributes.
"""
@@ -274,11 +274,11 @@ def get_window_extent(self, renderer=None):
class AxisLabel(AttributeCopier, LabelBase):
"""
- Axis Label. Derived from Text. The position of the text is updated
+ Axis label. Derived from `.Text`. The position of the text is updated
in the fly, so changing text position has no effect. Otherwise, the
- properties can be changed as a normal Text.
+ properties can be changed as a normal `.Text`.
- To change the pad between ticklabels and axis label, use set_pad.
+ To change the pad between tick labels and axis label, use `set_pad`.
"""
def __init__(self, *args, axis_direction="bottom", axis=None, **kwargs):
@@ -293,7 +293,12 @@ def set_pad(self, pad):
Set the internal pad in points.
The actual pad will be the sum of the internal pad and the
- external pad (the latter is set automatically by the AxisArtist).
+ external pad (the latter is set automatically by the `.AxisArtist`).
+
+ Parameters
+ ----------
+ pad : float
+ The internal pad in points.
"""
self._pad = pad
@@ -310,6 +315,7 @@ def get_ref_artist(self):
return self._axis.get_label()
def get_text(self):
+ # docstring inherited
t = super().get_text()
if t == "__from_axes__":
return self._axis.get_label().get_text()
@@ -321,6 +327,13 @@ def get_text(self):
top=("bottom", "center"))
def set_default_alignment(self, d):
+ """
+ Set the default alignment. See `set_axis_direction` for details.
+
+ Parameters
+ ----------
+ d : {"left", "bottom", "right", "top"}
+ """
va, ha = _api.check_getitem(self._default_alignments, d=d)
self.set_va(va)
self.set_ha(ha)
@@ -331,6 +344,13 @@ def set_default_alignment(self, d):
top=180)
def set_default_angle(self, d):
+ """
+ Set the default angle. See `set_axis_direction` for details.
+
+ Parameters
+ ----------
+ d : {"left", "bottom", "right", "top"}
+ """
self.set_rotation(_api.check_getitem(self._default_angles, d=d))
def set_axis_direction(self, d):
@@ -339,7 +359,7 @@ def set_axis_direction(self, d):
according to the matplotlib convention.
===================== ========== ========= ========== ==========
- property left bottom right top
+ Property left bottom right top
===================== ========== ========= ========== ==========
axislabel angle 180 0 0 180
axislabel va center top center bottom
@@ -349,6 +369,10 @@ def set_axis_direction(self, d):
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
+
+ Parameters
+ ----------
+ d : {"left", "bottom", "right", "top"}
"""
self.set_default_alignment(d)
self.set_default_angle(d)
@@ -381,14 +405,14 @@ def get_window_extent(self, renderer=None):
class TickLabels(AxisLabel): # mtext.Text
"""
- Tick Labels. While derived from Text, this single artist draws all
- ticklabels. As in AxisLabel, the position of the text is updated
+ Tick labels. While derived from `.Text`, this single artist draws all
+ ticklabels. As in `.AxisLabel`, the position of the text is updated
in the fly, so changing text position has no effect. Otherwise,
- the properties can be changed as a normal Text. Unlike the
- ticklabels of the mainline matplotlib, properties of single
- ticklabel alone cannot modified.
+ the properties can be changed as a normal `.Text`. Unlike the
+ ticklabels of the mainline Matplotlib, properties of a single
+ ticklabel alone cannot be modified.
- To change the pad between ticks and ticklabels, use set_pad.
+ To change the pad between ticks and ticklabels, use `~.AxisLabel.set_pad`.
"""
def __init__(self, *, axis_direction="bottom", **kwargs):
@@ -403,14 +427,14 @@ def get_ref_artist(self):
def set_axis_direction(self, label_direction):
"""
Adjust the text angle and text alignment of ticklabels
- according to the matplotlib convention.
+ according to the Matplotlib convention.
The *label_direction* must be one of [left, right, bottom, top].
===================== ========== ========= ========== ==========
- property left bottom right top
+ Property left bottom right top
===================== ========== ========= ========== ==========
- ticklabels angle 90 0 -90 180
+ ticklabel angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
===================== ========== ========= ========== ==========
@@ -418,6 +442,11 @@ def set_axis_direction(self, label_direction):
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
+
+ Parameters
+ ----------
+ label_direction : {"left", "bottom", "right", "top"}
+
"""
self.set_default_alignment(label_direction)
self.set_default_angle(label_direction)
@@ -568,10 +597,16 @@ def get_texts_widths_heights_descents(self, renderer):
class GridlinesCollection(LineCollection):
def __init__(self, *args, which="major", axis="both", **kwargs):
"""
+ Collection of grid lines.
+
Parameters
----------
which : {"major", "minor"}
+ Which grid to consider.
axis : {"both", "x", "y"}
+ Which axis to consider.
+ *args, **kwargs :
+ Passed to `.LineCollection`.
"""
self._which = which
self._axis = axis
@@ -579,12 +614,33 @@ def __init__(self, *args, which="major", axis="both", **kwargs):
self.set_grid_helper(None)
def set_which(self, which):
+ """
+ Select major or minor grid lines.
+
+ Parameters
+ ----------
+ which : {"major", "minor"}
+ """
self._which = which
def set_axis(self, axis):
+ """
+ Select axis.
+
+ Parameters
+ ----------
+ axis : {"both", "x", "y"}
+ """
self._axis = axis
def set_grid_helper(self, grid_helper):
+ """
+ Set grid helper.
+
+ Parameters
+ ----------
+ grid_helper : `.GridHelperBase` subclass
+ """
self._grid_helper = grid_helper
def draw(self, renderer):
@@ -598,7 +654,7 @@ def draw(self, renderer):
class AxisArtist(martist.Artist):
"""
An artist which draws axis (a line along which the n-th axes coord
- is constant) line, ticks, ticklabels, and axis label.
+ is constant) line, ticks, tick labels, and axis label.
"""
zorder = 2.5
@@ -659,18 +715,18 @@ def __init__(self, axes,
def set_axis_direction(self, axis_direction):
"""
- Adjust the direction, text angle, text alignment of
- ticklabels, labels following the matplotlib convention for
- the rectangle axes.
+ Adjust the direction, text angle, and text alignment of tick labels
+ and axis labels following the Matplotlib convention for the rectangle
+ axes.
The *axis_direction* must be one of [left, right, bottom, top].
===================== ========== ========= ========== ==========
- property left bottom right top
+ Property left bottom right top
===================== ========== ========= ========== ==========
- ticklabels location "-" "+" "+" "-"
- axislabel location "-" "+" "+" "-"
- ticklabels angle 90 0 -90 180
+ ticklabel direction "-" "+" "+" "-"
+ axislabel direction "-" "+" "+" "-"
+ ticklabel angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
axislabel angle 180 0 0 180
@@ -682,6 +738,10 @@ def set_axis_direction(self, axis_direction):
the increasing coordinate. Also, the text angles are actually
relative to (90 + angle of the direction to the ticklabel),
which gives 0 for bottom axis.
+
+ Parameters
+ ----------
+ axis_direction : {"left", "bottom", "right", "top"}
"""
self.major_ticklabels.set_axis_direction(axis_direction)
self.label.set_axis_direction(axis_direction)
@@ -695,9 +755,9 @@ def set_axis_direction(self, axis_direction):
def set_ticklabel_direction(self, tick_direction):
r"""
- Adjust the direction of the ticklabel.
+ Adjust the direction of the tick labels.
- Note that the *label_direction*\s '+' and '-' are relative to the
+ Note that the *tick_direction*\s '+' and '-' are relative to the
direction of the increasing coordinate.
Parameters
@@ -714,7 +774,7 @@ def invert_ticklabel_direction(self):
def set_axislabel_direction(self, label_direction):
r"""
- Adjust the direction of the axislabel.
+ Adjust the direction of the axis label.
Note that the *label_direction*\s '+' and '-' are relative to the
direction of the increasing coordinate.
@@ -754,6 +814,7 @@ def set_axisline_style(self, axisline_style=None, **kwargs):
Examples
--------
The following two commands are equal:
+
>>> set_axisline_style("->,size=1.5")
>>> set_axisline_style("->", size=1.5)
"""
@@ -974,6 +1035,7 @@ def _draw_label(self, renderer):
self.label.draw(renderer)
def set_label(self, s):
+ # docstring inherited
self.label.set_text(s)
def get_tightbbox(self, renderer=None):
@@ -1020,7 +1082,7 @@ def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
To turn all on but (axis) label off ::
- axis.toggle(all=True, label=False))
+ axis.toggle(all=True, label=False)
"""
if all:
diff --git a/lib/mpl_toolkits/axisartist/axislines.py b/lib/mpl_toolkits/axisartist/axislines.py
index fdbf41580f03..c52fb347abd2 100644
--- a/lib/mpl_toolkits/axisartist/axislines.py
+++ b/lib/mpl_toolkits/axisartist/axislines.py
@@ -20,8 +20,8 @@
instance responsible to draw left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
-AxisArtist can be considered as a container artist and
-has following children artists which will draw ticks, labels, etc.
+AxisArtist can be considered as a container artist and has the following
+children artists which will draw ticks, labels, etc.
* line
* major_ticks, major_ticklabels
@@ -52,48 +52,35 @@
class AxisArtistHelper:
"""
- AxisArtistHelper should define
- following method with given APIs. Note that the first axes argument
- will be axes attribute of the caller artist.::
+ Axis helpers should define the methods listed below. The *axes* argument
+ will be the axes attribute of the caller artist.
+ ::
- # LINE (spinal line?)
-
- def get_line(self, axes):
- # path : Path
- return path
+ # Construct the spine.
def get_line_transform(self, axes):
- # ...
- # trans : transform
- return trans
+ return transform
- # LABEL
+ def get_line(self, axes):
+ return path
- def get_label_pos(self, axes):
- # x, y : position
- return (x, y), trans
+ # Construct the label.
+ def get_axislabel_transform(self, axes):
+ return transform
- def get_label_offset_transform(self,
- axes,
- pad_points, fontprops, renderer,
- bboxes,
- ):
- # va : vertical alignment
- # ha : horizontal alignment
- # a : angle
- return trans, va, ha, a
+ def get_axislabel_pos_angle(self, axes):
+ return (x, y), angle
- # TICK
+ # Construct the ticks.
def get_tick_transform(self, axes):
- return trans
+ return transform
def get_tick_iterators(self, axes):
- # iter : iterable object that yields (c, angle, l) where
- # c, angle, l is position, tick angle, and label
-
+ # A pair of iterables (one for major ticks, one for minor ticks)
+ # that yield (tick_position, tick_angle, tick_label).
return iter_major, iter_minor
"""
@@ -117,17 +104,14 @@ class Fixed(_Base):
top=(0, 1))
def __init__(self, loc, nth_coord=None):
- """
- nth_coord = along which coordinate value varies
- in 2D, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
- """
+ """``nth_coord = 0``: x-axis; ``nth_coord = 1``: y-axis."""
_api.check_in_list(["left", "right", "bottom", "top"], loc=loc)
self._loc = loc
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
- elif loc in ["bottom", "top"]:
+ else: # "bottom", "top"
nth_coord = 0
self.nth_coord = nth_coord
@@ -210,7 +194,7 @@ def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
if self._loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
- else:
+ else: # "left", "right"
angle_normal, angle_tangent = 0, 90
major = self.axis.major
@@ -327,8 +311,10 @@ def get_gridlines(self, which, axis):
"""
Return list of grid lines as a list of paths (list of points).
- *which* : "major" or "minor"
- *axis* : "both", "x" or "y"
+ Parameters
+ ----------
+ which : {"both", "major", "minor"}
+ axis : {"both", "x", "y"}
"""
return []
@@ -407,23 +393,27 @@ def get_gridlines(self, which="major", axis="both"):
"""
Return list of gridline coordinates in data coordinates.
- *which* : "major" or "minor"
- *axis* : "both", "x" or "y"
+ Parameters
+ ----------
+ which : {"both", "major", "minor"}
+ axis : {"both", "x", "y"}
"""
+ _api.check_in_list(["both", "major", "minor"], which=which)
+ _api.check_in_list(["both", "x", "y"], axis=axis)
gridlines = []
- if axis in ["both", "x"]:
+ if axis in ("both", "x"):
locs = []
y1, y2 = self.axes.get_ylim()
- if which in ["both", "major"]:
+ if which in ("both", "major"):
locs.extend(self.axes.xaxis.major.locator())
- if which in ["both", "minor"]:
+ if which in ("both", "minor"):
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
- if axis in ["both", "y"]:
+ if axis in ("both", "y"):
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._major_tick_kw["gridOn"]:
@@ -513,7 +503,6 @@ def clear(self):
def get_grid_helper(self):
return self._grid_helper
- @_api.rename_parameter("3.5", "b", "visible")
def grid(self, visible=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
@@ -558,9 +547,6 @@ def new_floating_axis(self, nth_coord, value, axis_direction="bottom"):
return axis
-Subplot = maxes.subplot_class_factory(Axes)
-
-
class AxesZero(Axes):
def clear(self):
@@ -577,4 +563,5 @@ def clear(self):
self._axislines[k].set_visible(False)
-SubplotZero = maxes.subplot_class_factory(AxesZero)
+Subplot = Axes
+SubplotZero = AxesZero
diff --git a/lib/mpl_toolkits/axisartist/clip_path.py b/lib/mpl_toolkits/axisartist/clip_path.py
deleted file mode 100644
index 53b75f073a44..000000000000
--- a/lib/mpl_toolkits/axisartist/clip_path.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import numpy as np
-from math import degrees
-from matplotlib import _api
-import math
-
-
-_api.warn_deprecated("3.5", name=__name__, obj_type="module")
-
-
-def atan2(dy, dx):
- if dx == 0 and dy == 0:
- _api.warn_external("dx and dy are 0")
- return 0
- else:
- return math.atan2(dy, dx)
-
-
-# FIXME : The current algorithm seems to return incorrect angle when the line
-# ends at the boundary.
-def clip(xlines, ylines, x0, clip="right", xdir=True, ydir=True):
-
- clipped_xlines = []
- clipped_ylines = []
-
- _pos_angles = []
-
- xsign = 1 if xdir else -1
- ysign = 1 if ydir else -1
-
- for x, y in zip(xlines, ylines):
-
- if clip in ["up", "right"]:
- b = (x < x0).astype("i")
- db = b[1:] - b[:-1]
- else:
- b = (x > x0).astype("i")
- db = b[1:] - b[:-1]
-
- if b[0]:
- ns = 0
- else:
- ns = -1
- segx, segy = [], []
- for (i,) in np.argwhere(db):
- c = db[i]
- if c == -1:
- dx = (x0 - x[i])
- dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
- y0 = y[i] + dy
- clipped_xlines.append(np.concatenate([segx, x[ns:i+1], [x0]]))
- clipped_ylines.append(np.concatenate([segy, y[ns:i+1], [y0]]))
- ns = -1
- segx, segy = [], []
-
- if dx == 0. and dy == 0:
- dx = x[i+1] - x[i]
- dy = y[i+1] - y[i]
-
- a = degrees(atan2(ysign*dy, xsign*dx))
- _pos_angles.append((x0, y0, a))
-
- elif c == 1:
- dx = (x0 - x[i])
- dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
- y0 = y[i] + dy
- segx, segy = [x0], [y0]
- ns = i+1
-
- if dx == 0. and dy == 0:
- dx = x[i+1] - x[i]
- dy = y[i+1] - y[i]
-
- a = degrees(atan2(ysign*dy, xsign*dx))
- _pos_angles.append((x0, y0, a))
-
- if ns != -1:
- clipped_xlines.append(np.concatenate([segx, x[ns:]]))
- clipped_ylines.append(np.concatenate([segy, y[ns:]]))
-
- return clipped_xlines, clipped_ylines, _pos_angles
-
-
-def clip_line_to_rect(xline, yline, bbox):
-
- x0, y0, x1, y1 = bbox.extents
-
- xdir = x1 > x0
- ydir = y1 > y0
-
- if x1 > x0:
- lx1, ly1, c_right_ = clip([xline], [yline], x1,
- clip="right", xdir=xdir, ydir=ydir)
- lx2, ly2, c_left_ = clip(lx1, ly1, x0,
- clip="left", xdir=xdir, ydir=ydir)
- else:
- lx1, ly1, c_right_ = clip([xline], [yline], x0,
- clip="right", xdir=xdir, ydir=ydir)
- lx2, ly2, c_left_ = clip(lx1, ly1, x1,
- clip="left", xdir=xdir, ydir=ydir)
-
- if y1 > y0:
- ly3, lx3, c_top_ = clip(ly2, lx2, y1,
- clip="right", xdir=ydir, ydir=xdir)
- ly4, lx4, c_bottom_ = clip(ly3, lx3, y0,
- clip="left", xdir=ydir, ydir=xdir)
- else:
- ly3, lx3, c_top_ = clip(ly2, lx2, y0,
- clip="right", xdir=ydir, ydir=xdir)
- ly4, lx4, c_bottom_ = clip(ly3, lx3, y1,
- clip="left", xdir=ydir, ydir=xdir)
-
- c_left = [((x, y), (a + 90) % 180 - 90) for x, y, a in c_left_
- if bbox.containsy(y)]
- c_bottom = [((x, y), (90 - a) % 180) for y, x, a in c_bottom_
- if bbox.containsx(x)]
- c_right = [((x, y), (a + 90) % 180 + 90) for x, y, a in c_right_
- if bbox.containsy(y)]
- c_top = [((x, y), (90 - a) % 180 + 180) for y, x, a in c_top_
- if bbox.containsx(x)]
-
- return list(zip(lx4, ly4)), [c_left, c_bottom, c_right, c_top]
diff --git a/lib/mpl_toolkits/axisartist/floating_axes.py b/lib/mpl_toolkits/axisartist/floating_axes.py
index d86c3db6c85a..92f5ab2d2c7f 100644
--- a/lib/mpl_toolkits/axisartist/floating_axes.py
+++ b/lib/mpl_toolkits/axisartist/floating_axes.py
@@ -11,7 +11,6 @@
import matplotlib as mpl
from matplotlib import _api, cbook
-import matplotlib.axes as maxes
import matplotlib.patches as mpatches
from matplotlib.path import Path
@@ -275,25 +274,6 @@ def get_gridlines(self, which="major", axis="both"):
grid_lines.extend(self._grid_info["lat_lines"])
return grid_lines
- @_api.deprecated("3.5")
- def get_boundary(self):
- """
- Return (N, 2) array of (x, y) coordinate of the boundary.
- """
- x0, x1, y0, y1 = self._extremes
-
- xx = np.linspace(x0, x1, 100)
- yy0 = np.full_like(xx, y0)
- yy1 = np.full_like(xx, y1)
- yy = np.linspace(y0, y1, 100)
- xx0 = np.full_like(yy, x0)
- xx1 = np.full_like(yy, x1)
-
- xxx = np.concatenate([xx[:-1], xx1[:-1], xx[-1:0:-1], xx0])
- yyy = np.concatenate([yy0[:-1], yy[:-1], yy1[:-1], yy[::-1]])
-
- return self._aux_trans.transform(np.column_stack([xxx, yyy]))
-
class FloatingAxesBase:
@@ -339,4 +319,4 @@ def adjust_axes_lim(self):
FloatingAxesBase, "Floating{}")
FloatingAxes = floatingaxes_class_factory(
host_axes_class_factory(axislines.Axes))
-FloatingSubplot = maxes.subplot_class_factory(FloatingAxes)
+FloatingSubplot = FloatingAxes
diff --git a/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py b/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
index 34117aac880b..3e4ae747e853 100644
--- a/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
+++ b/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
@@ -8,7 +8,6 @@
import numpy as np
import matplotlib as mpl
-from matplotlib import _api
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, IdentityTransform
from .axislines import AxisArtistHelper, GridHelperBase
@@ -39,15 +38,6 @@ def __init__(self, grid_helper, side, nth_coord_ticks=None):
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
- @_api.deprecated("3.5")
- def change_tick_coord(self, coord_number=None):
- if coord_number is None:
- self.nth_coord_ticks = 1 - self.nth_coord_ticks
- elif coord_number in [0, 1]:
- self.nth_coord_ticks = coord_number
- else:
- raise Exception("wrong coord number")
-
def get_tick_transform(self, axes):
return axes.transData
@@ -66,8 +56,6 @@ def get_tick_iterators(self, axes):
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
- grid_info = _api.deprecate_privatize_attribute("3.5")
-
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
@@ -252,8 +240,6 @@ def get_line(self, axes):
class GridHelperCurveLinear(GridHelperBase):
- grid_info = _api.deprecate_privatize_attribute("3.5")
-
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
diff --git a/lib/mpl_toolkits/axisartist/parasite_axes.py b/lib/mpl_toolkits/axisartist/parasite_axes.py
index feeecf6d907b..4ebd6acc03be 100644
--- a/lib/mpl_toolkits/axisartist/parasite_axes.py
+++ b/lib/mpl_toolkits/axisartist/parasite_axes.py
@@ -1,9 +1,7 @@
from mpl_toolkits.axes_grid1.parasite_axes import (
- host_axes_class_factory, parasite_axes_class_factory,
- subplot_class_factory)
+ host_axes_class_factory, parasite_axes_class_factory)
from .axislines import Axes
ParasiteAxes = parasite_axes_class_factory(Axes)
-HostAxes = host_axes_class_factory(Axes)
-SubplotHost = subplot_class_factory(HostAxes)
+HostAxes = SubplotHost = host_axes_class_factory(Axes)
diff --git a/lib/mpl_toolkits/mplot3d/art3d.py b/lib/mpl_toolkits/mplot3d/art3d.py
index 82c6d8bbf290..24ad0634c7a8 100644
--- a/lib/mpl_toolkits/mplot3d/art3d.py
+++ b/lib/mpl_toolkits/mplot3d/art3d.py
@@ -12,7 +12,8 @@
import numpy as np
from matplotlib import (
- artist, cbook, colors as mcolors, lines, text as mtext, path as mpath)
+ artist, cbook, colors as mcolors, lines, text as mtext,
+ path as mpath)
from matplotlib.collections import (
LineCollection, PolyCollection, PatchCollection, PathCollection)
from matplotlib.colors import Normalize
@@ -76,7 +77,7 @@ class Text3D(mtext.Text):
Parameters
----------
- x, y, z
+ x, y, z : float
The position of the text.
text : str
The text string to display.
@@ -107,8 +108,8 @@ def set_position_3d(self, xyz, zdir=None):
xyz : (float, float, float)
The position in 3D space.
zdir : {'x', 'y', 'z', None, 3-tuple}
- The direction of the text. If unspecified, the zdir will not be
- changed.
+ The direction of the text. If unspecified, the *zdir* will not be
+ changed. See `.get_dir_vector` for a description of the values.
"""
super().set_position(xyz[:2])
self.set_z(xyz[2])
@@ -127,6 +128,17 @@ def set_z(self, z):
self.stale = True
def set_3d_properties(self, z=0, zdir='z'):
+ """
+ Set the *z* position and direction of the text.
+
+ Parameters
+ ----------
+ z : float
+ The z-position in 3D space.
+ zdir : {'x', 'y', 'z', 3-tuple}
+ The direction of the text. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
self._z = z
self._dir_vec = get_dir_vector(zdir)
self.stale = True
@@ -151,7 +163,17 @@ def get_tightbbox(self, renderer=None):
def text_2d_to_3d(obj, z=0, zdir='z'):
- """Convert a Text to a Text3D object."""
+ """
+ Convert a `.Text` to a `.Text3D` object.
+
+ Parameters
+ ----------
+ z : float
+ The z-position in 3D space.
+ zdir : {'x', 'y', 'z', 3-tuple}
+ The direction of the text. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
@@ -163,12 +185,34 @@ class Line3D(lines.Line2D):
def __init__(self, xs, ys, zs, *args, **kwargs):
"""
- Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
+
+ Parameters
+ ----------
+ xs : array-like
+ The x-data to be plotted.
+ ys : array-like
+ The y-data to be plotted.
+ zs : array-like
+ The z-data to be plotted.
+
+ Additional arguments are passed onto :func:`~matplotlib.lines.Line2D`.
"""
super().__init__([], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
+ """
+ Set the *z* position and direction of the line.
+
+ Parameters
+ ----------
+ zs : float or array of floats
+ The location along the *zdir* axis in 3D space to position the
+ line.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot line orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
xs = self.get_xdata()
ys = self.get_ydata()
zs = cbook._to_unmasked_float_array(zs).ravel()
@@ -220,7 +264,17 @@ def draw(self, renderer):
def line_2d_to_3d(line, zs=0, zdir='z'):
- """Convert a 2D line to 3D."""
+ """
+ Convert a `.Line2D` to a `.Line3D` object.
+
+ Parameters
+ ----------
+ zs : float
+ The location along the *zdir* axis in 3D space to position the line.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot line orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
@@ -314,7 +368,7 @@ def do_3d_projection(self):
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
- """Convert a LineCollection to a Line3DCollection object."""
+ """Convert a `.LineCollection` to a `.Line3DCollection` object."""
segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
@@ -326,10 +380,34 @@ class Patch3D(Patch):
"""
def __init__(self, *args, zs=(), zdir='z', **kwargs):
+ """
+ Parameters
+ ----------
+ verts :
+ zs : float
+ The location along the *zdir* axis in 3D space to position the
+ patch.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot patch orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
+ """
+ Set the *z* position and direction of the patch.
+
+ Parameters
+ ----------
+ verts :
+ zs : float
+ The location along the *zdir* axis in 3D space to position the
+ patch.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot patch orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
zs = np.broadcast_to(zs, len(verts))
self._segment3d = [juggle_axes(x, y, z, zdir)
for ((x, y), z) in zip(verts, zs)]
@@ -352,11 +430,35 @@ class PathPatch3D(Patch3D):
"""
def __init__(self, path, *, zs=(), zdir='z', **kwargs):
+ """
+ Parameters
+ ----------
+ path :
+ zs : float
+ The location along the *zdir* axis in 3D space to position the
+ path patch.
+ zdir : {'x', 'y', 'z', 3-tuple}
+ Plane to plot path patch orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
# Not super().__init__!
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
+ """
+ Set the *z* position and direction of the path patch.
+
+ Parameters
+ ----------
+ path :
+ zs : float
+ The location along the *zdir* axis in 3D space to position the
+ path patch.
+ zdir : {'x', 'y', 'z', 3-tuple}
+ Plane to plot path patch orthogonal to. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
@@ -378,14 +480,14 @@ def _get_patch_verts(patch):
def patch_2d_to_3d(patch, z=0, zdir='z'):
- """Convert a Patch to a Patch3D object."""
+ """Convert a `.Patch` to a `.Patch3D` object."""
verts = _get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
- """Convert a PathPatch to a PathPatch3D object."""
+ """Convert a `.PathPatch` to a `.PathPatch3D` object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
@@ -441,6 +543,19 @@ def set_sort_zpos(self, val):
self.stale = True
def set_3d_properties(self, zs, zdir):
+ """
+ Set the *z* positions and direction of the patches.
+
+ Parameters
+ ----------
+ zs : float or array of floats
+ The location or locations to place the patches in the collection
+ along the *zdir* axis.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot patches orthogonal to.
+ All patches must have the same direction.
+ See `.get_dir_vector` for a description of the values.
+ """
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
@@ -525,6 +640,19 @@ def set_sort_zpos(self, val):
self.stale = True
def set_3d_properties(self, zs, zdir):
+ """
+ Set the *z* positions and direction of the paths.
+
+ Parameters
+ ----------
+ zs : float or array of floats
+ The location or locations to place the paths in the collection
+ along the *zdir* axis.
+ zdir : {'x', 'y', 'z'}
+ Plane to plot paths orthogonal to.
+ All paths must have the same direction.
+ See `.get_dir_vector` for a description of the values.
+ """
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
@@ -636,18 +764,17 @@ def get_edgecolor(self):
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
- Convert a :class:`~matplotlib.collections.PatchCollection` into a
- :class:`Patch3DCollection` object
- (or a :class:`~matplotlib.collections.PathCollection` into a
- :class:`Path3DCollection` object).
+ Convert a `.PatchCollection` into a `.Patch3DCollection` object
+ (or a `.PathCollection` into a `.Path3DCollection` object).
Parameters
----------
- za
+ zs : float or array of floats
The location or locations to place the patches in the collection along
the *zdir* axis. Default: 0.
- zdir
+ zdir : {'x', 'y', 'z'}
The axis in which to place the patches. Default: "z".
+ See `.get_dir_vector` for a description of the values.
depthshade
Whether to shade the patches to give a sense of depth. Default: *True*.
@@ -682,16 +809,29 @@ class Poly3DCollection(PolyCollection):
triangulation and thus generates consistent surfaces.
"""
- def __init__(self, verts, *args, zsort='average', **kwargs):
+ def __init__(self, verts, *args, zsort='average', shade=False,
+ lightsource=None, **kwargs):
"""
Parameters
----------
verts : list of (N, 3) array-like
- Each element describes a polygon as a sequence of ``N_i`` points
- ``(x, y, z)``.
+ The sequence of polygons [*verts0*, *verts1*, ...] where each
+ element *verts_i* defines the vertices of polygon *i* as a 2D
+ array-like of shape (N, 3).
zsort : {'average', 'min', 'max'}, default: 'average'
The calculation method for the z-order.
See `~.Poly3DCollection.set_zsort` for details.
+ shade : bool, default: False
+ Whether to shade *facecolors* and *edgecolors*. When activating
+ *shade*, *facecolors* and/or *edgecolors* must be provided.
+
+ .. versionadded:: 3.7
+
+ lightsource : `~matplotlib.colors.LightSource`
+ The lightsource to use when *shade* is True.
+
+ .. versionadded:: 3.7
+
*args, **kwargs
All other parameters are forwarded to `.PolyCollection`.
@@ -700,6 +840,23 @@ def __init__(self, verts, *args, zsort='average', **kwargs):
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
"""
+ if shade:
+ normals = _generate_normals(verts)
+ facecolors = kwargs.get('facecolors', None)
+ if facecolors is not None:
+ kwargs['facecolors'] = _shade_colors(
+ facecolors, normals, lightsource
+ )
+
+ edgecolors = kwargs.get('edgecolors', None)
+ if edgecolors is not None:
+ kwargs['edgecolors'] = _shade_colors(
+ edgecolors, normals, lightsource
+ )
+            if facecolors is None and edgecolors is None:
+ raise ValueError(
+ "You must provide facecolors, edgecolors, or both for "
+ "shade to work.")
super().__init__(verts, *args, **kwargs)
if isinstance(verts, np.ndarray):
if verts.ndim != 3:
@@ -743,7 +900,19 @@ def get_vector(self, segments3d):
self._segslices = [*map(slice, indices[:-1], indices[1:])]
def set_verts(self, verts, closed=True):
- """Set 3D vertices."""
+ """
+ Set 3D vertices.
+
+ Parameters
+ ----------
+ verts : list of (N, 3) array-like
+ The sequence of polygons [*verts0*, *verts1*, ...] where each
+ element *verts_i* defines the vertices of polygon *i* as a 2D
+ array-like of shape (N, 3).
+ closed : bool, default: True
+ Whether the polygon should be closed by adding a CLOSEPOLY
+ connection at the end.
+ """
self.get_vector(verts)
# 2D verts will be updated at draw time
super().set_verts([], False)
@@ -885,7 +1054,18 @@ def get_edgecolor(self):
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
- """Convert a PolyCollection to a Poly3DCollection object."""
+ """
+ Convert a `.PolyCollection` into a `.Poly3DCollection` object.
+
+ Parameters
+ ----------
+ zs : float or array of floats
+ The location or locations to place the polygons in the collection along
+ the *zdir* axis. Default: 0.
+ zdir : {'x', 'y', 'z'}
+        The axis in which to place the polygons. Default: 'z'.
+ See `.get_dir_vector` for a description of the values.
+ """
segments_3d, codes = _paths_to_3d_segments_with_codes(
col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
@@ -895,9 +1075,10 @@ def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
def juggle_axes(xs, ys, zs, zdir):
"""
- Reorder coordinates so that 2D xs, ys can be plotted in the plane
- orthogonal to zdir. zdir is normally x, y or z. However, if zdir
- starts with a '-' it is interpreted as a compensation for rotate_axes.
+ Reorder coordinates so that 2D *xs*, *ys* can be plotted in the plane
+ orthogonal to *zdir*. *zdir* is normally 'x', 'y' or 'z'. However, if
+ *zdir* starts with a '-' it is interpreted as a compensation for
+ `rotate_axes`.
"""
if zdir == 'x':
return zs, xs, ys
@@ -911,20 +1092,14 @@ def juggle_axes(xs, ys, zs, zdir):
def rotate_axes(xs, ys, zs, zdir):
"""
- Reorder coordinates so that the axes are rotated with zdir along
+ Reorder coordinates so that the axes are rotated with *zdir* along
the original z axis. Prepending the axis with a '-' does the
- inverse transform, so zdir can be x, -x, y, -y, z or -z
+ inverse transform, so *zdir* can be 'x', '-x', 'y', '-y', 'z' or '-z'.
"""
- if zdir == 'x':
+ if zdir in ('x', '-y'):
return ys, zs, xs
- elif zdir == '-x':
- return zs, xs, ys
-
- elif zdir == 'y':
+ elif zdir in ('-x', 'y'):
return zs, xs, ys
- elif zdir == '-y':
- return ys, zs, xs
-
else:
return xs, ys, zs
@@ -941,3 +1116,84 @@ def _zalpha(colors, zs):
sats = 1 - norm(zs) * 0.7
rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))
return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])
+
+
+def _generate_normals(polygons):
+ """
+ Compute the normals of a list of polygons, one normal per polygon.
+
+ Normals point towards the viewer for a face with its vertices in
+ counterclockwise order, following the right hand rule.
+
+ Uses three points equally spaced around the polygon. This method assumes
+ that the points are in a plane. Otherwise, more than one shade is required,
+ which is not supported.
+
+ Parameters
+ ----------
+ polygons : list of (M_i, 3) array-like, or (..., M, 3) array-like
+ A sequence of polygons to compute normals for, which can have
+ varying numbers of vertices. If the polygons all have the same
+ number of vertices and array is passed, then the operation will
+ be vectorized.
+
+ Returns
+ -------
+ normals : (..., 3) array
+ A normal vector estimated for the polygon.
+ """
+ if isinstance(polygons, np.ndarray):
+ # optimization: polygons all have the same number of points, so can
+ # vectorize
+ n = polygons.shape[-2]
+ i1, i2, i3 = 0, n//3, 2*n//3
+ v1 = polygons[..., i1, :] - polygons[..., i2, :]
+ v2 = polygons[..., i2, :] - polygons[..., i3, :]
+ else:
+ # The subtraction doesn't vectorize because polygons is jagged.
+ v1 = np.empty((len(polygons), 3))
+ v2 = np.empty((len(polygons), 3))
+ for poly_i, ps in enumerate(polygons):
+ n = len(ps)
+ i1, i2, i3 = 0, n//3, 2*n//3
+ v1[poly_i, :] = ps[i1, :] - ps[i2, :]
+ v2[poly_i, :] = ps[i2, :] - ps[i3, :]
+ return np.cross(v1, v2)
+
+
+def _shade_colors(color, normals, lightsource=None):
+ """
+ Shade *color* using normal vectors given by *normals*,
+ assuming a *lightsource* (using default position if not given).
+ *color* can also be an array of the same length as *normals*.
+ """
+ if lightsource is None:
+ # chosen for backwards-compatibility
+ lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)
+
+ with np.errstate(invalid="ignore"):
+ shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))
+ @ lightsource.direction)
+ mask = ~np.isnan(shade)
+
+ if mask.any():
+ # convert dot product to allowed shading fractions
+ in_norm = mcolors.Normalize(-1, 1)
+ out_norm = mcolors.Normalize(0.3, 1).inverse
+
+ def norm(x):
+ return out_norm(in_norm(x))
+
+ shade[~mask] = 0
+
+ color = mcolors.to_rgba_array(color)
+ # shape of color should be (M, 4) (where M is number of faces)
+ # shape of shade should be (M,)
+ # colors should have final shape of (M, 4)
+ alpha = color[:, 3]
+ colors = norm(shade)[:, np.newaxis] * color
+ colors[:, 3] = alpha
+ else:
+ colors = np.asanyarray(color).copy()
+
+ return colors
diff --git a/lib/mpl_toolkits/mplot3d/axes3d.py b/lib/mpl_toolkits/mplot3d/axes3d.py
index c50601246376..eebdd1313ea6 100644
--- a/lib/mpl_toolkits/mplot3d/axes3d.py
+++ b/lib/mpl_toolkits/mplot3d/axes3d.py
@@ -32,7 +32,7 @@
from matplotlib.axes import Axes
from matplotlib.axes._base import _axis_method_wrapper, _process_plot_format
from matplotlib.transforms import Bbox
-from matplotlib.tri.triangulation import Triangulation
+from matplotlib.tri._triangulation import Triangulation
from . import art3d
from . import proj3d
@@ -52,6 +52,10 @@ class Axes3D(Axes):
Axes._shared_axes["z"] = cbook.Grouper()
dist = _api.deprecate_privatize_attribute("3.6")
+ vvec = _api.deprecate_privatize_attribute("3.7")
+ eye = _api.deprecate_privatize_attribute("3.7")
+ sx = _api.deprecate_privatize_attribute("3.7")
+ sy = _api.deprecate_privatize_attribute("3.7")
def __init__(
self, fig, rect=None, *args,
@@ -95,14 +99,6 @@ def __init__(
does not produce the desired result. Note however, that a manual
zorder will only be correct for a limited view angle. If the figure
is rotated by the user, it will look wrong from certain angles.
- auto_add_to_figure : bool, default: False
- Prior to Matplotlib 3.4 Axes3D would add themselves
- to their host Figure on init. Other Axes class do not
- do this.
-
- This behavior is deprecated in 3.4, the default is
- changed to False in 3.6. The keyword will be undocumented
- and a non-False value will be an error in 3.7.
focal_length : float, default: None
For a projection type of 'persp', the focal length of the virtual
camera. Must be > 0. If None, defaults to 1.
@@ -141,7 +137,11 @@ def __init__(
self._shared_axes["z"].join(self, sharez)
self._adjustable = 'datalim'
- auto_add_to_figure = kwargs.pop('auto_add_to_figure', False)
+ if kwargs.pop('auto_add_to_figure', False):
+ raise AttributeError(
+ 'auto_add_to_figure is no longer supported for Axes3D. '
+ 'Use fig.add_axes(ax) instead.'
+ )
super().__init__(
fig, rect, frameon=True, box_aspect=box_aspect, *args, **kwargs
@@ -173,18 +173,6 @@ def __init__(
# for bounding box calculations
self.spines[:].set_visible(False)
- if auto_add_to_figure:
- _api.warn_deprecated(
- "3.4", removal="3.7", message="Axes3D(fig) adding itself "
- "to the figure is deprecated since %(since)s. "
- "Pass the keyword argument auto_add_to_figure=False "
- "and use fig.add_axes(ax) to suppress this warning. "
- "The default value of auto_add_to_figure is changed to "
- "False in mpl3.6 and True values will "
- "no longer work %(removal)s. This is consistent with "
- "other Axes classes.")
- fig.add_axes(self)
-
def set_axis_off(self):
self._axis3don = False
self.stale = True
@@ -232,7 +220,11 @@ def get_zaxis(self):
w_zaxis = _api.deprecated("3.1", alternative="zaxis", removal="3.8")(
property(lambda self: self.zaxis))
+ @_api.deprecated("3.7")
def unit_cube(self, vals=None):
+ return self._unit_cube(vals)
+
+ def _unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
return [(minx, miny, minz),
(maxx, miny, minz),
@@ -243,15 +235,23 @@ def unit_cube(self, vals=None):
(maxx, maxy, maxz),
(minx, maxy, maxz)]
+ @_api.deprecated("3.7")
def tunit_cube(self, vals=None, M=None):
+ return self._tunit_cube(vals, M)
+
+ def _tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
- xyzs = self.unit_cube(vals)
+ xyzs = self._unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
+ @_api.deprecated("3.7")
def tunit_edges(self, vals=None, M=None):
- tc = self.tunit_cube(vals, M)
+ return self._tunit_edges(vals, M)
+
+ def _tunit_edges(self, vals=None, M=None):
+ tc = self._tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
@@ -287,9 +287,7 @@ def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
'equalyz' adapt the y and z axes to have equal aspect ratios.
========= ==================================================
- adjustable : None
- Currently ignored by Axes3D
-
+ adjustable : None or {'box', 'datalim'}, optional
If not *None*, this defines which parameter will be adjusted to
meet the required aspect. See `.set_adjustable` for further
details.
@@ -297,7 +295,7 @@ def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
anchor : None or str or 2-tuple of float, optional
If not *None*, this defines where the Axes will be drawn if there
is extra space due to aspect constraints. The most common way to
- to specify the anchor are abbreviations of cardinal directions:
+ specify the anchor are abbreviations of cardinal directions:
===== =====================
value description
@@ -320,34 +318,65 @@ def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
"""
_api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
aspect=aspect)
+ if adjustable is None:
+ adjustable = self._adjustable
+ _api.check_in_list(('box', 'datalim'), adjustable=adjustable)
super().set_aspect(
aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
self._aspect = aspect
if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
- if aspect == 'equal':
- ax_indices = [0, 1, 2]
- elif aspect == 'equalxy':
- ax_indices = [0, 1]
- elif aspect == 'equalxz':
- ax_indices = [0, 2]
- elif aspect == 'equalyz':
- ax_indices = [1, 2]
+ ax_indices = self._equal_aspect_axis_indices(aspect)
view_intervals = np.array([self.xaxis.get_view_interval(),
self.yaxis.get_view_interval(),
self.zaxis.get_view_interval()])
- mean = np.mean(view_intervals, axis=1)
ptp = np.ptp(view_intervals, axis=1)
- delta = max(ptp[ax_indices])
- scale = self._box_aspect[ptp == delta][0]
- deltas = delta * self._box_aspect / scale
+ if adjustable == 'datalim':
+ mean = np.mean(view_intervals, axis=1)
+ delta = max(ptp[ax_indices])
+ scale = self._box_aspect[ptp == delta][0]
+ deltas = delta * self._box_aspect / scale
+
+ for i, set_lim in enumerate((self.set_xlim3d,
+ self.set_ylim3d,
+ self.set_zlim3d)):
+ if i in ax_indices:
+ set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.)
+ else: # 'box'
+ # Change the box aspect such that the ratio of the length of
+ # the unmodified axis to the length of the diagonal
+ # perpendicular to it remains unchanged.
+ box_aspect = np.array(self._box_aspect)
+ box_aspect[ax_indices] = ptp[ax_indices]
+ remaining_ax_indices = {0, 1, 2}.difference(ax_indices)
+ if remaining_ax_indices:
+ remaining = remaining_ax_indices.pop()
+ old_diag = np.linalg.norm(self._box_aspect[ax_indices])
+ new_diag = np.linalg.norm(box_aspect[ax_indices])
+ box_aspect[remaining] *= new_diag / old_diag
+ self.set_box_aspect(box_aspect)
+
+ def _equal_aspect_axis_indices(self, aspect):
+ """
+ Get the indices for which of the x, y, z axes are constrained to have
+ equal aspect ratios.
- for i, set_lim in enumerate((self.set_xlim3d,
- self.set_ylim3d,
- self.set_zlim3d)):
- if i in ax_indices:
- set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.)
+ Parameters
+ ----------
+ aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}
+ See descriptions in docstring for `.set_aspect()`.
+ """
+ ax_indices = [] # aspect == 'auto'
+ if aspect == 'equal':
+ ax_indices = [0, 1, 2]
+ elif aspect == 'equalxy':
+ ax_indices = [0, 1]
+ elif aspect == 'equalxz':
+ ax_indices = [0, 2]
+ elif aspect == 'equalyz':
+ ax_indices = [1, 2]
+ return ax_indices
def set_box_aspect(self, aspect, *, zoom=1):
"""
@@ -369,7 +398,7 @@ def set_box_aspect(self, aspect, *, zoom=1):
aspect : 3-tuple of floats or None
Changes the physical dimensions of the Axes3D, such that the ratio
of the axis lengths in display units is x:y:z.
- If None, defaults to (4,4,3).
+ If None, defaults to (4, 4, 3).
zoom : float, default: 1
Control overall size of the Axes3D in the figure. Must be > 0.
@@ -463,7 +492,7 @@ def draw(self, renderer):
def get_axis_position(self):
vals = self.get_w_lims()
- tc = self.tunit_cube(vals, self.M)
+ tc = self._tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
@@ -847,15 +876,13 @@ def get_proj(self):
pb_aspect=box_aspect,
)
- # Look into the middle of the new coordinates:
+ # Look into the middle of the world coordinates:
R = 0.5 * box_aspect
# elev stores the elevation angle in the z plane
# azim stores the azimuth angle in the x,y plane
- # roll stores the roll angle about the view axis
elev_rad = np.deg2rad(art3d._norm_angle(self.elev))
azim_rad = np.deg2rad(art3d._norm_angle(self.azim))
- roll_rad = np.deg2rad(art3d._norm_angle(self.roll))
# Coordinates for a point that rotates around the box of data.
# p0, p1 corresponds to rotating the box only around the
@@ -874,27 +901,27 @@ def get_proj(self):
# towards the middle of the box of data from a distance:
eye = R + self._dist * ps
- # TODO: Is this being used somewhere? Can it be removed?
- self.eye = eye
- self.vvec = R - eye
- self.vvec = self.vvec / np.linalg.norm(self.vvec)
+ # vvec, self._vvec and self._eye are unused, remove when deprecated
+ vvec = R - eye
+ self._eye = eye
+ self._vvec = vvec / np.linalg.norm(vvec)
- # Define which axis should be vertical. A negative value
- # indicates the plot is upside down and therefore the values
- # have been reversed:
- V = np.zeros(3)
- V[self._vertical_axis] = -1 if abs(elev_rad) > 0.5 * np.pi else 1
+ # Calculate the viewing axes for the eye position
+ u, v, w = self._calc_view_axes(eye)
+ self._view_u = u # _view_u is towards the right of the screen
+ self._view_v = v # _view_v is towards the top of the screen
+ self._view_w = w # _view_w is out of the screen
# Generate the view and projection transformation matrices
if self._focal_length == np.inf:
# Orthographic projection
- viewM = proj3d.view_transformation(eye, R, V, roll_rad)
+ viewM = proj3d._view_transformation_uvw(u, v, w, eye)
projM = proj3d.ortho_transformation(-self._dist, self._dist)
else:
# Perspective projection
# Scale the eye dist to compensate for the focal length zoom effect
eye_focal = R + self._dist * ps * self._focal_length
- viewM = proj3d.view_transformation(eye_focal, R, V, roll_rad)
+ viewM = proj3d._view_transformation_uvw(u, v, w, eye_focal)
projM = proj3d.persp_transformation(-self._dist,
self._dist,
self._focal_length)
@@ -904,7 +931,7 @@ def get_proj(self):
M = np.dot(projM, M0)
return M
- def mouse_init(self, rotate_btn=1, zoom_btn=3):
+ def mouse_init(self, rotate_btn=1, pan_btn=2, zoom_btn=3):
"""
Set the mouse buttons for 3D rotation and zooming.
@@ -912,6 +939,8 @@ def mouse_init(self, rotate_btn=1, zoom_btn=3):
----------
rotate_btn : int or list of int, default: 1
The mouse button or buttons to use for 3D rotation of the axes.
+ pan_btn : int or list of int, default: 2
+ The mouse button or buttons to use to pan the 3D axes.
zoom_btn : int or list of int, default: 3
The mouse button or buttons to use to zoom the 3D axes.
"""
@@ -920,27 +949,24 @@ def mouse_init(self, rotate_btn=1, zoom_btn=3):
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
+ self._pan_btn = np.atleast_1d(pan_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
def disable_mouse_rotation(self):
- """Disable mouse buttons for 3D rotation and zooming."""
- self.mouse_init(rotate_btn=[], zoom_btn=[])
+ """Disable mouse buttons for 3D rotation, panning, and zooming."""
+ self.mouse_init(rotate_btn=[], pan_btn=[], zoom_btn=[])
def can_zoom(self):
"""
Return whether this Axes supports the zoom box button functionality.
-
- Axes3D objects do not use the zoom box button.
"""
- return False
+ return True
def can_pan(self):
"""
- Return whether this Axes supports the pan/zoom button functionality.
-
- Axes3d objects do not use the pan/zoom button.
+ Return whether this Axes supports the pan button functionality.
"""
- return False
+ return True
def sharez(self, other):
"""
@@ -973,7 +999,7 @@ def clear(self):
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
- self.sx, self.sy = event.xdata, event.ydata
+ self._sx, self._sy = event.xdata, event.ydata
toolbar = getattr(self.figure.canvas, "toolbar")
if toolbar and toolbar._nav_stack() is None:
self.figure.canvas.toolbar.push_current()
@@ -981,7 +1007,9 @@ def _button_press(self, event):
def _button_release(self, event):
self.button_pressed = None
toolbar = getattr(self.figure.canvas, "toolbar")
- if toolbar:
+ # backend_bases.release_zoom and backend_bases.release_pan call
+ # push_current, so check the navigation mode so we don't call it twice
+ if toolbar and self.get_navigate_mode() is None:
self.figure.canvas.toolbar.push_current()
def _get_view(self):
@@ -1031,7 +1059,7 @@ def format_coord(self, xd, yd):
).replace("-", "\N{MINUS SIGN}")
# nearest edge
- p0, p1 = min(self.tunit_edges(),
+ p0, p1 = min(self._tunit_edges(),
key=lambda edge: proj3d._line2d_seg_dist(
edge[0], edge[1], (xd, yd)))
@@ -1054,25 +1082,29 @@ def _on_move(self, event):
"""
Mouse moving.
- By default, button-1 rotates and button-3 zooms; these buttons can be
- modified via `mouse_init`.
+ By default, button-1 rotates, button-2 pans, and button-3 zooms;
+ these buttons can be modified via `mouse_init`.
"""
if not self.button_pressed:
return
+ if self.get_navigate_mode() is not None:
+ # we don't want to rotate if we are zooming/panning
+ # from the toolbar
+ return
+
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
- if x is None:
+ if x is None or event.inaxes != self:
return
- dx, dy = x - self.sx, y - self.sy
+ dx, dy = x - self._sx, y - self._sy
w = self._pseudo_w
h = self._pseudo_h
- self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
@@ -1086,45 +1118,201 @@ def _on_move(self, event):
dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll)
self.elev = self.elev + delev
self.azim = self.azim + dazim
- self.get_proj()
self.stale = True
- self.figure.canvas.draw_idle()
- elif self.button_pressed == 2:
- # pan view
- # get the x and y pixel coords
- if dx == 0 and dy == 0:
- return
- minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
- dx = 1-((w - dx)/w)
- dy = 1-((h - dy)/h)
- elev = np.deg2rad(self.elev)
- azim = np.deg2rad(self.azim)
- # project xv, yv, zv -> xw, yw, zw
- dxx = (maxx-minx)*(dy*np.sin(elev)*np.cos(azim) + dx*np.sin(azim))
- dyy = (maxy-miny)*(-dx*np.cos(azim) + dy*np.sin(elev)*np.sin(azim))
- dzz = (maxz-minz)*(-dy*np.cos(elev))
- # pan
- self.set_xlim3d(minx + dxx, maxx + dxx)
- self.set_ylim3d(miny + dyy, maxy + dyy)
- self.set_zlim3d(minz + dzz, maxz + dzz)
- self.get_proj()
- self.figure.canvas.draw_idle()
+ elif self.button_pressed in self._pan_btn:
+ # Start the pan event with pixel coordinates
+ px, py = self.transData.transform([self._sx, self._sy])
+ self.start_pan(px, py, 2)
+ # pan view (takes pixel coordinate input)
+ self.drag_pan(2, None, event.x, event.y)
+ self.end_pan()
# Zoom
elif self.button_pressed in self._zoom_btn:
- # zoom view
- # hmmm..this needs some help from clipping....
- minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
- df = 1-((h - dy)/h)
- dx = (maxx-minx)*df
- dy = (maxy-miny)*df
- dz = (maxz-minz)*df
- self.set_xlim3d(minx - dx, maxx + dx)
- self.set_ylim3d(miny - dy, maxy + dy)
- self.set_zlim3d(minz - dz, maxz + dz)
- self.get_proj()
- self.figure.canvas.draw_idle()
+ # zoom view (dragging down zooms in)
+ scale = h/(h - dy)
+ self._scale_axis_limits(scale, scale, scale)
+
+ # Store the event coordinates for the next time through.
+ self._sx, self._sy = x, y
+ # Always request a draw update at the end of interaction
+ self.figure.canvas.draw_idle()
+
+ def drag_pan(self, button, key, x, y):
+ # docstring inherited
+
+ # Get the coordinates from the move event
+ p = self._pan_start
+ (xdata, ydata), (xdata_start, ydata_start) = p.trans_inverse.transform(
+ [(x, y), (p.x, p.y)])
+ self._sx, self._sy = xdata, ydata
+ # Calling start_pan() to set the x/y of this event as the starting
+ # move location for the next event
+ self.start_pan(x, y, button)
+ du, dv = xdata - xdata_start, ydata - ydata_start
+ dw = 0
+ if key == 'x':
+ dv = 0
+ elif key == 'y':
+ du = 0
+ if du == 0 and dv == 0:
+ return
+
+ # Transform the pan from the view axes to the data axes
+ R = np.array([self._view_u, self._view_v, self._view_w])
+ R = -R / self._box_aspect * self._dist
+ duvw_projected = R.T @ np.array([du, dv, dw])
+
+ # Calculate pan distance
+ minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
+ dx = (maxx - minx) * duvw_projected[0]
+ dy = (maxy - miny) * duvw_projected[1]
+ dz = (maxz - minz) * duvw_projected[2]
+
+ # Set the new axis limits
+ self.set_xlim3d(minx + dx, maxx + dx)
+ self.set_ylim3d(miny + dy, maxy + dy)
+ self.set_zlim3d(minz + dz, maxz + dz)
+
+ def _calc_view_axes(self, eye):
+ """
+ Get the unit vectors for the viewing axes in data coordinates.
+ `u` is towards the right of the screen
+ `v` is towards the top of the screen
+ `w` is out of the screen
+ """
+ elev_rad = np.deg2rad(art3d._norm_angle(self.elev))
+ roll_rad = np.deg2rad(art3d._norm_angle(self.roll))
+
+ # Look into the middle of the world coordinates
+ R = 0.5 * self._roll_to_vertical(self._box_aspect)
+
+ # Define which axis should be vertical. A negative value
+ # indicates the plot is upside down and therefore the values
+ # have been reversed:
+ V = np.zeros(3)
+ V[self._vertical_axis] = -1 if abs(elev_rad) > np.pi/2 else 1
+
+ u, v, w = proj3d._view_axes(eye, R, V, roll_rad)
+ return u, v, w
+
+ def _set_view_from_bbox(self, bbox, direction='in',
+ mode=None, twinx=False, twiny=False):
+ """
+ Zoom in or out of the bounding box.
+
+ Will center the view in the center of the bounding box, and zoom by
+ the ratio of the size of the bounding box to the size of the Axes3D.
+ """
+ (start_x, start_y, stop_x, stop_y) = bbox
+ if mode == 'x':
+ start_y = self.bbox.min[1]
+ stop_y = self.bbox.max[1]
+ elif mode == 'y':
+ start_x = self.bbox.min[0]
+ stop_x = self.bbox.max[0]
+
+ # Clip to bounding box limits
+ start_x, stop_x = np.clip(sorted([start_x, stop_x]),
+ self.bbox.min[0], self.bbox.max[0])
+ start_y, stop_y = np.clip(sorted([start_y, stop_y]),
+ self.bbox.min[1], self.bbox.max[1])
+
+ # Move the center of the view to the center of the bbox
+ zoom_center_x = (start_x + stop_x)/2
+ zoom_center_y = (start_y + stop_y)/2
+
+ ax_center_x = (self.bbox.max[0] + self.bbox.min[0])/2
+ ax_center_y = (self.bbox.max[1] + self.bbox.min[1])/2
+
+ self.start_pan(zoom_center_x, zoom_center_y, 2)
+ self.drag_pan(2, None, ax_center_x, ax_center_y)
+ self.end_pan()
+
+ # Calculate zoom level
+ dx = abs(start_x - stop_x)
+ dy = abs(start_y - stop_y)
+ scale_u = dx / (self.bbox.max[0] - self.bbox.min[0])
+ scale_v = dy / (self.bbox.max[1] - self.bbox.min[1])
+
+ # Keep aspect ratios equal
+ scale = max(scale_u, scale_v)
+
+ # Zoom out
+ if direction == 'out':
+ scale = 1 / scale
+
+ self._zoom_data_limits(scale, scale, scale)
+
+ def _zoom_data_limits(self, scale_u, scale_v, scale_w):
+ """
+ Zoom in or out of a 3D plot.
+
+ Will scale the data limits by the scale factors. These will be
+ transformed to the x, y, z data axes based on the current view angles.
+ A scale factor > 1 zooms out and a scale factor < 1 zooms in.
+
+ For an axes that has had its aspect ratio set to 'equal', 'equalxy',
+ 'equalyz', or 'equalxz', the relevant axes are constrained to zoom
+ equally.
+
+ Parameters
+ ----------
+ scale_u : float
+ Scale factor for the u view axis (view screen horizontal).
+ scale_v : float
+ Scale factor for the v view axis (view screen vertical).
+ scale_w : float
+ Scale factor for the w view axis (view screen depth).
+ """
+ scale = np.array([scale_u, scale_v, scale_w])
+
+ # Only perform frame conversion if unequal scale factors
+ if not np.allclose(scale, scale_u):
+ # Convert the scale factors from the view frame to the data frame
+ R = np.array([self._view_u, self._view_v, self._view_w])
+ S = scale * np.eye(3)
+ scale = np.linalg.norm(R.T @ S, axis=1)
+
+ # Set the constrained scale factors to the factor closest to 1
+ if self._aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
+ ax_idxs = self._equal_aspect_axis_indices(self._aspect)
+ min_ax_idxs = np.argmin(np.abs(scale[ax_idxs] - 1))
+ scale[ax_idxs] = scale[ax_idxs][min_ax_idxs]
+
+ self._scale_axis_limits(scale[0], scale[1], scale[2])
+
+ def _scale_axis_limits(self, scale_x, scale_y, scale_z):
+ """
+ Keeping the center of the x, y, and z data axes fixed, scale their
+ limits by scale factors. A scale factor > 1 zooms out and a scale
+ factor < 1 zooms in.
+
+ Parameters
+ ----------
+ scale_x : float
+ Scale factor for the x data axis.
+ scale_y : float
+ Scale factor for the y data axis.
+ scale_z : float
+ Scale factor for the z data axis.
+ """
+ # Get the axis limits and centers
+ minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
+ cx = (maxx + minx)/2
+ cy = (maxy + miny)/2
+ cz = (maxz + minz)/2
+
+ # Scale the data range
+ dx = (maxx - minx)*scale_x
+ dy = (maxy - miny)*scale_y
+ dz = (maxz - minz)*scale_z
+
+ # Set the scaled axis limits
+ self.set_xlim3d(cx - dx/2, cx + dx/2)
+ self.set_ylim3d(cy - dy/2, cy + dy/2)
+ self.set_zlim3d(cz - dz/2, cz + dz/2)
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
"""
@@ -1158,7 +1346,6 @@ def set_frame_on(self, b):
self._frameon = bool(b)
self.stale = True
- @_api.rename_parameter("3.5", "b", "visible")
def grid(self, visible=True, **kwargs):
"""
Set / unset 3D grid.
@@ -1251,9 +1438,11 @@ def set_zbound(self, lower=None, upper=None):
def text(self, x, y, z, s, zdir=None, **kwargs):
"""
- Add text to the plot. kwargs will be passed on to Axes.text,
- except for the *zdir* keyword, which sets the direction to be
- used as the z direction.
+ Add text to the plot.
+
+ Keyword arguments will be passed on to `.Axes.text`, except for the
+ *zdir* keyword, which sets the direction to be used as the z
+ direction.
"""
text = super().text(x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
@@ -1276,7 +1465,7 @@ def plot(self, xs, ys, *args, zdir='z', **kwargs):
z coordinates of vertices; either one for all points or one for
each point.
zdir : {'x', 'y', 'z'}, default: 'z'
- When plotting 2D data, the direction to use as z ('x', 'y' or 'z').
+ When plotting 2D data, the direction to use as z.
**kwargs
Other arguments are forwarded to `matplotlib.axes.Axes.plot`.
"""
@@ -1310,7 +1499,7 @@ def plot_surface(self, X, Y, Z, *, norm=None, vmin=None,
"""
Create a surface plot.
- By default it will be colored in shades of a solid color, but it also
+ By default, it will be colored in shades of a solid color, but it also
supports colormapping by supplying the *cmap* argument.
.. note::
@@ -1474,15 +1663,13 @@ def plot_surface(self, X, Y, Z, *, norm=None, vmin=None,
# note that the striding causes some polygons to have more coordinates
# than others
- polyc = art3d.Poly3DCollection(polys, **kwargs)
if fcolors is not None:
- if shade:
- colset = self._shade_colors(
- colset, self._generate_normals(polys), lightsource)
- polyc.set_facecolors(colset)
- polyc.set_edgecolors(colset)
+ polyc = art3d.Poly3DCollection(
+ polys, edgecolors=colset, facecolors=colset, shade=shade,
+ lightsource=lightsource, **kwargs)
elif cmap:
+ polyc = art3d.Poly3DCollection(polys, **kwargs)
# can't always vectorize, because polys might be jagged
if isinstance(polys, np.ndarray):
avg_z = polys[..., 2].mean(axis=-1)
@@ -1494,97 +1681,15 @@ def plot_surface(self, X, Y, Z, *, norm=None, vmin=None,
if norm is not None:
polyc.set_norm(norm)
else:
- if shade:
- colset = self._shade_colors(
- color, self._generate_normals(polys), lightsource)
- else:
- colset = color
- polyc.set_facecolors(colset)
+ polyc = art3d.Poly3DCollection(
+ polys, facecolors=color, shade=shade,
+ lightsource=lightsource, **kwargs)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
- def _generate_normals(self, polygons):
- """
- Compute the normals of a list of polygons.
-
- Normals point towards the viewer for a face with its vertices in
- counterclockwise order, following the right hand rule.
-
- Uses three points equally spaced around the polygon.
- This normal of course might not make sense for polygons with more than
- three points not lying in a plane, but it's a plausible and fast
- approximation.
-
- Parameters
- ----------
- polygons : list of (M_i, 3) array-like, or (..., M, 3) array-like
- A sequence of polygons to compute normals for, which can have
- varying numbers of vertices. If the polygons all have the same
- number of vertices and array is passed, then the operation will
- be vectorized.
-
- Returns
- -------
- normals : (..., 3) array
- A normal vector estimated for the polygon.
- """
- if isinstance(polygons, np.ndarray):
- # optimization: polygons all have the same number of points, so can
- # vectorize
- n = polygons.shape[-2]
- i1, i2, i3 = 0, n//3, 2*n//3
- v1 = polygons[..., i1, :] - polygons[..., i2, :]
- v2 = polygons[..., i2, :] - polygons[..., i3, :]
- else:
- # The subtraction doesn't vectorize because polygons is jagged.
- v1 = np.empty((len(polygons), 3))
- v2 = np.empty((len(polygons), 3))
- for poly_i, ps in enumerate(polygons):
- n = len(ps)
- i1, i2, i3 = 0, n//3, 2*n//3
- v1[poly_i, :] = ps[i1, :] - ps[i2, :]
- v2[poly_i, :] = ps[i2, :] - ps[i3, :]
- return np.cross(v1, v2)
-
- def _shade_colors(self, color, normals, lightsource=None):
- """
- Shade *color* using normal vectors given by *normals*.
- *color* can also be an array of the same length as *normals*.
- """
- if lightsource is None:
- # chosen for backwards-compatibility
- lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)
-
- with np.errstate(invalid="ignore"):
- shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))
- @ lightsource.direction)
- mask = ~np.isnan(shade)
-
- if mask.any():
- # convert dot product to allowed shading fractions
- in_norm = mcolors.Normalize(-1, 1)
- out_norm = mcolors.Normalize(0.3, 1).inverse
-
- def norm(x):
- return out_norm(in_norm(x))
-
- shade[~mask] = 0
-
- color = mcolors.to_rgba_array(color)
- # shape of color should be (M, 4) (where M is number of faces)
- # shape of shade should be (M,)
- # colors should have final shape of (M, 4)
- alpha = color[:, 3]
- colors = norm(shade)[:, np.newaxis] * color
- colors[:, 3] = alpha
- else:
- colors = np.asanyarray(color).copy()
-
- return colors
-
def plot_wireframe(self, X, Y, Z, **kwargs):
"""
Plot a 3D wireframe.
@@ -1720,7 +1825,7 @@ def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
- `.Triangulation` for a explanation of these possibilities.
+ `.Triangulation` for an explanation of these possibilities.
The remaining arguments are::
@@ -1781,9 +1886,8 @@ def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
zt = z[triangles]
verts = np.stack((xt, yt, zt), axis=-1)
- polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
-
if cmap:
+ polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
# average over the three points of each triangle
avg_z = verts[:, :, 2].mean(axis=1)
polyc.set_array(avg_z)
@@ -1792,12 +1896,9 @@ def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
if norm is not None:
polyc.set_norm(norm)
else:
- if shade:
- normals = self._generate_normals(verts)
- colset = self._shade_colors(color, normals, lightsource)
- else:
- colset = color
- polyc.set_facecolors(colset)
+ polyc = art3d.Poly3DCollection(
+ verts, *args, shade=shade, lightsource=lightsource,
+ facecolors=color, **kwargs)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
@@ -1822,8 +1923,6 @@ def _3d_extend_contour(self, cset, stride=5):
color = linec.get_edgecolor()[0]
- polyverts = []
- normals = []
nsteps = round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
@@ -1831,10 +1930,11 @@ def _3d_extend_contour(self, cset, stride=5):
else:
continue
+ polyverts = []
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
- for i in range(int(round(nsteps)) - 1):
- i1 = int(round(i * stepsize))
- i2 = int(round((i + 1) * stepsize))
+ for i in range(round(nsteps) - 1):
+ i1 = round(i * stepsize)
+ i2 = round((i + 1) * stepsize)
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
@@ -1842,13 +1942,10 @@ def _3d_extend_contour(self, cset, stride=5):
# all polygons have 4 vertices, so vectorize
polyverts = np.array(polyverts)
- normals = self._generate_normals(polyverts)
-
- colors = self._shade_colors(color, normals)
- colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
- facecolors=colors,
- edgecolors=colors2)
+ facecolors=color,
+ edgecolors=color,
+ shade=True)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
@@ -1873,7 +1970,7 @@ def _add_contourf_set(self, cset, zdir='z', offset=None):
"""
Returns
-------
- levels : numpy.ndarray
+ levels : `numpy.ndarray`
Levels at which the filled contours are added.
"""
zdir = '-' + zdir
@@ -1912,7 +2009,7 @@ def contour(self, X, Y, Z, *args,
The direction to use.
offset : float, optional
If specified, plot a projection of the contour lines at this
- position in a plane normal to zdir.
+ position in a plane normal to *zdir*.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
@@ -1956,7 +2053,7 @@ def tricontour(self, *args,
The direction to use.
offset : float, optional
If specified, plot a projection of the contour lines at this
- position in a plane normal to zdir.
+ position in a plane normal to *zdir*.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
*args, **kwargs
@@ -1964,7 +2061,7 @@ def tricontour(self, *args,
Returns
-------
- matplotlib.tri.tricontour.TriContourSet
+ matplotlib.tri._tricontour.TriContourSet
"""
had_data = self.has_data()
@@ -2010,7 +2107,7 @@ def contourf(self, X, Y, Z, *args, zdir='z', offset=None, **kwargs):
The direction to use.
offset : float, optional
If specified, plot a projection of the contour lines at this
- position in a plane normal to zdir.
+ position in a plane normal to *zdir*.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
*args, **kwargs
@@ -2057,7 +2154,7 @@ def tricontourf(self, *args, zdir='z', offset=None, **kwargs):
Returns
-------
- matplotlib.tri.tricontour.TriContourSet
+ matplotlib.tri._tricontour.TriContourSet
"""
had_data = self.has_data()
@@ -2393,15 +2490,11 @@ def bar3d(self, x, y, z, dx, dy, dz, color=None,
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
- if shade:
- normals = self._generate_normals(polys)
- sfacecolors = self._shade_colors(facecolors, normals, lightsource)
- else:
- sfacecolors = facecolors
-
col = art3d.Poly3DCollection(polys,
zsort=zsort,
- facecolor=sfacecolors,
+ facecolors=facecolors,
+ shade=shade,
+ lightsource=lightsource,
*args, **kwargs)
self.add_collection(col)
@@ -2612,8 +2705,7 @@ def voxels(self, *args, facecolors=None, edgecolors=None, shade=True,
last axis.
shade : bool, default: True
- Whether to shade the facecolors. Shading is always disabled when
- *cmap* is specified.
+ Whether to shade the facecolors.
lightsource : `~matplotlib.colors.LightSource`
The lightsource to use when *shade* is True.
@@ -2770,16 +2862,10 @@ def permutation_matrices(n):
# shade the faces
facecolor = facecolors[coord]
edgecolor = edgecolors[coord]
- if shade:
- normals = self._generate_normals(faces)
- facecolor = self._shade_colors(facecolor, normals, lightsource)
- if edgecolor is not None:
- edgecolor = self._shade_colors(
- edgecolor, normals, lightsource
- )
poly = art3d.Poly3DCollection(
- faces, facecolors=facecolor, edgecolors=edgecolor, **kwargs)
+ faces, facecolors=facecolor, edgecolors=edgecolor,
+ shade=shade, lightsource=lightsource, **kwargs)
self.add_collection3d(poly)
polygons[coord] = poly
@@ -3019,7 +3105,7 @@ def _extract_errs(err, data, lomask, himask):
invM = np.linalg.inv(self.get_proj())
# elev=azim=roll=0 produces the Y-Z plane, so quiversize in 2D 'x' is
# 'y' in 3D, hence the 1 index.
- quiversize = np.dot(invM, np.array([quiversize, 0, 0, 0]))[1]
+ quiversize = np.dot(invM, [quiversize, 0, 0, 0])[1]
# Quivers use a fixed 15-degree arrow head, so scale up the length so
# that the size corresponds to the base. In other words, this constant
# corresponds to the equation tan(15) = (base / 2) / (arrow length).
diff --git a/lib/mpl_toolkits/mplot3d/axis3d.py b/lib/mpl_toolkits/mplot3d/axis3d.py
index efb3ced73048..45bd3b4173a5 100644
--- a/lib/mpl_toolkits/mplot3d/axis3d.py
+++ b/lib/mpl_toolkits/mplot3d/axis3d.py
@@ -61,12 +61,9 @@ class Axis(maxis.XAxis):
# Some properties for the axes
_AXINFO = {
- 'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
- 'color': (0.95, 0.95, 0.95, 0.5)},
- 'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
- 'color': (0.90, 0.90, 0.90, 0.5)},
- 'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
- 'color': (0.925, 0.925, 0.925, 0.5)},
+ 'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2)},
+ 'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2)},
+ 'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1)},
}
def _old_init(self, adir, v_intervalx, d_intervalx, axes, *args,
@@ -97,17 +94,18 @@ def __init__(self, *args, **kwargs):
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[name].copy()
+ # Common parts
+ self._axinfo.update({
+ 'label': {'va': 'center', 'ha': 'center'},
+ 'color': mpl.rcParams[f'axes3d.{name}axis.panecolor'],
+ 'tick': {
+ 'inward_factor': 0.2,
+ 'outward_factor': 0.1,
+ },
+ })
+
if mpl.rcParams['_internal.classic_mode']:
self._axinfo.update({
- 'label': {'va': 'center', 'ha': 'center'},
- 'tick': {
- 'inward_factor': 0.2,
- 'outward_factor': 0.1,
- 'linewidth': {
- True: mpl.rcParams['lines.linewidth'], # major
- False: mpl.rcParams['lines.linewidth'], # minor
- }
- },
'axisline': {'linewidth': 0.75, 'color': (0, 0, 0, 1)},
'grid': {
'color': (0.9, 0.9, 0.9, 1),
@@ -115,21 +113,14 @@ def __init__(self, *args, **kwargs):
'linestyle': '-',
},
})
+ self._axinfo['tick'].update({
+ 'linewidth': {
+ True: mpl.rcParams['lines.linewidth'], # major
+ False: mpl.rcParams['lines.linewidth'], # minor
+ }
+ })
else:
self._axinfo.update({
- 'label': {'va': 'center', 'ha': 'center'},
- 'tick': {
- 'inward_factor': 0.2,
- 'outward_factor': 0.1,
- 'linewidth': {
- True: ( # major
- mpl.rcParams['xtick.major.width'] if name in 'xz'
- else mpl.rcParams['ytick.major.width']),
- False: ( # minor
- mpl.rcParams['xtick.minor.width'] if name in 'xz'
- else mpl.rcParams['ytick.minor.width']),
- }
- },
'axisline': {
'linewidth': mpl.rcParams['axes.linewidth'],
'color': mpl.rcParams['axes.edgecolor'],
@@ -140,6 +131,16 @@ def __init__(self, *args, **kwargs):
'linestyle': mpl.rcParams['grid.linestyle'],
},
})
+ self._axinfo['tick'].update({
+ 'linewidth': {
+ True: ( # major
+ mpl.rcParams['xtick.major.width'] if name in 'xz'
+ else mpl.rcParams['ytick.major.width']),
+ False: ( # minor
+ mpl.rcParams['xtick.minor.width'] if name in 'xz'
+ else mpl.rcParams['ytick.minor.width']),
+ }
+ })
super().__init__(axes, *args, **kwargs)
@@ -163,8 +164,7 @@ def _init3d(self):
antialiased=True)
# Store dummy data in Polygon object
- self.pane = mpatches.Polygon(
- np.array([[0, 0], [0, 1]]), closed=False)
+ self.pane = mpatches.Polygon([[0, 0], [0, 1]], closed=False)
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
@@ -199,6 +199,7 @@ def get_minor_ticks(self, numticks=None):
@_api.deprecated("3.6")
def set_pane_pos(self, xys):
+ """Set pane position."""
self._set_pane_pos(xys)
def _set_pane_pos(self, xys):
@@ -257,7 +258,7 @@ def _get_coord_info(self, renderer):
# Project the bounds along the current position of the cube:
bounds = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
- bounds_proj = self.axes.tunit_cube(bounds, self.axes.M)
+ bounds_proj = self.axes._tunit_cube(bounds, self.axes.M)
# Determine which one of the parallel planes are higher up:
means_z0 = np.zeros(3)
@@ -320,6 +321,13 @@ def _get_tickdir(self):
return tickdir
def draw_pane(self, renderer):
+ """
+ Draw pane.
+
+ Parameters
+ ----------
+ renderer : `~matplotlib.backend_bases.RendererBase` subclass
+ """
renderer.open_group('pane3d', gid=self.get_gid())
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
diff --git a/lib/mpl_toolkits/mplot3d/proj3d.py b/lib/mpl_toolkits/mplot3d/proj3d.py
index 2f23e3779b06..cb67c1e2f06e 100644
--- a/lib/mpl_toolkits/mplot3d/proj3d.py
+++ b/lib/mpl_toolkits/mplot3d/proj3d.py
@@ -72,26 +72,86 @@ def rotation_about_vector(v, angle):
return R
-def view_transformation(E, R, V, roll):
- n = (E - R)
- n = n/np.linalg.norm(n)
- u = np.cross(V, n)
+def _view_axes(E, R, V, roll):
+ """
+ Get the unit viewing axes in data coordinates.
+
+ Parameters
+ ----------
+ E : 3-element numpy array
+ The coordinates of the eye/camera.
+ R : 3-element numpy array
+ The coordinates of the center of the view box.
+ V : 3-element numpy array
+ Unit vector in the direction of the vertical axis.
+ roll : float
+ The roll angle in radians.
+
+ Returns
+ -------
+ u : 3-element numpy array
+ Unit vector pointing towards the right of the screen.
+ v : 3-element numpy array
+ Unit vector pointing towards the top of the screen.
+ w : 3-element numpy array
+ Unit vector pointing out of the screen.
+ """
+ w = (E - R)
+ w = w/np.linalg.norm(w)
+ u = np.cross(V, w)
u = u/np.linalg.norm(u)
- v = np.cross(n, u) # Will be a unit vector
+ v = np.cross(w, u) # Will be a unit vector
# Save some computation for the default roll=0
if roll != 0:
# A positive rotation of the camera is a negative rotation of the world
- Rroll = rotation_about_vector(n, -roll)
+ Rroll = rotation_about_vector(w, -roll)
u = np.dot(Rroll, u)
v = np.dot(Rroll, v)
+ return u, v, w
+
+def _view_transformation_uvw(u, v, w, E):
+ """
+ Return the view transformation matrix.
+
+ Parameters
+ ----------
+ u : 3-element numpy array
+ Unit vector pointing towards the right of the screen.
+ v : 3-element numpy array
+ Unit vector pointing towards the top of the screen.
+ w : 3-element numpy array
+ Unit vector pointing out of the screen.
+ E : 3-element numpy array
+ The coordinates of the eye/camera.
+ """
Mr = np.eye(4)
Mt = np.eye(4)
- Mr[:3, :3] = [u, v, n]
+ Mr[:3, :3] = [u, v, w]
Mt[:3, -1] = -E
+ M = np.dot(Mr, Mt)
+ return M
- return np.dot(Mr, Mt)
+
+def view_transformation(E, R, V, roll):
+ """
+ Return the view transformation matrix.
+
+ Parameters
+ ----------
+ E : 3-element numpy array
+ The coordinates of the eye/camera.
+ R : 3-element numpy array
+ The coordinates of the center of the view box.
+ V : 3-element numpy array
+ Unit vector in the direction of the vertical axis.
+ roll : float
+ The roll angle in radians.
+ """
+ u, v, w = _view_axes(E, R, V, roll)
+ M = _view_transformation_uvw(u, v, w, E)
+ return M
def persp_transformation(zfront, zback, focal_length):
@@ -137,6 +197,9 @@ def _proj_transform_vec_clip(vec, M):
def inv_transform(xs, ys, zs, M):
+ """
+ Transform the points by the inverse of the projection matrix *M*.
+ """
iM = linalg.inv(M)
vec = _vec_pad_ones(xs, ys, zs)
vecr = np.dot(iM, vec)
@@ -153,7 +216,7 @@ def _vec_pad_ones(xs, ys, zs):
def proj_transform(xs, ys, zs, M):
"""
- Transform the points by the projection matrix
+ Transform the points by the projection matrix *M*.
"""
vec = _vec_pad_ones(xs, ys, zs)
return _proj_transform_vec(vec, M)
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_each_left_label_mode_all.png b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_each_left_label_mode_all.png
new file mode 100644
index 000000000000..23abe8b9649d
Binary files /dev/null and b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_each_left_label_mode_all.png differ
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_single_bottom_label_mode_1.png b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_single_bottom_label_mode_1.png
new file mode 100644
index 000000000000..1a0f4cd1fc9a
Binary files /dev/null and b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/image_grid_single_bottom_label_mode_1.png differ
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_axes_grid/imagegrid_cbar_mode.png b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/imagegrid_cbar_mode.png
similarity index 100%
rename from lib/mpl_toolkits/tests/baseline_images/test_axes_grid/imagegrid_cbar_mode.png
rename to lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/imagegrid_cbar_mode.png
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/insetposition.png b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/insetposition.png
new file mode 100644
index 000000000000..e8676cfd6c95
Binary files /dev/null and b/lib/mpl_toolkits/tests/baseline_images/test_axes_grid1/insetposition.png differ
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_axisartist_clip_path/clip_path.png b/lib/mpl_toolkits/tests/baseline_images/test_axisartist_clip_path/clip_path.png
deleted file mode 100644
index 1f296b6d06d5..000000000000
Binary files a/lib/mpl_toolkits/tests/baseline_images/test_axisartist_clip_path/clip_path.png and /dev/null differ
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/aspects_adjust_box.png b/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/aspects_adjust_box.png
new file mode 100644
index 000000000000..7fb448f2c51d
Binary files /dev/null and b/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/aspects_adjust_box.png differ
diff --git a/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/panecolor_rcparams.png b/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/panecolor_rcparams.png
new file mode 100644
index 000000000000..e8e2ac6dcd5a
Binary files /dev/null and b/lib/mpl_toolkits/tests/baseline_images/test_mplot3d/panecolor_rcparams.png differ
diff --git a/lib/mpl_toolkits/tests/test_axes_grid.py b/lib/mpl_toolkits/tests/test_axes_grid.py
deleted file mode 100644
index 4d77b90e5e03..000000000000
--- a/lib/mpl_toolkits/tests/test_axes_grid.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import numpy as np
-
-import matplotlib as mpl
-import matplotlib.ticker as mticker
-from matplotlib.testing.decorators import image_comparison
-import matplotlib.pyplot as plt
-from mpl_toolkits.axes_grid1 import ImageGrid
-
-
-# The original version of this test relied on mpl_toolkits's slightly different
-# colorbar implementation; moving to matplotlib's own colorbar implementation
-# caused the small image comparison error.
-@image_comparison(['imagegrid_cbar_mode.png'],
- remove_text=True, style='mpl20', tol=0.3)
-def test_imagegrid_cbar_mode_edge():
- # Remove this line when this test image is regenerated.
- plt.rcParams['pcolormesh.snap'] = False
-
- X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))
- arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))
-
- fig = plt.figure(figsize=(18, 9))
-
- positions = (241, 242, 243, 244, 245, 246, 247, 248)
- directions = ['row']*4 + ['column']*4
- cbar_locations = ['left', 'right', 'top', 'bottom']*2
-
- for position, direction, location in zip(
- positions, directions, cbar_locations):
- grid = ImageGrid(fig, position,
- nrows_ncols=(2, 2),
- direction=direction,
- cbar_location=location,
- cbar_size='20%',
- cbar_mode='edge')
- ax1, ax2, ax3, ax4, = grid
-
- ax1.imshow(arr.real, cmap='nipy_spectral')
- ax2.imshow(arr.imag, cmap='hot')
- ax3.imshow(np.abs(arr), cmap='jet')
- ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')
-
- # In each row/column, the "first" colorbars must be overwritten by the
- # "second" ones. To achieve this, clear out the axes first.
- for ax in grid:
- ax.cax.cla()
- cb = ax.cax.colorbar(ax.images[0])
-
-
-def test_imagegrid():
- fig = plt.figure()
- grid = ImageGrid(fig, 111, nrows_ncols=(1, 1))
- ax = grid[0]
- im = ax.imshow([[1, 2]], norm=mpl.colors.LogNorm())
- cb = ax.cax.colorbar(im)
- assert isinstance(cb.locator, mticker.LogLocator)
diff --git a/lib/mpl_toolkits/tests/test_axes_grid1.py b/lib/mpl_toolkits/tests/test_axes_grid1.py
index 374b8c721f9c..70fc22deb85e 100644
--- a/lib/mpl_toolkits/tests/test_axes_grid1.py
+++ b/lib/mpl_toolkits/tests/test_axes_grid1.py
@@ -3,6 +3,7 @@
import matplotlib as mpl
import matplotlib.pyplot as plt
+import matplotlib.ticker as mticker
from matplotlib import cbook
from matplotlib.backend_bases import MouseEvent
from matplotlib.colors import LogNorm
@@ -17,10 +18,12 @@
from mpl_toolkits.axes_grid1.anchored_artists import (
AnchoredSizeBar, AnchoredDirectionArrows)
from mpl_toolkits.axes_grid1.axes_divider import (
- Divider, HBoxDivider, make_axes_area_auto_adjustable)
+ Divider, HBoxDivider, make_axes_area_auto_adjustable, SubplotDivider,
+ VBoxDivider)
from mpl_toolkits.axes_grid1.axes_rgb import RGBAxes
from mpl_toolkits.axes_grid1.inset_locator import (
- zoomed_inset_axes, mark_inset, inset_axes, BboxConnectorPatch)
+ zoomed_inset_axes, mark_inset, inset_axes, BboxConnectorPatch,
+ InsetPosition)
import mpl_toolkits.axes_grid1.mpl_axes
import pytest
@@ -206,23 +209,22 @@ def test_inset_axes_complete():
ins = inset_axes(ax, width=2., height=2., borderpad=0)
fig.canvas.draw()
assert_array_almost_equal(
- ins.get_position().extents,
- np.array(((0.9*figsize[0]-2.)/figsize[0],
- (0.9*figsize[1]-2.)/figsize[1], 0.9, 0.9)))
+ ins.get_position().extents,
+ [(0.9*figsize[0]-2.)/figsize[0], (0.9*figsize[1]-2.)/figsize[1],
+ 0.9, 0.9])
ins = inset_axes(ax, width="40%", height="30%", borderpad=0)
fig.canvas.draw()
assert_array_almost_equal(
- ins.get_position().extents,
- np.array((.9-.8*.4, .9-.8*.3, 0.9, 0.9)))
+ ins.get_position().extents, [.9-.8*.4, .9-.8*.3, 0.9, 0.9])
ins = inset_axes(ax, width=1., height=1.2, bbox_to_anchor=(200, 100),
loc=3, borderpad=0)
fig.canvas.draw()
assert_array_almost_equal(
- ins.get_position().extents,
- np.array((200./dpi/figsize[0], 100./dpi/figsize[1],
- (200./dpi+1)/figsize[0], (100./dpi+1.2)/figsize[1])))
+ ins.get_position().extents,
+ [200/dpi/figsize[0], 100/dpi/figsize[1],
+ (200/dpi+1)/figsize[0], (100/dpi+1.2)/figsize[1]])
ins1 = inset_axes(ax, width="35%", height="60%", loc=3, borderpad=1)
ins2 = inset_axes(ax, width="100%", height="100%",
@@ -367,6 +369,40 @@ def test_axes_locatable_position():
0.03621495327102808)
+@image_comparison(['image_grid_each_left_label_mode_all.png'], style='mpl20',
+ savefig_kwarg={'bbox_inches': 'tight'})
+def test_image_grid_each_left_label_mode_all():
+ imdata = np.arange(100).reshape((10, 10))
+
+ fig = plt.figure(1, (3, 3))
+ grid = ImageGrid(fig, (1, 1, 1), nrows_ncols=(3, 2), axes_pad=(0.5, 0.3),
+ cbar_mode="each", cbar_location="left", cbar_size="15%",
+ label_mode="all")
+ # 3-tuple rect => SubplotDivider
+ assert isinstance(grid.get_divider(), SubplotDivider)
+ assert grid.get_axes_pad() == (0.5, 0.3)
+ assert grid.get_aspect() # True by default for ImageGrid
+ for ax, cax in zip(grid, grid.cbar_axes):
+ im = ax.imshow(imdata, interpolation='none')
+ cax.colorbar(im)
+
+
+@image_comparison(['image_grid_single_bottom_label_mode_1.png'], style='mpl20',
+ savefig_kwarg={'bbox_inches': 'tight'})
+def test_image_grid_single_bottom():
+ imdata = np.arange(100).reshape((10, 10))
+
+ fig = plt.figure(1, (2.5, 1.5))
+ grid = ImageGrid(fig, (0, 0, 1, 1), nrows_ncols=(1, 3),
+ axes_pad=(0.2, 0.15), cbar_mode="single",
+ cbar_location="bottom", cbar_size="10%", label_mode="1")
+ # 4-tuple rect => Divider, isinstance will give True for SubplotDivider
+ assert type(grid.get_divider()) is Divider
+ for i in range(3):
+ im = grid[i].imshow(imdata, interpolation='none')
+ grid.cbar_axes[0].colorbar(im)
+
+
@image_comparison(['image_grid.png'],
remove_text=True, style='mpl20',
savefig_kwarg={'bbox_inches': 'tight'})
@@ -478,6 +514,29 @@ def test_hbox_divider():
assert p2.width / p1.width == pytest.approx((4 / 5) ** 2)
+def test_vbox_divider():
+ arr1 = np.arange(20).reshape((4, 5))
+ arr2 = np.arange(20).reshape((5, 4))
+
+ fig, (ax1, ax2) = plt.subplots(1, 2)
+ ax1.imshow(arr1)
+ ax2.imshow(arr2)
+
+ pad = 0.5 # inches.
+ divider = VBoxDivider(
+ fig, 111, # Position of combined axes.
+ horizontal=[Size.AxesX(ax1), Size.Scaled(1), Size.AxesX(ax2)],
+ vertical=[Size.AxesY(ax1), Size.Fixed(pad), Size.AxesY(ax2)])
+ ax1.set_axes_locator(divider.new_locator(0))
+ ax2.set_axes_locator(divider.new_locator(2))
+
+ fig.canvas.draw()
+ p1 = ax1.get_position()
+ p2 = ax2.get_position()
+ assert p1.width == p2.width
+ assert p1.height / p2.height == pytest.approx((4 / 5) ** 2)
+
+
def test_axes_class_tuple():
fig = plt.figure()
axes_class = (mpl_toolkits.axes_grid1.mpl_axes.Axes, {})
@@ -568,3 +627,60 @@ def test_rgb_axes():
g = rng.random((5, 5))
b = rng.random((5, 5))
ax.imshow_rgb(r, g, b, interpolation='none')
+
+
+@image_comparison(['insetposition.png'], remove_text=True)
+def test_insetposition():
+ fig, ax = plt.subplots(figsize=(2, 2))
+ ax_ins = plt.axes([0, 0, 1, 1])
+ ip = InsetPosition(ax, [0.2, 0.25, 0.5, 0.4])
+ ax_ins.set_axes_locator(ip)
+
+
+# The original version of this test relied on mpl_toolkits's slightly different
+# colorbar implementation; moving to matplotlib's own colorbar implementation
+# caused the small image comparison error.
+@image_comparison(['imagegrid_cbar_mode.png'],
+ remove_text=True, style='mpl20', tol=0.3)
+def test_imagegrid_cbar_mode_edge():
+ # Remove this line when this test image is regenerated.
+ plt.rcParams['pcolormesh.snap'] = False
+
+ X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))
+ arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))
+
+ fig = plt.figure(figsize=(18, 9))
+
+ positions = (241, 242, 243, 244, 245, 246, 247, 248)
+ directions = ['row']*4 + ['column']*4
+ cbar_locations = ['left', 'right', 'top', 'bottom']*2
+
+ for position, direction, location in zip(
+ positions, directions, cbar_locations):
+ grid = ImageGrid(fig, position,
+ nrows_ncols=(2, 2),
+ direction=direction,
+ cbar_location=location,
+ cbar_size='20%',
+ cbar_mode='edge')
+ ax1, ax2, ax3, ax4, = grid
+
+ ax1.imshow(arr.real, cmap='nipy_spectral')
+ ax2.imshow(arr.imag, cmap='hot')
+ ax3.imshow(np.abs(arr), cmap='jet')
+ ax4.imshow(np.arctan2(arr.imag, arr.real), cmap='hsv')
+
+ # In each row/column, the "first" colorbars must be overwritten by the
+ # "second" ones. To achieve this, clear out the axes first.
+ for ax in grid:
+ ax.cax.cla()
+ cb = ax.cax.colorbar(ax.images[0])
+
+
+def test_imagegrid():
+ fig = plt.figure()
+ grid = ImageGrid(fig, 111, nrows_ncols=(1, 1))
+ ax = grid[0]
+ im = ax.imshow([[1, 2]], norm=mpl.colors.LogNorm())
+ cb = ax.cax.colorbar(im)
+ assert isinstance(cb.locator, mticker.LogLocator)
diff --git a/lib/mpl_toolkits/tests/test_axisartist_axislines.py b/lib/mpl_toolkits/tests/test_axisartist_axislines.py
index 1b239f214183..7743cb35aa3b 100644
--- a/lib/mpl_toolkits/tests/test_axisartist_axislines.py
+++ b/lib/mpl_toolkits/tests/test_axisartist_axislines.py
@@ -4,7 +4,7 @@
from matplotlib.transforms import IdentityTransform
from mpl_toolkits.axisartist.axislines import SubplotZero, Subplot
-from mpl_toolkits.axisartist import Axes, SubplotHost, ParasiteAxes
+from mpl_toolkits.axisartist import Axes, SubplotHost
@image_comparison(['SubplotZero.png'], style='default')
@@ -81,8 +81,7 @@ def test_ParasiteAxesAuxTrans():
ax1 = SubplotHost(fig, 1, 3, i+1)
fig.add_subplot(ax1)
- ax2 = ParasiteAxes(ax1, IdentityTransform())
- ax1.parasites.append(ax2)
+ ax2 = ax1.get_aux_axes(IdentityTransform(), viewlim_mode=None)
if name.startswith('pcolor'):
getattr(ax2, name)(xx, yy, data[:-1, :-1])
else:
diff --git a/lib/mpl_toolkits/tests/test_axisartist_clip_path.py b/lib/mpl_toolkits/tests/test_axisartist_clip_path.py
deleted file mode 100644
index 1108533353a1..000000000000
--- a/lib/mpl_toolkits/tests/test_axisartist_clip_path.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import numpy as np
-
-from matplotlib import _api
-import matplotlib.pyplot as plt
-from matplotlib.testing.decorators import image_comparison
-from matplotlib.transforms import Bbox
-
-with _api.suppress_matplotlib_deprecation_warning():
- from mpl_toolkits.axisartist.clip_path import clip_line_to_rect
-
-
-@image_comparison(['clip_path.png'], style='default')
-def test_clip_path():
- x = np.array([-3, -2, -1, 0., 1, 2, 3, 2, 1, 0, -1, -2, -3, 5])
- y = np.arange(len(x))
-
- fig, ax = plt.subplots()
- ax.plot(x, y, lw=1)
-
- bbox = Bbox.from_extents(-2, 3, 2, 12.5)
- rect = plt.Rectangle(bbox.p0, bbox.width, bbox.height,
- facecolor='none', edgecolor='k', ls='--')
- ax.add_patch(rect)
-
- clipped_lines, ticks = clip_line_to_rect(x, y, bbox)
- for lx, ly in clipped_lines:
- ax.plot(lx, ly, lw=1, color='C1')
- for px, py in zip(lx, ly):
- assert bbox.contains(px, py)
-
- ccc = iter(['C3o', 'C2x', 'C3o', 'C2x'])
- for ttt in ticks:
- cc = next(ccc)
- for (xx, yy), aa in ttt:
- ax.plot([xx], [yy], cc)
diff --git a/lib/mpl_toolkits/tests/test_axisartist_grid_helper_curvelinear.py b/lib/mpl_toolkits/tests/test_axisartist_grid_helper_curvelinear.py
index 9a501daa2e7b..ffc5f6c1b791 100644
--- a/lib/mpl_toolkits/tests/test_axisartist_grid_helper_curvelinear.py
+++ b/lib/mpl_toolkits/tests/test_axisartist_grid_helper_curvelinear.py
@@ -6,9 +6,8 @@
from matplotlib.transforms import Affine2D, Transform
from matplotlib.testing.decorators import image_comparison
-from mpl_toolkits.axes_grid1.parasite_axes import ParasiteAxes
from mpl_toolkits.axisartist import SubplotHost
-from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory
+from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory
from mpl_toolkits.axisartist import angle_helper
from mpl_toolkits.axisartist.axislines import Axes
from mpl_toolkits.axisartist.grid_helper_curvelinear import \
@@ -59,15 +58,14 @@ def inverted(self):
fig = plt.figure()
- SubplotHost = host_subplot_class_factory(Axes)
+ SubplotHost = host_axes_class_factory(Axes)
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
- ax2 = ParasiteAxes(ax1, tr, viewlim_mode="equal")
- ax1.parasites.append(ax2)
+ ax2 = ax1.get_aux_axes(tr, viewlim_mode="equal")
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
@@ -127,10 +125,9 @@ def test_polar_box():
axis.get_helper().set_extremes(-180, 90)
# A parasite axes with given transform
- ax2 = ParasiteAxes(ax1, tr, viewlim_mode="equal")
+ ax2 = ax1.get_aux_axes(tr, viewlim_mode="equal")
assert ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
- ax1.parasites.append(ax2)
ax2.plot(np.linspace(0, 30, 50), np.linspace(10, 10, 50))
ax1.set_aspect(1.)
diff --git a/lib/mpl_toolkits/tests/test_mplot3d.py b/lib/mpl_toolkits/tests/test_mplot3d.py
index bd651d1ad52b..aad42d1f4d72 100644
--- a/lib/mpl_toolkits/tests/test_mplot3d.py
+++ b/lib/mpl_toolkits/tests/test_mplot3d.py
@@ -5,7 +5,8 @@
from mpl_toolkits.mplot3d import Axes3D, axes3d, proj3d, art3d
import matplotlib as mpl
-from matplotlib.backend_bases import MouseButton
+from matplotlib.backend_bases import (MouseButton, MouseEvent,
+ NavigationToolbar2)
from matplotlib import cm
from matplotlib import colors as mcolors, patches as mpatch
from matplotlib.testing.decorators import image_comparison, check_figures_equal
@@ -44,7 +45,25 @@ def test_aspects():
ax.plot3D(*zip(start*scale, end*scale))
for i, ax in enumerate(axs):
ax.set_box_aspect((3, 4, 5))
- ax.set_aspect(aspects[i])
+ ax.set_aspect(aspects[i], adjustable='datalim')
+
+
+@mpl3d_image_comparison(['aspects_adjust_box.png'], remove_text=False)
+def test_aspects_adjust_box():
+ aspects = ('auto', 'equal', 'equalxy', 'equalyz', 'equalxz')
+ fig, axs = plt.subplots(1, len(aspects), subplot_kw={'projection': '3d'},
+ figsize=(11, 3))
+
+ # Draw rectangular cuboid with side lengths [4, 3, 5]
+ r = [0, 1]
+ scale = np.array([4, 3, 5])
+ pts = itertools.combinations(np.array(list(itertools.product(r, r, r))), 2)
+ for start, end in pts:
+ if np.sum(np.abs(start - end)) == r[1] - r[0]:
+ for ax in axs:
+ ax.plot3D(*zip(start*scale, end*scale))
+ for i, ax in enumerate(axs):
+ ax.set_aspect(aspects[i], adjustable='box')
def test_axes3d_repr():
@@ -56,7 +75,7 @@ def test_axes3d_repr():
ax.set_ylabel('y')
ax.set_zlabel('z')
assert repr(ax) == (
- "")
@@ -906,14 +925,16 @@ def test_add_collection3d_zs_scalar():
@mpl3d_image_comparison(['axes3d_labelpad.png'], remove_text=False)
def test_axes3d_labelpad():
fig = plt.figure()
- ax = fig.add_axes(Axes3D(fig, auto_add_to_figure=False))
+ ax = fig.add_axes(Axes3D(fig))
# labelpad respects rcParams
assert ax.xaxis.labelpad == mpl.rcParams['axes.labelpad']
# labelpad can be set in set_label
ax.set_xlabel('X LABEL', labelpad=10)
assert ax.xaxis.labelpad == 10
ax.set_ylabel('Y LABEL')
- ax.set_zlabel('Z LABEL')
+ ax.set_zlabel('Z LABEL', labelpad=20)
+ assert ax.zaxis.labelpad == 20
+ assert ax.get_zlabel() == 'Z LABEL'
# or manually
ax.yaxis.labelpad = 20
ax.zaxis.labelpad = -40
@@ -957,7 +978,8 @@ def _test_proj_make_M():
R = np.array([100, 100, 100])
V = np.array([0, 0, 1])
roll = 0
- viewM = proj3d.view_transformation(E, R, V, roll)
+ u, v, w = proj3d._view_axes(E, R, V, roll)
+ viewM = proj3d._view_transformation_uvw(u, v, w, E)
perspM = proj3d.persp_transformation(100, -100, 1)
M = np.dot(perspM, viewM)
return M
@@ -1023,7 +1045,8 @@ def test_proj_axes_cube_ortho():
R = np.array([0, 0, 0])
V = np.array([0, 0, 1])
roll = 0
- viewM = proj3d.view_transformation(E, R, V, roll)
+ u, v, w = proj3d._view_axes(E, R, V, roll)
+ viewM = proj3d._view_transformation_uvw(u, v, w, E)
orthoM = proj3d.ortho_transformation(-1, 1)
M = np.dot(orthoM, viewM)
@@ -1106,6 +1129,7 @@ def test_lines_dists_nowarning():
def test_autoscale():
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+ assert ax.get_zscale() == 'linear'
ax.margins(x=0, y=.1, z=.2)
ax.plot([0, 1], [0, 1], [0, 1])
assert ax.get_w_lims() == (0, 1, -.1, 1.1, -.2, 1.2)
@@ -1550,6 +1574,9 @@ def test_equal_box_aspect():
ax.axis('off')
ax.set_box_aspect((1, 1, 1))
+ with pytest.raises(ValueError, match="Argument zoom ="):
+ ax.set_box_aspect((1, 1, 1), zoom=-1)
+
def test_colorbar_pos():
num_plots = 2
@@ -1567,6 +1594,55 @@ def test_colorbar_pos():
assert cbar.ax.get_position().extents[1] < 0.2
+def test_inverted_zaxis():
+ fig = plt.figure()
+ ax = fig.add_subplot(projection='3d')
+ assert not ax.zaxis_inverted()
+ assert ax.get_zlim() == (0, 1)
+ assert ax.get_zbound() == (0, 1)
+
+ # Change bound
+ ax.set_zbound((0, 2))
+ assert not ax.zaxis_inverted()
+ assert ax.get_zlim() == (0, 2)
+ assert ax.get_zbound() == (0, 2)
+
+ # Change invert
+ ax.invert_zaxis()
+ assert ax.zaxis_inverted()
+ assert ax.get_zlim() == (2, 0)
+ assert ax.get_zbound() == (0, 2)
+
+ # Set upper bound
+ ax.set_zbound(upper=1)
+ assert ax.zaxis_inverted()
+ assert ax.get_zlim() == (1, 0)
+ assert ax.get_zbound() == (0, 1)
+
+ # Set lower bound
+ ax.set_zbound(lower=2)
+ assert ax.zaxis_inverted()
+ assert ax.get_zlim() == (2, 1)
+ assert ax.get_zbound() == (1, 2)
+
+
+def test_set_zlim():
+ fig = plt.figure()
+ ax = fig.add_subplot(projection='3d')
+ assert ax.get_zlim() == (0, 1)
+ ax.set_zlim(zmax=2)
+ assert ax.get_zlim() == (0, 2)
+ ax.set_zlim(zmin=1)
+ assert ax.get_zlim() == (1, 2)
+
+ with pytest.raises(
+ TypeError, match="Cannot pass both 'bottom' and 'zmin'"):
+ ax.set_zlim(bottom=0, zmin=1)
+ with pytest.raises(
+ TypeError, match="Cannot pass both 'top' and 'zmax'"):
+ ax.set_zlim(top=0, zmax=1)
+
+
def test_shared_axes_retick():
fig = plt.figure()
ax1 = fig.add_subplot(211, projection="3d")
@@ -1617,6 +1693,82 @@ def convert_lim(dmin, dmax):
assert z_center != pytest.approx(z_center0)
+@pytest.mark.parametrize("tool,button,key,expected",
+ [("zoom", MouseButton.LEFT, None, # zoom in
+ ((0.00, 0.06), (0.01, 0.07), (0.02, 0.08))),
+ ("zoom", MouseButton.LEFT, 'x', # zoom in
+ ((-0.01, 0.10), (-0.03, 0.08), (-0.06, 0.06))),
+ ("zoom", MouseButton.LEFT, 'y', # zoom in
+ ((-0.07, 0.04), (-0.03, 0.08), (0.00, 0.11))),
+ ("zoom", MouseButton.RIGHT, None, # zoom out
+ ((-0.09, 0.15), (-0.07, 0.17), (-0.06, 0.18))),
+ ("pan", MouseButton.LEFT, None,
+ ((-0.70, -0.58), (-1.03, -0.91), (-1.27, -1.15))),
+ ("pan", MouseButton.LEFT, 'x',
+ ((-0.96, -0.84), (-0.58, -0.46), (-0.06, 0.06))),
+ ("pan", MouseButton.LEFT, 'y',
+ ((0.20, 0.32), (-0.51, -0.39), (-1.27, -1.15)))])
+def test_toolbar_zoom_pan(tool, button, key, expected):
+ # NOTE: The expected zoom values are rough ballparks of moving in the view
+ # to make sure we are getting the right direction of motion.
+ # The specific values can and should change if the zoom movement
+ # scaling factor gets updated.
+ fig = plt.figure()
+ ax = fig.add_subplot(projection='3d')
+ ax.scatter(0, 0, 0)
+ fig.canvas.draw()
+ xlim0, ylim0, zlim0 = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()
+
+ # Mouse from (0, 0) to (1, 1)
+ d0 = (0, 0)
+ d1 = (1, 1)
+ # Convert to screen coordinates ("s"). Events are defined only with pixel
+ # precision, so round the pixel values, and below, check against the
+ # corresponding xdata/ydata, which are close but not equal to d0/d1.
+ s0 = ax.transData.transform(d0).astype(int)
+ s1 = ax.transData.transform(d1).astype(int)
+
+ # Set up the mouse movements
+ start_event = MouseEvent(
+ "button_press_event", fig.canvas, *s0, button, key=key)
+ stop_event = MouseEvent(
+ "button_release_event", fig.canvas, *s1, button, key=key)
+
+ tb = NavigationToolbar2(fig.canvas)
+ if tool == "zoom":
+ tb.zoom()
+ tb.press_zoom(start_event)
+ tb.drag_zoom(stop_event)
+ tb.release_zoom(stop_event)
+ else:
+ tb.pan()
+ tb.press_pan(start_event)
+ tb.drag_pan(stop_event)
+ tb.release_pan(stop_event)
+
+ # Should be close, but won't be exact due to screen integer resolution
+ xlim, ylim, zlim = expected
+ assert ax.get_xlim3d() == pytest.approx(xlim, abs=0.01)
+ assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)
+ assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)
+
+ # Ensure that back, forward, and home buttons work
+ tb.back()
+ assert ax.get_xlim3d() == pytest.approx(xlim0)
+ assert ax.get_ylim3d() == pytest.approx(ylim0)
+ assert ax.get_zlim3d() == pytest.approx(zlim0)
+
+ tb.forward()
+ assert ax.get_xlim3d() == pytest.approx(xlim, abs=0.01)
+ assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)
+ assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)
+
+ tb.home()
+ assert ax.get_xlim3d() == pytest.approx(xlim0)
+ assert ax.get_ylim3d() == pytest.approx(ylim0)
+ assert ax.get_zlim3d() == pytest.approx(zlim0)
+
+
@mpl.style.context('default')
@check_figures_equal(extensions=["png"])
def test_scalarmap_update(fig_test, fig_ref):
@@ -1961,3 +2113,14 @@ def test_arc_pathpatch():
angle=20, theta1=10, theta2=130)
ax.add_patch(a)
art3d.pathpatch_2d_to_3d(a, z=0, zdir='z')
+
+
+@image_comparison(baseline_images=['panecolor_rcparams.png'],
+ remove_text=True,
+ style='mpl20')
+def test_panecolor_rcparams():
+ with plt.rc_context({'axes3d.xaxis.panecolor': 'r',
+ 'axes3d.yaxis.panecolor': 'g',
+ 'axes3d.zaxis.panecolor': 'b'}):
+ fig = plt.figure(figsize=(1, 1))
+ fig.add_subplot(projection='3d')
diff --git a/plot_types/3D/README.rst b/plot_types/3D/README.rst
new file mode 100644
index 000000000000..e7157d4ba628
--- /dev/null
+++ b/plot_types/3D/README.rst
@@ -0,0 +1,6 @@
+.. _3D_plots:
+
+3D
+--
+
+3D plots using the `mpl_toolkits.mplot3d` library.
diff --git a/plot_types/3D/scatter3d_simple.py b/plot_types/3D/scatter3d_simple.py
new file mode 100644
index 000000000000..023a46448ccf
--- /dev/null
+++ b/plot_types/3D/scatter3d_simple.py
@@ -0,0 +1,29 @@
+"""
+==============
+3D scatterplot
+==============
+
+See `~mpl_toolkits.mplot3d.axes3d.Axes3D.scatter`.
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+plt.style.use('_mpl-gallery')
+
+# Make data
+np.random.seed(19680801)
+n = 100
+rng = np.random.default_rng()
+xs = rng.uniform(23, 32, n)
+ys = rng.uniform(0, 100, n)
+zs = rng.uniform(-50, -25, n)
+
+# Plot
+fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+ax.scatter(xs, ys, zs)
+
+ax.set(xticklabels=[],
+ yticklabels=[],
+ zticklabels=[])
+
+plt.show()
diff --git a/plot_types/3D/surface3d_simple.py b/plot_types/3D/surface3d_simple.py
new file mode 100644
index 000000000000..b1aff7d23b12
--- /dev/null
+++ b/plot_types/3D/surface3d_simple.py
@@ -0,0 +1,29 @@
+"""
+==========
+3D surface
+==========
+
+See `~mpl_toolkits.mplot3d.axes3d.Axes3D.plot_surface`.
+"""
+import matplotlib.pyplot as plt
+from matplotlib import cm
+import numpy as np
+
+plt.style.use('_mpl-gallery')
+
+# Make data
+X = np.arange(-5, 5, 0.25)
+Y = np.arange(-5, 5, 0.25)
+X, Y = np.meshgrid(X, Y)
+R = np.sqrt(X**2 + Y**2)
+Z = np.sin(R)
+
+# Plot the surface
+fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+ax.plot_surface(X, Y, Z, vmin=Z.min() * 2, cmap=cm.Blues)
+
+ax.set(xticklabels=[],
+ yticklabels=[],
+ zticklabels=[])
+
+plt.show()
diff --git a/plot_types/3D/trisurf3d_simple.py b/plot_types/3D/trisurf3d_simple.py
new file mode 100644
index 000000000000..92832c1b5b3a
--- /dev/null
+++ b/plot_types/3D/trisurf3d_simple.py
@@ -0,0 +1,34 @@
+"""
+======================
+Triangular 3D surfaces
+======================
+
+See `~mpl_toolkits.mplot3d.axes3d.Axes3D.plot_trisurf`.
+"""
+import matplotlib.pyplot as plt
+from matplotlib import cm
+import numpy as np
+
+plt.style.use('_mpl-gallery')
+
+n_radii = 8
+n_angles = 36
+
+# Make radii and angles spaces
+radii = np.linspace(0.125, 1.0, n_radii)
+angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)[..., np.newaxis]
+
+# Convert polar (radii, angles) coords to cartesian (x, y) coords.
+x = np.append(0, (radii*np.cos(angles)).flatten())
+y = np.append(0, (radii*np.sin(angles)).flatten())
+z = np.sin(-x*y)
+
+# Plot
+fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
+ax.plot_trisurf(x, y, z, vmin=z.min() * 2, cmap=cm.Blues)
+
+ax.set(xticklabels=[],
+ yticklabels=[],
+ zticklabels=[])
+
+plt.show()
diff --git a/plot_types/3D/voxels_simple.py b/plot_types/3D/voxels_simple.py
new file mode 100644
index 000000000000..c3473e108969
--- /dev/null
+++ b/plot_types/3D/voxels_simple.py
@@ -0,0 +1,31 @@
+"""
+==========================
+3D voxel / volumetric plot
+==========================
+
+See `~mpl_toolkits.mplot3d.axes3d.Axes3D.voxels`.
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+plt.style.use('_mpl-gallery')
+
+# Prepare some coordinates
+x, y, z = np.indices((8, 8, 8))
+
+# Draw cuboids in the top left and bottom right corners
+cube1 = (x < 3) & (y < 3) & (z < 3)
+cube2 = (x >= 5) & (y >= 5) & (z >= 5)
+
+# Combine the objects into a single boolean array
+voxelarray = cube1 | cube2
+
+# Plot
+fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+ax.voxels(voxelarray, edgecolor='k')
+
+ax.set(xticklabels=[],
+ yticklabels=[],
+ zticklabels=[])
+
+plt.show()
diff --git a/plot_types/3D/wire3d_simple.py b/plot_types/3D/wire3d_simple.py
new file mode 100644
index 000000000000..c0eaf40210e8
--- /dev/null
+++ b/plot_types/3D/wire3d_simple.py
@@ -0,0 +1,24 @@
+"""
+=================
+3D wireframe plot
+=================
+
+See `~mpl_toolkits.mplot3d.axes3d.Axes3D.plot_wireframe`.
+"""
+from mpl_toolkits.mplot3d import axes3d
+import matplotlib.pyplot as plt
+
+plt.style.use('_mpl-gallery')
+
+# Make data
+X, Y, Z = axes3d.get_test_data(0.05)
+
+# Plot
+fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
+
+ax.set(xticklabels=[],
+ yticklabels=[],
+ zticklabels=[])
+
+plt.show()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000000..bbd0c8baf2b3
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[build-system]
+build-backend = "setuptools.build_meta"
+requires = [
+ "certifi>=2020.06.20",
+ "oldest-supported-numpy",
+ "setuptools_scm>=7",
+]
diff --git a/requirements/doc/doc-requirements.txt b/requirements/doc/doc-requirements.txt
index ad0e44d16fa4..98586608cbc9 100644
--- a/requirements/doc/doc-requirements.txt
+++ b/requirements/doc/doc-requirements.txt
@@ -14,7 +14,7 @@ ipywidgets
numpydoc>=1.0
packaging>=20
pydata-sphinx-theme>=0.9.0
-mpl-sphinx-theme~=3.6.0
+mpl-sphinx-theme
sphinxcontrib-svg2pdfconverter>=1.1.0
sphinx-gallery>=0.10
sphinx-copybutton
diff --git a/requirements/testing/flake8.txt b/requirements/testing/flake8.txt
index f98708973072..a4d006b8551e 100644
--- a/requirements/testing/flake8.txt
+++ b/requirements/testing/flake8.txt
@@ -5,3 +5,5 @@ flake8>=3.8
pydocstyle>=5.1.0
# 1.4.0 adds docstring-convention=all
flake8-docstrings>=1.4.0
+# fix bug where flake8 aborts checking on syntax error
+flake8-force
diff --git a/requirements/testing/minver.txt b/requirements/testing/minver.txt
index d8dd2f66c22c..d932b0aa34e7 100644
--- a/requirements/testing/minver.txt
+++ b/requirements/testing/minver.txt
@@ -6,6 +6,6 @@ kiwisolver==1.0.1
numpy==1.19.0
packaging==20.0
pillow==6.2.1
-pyparsing==2.2.1
+pyparsing==2.3.1
python-dateutil==2.7
fonttools==4.22.0
diff --git a/setup.py b/setup.py
index d1f1de7078ff..365de0c0b5a2 100644
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,9 @@
import setuptools.command.build_py
import setuptools.command.sdist
+# sys.path modified to find setupext.py during pyproject.toml builds.
+sys.path.append(str(Path(__file__).resolve().parent))
+
import setupext
from setupext import print_raw, print_status
@@ -68,6 +71,12 @@ def has_flag(self, flagname):
class BuildExtraLibraries(setuptools.command.build_ext.build_ext):
def finalize_options(self):
+ # If coverage is enabled then need to keep the .o and .gcno files in a
+ # non-temporary directory otherwise coverage info not collected.
+ cppflags = os.getenv('CPPFLAGS')
+ if cppflags and '--coverage' in cppflags:
+ self.build_temp = 'build'
+
self.distribution.ext_modules[:] = [
ext
for package in good_packages
@@ -208,8 +217,9 @@ def update_matplotlibrc(path):
class BuildPy(setuptools.command.build_py.build_py):
def run(self):
super().run()
- update_matplotlibrc(
- Path(self.build_lib, "matplotlib/mpl-data/matplotlibrc"))
+ if not getattr(self, 'editable_mode', False):
+ update_matplotlibrc(
+ Path(self.build_lib, "matplotlib/mpl-data/matplotlibrc"))
class Sdist(setuptools.command.sdist.sdist):
@@ -271,8 +281,8 @@ def make_release_tree(self, base_dir, files):
'Forum': 'https://discourse.matplotlib.org/',
'Donate': 'https://numfocus.org/donate-to-matplotlib'
},
- long_description=Path("README.rst").read_text(encoding="utf-8"),
- long_description_content_type="text/x-rst",
+ long_description=Path("README.md").read_text(encoding="utf-8"),
+ long_description_content_type="text/markdown",
license="PSF",
platforms="any",
classifiers=[
@@ -300,11 +310,13 @@ def make_release_tree(self, base_dir, files):
package_data=package_data,
python_requires='>={}'.format('.'.join(str(n) for n in py_min_version)),
- setup_requires=[
- "certifi>=2020.06.20",
- "numpy>=1.19",
- "setuptools_scm>=7",
- ],
+ # When updating the list of dependencies, add an api_changes/development
+ # entry and also update the following places:
+ # - lib/matplotlib/__init__.py (matplotlib._check_versions())
+ # - requirements/testing/minver.txt
+ # - doc/devel/dependencies.rst
+ # - .github/workflows/tests.yml
+ # - environment.yml
install_requires=[
"contourpy>=1.0.1",
"cycler>=0.10",
@@ -313,7 +325,7 @@ def make_release_tree(self, base_dir, files):
"numpy>=1.19",
"packaging>=20.0",
"pillow>=6.2.0",
- "pyparsing>=2.2.1",
+ "pyparsing>=2.3.1",
"python-dateutil>=2.7",
] + (
# Installing from a git checkout that is not producing a wheel.
diff --git a/setupext.py b/setupext.py
index 0387223b9487..df8bdc2aed77 100644
--- a/setupext.py
+++ b/setupext.py
@@ -216,7 +216,7 @@ def print_raw(*args, **kwargs): pass # Suppress our own output.
def print_status(package, status):
initial_indent = "%12s: " % package
indent = ' ' * 18
- print_raw(textwrap.fill(str(status), width=80,
+ print_raw(textwrap.fill(status, width=80,
initial_indent=initial_indent,
subsequent_indent=indent))
diff --git a/src/_backend_agg_wrapper.cpp b/src/_backend_agg_wrapper.cpp
index 9d0c3dbc759a..94b863873158 100644
--- a/src/_backend_agg_wrapper.cpp
+++ b/src/_backend_agg_wrapper.cpp
@@ -46,6 +46,12 @@ static void PyBufferRegion_dealloc(PyBufferRegion *self)
static PyObject *PyBufferRegion_to_string(PyBufferRegion *self, PyObject *args)
{
+ char const* msg =
+ "BufferRegion.to_string is deprecated since Matplotlib 3.7 and will "
+ "be removed two minor releases later; use np.asarray(region) instead.";
+ if (PyErr_WarnEx(PyExc_DeprecationWarning, msg, 1)) {
+ return NULL;
+ }
return PyBytes_FromStringAndSize((const char *)self->x->get_data(),
self->x->get_height() * self->x->get_stride());
}
@@ -83,6 +89,13 @@ static PyObject *PyBufferRegion_get_extents(PyBufferRegion *self, PyObject *args
static PyObject *PyBufferRegion_to_string_argb(PyBufferRegion *self, PyObject *args)
{
+ char const* msg =
+ "BufferRegion.to_string_argb is deprecated since Matplotlib 3.7 and "
+ "will be removed two minor releases later; use "
+ "np.take(region, [2, 1, 0, 3], axis=2) instead.";
+ if (PyErr_WarnEx(PyExc_DeprecationWarning, msg, 1)) {
+ return NULL;
+ }
PyObject *bufobj;
uint8_t *buf;
diff --git a/src/_tkagg.cpp b/src/_tkagg.cpp
index bbca8e8d066c..663c06fd0474 100644
--- a/src/_tkagg.cpp
+++ b/src/_tkagg.cpp
@@ -231,13 +231,13 @@ bool load_tcl_tk(T lib)
{
// Try to fill Tcl/Tk global vars with function pointers. Return whether
// all of them have been filled.
- if (void* ptr = dlsym(lib, "Tcl_SetVar")) {
+ if (auto ptr = dlsym(lib, "Tcl_SetVar")) {
TCL_SETVAR = (Tcl_SetVar_t)ptr;
}
- if (void* ptr = dlsym(lib, "Tk_FindPhoto")) {
+ if (auto ptr = dlsym(lib, "Tk_FindPhoto")) {
TK_FIND_PHOTO = (Tk_FindPhoto_t)ptr;
}
- if (void* ptr = dlsym(lib, "Tk_PhotoPutBlock")) {
+ if (auto ptr = dlsym(lib, "Tk_PhotoPutBlock")) {
TK_PHOTO_PUT_BLOCK = (Tk_PhotoPutBlock_t)ptr;
}
return TCL_SETVAR && TK_FIND_PHOTO && TK_PHOTO_PUT_BLOCK;
diff --git a/src/tri/_tri.cpp b/src/tri/_tri.cpp
index b7a87783de29..80a14201645b 100644
--- a/src/tri/_tri.cpp
+++ b/src/tri/_tri.cpp
@@ -12,6 +12,7 @@
#include
#include
+#include <random>
TriEdge::TriEdge()
@@ -1465,8 +1466,8 @@ TrapezoidMapTriFinder::initialize()
_tree->assert_valid(false);
// Randomly shuffle all edges other than first 2.
- RandomNumberGenerator rng(1234);
- std::random_shuffle(_edges.begin()+2, _edges.end(), rng);
+ std::mt19937 rng(1234);
+ std::shuffle(_edges.begin()+2, _edges.end(), rng);
// Add edges, one at a time, to tree.
size_t nedges = _edges.size();
@@ -2056,16 +2057,3 @@ TrapezoidMapTriFinder::Trapezoid::set_upper_right(Trapezoid* upper_right_)
if (upper_right != 0)
upper_right->upper_left = this;
}
-
-
-
-RandomNumberGenerator::RandomNumberGenerator(unsigned long seed)
- : _m(21870), _a(1291), _c(4621), _seed(seed % _m)
-{}
-
-unsigned long
-RandomNumberGenerator::operator()(unsigned long max_value)
-{
- _seed = (_seed*_a + _c) % _m;
- return (_seed*max_value) / _m;
-}
diff --git a/src/tri/_tri.h b/src/tri/_tri.h
index 28c8e07933cc..29b4ff81fb17 100644
--- a/src/tri/_tri.h
+++ b/src/tri/_tri.h
@@ -791,28 +791,4 @@ class TrapezoidMapTriFinder
Node* _tree; // Root node of the trapezoid map search tree. Owned.
};
-
-
-/* Linear congruential random number generator. Edges in the triangulation are
- * randomly shuffled before being added to the trapezoid map. Want the
- * shuffling to be identical across different operating systems and the same
- * regardless of previous random number use. Would prefer to use a STL or
- * Boost random number generator, but support is not consistent across
- * different operating systems so implementing own here.
- *
- * This is not particularly random, but is perfectly adequate for the use here.
- * Coefficients taken from Numerical Recipes in C. */
-class RandomNumberGenerator
-{
-public:
- RandomNumberGenerator(unsigned long seed);
-
- // Return random integer in the range 0 to max_value-1.
- unsigned long operator()(unsigned long max_value);
-
-private:
- const unsigned long _m, _a, _c;
- unsigned long _seed;
-};
-
#endif
diff --git a/tools/subset.py b/tools/subset.py
index d65c69bfad2c..9fdf3789b0df 100644
--- a/tools/subset.py
+++ b/tools/subset.py
@@ -35,20 +35,21 @@
import fontforge
-def log_namelist(nam, unicode):
- if nam and isinstance(unicode, int):
- print(f"0x{unicode:04X}", fontforge.nameFromUnicode(unicode), file=nam)
+def log_namelist(name, unicode):
+ if name and isinstance(unicode, int):
+      print(f"0x{unicode:04X}", fontforge.nameFromUnicode(unicode),
+ file=name)
-def select_with_refs(font, unicode, newfont, pe=None, nam=None):
+def select_with_refs(font, unicode, newfont, pe=None, name=None):
newfont.selection.select(('more', 'unicode'), unicode)
- log_namelist(nam, unicode)
+ log_namelist(name, unicode)
if pe:
print(f"SelectMore({unicode})", file=pe)
try:
for ref in font[unicode].references:
newfont.selection.select(('more',), ref[0])
- log_namelist(nam, ref[0])
+ log_namelist(name, ref[0])
if pe:
print(f'SelectMore("{ref[0]}")', file=pe)
except Exception:
@@ -60,11 +61,11 @@ def subset_font_raw(font_in, font_out, unicodes, opts):
# 2010-12-06 DC To allow setting namelist filenames,
# change getopt.gnu_getopt from namelist to namelist=
# and invert comments on following 2 lines
- # nam_fn = opts['--namelist']
- nam_fn = f'{font_out}.nam'
- nam = open(nam_fn, 'w')
+ # name_fn = opts['--namelist']
+ name_fn = f'{font_out}.name'
+ name = open(name_fn, 'w')
else:
- nam = None
+ name = None
if '--script' in opts:
pe_fn = "/tmp/script.pe"
pe = open(pe_fn, 'w')
@@ -75,7 +76,7 @@ def subset_font_raw(font_in, font_out, unicodes, opts):
print(f'Open("{font_in}")', file=pe)
extract_vert_to_script(font_in, pe)
for i in unicodes:
- select_with_refs(font, i, font, pe, nam)
+ select_with_refs(font, i, font, pe, name)
addl_glyphs = []
if '--nmr' in opts:
@@ -86,9 +87,9 @@ def subset_font_raw(font_in, font_out, unicodes, opts):
addl_glyphs.append('.notdef')
for glyph in addl_glyphs:
font.selection.select(('more',), glyph)
- if nam:
+ if name:
print(f"0x{fontforge.unicodeFromName(glyph):0.4X}", glyph,
- file=nam)
+ file=name)
if pe:
print(f'SelectMore("{glyph}")', file=pe)
@@ -112,7 +113,7 @@ def subset_font_raw(font_in, font_out, unicodes, opts):
new.em = font.em
new.layers['Fore'].is_quadratic = font.layers['Fore'].is_quadratic
for i in unicodes:
- select_with_refs(font, i, new, pe, nam)
+ select_with_refs(font, i, new, pe, name)
new.paste()
# This is a hack - it should have been taken care of above.
font.selection.select('space')
@@ -149,9 +150,9 @@ def subset_font_raw(font_in, font_out, unicodes, opts):
font.selection.select(glname)
font.cut()
- if nam:
+ if name:
print("Writing NameList", end="")
- nam.close()
+ name.close()
if pe:
print(f'Generate("{font_out}")', file=pe)
@@ -177,7 +178,7 @@ def subset_font(font_in, font_out, unicodes, opts):
if font_out != font_out_raw:
os.rename(font_out_raw, font_out)
# 2011-02-14 DC this needs to only happen with --namelist is used
-# os.rename(font_out_raw + '.nam', font_out + '.nam')
+# os.rename(font_out_raw + '.name', font_out + '.name')
def getsubset(subset, font_in):
diff --git a/tutorials/advanced/path_tutorial.py b/tutorials/advanced/path_tutorial.py
index 19632ce42964..70bb5998cecb 100644
--- a/tutorials/advanced/path_tutorial.py
+++ b/tutorials/advanced/path_tutorial.py
@@ -76,11 +76,11 @@
# ==============
#
# Some of the path components require multiple vertices to specify them:
-# for example CURVE 3 is a `bézier
+# for example CURVE 3 is a `Bézier
+# <https://en.wikipedia.org/wiki/B%C3%A9zier_curve>`_ curve with one
# control point and one end point, and CURVE4 has three vertices for the
# two control points and the end point. The example below shows a
-# CURVE4 Bézier spline -- the bézier curve will be contained in the
+# CURVE4 Bézier spline -- the Bézier curve will be contained in the
# convex hull of the start point, the two control points, and the end
# point
@@ -139,8 +139,8 @@
# for each histogram bar: the rectangle width is the bin width and the
# rectangle height is the number of datapoints in that bin. First we'll
# create some random normally distributed data and compute the
-# histogram. Because numpy returns the bin edges and not centers, the
-# length of ``bins`` is 1 greater than the length of ``n`` in the
+# histogram. Because NumPy returns the bin edges and not centers, the
+# length of ``bins`` is one greater than the length of ``n`` in the
# example below::
#
# # histogram our data with numpy
@@ -159,10 +159,10 @@
#
# Now we have to construct our compound path, which will consist of a
# series of ``MOVETO``, ``LINETO`` and ``CLOSEPOLY`` for each rectangle.
-# For each rectangle, we need 5 vertices: 1 for the ``MOVETO``, 3 for
-# the ``LINETO``, and 1 for the ``CLOSEPOLY``. As indicated in the
-# table above, the vertex for the closepoly is ignored but we still need
-# it to keep the codes aligned with the vertices::
+# For each rectangle, we need five vertices: one for the ``MOVETO``,
+# three for the ``LINETO``, and one for the ``CLOSEPOLY``. As indicated
+# in the table above, the vertex for the closepoly is ignored but we still
+# need it to keep the codes aligned with the vertices::
#
# nverts = nrects*(1+3+1)
# verts = np.zeros((nverts, 2))
diff --git a/tutorials/colors/colormap-manipulation.py b/tutorials/colors/colormap-manipulation.py
index 297506004861..8b2cbc784bc0 100644
--- a/tutorials/colors/colormap-manipulation.py
+++ b/tutorials/colors/colormap-manipulation.py
@@ -255,6 +255,48 @@ def plot_linearmap(cdict):
plot_examples([cmap1, cmap2])
+#############################################################################
+# .. _reversing-colormap:
+#
+# Reversing a colormap
+# ====================
+#
+# `.Colormap.reversed` creates a new colormap that is a reversed version of
+# the original colormap.
+
+colors = ["#ffffcc", "#a1dab4", "#41b6c4", "#2c7fb8", "#253494"]
+my_cmap = ListedColormap(colors, name="my_cmap")
+
+my_cmap_r = my_cmap.reversed()
+
+plot_examples([my_cmap, my_cmap_r])
+# %%
+# If no name is passed in, ``.reversed`` also names the copy by
+# :ref:`appending '_r' ` to the original colormap's
+# name.
+
+##############################################################################
+# .. _registering-colormap:
+#
+# Registering a colormap
+# ======================
+#
+# Colormaps can be added to the `matplotlib.colormaps` list of named colormaps.
+# This allows the colormaps to be accessed by name in plotting functions:
+
+# my_cmap, my_cmap_r from reversing a colormap
+mpl.colormaps.register(cmap=my_cmap)
+mpl.colormaps.register(cmap=my_cmap_r)
+
+data = [[1, 2, 3, 4, 5]]
+
+fig, (ax1, ax2) = plt.subplots(nrows=2)
+
+ax1.imshow(data, cmap='my_cmap')
+ax2.imshow(data, cmap='my_cmap_r')
+
+plt.show()
+
#############################################################################
#
# .. admonition:: References
diff --git a/tutorials/intermediate/artists.py b/tutorials/intermediate/artists.py
index f76c62d8462c..5d0ed4ea38c4 100644
--- a/tutorials/intermediate/artists.py
+++ b/tutorials/intermediate/artists.py
@@ -29,8 +29,8 @@
the containers are places to put them (:class:`~matplotlib.axis.Axis`,
:class:`~matplotlib.axes.Axes` and :class:`~matplotlib.figure.Figure`). The
standard use is to create a :class:`~matplotlib.figure.Figure` instance, use
-the ``Figure`` to create one or more :class:`~matplotlib.axes.Axes` or
-:class:`~matplotlib.axes.Subplot` instances, and use the ``Axes`` instance
+the ``Figure`` to create one or more :class:`~matplotlib.axes.Axes`
+instances, and use the ``Axes`` instance
helper methods to create the primitives. In the example below, we create a
``Figure`` instance using :func:`matplotlib.pyplot.figure`, which is a
convenience method for instantiating ``Figure`` instances and connecting them
@@ -59,10 +59,7 @@ class in the Matplotlib API, and the one you will be working with most
:class:`~matplotlib.image.AxesImage`, respectively). These helper methods
will take your data (e.g., ``numpy`` arrays and strings) and create
primitive ``Artist`` instances as needed (e.g., ``Line2D``), add them to
-the relevant containers, and draw them when requested. Most of you
-are probably familiar with the :class:`~matplotlib.axes.Subplot`,
-which is just a special case of an ``Axes`` that lives on a regular
-rows by columns grid of ``Subplot`` instances. If you want to create
+the relevant containers, and draw them when requested. If you want to create
an ``Axes`` at an arbitrary location, simply use the
:meth:`~matplotlib.figure.Figure.add_axes` method which takes a list
of ``[left, bottom, width, height]`` values in 0-1 relative figure
@@ -79,8 +76,8 @@ class in the Matplotlib API, and the one you will be working with most
line, = ax.plot(t, s, color='blue', lw=2)
In this example, ``ax`` is the ``Axes`` instance created by the
-``fig.add_subplot`` call above (remember ``Subplot`` is just a subclass of
-``Axes``) and when you call ``ax.plot``, it creates a ``Line2D`` instance and
+``fig.add_subplot`` call above and when you call ``ax.plot``, it creates a
+``Line2D`` instance and
adds it to the ``Axes``. In the interactive `IPython `_
session below, you can see that the ``Axes.lines`` list is length one and
contains the same line that was returned by the ``line, = ax.plot...`` call:
@@ -115,6 +112,7 @@ class in the Matplotlib API, and the one you will be working with most
Try creating the figure below.
"""
+# sphinx_gallery_capture_repr = ('__repr__',)
import numpy as np
import matplotlib.pyplot as plt
@@ -122,8 +120,8 @@ class in the Matplotlib API, and the one you will be working with most
fig = plt.figure()
fig.subplots_adjust(top=0.8)
ax1 = fig.add_subplot(211)
-ax1.set_ylabel('volts')
-ax1.set_title('a sine wave')
+ax1.set_ylabel('Voltage [V]')
+ax1.set_title('A sine wave')
t = np.arange(0.0, 1.0, 0.01)
s = np.sin(2*np.pi*t)
@@ -135,7 +133,7 @@ class in the Matplotlib API, and the one you will be working with most
ax2 = fig.add_axes([0.15, 0.1, 0.7, 0.3])
n, bins, patches = ax2.hist(np.random.randn(1000), 50,
facecolor='yellow', edgecolor='yellow')
-ax2.set_xlabel('time (s)')
+ax2.set_xlabel('Time [s]')
plt.show()
@@ -298,10 +296,10 @@ class in the Matplotlib API, and the one you will be working with most
# In [158]: ax2 = fig.add_axes([0.1, 0.1, 0.7, 0.3])
#
# In [159]: ax1
-# Out[159]:
+# Out[159]:
#
# In [160]: print(fig.axes)
-# [, ]
+# [, ]
#
# Because the figure maintains the concept of the "current Axes" (see
# :meth:`Figure.gca ` and
@@ -348,7 +346,7 @@ class in the Matplotlib API, and the one you will be working with most
# ================ ============================================================
# Figure attribute Description
# ================ ============================================================
-# axes A list of `~.axes.Axes` instances (includes Subplot)
+# axes A list of `~.axes.Axes` instances
# patch The `.Rectangle` background
# images A list of `.FigureImage` patches -
# useful for raw pixel display
@@ -718,6 +716,6 @@ class in the Matplotlib API, and the one you will be working with most
# dollar signs and colors them green on the right side of the yaxis.
#
#
-# .. include:: ../../gallery/pyplots/dollar_ticks.rst
+# .. include:: ../../gallery/ticks/dollar_ticks.rst
# :start-after: y axis labels.
# :end-before: .. admonition:: References
diff --git a/tutorials/intermediate/autoscale.py b/tutorials/intermediate/autoscale.py
index 0f4dda87d183..3b563510aa1f 100644
--- a/tutorials/intermediate/autoscale.py
+++ b/tutorials/intermediate/autoscale.py
@@ -26,7 +26,7 @@
# -------
# The default margin around the data limits is 5%:
-ax.margins()
+print(ax.margins())
###############################################################################
# The margins can be made larger using `~matplotlib.axes.Axes.margins`:
diff --git a/tutorials/intermediate/constrainedlayout_guide.py b/tutorials/intermediate/constrainedlayout_guide.py
index 84cbf8c0447f..3734df1bd5d6 100644
--- a/tutorials/intermediate/constrainedlayout_guide.py
+++ b/tutorials/intermediate/constrainedlayout_guide.py
@@ -263,7 +263,7 @@ def example_plot(ax, fontsize=12, hide_labels=False):
##########################################
# If there are more than two columns, the *wspace* is shared between them,
-# so here the wspace is divided in 2, with a *wspace* of 0.1 between each
+# so here the wspace is divided in two, with a *wspace* of 0.1 between each
# column:
fig, axs = plt.subplots(2, 3, layout="constrained")
diff --git a/tutorials/introductory/images.py b/tutorials/introductory/images.py
index 3f816b5b6afd..7673037c4a9c 100644
--- a/tutorials/introductory/images.py
+++ b/tutorials/introductory/images.py
@@ -49,7 +49,8 @@
"""
import matplotlib.pyplot as plt
-import matplotlib.image as mpimg
+import numpy as np
+from PIL import Image
###############################################################################
# .. _importing_data:
@@ -72,23 +73,14 @@
# `_
# to your computer for the rest of this tutorial.
#
-# And here we go...
+# We use Pillow to open an image (with `PIL.Image.open`), and immediately
+# convert the `PIL.Image.Image` object into an 8-bit (``dtype=uint8``) numpy
+# array.
-img = mpimg.imread('../../doc/_static/stinkbug.png')
-print(img)
+img = np.asarray(Image.open('../../doc/_static/stinkbug.png'))
+print(repr(img))
###############################################################################
-# Note the dtype there - float32. Matplotlib has rescaled the 8 bit
-# data from each channel to floating point data between 0.0 and 1.0. As
-# a side note, the only datatype that Pillow can work with is uint8.
-# Matplotlib plotting can handle float32 and uint8, but image
-# reading/writing for any format other than PNG is limited to uint8
-# data. Why 8 bits? Most displays can only render 8 bits per channel
-# worth of color gradation. Why can they only render 8 bits/channel?
-# Because that's about all the human eye can see. More here (from a
-# photography standpoint): `Luminous Landscape bit depth tutorial
-# `_.
-#
# Each inner list represents a pixel. Here, with an RGB image, there
# are 3 values. Since it's a black and white image, R, G, and B are all
# similar. An RGBA (where A is alpha, or transparency) has 4 values
@@ -186,7 +178,7 @@
# interesting regions is the histogram. To create a histogram of our
# image data, we use the :func:`~matplotlib.pyplot.hist` function.
-plt.hist(lum_img.ravel(), bins=256, range=(0.0, 1.0), fc='k', ec='k')
+plt.hist(lum_img.ravel(), bins=range(256), fc='k', ec='k')
###############################################################################
# Most often, the "interesting" part of the image is around the peak,
@@ -194,29 +186,23 @@
# below the peak. In our histogram, it looks like there's not much
# useful information in the high end (not many white things in the
# image). Let's adjust the upper limit, so that we effectively "zoom in
-# on" part of the histogram. We do this by passing the clim argument to
-# imshow. You could also do this by calling the
-# :meth:`~matplotlib.cm.ScalarMappable.set_clim` method of the image plot
-# object, but make sure that you do so in the same cell as your plot
-# command when working with the Jupyter Notebook - it will not change
-# plots from earlier cells.
+# on" part of the histogram. We do this by setting *clim*, the colormap
+# limits.
#
-# You can specify the clim in the call to ``plot``.
+# This can be done by passing a *clim* keyword argument in the call to
+# ``imshow``.
-imgplot = plt.imshow(lum_img, clim=(0.0, 0.7))
+plt.imshow(lum_img, clim=(0, 175))
###############################################################################
-# You can also specify the clim using the returned object
-fig = plt.figure()
-ax = fig.add_subplot(1, 2, 1)
-imgplot = plt.imshow(lum_img)
-ax.set_title('Before')
-plt.colorbar(ticks=[0.1, 0.3, 0.5, 0.7], orientation='horizontal')
-ax = fig.add_subplot(1, 2, 2)
+# This can also be done by calling the
+# :meth:`~matplotlib.cm.ScalarMappable.set_clim` method of the returned image
+# plot object, but make sure that you do so in the same cell as your plot
+# command when working with the Jupyter Notebook - it will not change
+# plots from earlier cells.
+
imgplot = plt.imshow(lum_img)
-imgplot.set_clim(0.0, 0.7)
-ax.set_title('After')
-plt.colorbar(ticks=[0.1, 0.3, 0.5, 0.7], orientation='horizontal')
+imgplot.set_clim(0, 175)
###############################################################################
# .. _Interpolation:
@@ -240,19 +226,17 @@
# We'll use the Pillow library that we used to load the image also to resize
# the image.
-from PIL import Image
-
img = Image.open('../../doc/_static/stinkbug.png')
img.thumbnail((64, 64)) # resizes image in-place
imgplot = plt.imshow(img)
###############################################################################
-# Here we have the default interpolation, bilinear, since we did not
+# Here we use the default interpolation ("nearest"), since we did not
# give :func:`~matplotlib.pyplot.imshow` any interpolation argument.
#
-# Let's try some others. Here's "nearest", which does no interpolation.
+# Let's try some others. Here's "bilinear":
-imgplot = plt.imshow(img, interpolation="nearest")
+imgplot = plt.imshow(img, interpolation="bilinear")
###############################################################################
# and bicubic:
diff --git a/tutorials/introductory/lifecycle.py b/tutorials/introductory/lifecycle.py
index b0181b4df7b7..da976a39de3e 100644
--- a/tutorials/introductory/lifecycle.py
+++ b/tutorials/introductory/lifecycle.py
@@ -26,7 +26,7 @@
In the explicit object-oriented (OO) interface we directly utilize instances of
:class:`axes.Axes` to build up the visualization in an instance of
:class:`figure.Figure`. In the implicit interface, inspired by and modeled on
-MATLAB, uses an global state-based interface which is is encapsulated in the
+MATLAB, we use a global state-based interface which is encapsulated in the
:mod:`.pyplot` module to plot to the "current Axes". See the :doc:`pyplot
tutorials ` for a more in-depth look at the
pyplot interface.
@@ -34,16 +34,16 @@
Most of the terms are straightforward but the main thing to remember
is that:
-* The Figure is the final image that may contain 1 or more Axes.
-* The Axes represent an individual plot (don't confuse this with the word
- "axis", which refers to the x/y axis of a plot).
+* The `.Figure` is the final image, and may contain one or more `~.axes.Axes`.
+* The `~.axes.Axes` represents an individual plot (not to be confused with
+ `~.axis.Axis`, which refers to the x/y axis of a plot).
We call methods that do the plotting directly from the Axes, which gives
us much more flexibility and power in customizing our plot.
.. note::
- In general prefer the explicit interface over the implicit pyplot interface
+ In general, use the explicit interface over the implicit pyplot interface
for plotting.
Our data
diff --git a/tutorials/introductory/pyplot.py b/tutorials/introductory/pyplot.py
index ebe49df9d3b0..9b15a956efb8 100644
--- a/tutorials/introductory/pyplot.py
+++ b/tutorials/introductory/pyplot.py
@@ -295,8 +295,10 @@ def f(t):
# plt.figure(2) # a second figure
# plt.plot([4, 5, 6]) # creates a subplot() by default
#
-# plt.figure(1) # figure 1 current; subplot(212) still current
-# plt.subplot(211) # make subplot(211) in figure1 current
+# plt.figure(1) # first figure current;
+# # subplot(212) still current
+# plt.subplot(211) # make subplot(211) in the first figure
+# # current
# plt.title('Easy as 1, 2, 3') # subplot 211 title
#
# You can clear the current figure with `~.pyplot.clf`
diff --git a/tutorials/introductory/quick_start.py b/tutorials/introductory/quick_start.py
index 42edbc03b10f..882d1a23737a 100644
--- a/tutorials/introductory/quick_start.py
+++ b/tutorials/introductory/quick_start.py
@@ -28,7 +28,7 @@
# `.Axes.plot` to draw some data on the Axes:
fig, ax = plt.subplots() # Create a figure containing a single axes.
-ax.plot([1, 2, 3, 4], [1, 4, 2, 3]); # Plot some data on the axes.
+ax.plot([1, 2, 3, 4], [1, 4, 2, 3]) # Plot some data on the axes.
###############################################################################
# .. _figure_parts:
@@ -126,7 +126,7 @@
fig, ax = plt.subplots(figsize=(5, 2.7), layout='constrained')
ax.scatter('a', 'b', c='c', s='d', data=data)
ax.set_xlabel('entry a')
-ax.set_ylabel('entry b');
+ax.set_ylabel('entry b')
##############################################################################
# .. _coding_styles:
@@ -159,7 +159,7 @@
ax.set_xlabel('x label') # Add an x-label to the axes.
ax.set_ylabel('y label') # Add a y-label to the axes.
ax.set_title("Simple Plot") # Add a title to the axes.
-ax.legend(); # Add a legend.
+ax.legend() # Add a legend.
###############################################################################
# or the pyplot-style:
@@ -173,7 +173,7 @@
plt.xlabel('x label')
plt.ylabel('y label')
plt.title("Simple Plot")
-plt.legend();
+plt.legend()
###############################################################################
# (In addition, there is a third approach, for the case when embedding
@@ -213,7 +213,7 @@ def my_plotter(ax, data1, data2, param_dict):
data1, data2, data3, data4 = np.random.randn(4, 100) # make 4 random data sets
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(5, 2.7))
my_plotter(ax1, data1, data2, {'marker': 'x'})
-my_plotter(ax2, data3, data4, {'marker': 'o'});
+my_plotter(ax2, data3, data4, {'marker': 'o'})
###############################################################################
# Note that if you want to install these as a python package, or any other
@@ -235,7 +235,7 @@ def my_plotter(ax, data1, data2, param_dict):
x = np.arange(len(data1))
ax.plot(x, np.cumsum(data1), color='blue', linewidth=3, linestyle='--')
l, = ax.plot(x, np.cumsum(data2), color='orange', linewidth=2)
-l.set_linestyle(':');
+l.set_linestyle(':')
###############################################################################
# Colors
@@ -248,7 +248,7 @@ def my_plotter(ax, data1, data2, param_dict):
# from the interior:
fig, ax = plt.subplots(figsize=(5, 2.7))
-ax.scatter(data1, data2, s=50, facecolor='C0', edgecolor='k');
+ax.scatter(data1, data2, s=50, facecolor='C0', edgecolor='k')
###############################################################################
# Linewidths, linestyles, and markersizes
@@ -272,7 +272,7 @@ def my_plotter(ax, data1, data2, param_dict):
ax.plot(data2, 'd', label='data2')
ax.plot(data3, 'v', label='data3')
ax.plot(data4, 's', label='data4')
-ax.legend();
+ax.legend()
###############################################################################
#
@@ -298,7 +298,7 @@ def my_plotter(ax, data1, data2, param_dict):
ax.set_title('Aardvark lengths\n (not really)')
ax.text(75, .025, r'$\mu=115,\ \sigma=15$')
ax.axis([55, 175, 0, 0.03])
-ax.grid(True);
+ax.grid(True)
###############################################################################
# All of the `~.Axes.text` functions return a `matplotlib.text.Text`
@@ -342,7 +342,7 @@ def my_plotter(ax, data1, data2, param_dict):
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05))
-ax.set_ylim(-2, 2);
+ax.set_ylim(-2, 2)
###############################################################################
# In this basic example, both *xy* and *xytext* are in data coordinates.
@@ -360,7 +360,7 @@ def my_plotter(ax, data1, data2, param_dict):
ax.plot(np.arange(len(data1)), data1, label='data1')
ax.plot(np.arange(len(data2)), data2, label='data2')
ax.plot(np.arange(len(data3)), data3, 'd', label='data3')
-ax.legend();
+ax.legend()
##############################################################################
# Legends in Matplotlib are quite flexible in layout, placement, and what
@@ -391,7 +391,7 @@ def my_plotter(ax, data1, data2, param_dict):
axs[0].plot(xdata, data)
axs[1].set_yscale('log')
-axs[1].plot(xdata, data);
+axs[1].plot(xdata, data)
##############################################################################
# The scale sets the mapping from data values to spacing along the Axis. This
@@ -413,7 +413,7 @@ def my_plotter(ax, data1, data2, param_dict):
axs[1].plot(xdata, data1)
axs[1].set_xticks(np.arange(0, 100, 30), ['zero', '30', 'sixty', '90'])
axs[1].set_yticks([-1.5, 0, 1.5]) # note that we don't need to specify labels
-axs[1].set_title('Manual ticks');
+axs[1].set_title('Manual ticks')
##############################################################################
# Different scales can have different locators and formatters; for instance
@@ -435,7 +435,7 @@ def my_plotter(ax, data1, data2, param_dict):
data = np.cumsum(np.random.randn(len(dates)))
ax.plot(dates, data)
cdf = mpl.dates.ConciseDateFormatter(ax.xaxis.get_major_locator())
-ax.xaxis.set_major_formatter(cdf);
+ax.xaxis.set_major_formatter(cdf)
##############################################################################
# For more information see the date examples
@@ -447,7 +447,7 @@ def my_plotter(ax, data1, data2, param_dict):
fig, ax = plt.subplots(figsize=(5, 2.7), layout='constrained')
categories = ['turnips', 'rutabaga', 'cucumber', 'pumpkins']
-ax.bar(categories, np.random.rand(len(categories)));
+ax.bar(categories, np.random.rand(len(categories)))
##############################################################################
# One caveat about categorical plotting is that some methods of parsing
@@ -561,7 +561,7 @@ def my_plotter(ax, data1, data2, param_dict):
['lowleft', 'right']], layout='constrained')
axd['upleft'].set_title('upleft')
axd['lowleft'].set_title('lowleft')
-axd['right'].set_title('right');
+axd['right'].set_title('right')
###############################################################################
# Matplotlib has quite sophisticated tools for arranging Axes: See
diff --git a/tutorials/text/annotations.py b/tutorials/text/annotations.py
index df2dda7ad580..6dcdc5e871b0 100644
--- a/tutorials/text/annotations.py
+++ b/tutorials/text/annotations.py
@@ -7,9 +7,6 @@
.. contents:: Table of Contents
:depth: 3
"""
-
-from matplotlib import pyplot as plt
-
###############################################################################
# .. _annotations-tutorial:
#
@@ -23,12 +20,22 @@
# to make annotations easy. In an annotation, there are two points to
# consider: the location being annotated represented by the argument
# *xy* and the location of the text *xytext*. Both of these
-# arguments are ``(x, y)`` tuples.
-#
-# .. figure:: ../../gallery/pyplots/images/sphx_glr_annotation_basic_001.png
-# :target: ../../gallery/pyplots/annotation_basic.html
-# :align: center
-#
+# arguments are ``(x, y)`` tuples:
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+t = np.arange(0.0, 5.0, 0.01)
+s = np.cos(2*np.pi*t)
+line, = ax.plot(t, s, lw=2)
+
+ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
+ arrowprops=dict(facecolor='black', shrink=0.05))
+ax.set_ylim(-2, 2)
+
+###############################################################################
# In this example, both the *xy* (arrow tip) and *xytext* locations
# (text location) are in data coordinates. There are a variety of other
# coordinate systems one can choose -- you can specify the coordinate
@@ -47,22 +54,50 @@
# 'data' use the axes data coordinate system
# ================== ========================================================
#
-# For example to place the text coordinates in fractional axes
-# coordinates, one could do::
+# The following strings are also valid arguments for *textcoords*
#
-# ax.annotate('local max', xy=(3, 1), xycoords='data',
-# xytext=(0.8, 0.95), textcoords='axes fraction',
-# arrowprops=dict(facecolor='black', shrink=0.05),
-# horizontalalignment='right', verticalalignment='top',
-# )
+# ================== ========================================================
+# argument coordinate system
+# ================== ========================================================
+# 'offset points' offset (in points) from the xy value
+# 'offset pixels' offset (in pixels) from the xy value
+# ================== ========================================================
#
# For physical coordinate systems (points or pixels) the origin is the
-# bottom-left of the figure or axes.
+# bottom-left of the figure or axes. Points are
+# `typographic points <https://en.wikipedia.org/wiki/Point_(typography)>`_
+# meaning that they are a physical unit measuring 1/72 of an inch. Points and
+# pixels are discussed in further detail in :ref:`transforms-fig-scale-dpi`.
#
-# Optionally, you can enable drawing of an arrow from the text to the annotated
-# point by giving a dictionary of arrow properties in the optional keyword
-# argument *arrowprops*.
+# .. _annotation-data:
+#
+# Annotating data
+# ~~~~~~~~~~~~~~~
+#
+# This example places the text coordinates in fractional axes coordinates:
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+t = np.arange(0.0, 5.0, 0.01)
+s = np.cos(2*np.pi*t)
+line, = ax.plot(t, s, lw=2)
+
+ax.annotate('local max', xy=(2, 1), xycoords='data',
+ xytext=(0.01, .99), textcoords='axes fraction',
+ va='top', ha='left',
+ arrowprops=dict(facecolor='black', shrink=0.05))
+ax.set_ylim(-2, 2)
+
+###################################################################
#
+# .. _annotation-with-arrow:
+#
+# Annotating with arrows
+# ~~~~~~~~~~~~~~~~~~~~~~
+#
+# You can enable drawing of an arrow from the text to the annotated point
+# by giving a dictionary of arrow properties in the optional keyword
+# argument *arrowprops*.
#
# ==================== =====================================================
# *arrowprops* key description
@@ -77,59 +112,76 @@
# e.g., ``facecolor``
# ==================== =====================================================
#
-#
-# In the example below, the *xy* point is in native coordinates
-# (*xycoords* defaults to 'data'). For a polar axes, this is in
-# (theta, radius) space. The text in this example is placed in the
+# In the example below, the *xy* point is in the data coordinate system
+# since *xycoords* defaults to 'data'. For a polar axes, this is in
+# (theta, radius) space. The text in this example is placed in the
# fractional figure coordinate system. :class:`matplotlib.text.Text`
# keyword arguments like *horizontalalignment*, *verticalalignment* and
# *fontsize* are passed from `~matplotlib.axes.Axes.annotate` to the
# ``Text`` instance.
+
+fig = plt.figure()
+ax = fig.add_subplot(projection='polar')
+r = np.arange(0, 1, 0.001)
+theta = 2 * 2*np.pi * r
+line, = ax.plot(theta, r, color='#ee8d18', lw=3)
+
+ind = 800
+thisr, thistheta = r[ind], theta[ind]
+ax.plot([thistheta], [thisr], 'o')
+ax.annotate('a polar annotation',
+ xy=(thistheta, thisr), # theta, radius
+ xytext=(0.05, 0.05), # fraction, fraction
+ textcoords='figure fraction',
+ arrowprops=dict(facecolor='black', shrink=0.05),
+ horizontalalignment='left',
+ verticalalignment='bottom')
+
+###############################################################################
+# For more on plotting with arrows, see :ref:`annotation_with_custom_arrow`
#
-# .. figure:: ../../gallery/pyplots/images/sphx_glr_annotation_polar_001.png
-# :target: ../../gallery/pyplots/annotation_polar.html
-# :align: center
-#
-# For more on all the wild and wonderful things you can do with
-# annotations, including fancy arrows, see :ref:`plotting-guide-annotation`
-# and :doc:`/gallery/text_labels_and_annotations/annotation_demo`.
-#
-#
-# Do not proceed unless you have already read :ref:`annotations-tutorial`,
-# :func:`~matplotlib.pyplot.text` and :func:`~matplotlib.pyplot.annotate`!
+# .. _annotations-offset-text:
#
+# Placing text annotations relative to data
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Annotations can be positioned at a relative offset to the *xy* input to
+# annotation by setting the *textcoords* keyword argument to ``'offset points'``
+# or ``'offset pixels'``.
+
+fig, ax = plt.subplots(figsize=(3, 3))
+x = [1, 3, 5, 7, 9]
+y = [2, 4, 6, 8, 10]
+annotations = ["A", "B", "C", "D", "E"]
+ax.scatter(x, y, s=20)
+
+for xi, yi, text in zip(x, y, annotations):
+ ax.annotate(text,
+ xy=(xi, yi), xycoords='data',
+ xytext=(1.5, 1.5), textcoords='offset points')
+
+###############################################################################
+# The annotations are offset 1.5 points (1.5*1/72 inches) from the *xy* values.
#
# .. _plotting-guide-annotation:
#
-# Advanced Annotations
-# --------------------
+# Advanced annotation
+# -------------------
#
-# Annotating with Text with Box
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# We recommend reading :ref:`annotations-tutorial`, :func:`~matplotlib.pyplot.text`
+# and :func:`~matplotlib.pyplot.annotate` before reading this section.
#
-# Let's start with a simple example.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_text_arrow_001.png
-# :target: ../../gallery/userdemo/annotate_text_arrow.html
-# :align: center
+# Annotating with boxed text
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# `~.Axes.text` takes a *bbox* keyword argument, which draws a box around the
-# text::
-#
-# t = ax.text(
-# 0, 0, "Direction", ha="center", va="center", rotation=45, size=15,
-# bbox=dict(boxstyle="rarrow,pad=0.3", fc="cyan", ec="b", lw=2))
-#
-# The patch object associated with the text can be accessed by::
-#
-# bb = t.get_bbox_patch()
-#
-# The return value is a `.FancyBboxPatch`; patch properties
-# (facecolor, edgewidth, etc.) can be accessed and modified as usual.
-# `.FancyBboxPatch.set_boxstyle` sets the box shape::
-#
-# bb.set_boxstyle("rarrow", pad=0.6)
-#
+# text:
+
+fig, ax = plt.subplots(figsize=(5, 5))
+t = ax.text(0, 0, "Direction",
+ ha="center", va="center", rotation=45, size=15,
+ bbox=dict(boxstyle="rarrow,pad=0.3", fc="cyan", ec="b", lw=2))
+
+###############################################################################
# The arguments are the name of the box style with its attributes as
# keyword arguments. Currently, following box styles are implemented.
#
@@ -151,43 +203,82 @@
# :target: ../../gallery/shapes_and_collections/fancybox_demo.html
# :align: center
#
-# Note that the attribute arguments can be specified within the style
-# name with separating comma (this form can be used as "boxstyle" value
-# of bbox argument when initializing the text instance) ::
+# The patch object (box) associated with the text can be accessed using::
#
-# bb.set_boxstyle("rarrow,pad=0.6")
+# bb = t.get_bbox_patch()
#
-# Annotating with Arrow
-# ~~~~~~~~~~~~~~~~~~~~~
+# The return value is a `.FancyBboxPatch`; patch properties
+# (facecolor, edgewidth, etc.) can be accessed and modified as usual.
+# `.FancyBboxPatch.set_boxstyle` sets the box shape::
#
-# `~.Axes.annotate` draws an arrow connecting two points in an Axes::
+# bb.set_boxstyle("rarrow", pad=0.6)
#
-# ax.annotate("Annotation",
-# xy=(x1, y1), xycoords='data',
-# xytext=(x2, y2), textcoords='offset points',
-# )
+# The attribute arguments can also be specified within the style
+# name with separating comma::
#
-# This annotates a point at *xy* in the given coordinate (*xycoords*)
-# with the text at *xytext* given in *textcoords*. Often, the
-# annotated point is specified in the *data* coordinate and the annotating
-# text in *offset points*.
-# See `~.Axes.annotate` for available coordinate systems.
+# bb.set_boxstyle("rarrow, pad=0.6")
#
-# An arrow connecting *xy* to *xytext* can be optionally drawn by
-# specifying the *arrowprops* argument. To draw only an arrow, use
-# empty string as the first argument. ::
#
-# ax.annotate("",
-# xy=(0.2, 0.2), xycoords='data',
-# xytext=(0.8, 0.8), textcoords='data',
-# arrowprops=dict(arrowstyle="->",
-# connectionstyle="arc3"),
-# )
+# Defining custom box styles
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple01_001.png
-# :target: ../../gallery/userdemo/annotate_simple01.html
-# :align: center
+# You can use a custom box style. The value for the ``boxstyle`` can be a
+# callable object in the following forms:
+
+from matplotlib.path import Path
+
+
+def custom_box_style(x0, y0, width, height, mutation_size):
+ """
+ Given the location and size of the box, return the path of the box around
+ it. Rotation is automatically taken care of.
+
+ Parameters
+ ----------
+ x0, y0, width, height : float
+ Box location and size.
+ mutation_size : float
+ Mutation reference scale, typically the text font size.
+ """
+ # padding
+ mypad = 0.3
+ pad = mutation_size * mypad
+ # width and height with padding added.
+ width = width + 2 * pad
+ height = height + 2 * pad
+ # boundary of the padded box
+ x0, y0 = x0 - pad, y0 - pad
+ x1, y1 = x0 + width, y0 + height
+ # return the new path
+ return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1),
+ (x0-pad, (y0+y1)/2), (x0, y0), (x0, y0)],
+ closed=True)
+
+fig, ax = plt.subplots(figsize=(3, 3))
+ax.text(0.5, 0.5, "Test", size=30, va="center", ha="center", rotation=30,
+ bbox=dict(boxstyle=custom_box_style, alpha=0.2))
+
+###############################################################################
+# See also :doc:`/gallery/userdemo/custom_boxstyle01`. Similarly, you can define a
+# custom `.ConnectionStyle` and a custom `.ArrowStyle`. View the source code at
+# `.patches` to learn how each class is defined.
#
+# .. _annotation_with_custom_arrow:
+#
+# Customizing annotation arrows
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# An arrow connecting *xy* to *xytext* can be optionally drawn by
+# specifying the *arrowprops* argument. To draw only an arrow, use
+# empty string as the first argument:
+
+fig, ax = plt.subplots(figsize=(3, 3))
+ax.annotate("",
+ xy=(0.2, 0.2), xycoords='data',
+ xytext=(0.8, 0.8), textcoords='data',
+ arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
+
+###############################################################################
# The arrow is drawn as follows:
#
# 1. A path connecting the two points is created, as specified by the
@@ -222,7 +313,7 @@
#
# The behavior of each connection style is (limitedly) demonstrated in the
# example below. (Warning: The behavior of the ``bar`` style is currently not
-# well defined, it may be changed in the future).
+# well defined and may be changed in the future).
#
# .. figure:: ../../gallery/userdemo/images/sphx_glr_connectionstyle_demo_001.png
# :target: ../../gallery/userdemo/connectionstyle_demo.html
@@ -257,29 +348,62 @@
# For these arrow styles, you must use the "angle3" or "arc3" connection
# style.
#
-# If the annotation string is given, the patchA is set to the bbox patch
+# If the annotation string is given, the patch is set to the bbox patch
# of the text by default.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple02_001.png
-# :target: ../../gallery/userdemo/annotate_simple02.html
-# :align: center
-#
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+ax.annotate("Test",
+ xy=(0.2, 0.2), xycoords='data',
+ xytext=(0.8, 0.8), textcoords='data',
+ size=20, va="center", ha="center",
+ arrowprops=dict(arrowstyle="simple",
+ connectionstyle="arc3,rad=-0.2"))
+
+##############################################################################
# As with `~.Axes.text`, a box around the text can be drawn using the *bbox*
# argument.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple03_001.png
-# :target: ../../gallery/userdemo/annotate_simple03.html
-# :align: center
-#
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+ann = ax.annotate("Test",
+ xy=(0.2, 0.2), xycoords='data',
+ xytext=(0.8, 0.8), textcoords='data',
+ size=20, va="center", ha="center",
+ bbox=dict(boxstyle="round4", fc="w"),
+ arrowprops=dict(arrowstyle="-|>",
+ connectionstyle="arc3,rad=-0.2",
+ fc="w"))
+
+##############################################################################
# By default, the starting point is set to the center of the text
# extent. This can be adjusted with ``relpos`` key value. The values
# are normalized to the extent of the text. For example, (0, 0) means
# lower-left corner and (1, 1) means top-right.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple04_001.png
-# :target: ../../gallery/userdemo/annotate_simple04.html
-# :align: center
-#
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+ann = ax.annotate("Test",
+ xy=(0.2, 0.2), xycoords='data',
+ xytext=(0.8, 0.8), textcoords='data',
+ size=20, va="center", ha="center",
+ bbox=dict(boxstyle="round4", fc="w"),
+ arrowprops=dict(arrowstyle="-|>",
+ connectionstyle="arc3,rad=0.2",
+ relpos=(0., 0.),
+ fc="w"))
+
+ann = ax.annotate("Test",
+ xy=(0.2, 0.2), xycoords='data',
+ xytext=(0.8, 0.8), textcoords='data',
+ size=20, va="center", ha="center",
+ bbox=dict(boxstyle="round4", fc="w"),
+ arrowprops=dict(arrowstyle="-|>",
+ connectionstyle="arc3,rad=-0.2",
+ relpos=(1., 0.),
+ fc="w"))
+
+##############################################################################
# Placing Artist at anchored Axes locations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
@@ -291,9 +415,9 @@
from matplotlib.offsetbox import AnchoredText
-fig, ax = plt.subplots()
-at = AnchoredText(
- "Figure 1a", prop=dict(size=15), frameon=True, loc='upper left')
+fig, ax = plt.subplots(figsize=(3, 3))
+at = AnchoredText("Figure 1a",
+ prop=dict(size=15), frameon=True, loc='upper left')
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
@@ -318,7 +442,7 @@
from matplotlib.patches import Circle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
-fig, ax = plt.subplots()
+fig, ax = plt.subplots(figsize=(3, 3))
ada = AnchoredDrawingArea(40, 20, 0, 0,
loc='upper right', pad=0., frameon=False)
p1 = Circle((10, 10), 10)
@@ -343,166 +467,202 @@
from matplotlib.patches import Ellipse
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredAuxTransformBox
-fig, ax = plt.subplots()
+fig, ax = plt.subplots(figsize=(3, 3))
box = AnchoredAuxTransformBox(ax.transData, loc='upper left')
el = Ellipse((0, 0), width=0.1, height=0.4, angle=30) # in data coordinates!
box.drawing_area.add_artist(el)
ax.add_artist(box)
###############################################################################
-# As in the legend, the bbox_to_anchor argument can be set. Using the
-# HPacker and VPacker, you can have an arrangement(?) of artist as in the
-# legend (as a matter of fact, this is how the legend is created).
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_anchored_box04_001.png
-# :target: ../../gallery/userdemo/anchored_box04.html
-# :align: center
-#
-# Note that unlike the legend, the ``bbox_transform`` is set
-# to IdentityTransform by default.
-#
-# Coordinate systems for Annotations
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# Matplotlib Annotations support several types of coordinates. Some are
-# described in :ref:`annotations-tutorial`; more advanced options are
-#
-# 1. A `.Transform` instance. For example, ::
-#
-# ax.annotate("Test", xy=(0.5, 0.5), xycoords=ax.transAxes)
-#
-# is identical to ::
+# Another method of anchoring an artist relative to a parent axes or anchor
+# point is via the *bbox_to_anchor* argument of `.AnchoredOffsetbox`. This
+# artist can then be automatically positioned relative to another artist using
+# `.HPacker` and `.VPacker`:
+
+from matplotlib.offsetbox import (AnchoredOffsetbox, DrawingArea, HPacker,
+ TextArea)
+
+fig, ax = plt.subplots(figsize=(3, 3))
+
+box1 = TextArea(" Test: ", textprops=dict(color="k"))
+box2 = DrawingArea(60, 20, 0, 0)
+
+el1 = Ellipse((10, 10), width=16, height=5, angle=30, fc="r")
+el2 = Ellipse((30, 10), width=16, height=5, angle=170, fc="g")
+el3 = Ellipse((50, 10), width=16, height=5, angle=230, fc="b")
+box2.add_artist(el1)
+box2.add_artist(el2)
+box2.add_artist(el3)
+
+box = HPacker(children=[box1, box2],
+ align="center",
+ pad=0, sep=5)
+
+anchored_box = AnchoredOffsetbox(loc='lower left',
+ child=box, pad=0.,
+ frameon=True,
+ bbox_to_anchor=(0., 1.02),
+ bbox_transform=ax.transAxes,
+ borderpad=0.,)
+
+ax.add_artist(anchored_box)
+fig.subplots_adjust(top=0.8)
+
+###############################################################################
+# Note that, unlike in `.Legend`, the ``bbox_transform`` is set to
+# `.IdentityTransform` by default.
#
-# ax.annotate("Test", xy=(0.5, 0.5), xycoords="axes fraction")
+# .. _annotating_coordinate_systems:
#
-# This allows annotating a point in another axes::
+# Coordinate systems for annotations
+# ----------------------------------
#
-# fig, (ax1, ax2) = plt.subplots(1, 2)
-# ax2.annotate("Test", xy=(0.5, 0.5), xycoords=ax1.transData,
-# xytext=(0.5, 0.5), textcoords=ax2.transData,
-# arrowprops=dict(arrowstyle="->"))
+# Matplotlib Annotations support several types of coordinate systems. The
+# examples in :ref:`annotations-tutorial` used the ``data`` coordinate system;
+# some other, more advanced options are:
#
+# 1. A `.Transform` instance. For more information on transforms, see the
+# :doc:`../advanced/transforms_tutorial`. For example, the
+# ``Axes.transAxes`` transform positions the annotation relative to the Axes
+# coordinates and using it is therefore identical to setting the
+# coordinate system to "axes fraction":
+
+fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
+ax1.annotate("Test", xy=(0.5, 0.5), xycoords=ax1.transAxes)
+ax2.annotate("Test", xy=(0.5, 0.5), xycoords="axes fraction")
+
+###############################################################################
+# Another commonly used `.Transform` instance is ``Axes.transData``. This
+# transform is the coordinate system of the data plotted in the axes. In this
+# example, it is used to draw an arrow from a point in *ax1* to text in *ax2*,
+# where the point and text are positioned relative to the coordinates of *ax1*
+# and *ax2* respectively:
+
+fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
+
+ax1.annotate("Test1", xy=(0.5, 0.5), xycoords="axes fraction")
+ax2.annotate("Test2",
+ xy=(0.5, 0.5), xycoords=ax1.transData,
+ xytext=(0.5, 0.5), textcoords=ax2.transData,
+ arrowprops=dict(arrowstyle="->"))
+
+#############################################################################
# 2. An `.Artist` instance. The *xy* value (or *xytext*) is interpreted as a
-# fractional coordinate of the bbox (return value of *get_window_extent*) of
-# the artist::
-#
-# an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data",
-# va="center", ha="center",
-# bbox=dict(boxstyle="round", fc="w"))
-# an2 = ax.annotate("Test 2", xy=(1, 0.5), xycoords=an1, # (1, 0.5) of the an1's bbox
-# xytext=(30, 0), textcoords="offset points",
-# va="center", ha="left",
-# bbox=dict(boxstyle="round", fc="w"),
-# arrowprops=dict(arrowstyle="->"))
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple_coord01_001.png
-# :target: ../../gallery/userdemo/annotate_simple_coord01.html
-# :align: center
-#
-# Note that you must ensure that the extent of the coordinate artist (*an1* in
-# above example) is determined before *an2* gets drawn. Usually, this means
-# that *an2* needs to be drawn after *an1*.
+# fractional coordinate of the bounding box (bbox) of the artist:
+
+fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3, 3))
+an1 = ax.annotate("Test 1",
+ xy=(0.5, 0.5), xycoords="data",
+ va="center", ha="center",
+ bbox=dict(boxstyle="round", fc="w"))
+
+an2 = ax.annotate("Test 2",
+ xy=(1, 0.5), xycoords=an1, # (1, 0.5) of an1's bbox
+ xytext=(30, 0), textcoords="offset points",
+ va="center", ha="left",
+ bbox=dict(boxstyle="round", fc="w"),
+ arrowprops=dict(arrowstyle="->"))
+
+###############################################################################
+# Note that you must ensure that the extent of the coordinate artist (*an1* in
+# this example) is determined before *an2* gets drawn. Usually, this means
+# that *an2* needs to be drawn after *an1*. The base class for all bounding
+# boxes is `.BboxBase`.
#
# 3. A callable object that takes the renderer instance as single argument, and
-# returns either a `.Transform` or a `.BboxBase`. The return value is then
-# handled as in (1), for transforms, or in (2), for bboxes. For example, ::
-#
-# an2 = ax.annotate("Test 2", xy=(1, 0.5), xycoords=an1,
-# xytext=(30, 0), textcoords="offset points")
-#
-# is identical to::
-#
-# an2 = ax.annotate("Test 2", xy=(1, 0.5), xycoords=an1.get_window_extent,
-# xytext=(30, 0), textcoords="offset points")
-#
-# 4. A pair of coordinate specifications -- the first for the x-coordinate, and
-# the second is for the y-coordinate; e.g. ::
-#
-# annotate("Test", xy=(0.5, 1), xycoords=("data", "axes fraction"))
-#
-# Here, 0.5 is in data coordinates, and 1 is in normalized axes coordinates.
-# Each of the coordinate specifications can also be an artist or a transform.
-# For example,
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple_coord02_001.png
-# :target: ../../gallery/userdemo/annotate_simple_coord02.html
-# :align: center
-#
+# returns either a `.Transform` or a `.BboxBase`. For example, the return
+# value of `.Artist.get_window_extent` is a bbox, so this method is identical to
+# (2), passing in the artist:
+
+fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3, 3))
+an1 = ax.annotate("Test 1",
+ xy=(0.5, 0.5), xycoords="data",
+ va="center", ha="center",
+ bbox=dict(boxstyle="round", fc="w"))
+
+an2 = ax.annotate("Test 2",
+ xy=(1, 0.5), xycoords=an1.get_window_extent,
+ xytext=(30, 0), textcoords="offset points",
+ va="center", ha="left",
+ bbox=dict(boxstyle="round", fc="w"),
+ arrowprops=dict(arrowstyle="->"))
+
+###############################################################################
+# `.Axes.get_window_extent` returns the bounding box of the Axes object and is
+# therefore identical to setting the coordinate system to axes fraction:
+
+fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
+
+an1 = ax1.annotate("Test1", xy=(0.5, 0.5), xycoords="axes fraction")
+an2 = ax2.annotate("Test 2", xy=(0.5, 0.5), xycoords=ax2.get_window_extent)
+
+###############################################################################
+# 4. A blended pair of coordinate specifications -- the first for the
+# x-coordinate, and the second is for the y-coordinate. For example, x=0.5 is
+# in data coordinates, and y=1 is in normalized axes coordinates:
+
+fig, ax = plt.subplots(figsize=(3, 3))
+ax.annotate("Test", xy=(0.5, 1), xycoords=("data", "axes fraction"))
+ax.axvline(x=.5, color='lightgray')
+ax.set(xlim=(0, 2), ylim=(1, 2))
+
+###############################################################################
# 5. Sometimes, you want your annotation with some "offset points", not from the
-# annotated point but from some other point. `.text.OffsetFrom` is a helper
-# for such cases.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_annotate_simple_coord03_001.png
-# :target: ../../gallery/userdemo/annotate_simple_coord03.html
-# :align: center
-#
-# You may take a look at this example
-# :doc:`/gallery/text_labels_and_annotations/annotation_demo`.
-#
+# annotated point but from some other point or artist. `.text.OffsetFrom` is
+# a helper for such cases.
+
+from matplotlib.text import OffsetFrom
+
+fig, ax = plt.subplots(figsize=(3, 3))
+an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data",
+ va="center", ha="center",
+ bbox=dict(boxstyle="round", fc="w"))
+
+offset_from = OffsetFrom(an1, (0.5, 0))
+an2 = ax.annotate("Test 2", xy=(0.1, 0.1), xycoords="data",
+ xytext=(0, -10), textcoords=offset_from,
+ # xytext is offset points from "xy=(0.5, 0), xycoords=an1"
+ va="top", ha="center",
+ bbox=dict(boxstyle="round", fc="w"),
+ arrowprops=dict(arrowstyle="->"))
+
+###############################################################################
# Using ConnectionPatch
# ~~~~~~~~~~~~~~~~~~~~~
#
# `.ConnectionPatch` is like an annotation without text. While `~.Axes.annotate`
-# is sufficient in most situations, `.ConnectionPatch` is useful when you want to
-# connect points in different axes. ::
-#
-# from matplotlib.patches import ConnectionPatch
-# xy = (0.2, 0.2)
-# con = ConnectionPatch(xyA=xy, coordsA=ax1.transData,
-# xyB=xy, coordsB=ax2.transData)
-# fig.add_artist(con)
-#
-# The above code connects point *xy* in the data coordinates of ``ax1`` to
-# point *xy* in the data coordinates of ``ax2``. Here is a simple example.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_connect_simple01_001.png
-# :target: ../../gallery/userdemo/connect_simple01.html
-# :align: center
-#
-# Here, we added the `.ConnectionPatch` to the *figure* (with `~.Figure.add_artist`)
-# rather than to either axes: this ensures that it is drawn on top of both axes,
-# and is also necessary if using :doc:`constrained_layout
+# is sufficient in most situations, `.ConnectionPatch` is useful when you want
+# to connect points in different axes. For example, here we connect the point
+# *xy* in the data coordinates of ``ax1`` to point *xy* in the data coordinates
+# of ``ax2``:
+
+from matplotlib.patches import ConnectionPatch
+
+fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
+xy = (0.3, 0.2)
+con = ConnectionPatch(xyA=xy, coordsA=ax1.transData,
+ xyB=xy, coordsB=ax2.transData)
+
+fig.add_artist(con)
+
+###############################################################################
+# Here, we added the `.ConnectionPatch` to the *figure*
+# (with `~.Figure.add_artist`) rather than to either axes. This ensures that
+# the ConnectionPatch artist is drawn on top of both axes, and is also necessary
+# when using :doc:`constrained_layout
# </tutorials/intermediate/constrainedlayout_guide>` for positioning the axes.
#
-# Advanced Topics
-# ---------------
-#
# Zoom effect between Axes
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# `mpl_toolkits.axes_grid1.inset_locator` defines some patch classes useful for
-# interconnecting two axes. Understanding the code requires some knowledge of
-# Matplotlib's transform system.
+# interconnecting two axes.
#
# .. figure:: ../../gallery/subplots_axes_and_figures/images/sphx_glr_axes_zoom_effect_001.png
# :target: ../../gallery/subplots_axes_and_figures/axes_zoom_effect.html
# :align: center
#
-# Define Custom BoxStyle
-# ~~~~~~~~~~~~~~~~~~~~~~
-#
-# You can use a custom box style. The value for the ``boxstyle`` can be a
-# callable object in the following forms.::
-#
-# def __call__(self, x0, y0, width, height, mutation_size,
-# aspect_ratio=1.):
-# '''
-# Given the location and size of the box, return the path of
-# the box around it.
-#
-# - *x0*, *y0*, *width*, *height* : location and size of the box
-# - *mutation_size* : a reference scale for the mutation.
-# - *aspect_ratio* : aspect-ratio for the mutation.
-# '''
-# path = ...
-# return path
-#
-# Here is a complete example.
-#
-# .. figure:: ../../gallery/userdemo/images/sphx_glr_custom_boxstyle01_001.png
-# :target: ../../gallery/userdemo/custom_boxstyle01.html
-# :align: center
-#
-# Similarly, you can define a custom ConnectionStyle and a custom ArrowStyle.
-# See the source code of ``lib/matplotlib/patches.py`` and check
-# how each style class is defined.
+# The code for this figure is at
+# :doc:`/gallery/subplots_axes_and_figures/axes_zoom_effect` and
+# familiarity with :doc:`../advanced/transforms_tutorial`
+# is recommended.
diff --git a/tutorials/text/text_intro.py b/tutorials/text/text_intro.py
index a32ddc800d10..1b0e60a37ab1 100644
--- a/tutorials/text/text_intro.py
+++ b/tutorials/text/text_intro.py
@@ -118,7 +118,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.15, left=0.2)
ax.plot(x1, y1)
-ax.set_xlabel('time [s]')
+ax.set_xlabel('Time [s]')
ax.set_ylabel('Damped oscillation [V]')
plt.show()
@@ -131,7 +131,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.15, left=0.2)
ax.plot(x1, y1*10000)
-ax.set_xlabel('time [s]')
+ax.set_xlabel('Time [s]')
ax.set_ylabel('Damped oscillation [V]')
plt.show()
@@ -144,7 +144,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.15, left=0.2)
ax.plot(x1, y1*10000)
-ax.set_xlabel('time [s]')
+ax.set_xlabel('Time [s]')
ax.set_ylabel('Damped oscillation [V]', labelpad=18)
plt.show()
@@ -159,7 +159,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.15, left=0.2)
ax.plot(x1, y1)
-ax.set_xlabel('time [s]', position=(0., 1e6), horizontalalignment='left')
+ax.set_xlabel('Time [s]', position=(0., 1e6), horizontalalignment='left')
ax.set_ylabel('Damped oscillation [V]')
plt.show()
@@ -179,7 +179,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.15, left=0.2)
ax.plot(x1, y1)
-ax.set_xlabel('time [s]', fontsize='large', fontweight='bold')
+ax.set_xlabel('Time [s]', fontsize='large', fontweight='bold')
ax.set_ylabel('Damped oscillation [V]', fontproperties=font)
plt.show()
@@ -191,7 +191,7 @@
fig, ax = plt.subplots(figsize=(5, 3))
fig.subplots_adjust(bottom=0.2, left=0.2)
ax.plot(x1, np.cumsum(y1**2))
-ax.set_xlabel('time [s] \n This was a long experiment')
+ax.set_xlabel('Time [s] \n This was a long experiment')
ax.set_ylabel(r'$\int\ Y^2\ dt\ \ [V^2 s]$')
plt.show()
diff --git a/tutorials/toolkits/axisartist.py b/tutorials/toolkits/axisartist.py
index 2e539cc3f20c..087a4fab20ff 100644
--- a/tutorials/toolkits/axisartist.py
+++ b/tutorials/toolkits/axisartist.py
@@ -519,10 +519,9 @@ def inv_tr(x, y):
ax1 = SubplotHost(fig, 1, 2, 2, grid_helper=grid_helper)
# A parasite axes with given transform
- ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
+ ax2 = ax1.get_aux_axes(tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
- ax1.parasites.append(ax2)
.. figure:: ../../gallery/axisartist/images/sphx_glr_demo_curvelinear_grid_001.png
:target: ../../gallery/axisartist/demo_curvelinear_grid.html