diff --git a/appveyor.yml b/appveyor.yml
index 8f67174b6682..8aff64c12798 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -88,8 +88,8 @@ test_script:
# Now build the thing..
- '%CMD_IN_ENV% python setup.py develop'
# tests
- # for now, just let them pass to get the after_test parts...
- - python tests.py || cmd /c "exit /b 0"
+ - python tests.py
+ - python visual_tests.py
after_test:
# After the tests were a success, build packages (wheels and conda)
@@ -126,6 +126,7 @@ artifacts:
type: zip
on_failure:
+ - python visual_tests.py
- echo zipping images after a failure...
- - 7z a result_images.zip result_images\ >NUL:
+ - 7z a result_images.zip result_images\ |grep -v "Compressing"
- appveyor PushArtifact result_images.zip
diff --git a/lib/matplotlib/backends/backend_ps.py b/lib/matplotlib/backends/backend_ps.py
index a332eb4e72da..71f8ba6a62d9 100644
--- a/lib/matplotlib/backends/backend_ps.py
+++ b/lib/matplotlib/backends/backend_ps.py
@@ -1530,8 +1530,13 @@ def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
with io.open(outfile, 'rb') as fh:
if exit_status:
- raise RuntimeError('ghostscript was not able to process \
- your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
+ output = fh.read()
+ m = "\n".join(["ghostscript was not able to process your image.",
+ "Here is the full report generated by ghostscript:",
+ "",
+ "%s"])
+ # use % to prevent problems with bytes
+ raise RuntimeError(m % output)
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
diff --git a/lib/matplotlib/dviread.py b/lib/matplotlib/dviread.py
index 695e3ea9d10d..03a966559f6a 100644
--- a/lib/matplotlib/dviread.py
+++ b/lib/matplotlib/dviread.py
@@ -961,8 +961,8 @@ def find_tex_file(filename, format=None):
`--format` option.
Apparently most existing TeX distributions on Unix-like systems
- use kpathsea. I hear MikTeX (a popular distribution on Windows)
- doesn't use kpathsea, so what do we do? (TODO)
+ use kpathsea. It's also available as part of MikTeX, a popular
+ distribution on Windows.
.. seealso::
diff --git a/lib/matplotlib/testing/compare.py b/lib/matplotlib/testing/compare.py
index e6f9d7f2beef..542e48aa731d 100644
--- a/lib/matplotlib/testing/compare.py
+++ b/lib/matplotlib/testing/compare.py
@@ -166,8 +166,8 @@ def convert(filename, cache):
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
- raise ImageComparisonFailure(
- "Don't know how to convert %s files to png" % extension)
+ from nose import SkipTest
+ raise SkipTest("Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
diff --git a/lib/matplotlib/testing/decorators.py b/lib/matplotlib/testing/decorators.py
index 06b15d7b7a2c..d20029955f99 100644
--- a/lib/matplotlib/testing/decorators.py
+++ b/lib/matplotlib/testing/decorators.py
@@ -426,3 +426,24 @@ def backend_switcher(*args, **kwargs):
return nose.tools.make_decorator(func)(backend_switcher)
return switch_backend_decorator
+
+
+def skip_if_command_unavailable(cmd):
+ """
+ skips a test if a command is unavailable.
+
+ Parameters
+ ----------
+ cmd : list of str
+ must be a complete command which should not
+ return a non zero exit code, something like
+ ["latex", "-version"]
+ """
+ from matplotlib.compat.subprocess import check_output
+ try:
+ check_output(cmd)
+ except:
+ from nose import SkipTest
+ raise SkipTest('missing command: %s' % cmd[0])
+
+ return lambda f: f
diff --git a/lib/matplotlib/tests/test_axes.py b/lib/matplotlib/tests/test_axes.py
index 607d9a758c89..df0e48e0cef0 100644
--- a/lib/matplotlib/tests/test_axes.py
+++ b/lib/matplotlib/tests/test_axes.py
@@ -26,6 +26,9 @@
import warnings
from matplotlib.cbook import IgnoredKeywordWarning
+import sys
+on_win = (sys.platform == 'win32')
+
# Note: Some test cases are run twice: once normally and once with labeled data
# These two must be defined in the same test function or need to have
# different baseline images to prevent race conditions when nose runs
@@ -2749,7 +2752,8 @@ def test_subplot_key_hash():
@image_comparison(baseline_images=['specgram_freqs',
'specgram_freqs_linear'],
- remove_text=True, extensions=['png'], tol=0.03)
+ remove_text=True, extensions=['png'],
+ tol=0.05 if on_win else 0.03)
def test_specgram_freqs():
'''test axes.specgram in default (psd) mode with sinusoidal stimuli'''
n = 10000
@@ -2849,7 +2853,8 @@ def test_specgram_noise():
@image_comparison(baseline_images=['specgram_magnitude_freqs',
'specgram_magnitude_freqs_linear'],
- remove_text=True, extensions=['png'], tol=0.03)
+ remove_text=True, extensions=['png'],
+ tol=0.05 if on_win else 0.03)
def test_specgram_magnitude_freqs():
'''test axes.specgram in magnitude mode with sinusoidal stimuli'''
n = 10000
@@ -2950,7 +2955,8 @@ def test_specgram_magnitude_noise():
@image_comparison(baseline_images=['specgram_angle_freqs'],
- remove_text=True, extensions=['png'])
+ remove_text=True, extensions=['png'],
+ tol=0.003 if on_win else 0)
def test_specgram_angle_freqs():
'''test axes.specgram in angle mode with sinusoidal stimuli'''
n = 10000
diff --git a/lib/matplotlib/tests/test_backend_ps.py b/lib/matplotlib/tests/test_backend_ps.py
index ab346a4d0a41..af799353df45 100644
--- a/lib/matplotlib/tests/test_backend_ps.py
+++ b/lib/matplotlib/tests/test_backend_ps.py
@@ -71,6 +71,7 @@ def test_savefig_to_stringio_with_distiller():
@cleanup
@needs_tex
+@needs_ghostscript
def test_savefig_to_stringio_with_usetex():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
@@ -90,6 +91,7 @@ def test_savefig_to_stringio_eps_afm():
@cleanup
@needs_tex
+@needs_ghostscript
def test_savefig_to_stringio_with_usetex_eps():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
diff --git a/lib/matplotlib/tests/test_dviread.py b/lib/matplotlib/tests/test_dviread.py
index 260e7f429d82..4e2fb45afc3b 100644
--- a/lib/matplotlib/tests/test_dviread.py
+++ b/lib/matplotlib/tests/test_dviread.py
@@ -2,6 +2,8 @@
unicode_literals)
from matplotlib.externals import six
+from matplotlib.testing.decorators import skip_if_command_unavailable
+
from nose.tools import assert_equal, with_setup
import matplotlib.dviread as dr
@@ -60,6 +62,7 @@ def test_PsfontsMap():
assert_equal(entry.filename, '/absolute/font9.pfb')
+@skip_if_command_unavailable(["kpsewhich", "-version"])
def test_dviread():
dir = os.path.join(os.path.dirname(__file__), 'baseline_images', 'dviread')
with open(os.path.join(dir, 'test.json')) as f:
diff --git a/lib/matplotlib/tests/test_patches.py b/lib/matplotlib/tests/test_patches.py
index a2e21255540f..c8c35eac85ec 100644
--- a/lib/matplotlib/tests/test_patches.py
+++ b/lib/matplotlib/tests/test_patches.py
@@ -20,6 +20,9 @@
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
+import sys
+on_win = (sys.platform == 'win32')
+
def test_Polygon_close():
#: Github issue #1018 identified a bug in the Polygon handling
@@ -250,7 +253,7 @@ def test_wedge_movement():
@image_comparison(baseline_images=['wedge_range'],
- remove_text=True)
+ remove_text=True, tol=0.06 if on_win else 0)
def test_wedge_range():
ax = plt.axes()
diff --git a/lib/matplotlib/tests/test_patheffects.py b/lib/matplotlib/tests/test_patheffects.py
index b8a068362316..c9ff921a1a2b 100644
--- a/lib/matplotlib/tests/test_patheffects.py
+++ b/lib/matplotlib/tests/test_patheffects.py
@@ -17,6 +17,9 @@
import mock
from nose.tools import assert_equal
+import sys
+on_win = (sys.platform == 'win32')
+
@image_comparison(baseline_images=['patheffect1'], remove_text=True)
def test_patheffect1():
@@ -110,7 +113,7 @@ def test_SimplePatchShadow_offset():
assert_equal(pe._offset, (4, 5))
-@image_comparison(baseline_images=['collection'])
+@image_comparison(baseline_images=['collection'], tol=0.013 if on_win else 0)
def test_collection():
x, y = np.meshgrid(np.linspace(0, 10, 150), np.linspace(-5, 5, 100))
data = np.sin(x) + np.cos(y)
diff --git a/lib/matplotlib/tests/test_triangulation.py b/lib/matplotlib/tests/test_triangulation.py
index 966990373434..19c29ea7b482 100644
--- a/lib/matplotlib/tests/test_triangulation.py
+++ b/lib/matplotlib/tests/test_triangulation.py
@@ -14,6 +14,8 @@
import matplotlib.cm as cm
from matplotlib.path import Path
+import sys
+on_win = (sys.platform == 'win32')
def test_delaunay():
# No duplicate points, regular grid.
@@ -770,7 +772,8 @@ def z(x, y):
@image_comparison(baseline_images=['tri_smooth_gradient'],
- extensions=['png'], remove_text=True)
+ extensions=['png'], remove_text=True,
+ tol=0.015 if on_win else 0)
def test_tri_smooth_gradient():
# Image comparison based on example trigradient_demo.
diff --git a/test.txt b/test.txt
new file mode 100644
index 000000000000..d82196bb51ce
--- /dev/null
+++ b/test.txt
@@ -0,0 +1,206 @@
+TARGET_ARCH=x86, CONDA_PY=27
+
+======================================================================
+FAIL: matplotlib.tests.test_axes.test_specgram_angle_freqs.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 197, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_axes\specgram_angle_freqs.png vs. C:\projects\matplotlib\result_images\test_axes\specgram_angle_freqs-expected.png
+(RMS 0.002)
+
+======================================================================
+FAIL: matplotlib.tests.test_patches.test_wedge_range.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 197, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patches\wedge_range.png vs. C:\projects\matplotlib\result_images\test_patches\wedge_range-expected.png
+(RMS 0.059)
+
+======================================================================
+FAIL: matplotlib.tests.test_patheffects.test_collection.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 197, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patheffects\collection.png vs. C:\projects\matplotlib\result_images\test_patheffects\collection-expected.png
+(RMS 0.008)
+
+----------------------------------------------------------------------
+Ran 5966 tests in 588.929s
+
+FAILED (KNOWNFAIL=40, SKIP=1210, failures=3)
+TARGET_ARCH=x64, CONDA_PY=27
+
+======================================================================
+FAIL: matplotlib.tests.test_patches.test_wedge_range.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 197, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patches\wedge_range.png vs. C:\projects\matplotlib\result_images\test_patches\wedge_range-expected.png
+(RMS 0.059)
+
+======================================================================
+FAIL: matplotlib.tests.test_patheffects.test_collection.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 197, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patheffects\collection.png vs. C:\projects\matplotlib\result_images\test_patheffects\collection-expected.png
+ (RMS 0.012)
+
+----------------------------------------------------------------------
+Ran 5966 tests in 411.283s
+
+FAILED (KNOWNFAIL=40, SKIP=1210, failures=2)
+TARGET_ARCH=x64, CONDA_PY=34
+
+======================================================================
+FAIL: matplotlib.tests.test_patches.test_wedge_range.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patches\wedge_range.png vs. C:\projects\matplotlib\result_images\test_patches\wedge_range-expected.png
+ (RMS 0.059)
+
+======================================================================
+FAIL: matplotlib.tests.test_patheffects.test_collection.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patheffects\collection.png vs. C:\projects\matplotlib\result_images\test_patheffects\collection-expected.png
+(RMS 0.012)
+
+----------------------------------------------------------------------
+Ran 5966 tests in 494.700s
+
+FAILED (KNOWNFAIL=40, SKIP=1210, failures=2)
+TARGET_ARCH=x64, CONDA_PY=35
+
+======================================================================
+FAIL: matplotlib.tests.test_axes.test_specgram_freqs.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+ C:\projects\matplotlib\result_images\test_axes\specgram_freqs.png vs. C:\projects\matplotlib\result_images\test_axes\specgram_freqs-expected.png
+(RMS 0.042)
+
+======================================================================
+FAIL: matplotlib.tests.test_axes.test_specgram_freqs.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_axes\specgram_freqs_linear.png vs. C:\projects\matplotlib\result_images\test_axes\specgram_freqs_linear-expected.png
+ (RMS 0.042)
+
+======================================================================
+FAIL: matplotlib.tests.test_axes.test_specgram_magnitude_freqs.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_axes\specgram_magnitude_freqs.png vs. C:\projects\matplotlib\result_images\test_axes\specgram_magnitude_freqs-expected.png
+ (RMS 0.042)
+
+======================================================================
+FAIL: matplotlib.tests.test_axes.test_specgram_magnitude_freqs.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_axes\specgram_magnitude_freqs_linear.png vs. C:\projects\matplotlib\result_images\test_axes\specgram_magnitude_freqs_linear-expected.png
+(RMS 0.042)
+
+======================================================================
+FAIL: matplotlib.tests.test_patheffects.test_collection.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_patheffects\collection.png vs. C:\projects\matplotlib\result_images\test_patheffects\collection-expected.png
+(RMS 0.006)
+
+======================================================================
+FAIL: matplotlib.tests.test_triangulation.test_tri_smooth_gradient.test
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "C:\conda\envs\test-environment\lib\site-packages\nose\case.py", line 198, in runTest
+ self.test(*self.arg)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 55, in failer
+ result = f(*args, **kwargs)
+ File "c:\projects\matplotlib\lib\matplotlib\testing\decorators.py", line 259, in do_test
+ '(RMS %(rms).3f)'%err)
+matplotlib.testing.exceptions.ImageComparisonFailure: images not close:
+C:\projects\matplotlib\result_images\test_triangulation\tri_smooth_gradient.png vs. C:\projects\matplotlib\result_images\test_triangulation\tri_smooth_gradient-expected.png
+(RMS 0.014)
+
+----------------------------------------------------------------------
+Ran 5966 tests in 517.163s
+
+FAILED (KNOWNFAIL=40, SKIP=1210, failures=6)
\ No newline at end of file
diff --git a/visual_tests.py b/visual_tests.py
new file mode 100644
index 000000000000..7fa5b4ebf6de
--- /dev/null
+++ b/visual_tests.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+#
+# This builds a html page of all images from the image comparison tests
+# and opens that page in the browser.
+#
+# $ python visual_tests.py
+#
+
+import os
+import time
+import six
+
+from collections import defaultdict
+
+def run():
+ # Build a website for visual comparison
+ image_dir = "result_images"
+ # build the website
+ _html = ""
+ _html += """
+ \n"""
+ _subdirs = [name for name in os.listdir(image_dir) if os.path.isdir(os.path.join(image_dir, name))]
+ # loop over all pictures
+ _row = '{0} {1} | {2} |  | {4} | \n'
+ _failed = ""
+ _failed += "Only Failed
"
+ _failed += "\nname | actual | expected | diff | \n"
+ _has_failure = False
+ _body = ""
+ for subdir in _subdirs:
+ if subdir == "test_compare_images":
+ # these are the image which test the image comparison functions...
+ continue
+ pictures = defaultdict(dict)
+ for file in os.listdir(os.path.join(image_dir, subdir)):
+ if os.path.isdir(os.path.join(image_dir, subdir, file)):
+ continue
+ fn, fext = os.path.splitext(file)
+ if fext != ".png":
+ continue
+ if "-failed-diff" in fn:
+ pictures[fn[:-12]]["f"] = os.path.join(subdir, file)
+ elif "-expected" in fn:
+ pictures[fn[:-9]]["e"] = os.path.join(subdir, file)
+ else:
+ pictures[fn]["c"] = os.path.join(subdir, file)
+
+ _body += "{0}
".format(subdir)
+ _body += "\nname | actual | expected | diff | \n"
+ for name, test in six.iteritems(pictures):
+ if test.get("f", None):
+ # a real failure in the image generation, resulting in different images
+ _has_failure = True
+ s = "(failed)"
+ failed = 'diff'.format(test.get("f", ""))
+ current = '
'.format(test.get("c", ""))
+ _failed += _row.format(name, "", current, test.get("e", ""), failed)
+ elif test.get("c", None) is None:
+ # A failure in the test, resulting in no current image
+ _has_failure = True
+ s = "(failed)"
+ failed = '--'
+ current = '(Failure in test, no image produced)'
+ _failed += _row.format(name, "", current, test.get("e", ""), failed)
+ else:
+ s = "(passed)"
+ failed = '--'
+ current = '
'.format(test.get("c", ""))
+ _body += _row.format(name, "", current, test.get("e", ""), failed)
+ _body += "
\n"
+ _failed += "
\n"
+ if _has_failure:
+ _html += _failed
+ _html += _body
+ _html += "\n"
+ index = os.path.join(image_dir, "index.html")
+ with open(index, "w") as f:
+ f.write(_html)
+ try:
+ import webbrowser
+ webbrowser.open(index)
+ except:
+ print("Open {0} in a browser for a visual comparison.".format(str(index)))
+
+if __name__ == '__main__':
+ run()