From da4b2dd7e128431107ea25d478a1782e08c43135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20L=C3=B6ffler?= Date: Sun, 19 Apr 2015 14:31:39 +0300 Subject: [PATCH 001/496] pytest traceback hide markers set for testing helpers --- numpy/testing/utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 4527a51d9aee..63f9b4d271d0 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -255,6 +255,7 @@ def assert_equal(actual,desired,err_msg='',verbose=True): DESIRED: 6 """ + __tracebackhide__ = True if isinstance(desired, dict): if not isinstance(actual, dict) : raise AssertionError(repr(type(actual))) @@ -361,6 +362,7 @@ def print_assert_equal(test_string, actual, desired): [0, 2] """ + __tracebackhide__ = True import pprint if not (actual == desired): @@ -434,6 +436,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): y: array([ 1. , 2.33333334]) """ + __tracebackhide__ = True from numpy.core import ndarray from numpy.lib import iscomplexobj, real, imag @@ -547,6 +550,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): True """ + __tracebackhide__ = True import numpy as np (actual, desired) = map(float, (actual, desired)) @@ -588,6 +592,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6): + __tracebackhide__ = True from numpy.core import array, isnan, isinf, any, all, inf x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -808,6 +813,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): y: array([ 1. , 2.33333, 5. ]) """ + __tracebackhide__ = True from numpy.core import around, number, float_, result_type, array from numpy.core.numerictypes import issubdtype from numpy.core.fromnumeric import any as npany @@ -908,6 +914,7 @@ def assert_array_less(x, y, err_msg='', verbose=True): y: array([4]) """ + __tracebackhide__ = True assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') @@ -942,6 +949,7 @@ def assert_string_equal(actual, desired): """ # delay import of difflib to reduce startup time + __tracebackhide__ = True import difflib if not isinstance(actual, str) : @@ -1050,6 +1058,7 @@ def assert_raises(*args,**kwargs): unexpected exception. """ + __tracebackhide__ = True nose = import_nose() return nose.tools.assert_raises(*args,**kwargs) @@ -1068,6 +1077,7 @@ def assert_raises_regex(exception_class, expected_regexp, all versions down to 2.6. 
""" + __tracebackhide__ = True nose = import_nose() global assert_raises_regex_impl @@ -1289,6 +1299,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False, >>> assert_allclose(x, y, rtol=1e-5, atol=0) """ + __tracebackhide__ = True import numpy as np def compare(x, y): return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, @@ -1348,6 +1359,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): AssertionError: X and Y are not equal to 1 ULP (max is 2) """ + __tracebackhide__ = True import numpy as np ax = np.abs(x) ay = np.abs(y) @@ -1396,6 +1408,7 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) """ + __tracebackhide__ = True import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): @@ -1597,6 +1610,7 @@ def assert_warns(warning_class, func, *args, **kw): The value returned by `func`. """ + __tracebackhide__ = True with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) @@ -1628,6 +1642,7 @@ def assert_no_warnings(func, *args, **kw): The value returned by `func`. """ + __tracebackhide__ = True with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) From 73fad84b1bc657bc0dedb533df29f9b18d06f297 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20L=C3=B6ffler?= Date: Mon, 20 Apr 2015 17:31:23 +0300 Subject: [PATCH 002/496] Comments added --- numpy/testing/utils.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 63f9b4d271d0..0f49f59b188d 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -255,7 +255,7 @@ def assert_equal(actual,desired,err_msg='',verbose=True): DESIRED: 6 """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test if isinstance(desired, dict): if not isinstance(actual, dict) : raise AssertionError(repr(type(actual))) @@ -362,7 +362,7 @@ def print_assert_equal(test_string, actual, desired): [0, 2] """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import pprint if not (actual == desired): @@ -436,7 +436,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): y: array([ 1. , 2.33333334]) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import ndarray from numpy.lib import iscomplexobj, real, imag @@ -550,7 +550,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): True """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import numpy as np (actual, desired) = map(float, (actual, desired)) @@ -592,7 +592,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6): - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import array, isnan, isinf, any, all, inf x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -813,7 +813,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): y: array([ 1. , 2.33333, 5. 
]) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import around, number, float_, result_type, array from numpy.core.numerictypes import issubdtype from numpy.core.fromnumeric import any as npany @@ -914,7 +914,7 @@ def assert_array_less(x, y, err_msg='', verbose=True): y: array([4]) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') @@ -949,7 +949,7 @@ def assert_string_equal(actual, desired): """ # delay import of difflib to reduce startup time - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import difflib if not isinstance(actual, str) : @@ -1058,7 +1058,7 @@ def assert_raises(*args,**kwargs): unexpected exception. """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() return nose.tools.assert_raises(*args,**kwargs) @@ -1077,7 +1077,7 @@ def assert_raises_regex(exception_class, expected_regexp, all versions down to 2.6. """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() global assert_raises_regex_impl @@ -1299,7 +1299,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False, >>> assert_allclose(x, y, rtol=1e-5, atol=0) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import numpy as np def compare(x, y): return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, @@ -1359,7 +1359,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): AssertionError: X and Y are not equal to 1 ULP (max is 2) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import numpy as np ax = np.abs(x) ay = np.abs(y) @@ -1408,7 +1408,7 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): @@ -1610,7 +1610,7 @@ def assert_warns(warning_class, func, *args, **kw): The value returned by `func`. """ - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) @@ -1642,7 +1642,7 @@ def assert_no_warnings(func, *args, **kw): The value returned by `func`. 
""" - __tracebackhide__ = True + __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) From 25dca018f4f00db4877c6ee993c38d339cbbf8b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20L=C3=B6ffler?= Date: Thu, 14 May 2015 10:54:09 +0300 Subject: [PATCH 003/496] PEP8 applied to comments --- numpy/testing/utils.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 0f49f59b188d..60c6470b1907 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -255,7 +255,7 @@ def assert_equal(actual,desired,err_msg='',verbose=True): DESIRED: 6 """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test if isinstance(desired, dict): if not isinstance(actual, dict) : raise AssertionError(repr(type(actual))) @@ -362,7 +362,7 @@ def print_assert_equal(test_string, actual, desired): [0, 2] """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import pprint if not (actual == desired): @@ -436,7 +436,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): y: array([ 1. , 2.33333334]) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import ndarray from numpy.lib import iscomplexobj, real, imag @@ -550,7 +550,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): True """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import numpy as np (actual, desired) = map(float, (actual, desired)) @@ -592,7 +592,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6): - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import array, isnan, isinf, any, all, inf x = array(x, copy=False, subok=True) y = array(y, copy=False, subok=True) @@ -813,7 +813,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): y: array([ 1. , 2.33333, 5. ]) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test from numpy.core import around, number, float_, result_type, array from numpy.core.numerictypes import issubdtype from numpy.core.fromnumeric import any as npany @@ -914,7 +914,7 @@ def assert_array_less(x, y, err_msg='', verbose=True): y: array([4]) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') @@ -949,7 +949,7 @@ def assert_string_equal(actual, desired): """ # delay import of difflib to reduce startup time - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import difflib if not isinstance(actual, str) : @@ -1058,7 +1058,7 @@ def assert_raises(*args,**kwargs): unexpected exception. 
""" - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() return nose.tools.assert_raises(*args,**kwargs) @@ -1077,7 +1077,7 @@ def assert_raises_regex(exception_class, expected_regexp, all versions down to 2.6. """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() global assert_raises_regex_impl @@ -1299,7 +1299,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False, >>> assert_allclose(x, y, rtol=1e-5, atol=0) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import numpy as np def compare(x, y): return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, @@ -1359,7 +1359,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): AssertionError: X and Y are not equal to 1 ULP (max is 2) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import numpy as np ax = np.abs(x) ay = np.abs(y) @@ -1408,7 +1408,7 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): @@ -1610,7 +1610,7 @@ def assert_warns(warning_class, func, *args, **kw): The value returned by `func`. """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) @@ -1642,7 +1642,7 @@ def assert_no_warnings(func, *args, **kw): The value returned by `func`. """ - __tracebackhide__ = True # Hide traceback for py.test + __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') result = func(*args, **kw) From 288239d3485e753e0735fbed3a25dfaebe9b80a9 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Tue, 10 Feb 2015 13:21:10 -0800 Subject: [PATCH 004/496] Check `out` kwarg for __nump_ufunc__ override and set index appropriately for the case where self is among outputs but not among inputs. Ensure it works both out passed on as an argument and with out in a keyword argument. 
--- numpy/core/src/private/ufunc_override.h | 37 +++++++++++-- numpy/core/tests/test_multiarray.py | 72 ++++++++++++++++++++++--- 2 files changed, 97 insertions(+), 12 deletions(-) diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h index c3f9f601e0ff..4042eae2fde2 100644 --- a/numpy/core/src/private/ufunc_override.h +++ b/numpy/core/src/private/ufunc_override.h @@ -180,10 +180,13 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, int override_pos; /* Position of override in args.*/ int j; - int nargs = PyTuple_GET_SIZE(args); + int nargs; + int nout_kwd = 0; + int out_kwd_is_tuple = 0; int noa = 0; /* Number of overriding args.*/ PyObject *obj; + PyObject *out_kwd_obj = NULL; PyObject *other_obj; PyObject *method_name = NULL; @@ -204,16 +207,40 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, "with non-tuple"); goto fail; } - - if (PyTuple_GET_SIZE(args) > NPY_MAXARGS) { + nargs = PyTuple_GET_SIZE(args); + if (nargs > NPY_MAXARGS) { PyErr_SetString(PyExc_ValueError, "Internal Numpy error: too many arguments in call " "to PyUFunc_CheckOverride"); goto fail; } - for (i = 0; i < nargs; ++i) { - obj = PyTuple_GET_ITEM(args, i); + /* be sure to include possible 'out' keyword argument. */ + if ((kwds)&& (PyDict_CheckExact(kwds))) { + out_kwd_obj = PyDict_GetItemString(kwds, "out"); + if (out_kwd_obj != NULL) { + out_kwd_is_tuple = PyTuple_CheckExact(out_kwd_obj); + if (out_kwd_is_tuple) { + nout_kwd = PyTuple_GET_SIZE(out_kwd_obj); + } + else { + nout_kwd = 1; + } + } + } + + for (i = 0; i < nargs + nout_kwd; ++i) { + if (i < nargs) { + obj = PyTuple_GET_ITEM(args, i); + } + else { + if (out_kwd_is_tuple) { + obj = PyTuple_GET_ITEM(out_kwd_obj, i-nargs); + } + else { + obj = out_kwd_obj; + } + } /* * TODO: could use PyArray_GetAttrString_SuppressException if it * weren't private to multiarray.so diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index ac645f01322c..0f024cbf721d 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2383,15 +2383,15 @@ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): return "ufunc" else: inputs = list(inputs) - inputs[i] = np.asarray(self) + if i < len(inputs): + inputs[i] = np.asarray(self) func = getattr(ufunc, method) + if ('out' in kw) and (kw['out'] is not None): + kw['out'] = np.asarray(kw['out']) r = func(*inputs, **kw) - if 'out' in kw: - return r - else: - x = self.__class__(r.shape, dtype=r.dtype) - x[...] = r - return x + x = self.__class__(r.shape, dtype=r.dtype) + x[...] = r + return x class SomeClass3(SomeClass2): def __rsub__(self, other): @@ -2475,6 +2475,64 @@ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') + def test_numpy_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + class CheckIndex(object): + def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): + return i + + a = CheckIndex() + dummy = np.arange(2.) 
+ # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), 1) + assert_equal(np.sin(dummy, out=a), 1) + assert_equal(np.sin(dummy, out=(a,)), 1) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), 1) + assert_equal(np.modf(dummy, None, a), 2) + assert_equal(np.modf(dummy, dummy, a), 2) + assert_equal(np.modf(dummy, out=a), 1) + assert_equal(np.modf(dummy, out=(a,)), 1) + assert_equal(np.modf(dummy, out=(a, None)), 1) + assert_equal(np.modf(dummy, out=(a, dummy)), 1) + assert_equal(np.modf(dummy, out=(None, a)), 2) + assert_equal(np.modf(dummy, out=(dummy, a)), 2) + assert_equal(np.modf(a, out=(dummy, a)), 0) + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), 2) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), 2) + assert_equal(np.add(dummy, dummy, out=(a,)), 2) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(ndarray): + def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + class TestCAPI(TestCase): def test_IsPythonScalar(self): From 608f6e84e55db5fddb742a47ec0ef0126b41f80c Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Tue, 4 Aug 2015 14:29:25 -0400 Subject: [PATCH 005/496] DOC: document behaviour of sign for complex numbers --- numpy/core/code_generators/ufunc_docstrings.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index bfa3ad221672..068644132847 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2801,6 +2801,11 @@ def add_newdoc(place, name, doc): The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. + For complex inputs, the `sign` function returns: + ``-1+0j if x.real < 0, + 1+0j if x.real > 0, + sign(x.imag)+0j if x.real == 0.`` + Parameters ---------- x : array_like @@ -2811,12 +2816,20 @@ def add_newdoc(place, name, doc): y : ndarray The sign of `x`. + Notes + ----- + There is more than one definition of sign in common use for complex + numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}` + which is different from a common alternative, :math:`x/|x|`. + Examples -------- >>> np.sign([-5., 4.5]) array([-1., 1.]) >>> np.sign(0) 0 + >>> np.sign(5-2j) + (1+0j) """) From 02ed190c97793abc103335cc67af1b689bda3ad3 Mon Sep 17 00:00:00 2001 From: jason king Date: Sat, 12 Sep 2015 12:48:34 +1000 Subject: [PATCH 006/496] DOC: first attempt at documenting load_library. Found some doco at http://docs.scipy.org/doc/numpy-dev/user/c-info.python-as-glue.html which seemed to describe the function, so used that. 
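
A hypothetical usage sketch (the library name and directory are made up
for illustration):

    import numpy.ctypeslib as npct

    # Resolves the platform-specific file name (e.g. libmylib.so on
    # Linux, mylib.dll on Windows) relative to the given path.
    lib = npct.load_library('mylib', '/path/to/libs')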
---
 numpy/ctypeslib.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 961fa601261f..6c65860e8d7c 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -89,6 +89,33 @@ def _dummy(*args, **kwds):
 
 # Adapted from Albert Strasheim
 def load_library(libname, loader_path):
+    """
+    It is possible to load a library using
+    >>> lib = ctypes.cdll[<full_path_name>]
+
+    But there are cross-platform considerations, such as library file extensions,
+    plus the fact that Windows will just load the first library it finds with that name.
+    NumPy supplies the load_library function as a convenience.
+
+    Parameters
+    ----------
+    libname : string
+        Name of the library, which can have 'lib' as a prefix,
+        but without an extension.
+    loader_path : string
+        Where the library can be found.
+
+    Returns
+    -------
+    ctypes.cdll[libpath] : library object
+        A ctypes library object
+
+    Raises
+    ------
+    OSError
+        If there is no library with the expected extension, or the
+        library is defective and can't be loaded.
+    """
     if ctypes.__version__ < '1.0.1':
         import warnings
         warnings.warn("All features of ctypes interface may not work " \

From 44c9b2aba51f73e50c17b7f990a054d0d4804269 Mon Sep 17 00:00:00 2001
From: Dmitry Zagorny
Date: Mon, 14 Sep 2015 13:00:55 -0500
Subject: [PATCH 007/496] Added closing for 'so_dup' and 'se_dup' file
 descriptors.

Issue: the _exec_command function doesn't close the 'so_dup' and 'se_dup'
file descriptors. SciPy then fails to build scipy\special\amos\zunik.f and
crashes with: error: Command "C:\Program Files (x86)\Intel\Composer XE
2015\bin\intel64\ifort.exe /nologo /MD /nbs /names:lowercase
/assume:underscore /O1 -IC:\Python27\lib\site-packages\numpy\core\include
-c /c scipy\special\amos\zunik.f
/Fobuild\temp.win-amd64-2.7\scipy\special\amos\zunik.o" failed with exit
status -1073741502
---
 numpy/distutils/exec_command.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index f751a8ca3e0b..9fa09cd51ce0 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -441,8 +441,10 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
         se_flush()
         if _so_has_fileno:
             os.dup2(so_dup, so_fileno)
+            os.close(so_dup)
         if _se_has_fileno:
             os.dup2(se_dup, se_fileno)
+            os.close(se_dup)
 
         fout.close()
         fout = open_latin1(outfile, 'r')

From 72753bbdf8736a13f1cb60c25cf8683608f46e29 Mon Sep 17 00:00:00 2001
From: Dmitry Zagorny
Date: Mon, 14 Sep 2015 13:03:38 -0500
Subject: [PATCH 008/496] MSVCCompiler overwrites the 'lib' and 'include'
 environment variables.

This behavior affects at least Python 3.5 and makes the SciPy build fail.
During initialization, distutils.MSVCCompiler replaces the Intel environment
('include' and 'lib' paths). This fix decorates the 'initialize' function in
MSVCCompiler and extends the 'lib' and 'include' environment variables.

Changed compilation flags: generate optimized code specialized for Intel
processors with SSE4.2 support.
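
The essence of the fix as a standalone sketch (instantiating the compiler
directly is for illustration only; the real change wraps distutils'
initialize in a subclass):

    import os
    from distutils.msvc9compiler import MSVCCompiler

    # Save the Intel-provided values, let distutils initialize (which
    # overwrites 'lib'/'include'), then prepend the saved values back.
    environ_lib = os.getenv('lib')
    compiler = MSVCCompiler()
    compiler.initialize()
    if environ_lib is not None:
        os.environ['lib'] = environ_lib + os.environ['lib']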
--- numpy/distutils/fcompiler/compaq.py | 2 +- numpy/distutils/fcompiler/intel.py | 29 ++++++++++++------------- numpy/distutils/intelccompiler.py | 33 ++++++++++++++--------------- numpy/distutils/msvc9compiler.py | 21 ++++++++++++++++++ numpy/distutils/msvccompiler.py | 17 +++++++++++++++ numpy/distutils/system_info.py | 4 ++-- 6 files changed, 71 insertions(+), 35 deletions(-) create mode 100644 numpy/distutils/msvc9compiler.py create mode 100644 numpy/distutils/msvccompiler.py diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py index 5162b168c160..2dd6c01e63e1 100644 --- a/numpy/distutils/fcompiler/compaq.py +++ b/numpy/distutils/fcompiler/compaq.py @@ -74,7 +74,7 @@ class CompaqVisualFCompiler(FCompiler): fc_exe = 'DF' if sys.platform=='win32': - from distutils.msvccompiler import MSVCCompiler + from numpy.distutils.msvccompiler import MSVCCompiler try: m = MSVCCompiler() diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index ef0bcc30b283..28624918d6c7 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -10,6 +10,7 @@ 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + def intel_version_match(type): # Match against the important stuff in the version string return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) @@ -45,17 +46,16 @@ class IntelFCompiler(BaseIntelFCompiler): } pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! + module_dir_switch = '-module ' # Don't remove ending space! module_include_switch = '-I' def get_flags_free(self): - return ["-FR"] + return ['-FR'] def get_flags(self): return ['-fPIC'] def get_flags_opt(self): - #return ['-i8 -xhost -openmp -fp-model strict'] return ['-xhost -openmp -fp-model strict'] def get_flags_arch(self): @@ -120,11 +120,10 @@ def get_flags(self): return ['-fPIC'] def get_flags_opt(self): - #return ['-i8 -xhost -openmp -fp-model strict'] - return ['-xhost -openmp -fp-model strict'] + return ['-openmp -fp-model strict'] def get_flags_arch(self): - return [] + return ['-xSSE4.2'] # Is there no difference in the version string between the above compilers # and the Visual compilers? @@ -145,18 +144,18 @@ def update_executables(self): executables = { 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f77' : [None], + 'compiler_fix' : [None], 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], + 'linker_so' : [None], 'archiver' : [ar_exe, "/verbose", "/OUT:"], 'ranlib' : None } compile_switch = '/c ' - object_switch = '/Fo' #No space after /Fo! - library_switch = '/OUT:' #No space after /OUT:! - module_dir_switch = '/module:' #No space after /module: + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:! 
+ module_dir_switch = '/module:' # No space after /module: module_include_switch = '/I' def get_flags(self): @@ -164,7 +163,7 @@ def get_flags(self): return opt def get_flags_free(self): - return ["-FR"] + return [] def get_flags_debug(self): return ['/4Yb', '/d2'] @@ -185,7 +184,7 @@ class IntelItaniumVisualFCompiler(IntelVisualFCompiler): version_match = intel_version_match('Itanium') - possible_executables = ['efl'] # XXX this is a wild guess + possible_executables = ['efl'] # XXX this is a wild guess ar_exe = IntelVisualFCompiler.ar_exe executables = { @@ -206,7 +205,7 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler): version_match = simple_version_match(start='Intel\(R\).*?64,') def get_flags_arch(self): - return ["/arch:SSE2"] + return ['/QxSSE4.2'] if __name__ == '__main__': diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index db6ef80bdbf0..2635424e34e8 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -1,8 +1,10 @@ from __future__ import division, absolute_import, print_function -import sys +import platform from distutils.unixccompiler import UnixCCompiler +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils.ccompiler import simple_version_match @@ -15,14 +17,14 @@ class IntelCCompiler(UnixCCompiler): def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) - self.cc_exe = 'icc -fPIC' + self.cc_exe = 'icc -fPIC -fp-model strict -O3 -fomit-frame-pointer -openmp' compiler = self.cc_exe self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', - linker_exe=compiler, - linker_so=compiler + ' -shared') + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' -shared -shared-intel') class IntelItaniumCCompiler(IntelCCompiler): @@ -40,24 +42,22 @@ class IntelEM64TCCompiler(UnixCCompiler): A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. """ compiler_type = 'intelem' - cc_exe = 'icc -m64 -fPIC' - cc_args = "-fPIC" + cc_exe = 'icc -m64' + cc_args = '-fPIC' def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) - self.cc_exe = 'icc -m64 -fPIC' + self.cc_exe = 'icc -m64 -fPIC -fp-model strict -O3 -fomit-frame-pointer -openmp -xSSE4.2' compiler = self.cc_exe self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', - linker_exe=compiler, - linker_so=compiler + ' -shared') + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' -shared -shared-intel') -if sys.platform == 'win32': - from distutils.msvc9compiler import MSVCCompiler - +if platform.system() == 'Windows': class IntelCCompilerW(MSVCCompiler): """ A modified Intel compiler compatible with an MSVC-built Python. 
@@ -72,11 +72,11 @@ def __init__(self, verbose=0, dry_run=0, force=0):
 
     def initialize(self, plat_name=None):
         MSVCCompiler.initialize(self, plat_name)
-        self.cc = self.find_exe("icl.exe")
-        self.lib = self.find_exe("xilib")
-        self.linker = self.find_exe("xilink")
+        self.cc = self.find_exe('icl.exe')
+        self.lib = self.find_exe('xilib')
+        self.linker = self.find_exe('xilink')
         self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
-                                '/Qstd=c99']
+                                '/Qstd=c99', '/QxSSE4.2']
         self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
                                       '/Qstd=c99', '/Z7', '/D_DEBUG']
 
@@ -91,4 +91,3 @@ def __init__(self, verbose=0, dry_run=0, force=0):
         MSVCCompiler.__init__(self, verbose, dry_run, force)
         version_match = simple_version_match(start='Intel\(R\).*?64,')
         self.__version = version_match
-
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
new file mode 100644
index 000000000000..c60826de97ac
--- /dev/null
+++ b/numpy/distutils/msvc9compiler.py
@@ -0,0 +1,21 @@
+import os
+from distutils.msvccompiler import *
+from distutils.msvc9compiler import MSVCCompiler as distutils_MSVCCompiler
+
+
+class MSVCCompiler(distutils_MSVCCompiler):
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        distutils_MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+    def initialize(self, plat_name=None):
+        environ_lib = os.getenv('lib')
+        environ_include = os.getenv('include')
+        distutils_MSVCCompiler.initialize(self, plat_name)
+        if environ_lib is not None:
+            os.environ['lib'] = environ_lib + os.environ['lib']
+        if environ_include is not None:
+            os.environ['include'] = environ_include + os.environ['include']
+
+    def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+        ld_args.append('/MANIFEST')
+        distutils_MSVCCompiler.manifest_setup_ldargs(self, output_filename, build_temp, ld_args)
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
new file mode 100644
index 000000000000..cb9b81a71056
--- /dev/null
+++ b/numpy/distutils/msvccompiler.py
@@ -0,0 +1,17 @@
+import os
+from distutils.msvccompiler import *
+from distutils.msvccompiler import MSVCCompiler as distutils_MSVCCompiler
+
+
+class MSVCCompiler(distutils_MSVCCompiler):
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        distutils_MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+    def initialize(self, plat_name=None):
+        environ_lib = os.getenv('lib')
+        environ_include = os.getenv('include')
+        distutils_MSVCCompiler.initialize(self, plat_name)
+        if environ_lib is not None:
+            os.environ['lib'] = environ_lib + os.environ['lib']
+        if environ_include is not None:
+            os.environ['include'] = environ_include + os.environ['include']
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 90c05329861f..b09b5d40b7ae 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -999,8 +999,8 @@ def __init__(self):
             plt = '64'
             #l = 'mkl_ipf'
         elif cpu.is_Xeon():
-            plt = 'em64t'
-            #l = 'mkl_em64t'
+            plt = 'intel64'
+            #l = 'mkl_intel64'
         else:
             plt = '32'
             #l = 'mkl_ia32'

From ec97125439b6e0b4fdafba27b95367d38b7fd487 Mon Sep 17 00:00:00 2001
From: Dmitry Zagorny
Date: Mon, 14 Sep 2015 13:05:00 -0500
Subject: [PATCH 009/496] Fixed issue: SciPy can't be built if Python is
 installed into a folder whose path contains whitespace.
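
The new parser splits on ' -' instead of tokenizing with shlex, so an
include path containing spaces survives intact; a doctest-style sketch
(the path is hypothetical):

    >>> from numpy.distutils.npy_pkg_config import parse_flags
    >>> parse_flags('-IC:\\Program Files\\Python27\\include')['include_dirs']
    ['C:\\Program Files\\Python27\\include']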
--- numpy/distutils/npy_pkg_config.py | 43 +++++++++++-------------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py index ceab906a4edf..6156439e1f26 100644 --- a/numpy/distutils/npy_pkg_config.py +++ b/numpy/distutils/npy_pkg_config.py @@ -3,7 +3,6 @@ import sys import re import os -import shlex if sys.version_info[0] < 3: from ConfigParser import SafeConfigParser, NoOptionError @@ -56,35 +55,23 @@ def parse_flags(line): * 'ignored' """ - lexer = shlex.shlex(line) - lexer.whitespace_split = True - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - def next_token(t): - if t.startswith('-I'): - if len(t) > 2: - d['include_dirs'].append(t[2:]) - else: - t = lexer.get_token() - d['include_dirs'].append(t) - elif t.startswith('-L'): - if len(t) > 2: - d['library_dirs'].append(t[2:]) + 'macros': [], 'ignored': []} + + flags = (' ' + line).split(' -') + for flag in flags: + flag = '-' + flag + if len(flag) > 0: + if flag.startswith('-I'): + d['include_dirs'].append(flag[2:].strip()) + elif flag.startswith('-L'): + d['library_dirs'].append(flag[2:].strip()) + elif flag.startswith('-l'): + d['libraries'].append(flag[2:].strip()) + elif flag.startswith('-D'): + d['macros'].append(flag[2:].strip()) else: - t = lexer.get_token() - d['library_dirs'].append(t) - elif t.startswith('-l'): - d['libraries'].append(t[2:]) - elif t.startswith('-D'): - d['macros'].append(t[2:]) - else: - d['ignored'].append(t) - return lexer.get_token() - - t = lexer.get_token() - while t: - t = next_token(t) + d['ignored'].append(flag) return d From 4287a60ca1a7449063aca75688325bc7f6a5cba4 Mon Sep 17 00:00:00 2001 From: Dmitry Zagorny Date: Mon, 14 Sep 2015 13:06:19 -0500 Subject: [PATCH 010/496] Align with pep8 --- numpy/distutils/intelccompiler.py | 10 ++++++---- numpy/distutils/msvc9compiler.py | 3 ++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index 2635424e34e8..a1f34e304eab 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -3,10 +3,10 @@ import platform from distutils.unixccompiler import UnixCCompiler -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler class IntelCCompiler(UnixCCompiler): @@ -17,7 +17,8 @@ class IntelCCompiler(UnixCCompiler): def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) - self.cc_exe = 'icc -fPIC -fp-model strict -O3 -fomit-frame-pointer -openmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -openmp') compiler = self.cc_exe self.set_executables(compiler=compiler, compiler_so=compiler, @@ -47,7 +48,8 @@ class IntelEM64TCCompiler(UnixCCompiler): def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) - self.cc_exe = 'icc -m64 -fPIC -fp-model strict -O3 -fomit-frame-pointer -openmp -xSSE4.2' + self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -openmp -xSSE4.2') compiler = self.cc_exe self.set_executables(compiler=compiler, compiler_so=compiler, diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py index 
c60826de97ac..5ccff57a8133 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -18,4 +18,5 @@ def initialize(self, plat_name=None):
 
     def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
         ld_args.append('/MANIFEST')
-        distutils_MSVCCompiler.manifest_setup_ldargs(self, output_filename, build_temp, ld_args)
+        distutils_MSVCCompiler.manifest_setup_ldargs(self, output_filename,
+                                                     build_temp, ld_args)

From 5ef26af9209f206c36329e0b2585b4ebdbb5f5fd Mon Sep 17 00:00:00 2001
From: Dmitry Zagorny
Date: Mon, 14 Sep 2015 13:07:17 -0500
Subject: [PATCH 011/496] Changed from distutils_MSVCCompiler to
 distutils.msvccompiler.MSVCCompiler

---
 numpy/distutils/msvc9compiler.py | 15 ++++++++-------
 numpy/distutils/msvccompiler.py  |  8 ++++----
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index 5ccff57a8133..636165bd52a2 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -1,16 +1,16 @@
 import os
-from distutils.msvccompiler import *
-from distutils.msvc9compiler import MSVCCompiler as distutils_MSVCCompiler
+import distutils.msvc9compiler
+from distutils.msvc9compiler import *
 
 
-class MSVCCompiler(distutils_MSVCCompiler):
+class MSVCCompiler(distutils.msvc9compiler.MSVCCompiler):
     def __init__(self, verbose=0, dry_run=0, force=0):
-        distutils_MSVCCompiler.__init__(self, verbose, dry_run, force)
+        distutils.msvc9compiler.MSVCCompiler.__init__(self, verbose, dry_run, force)
 
     def initialize(self, plat_name=None):
         environ_lib = os.getenv('lib')
         environ_include = os.getenv('include')
-        distutils_MSVCCompiler.initialize(self, plat_name)
+        distutils.msvc9compiler.MSVCCompiler.initialize(self, plat_name)
         if environ_lib is not None:
             os.environ['lib'] = environ_lib + os.environ['lib']
         if environ_include is not None:
@@ -18,5 +18,6 @@ def initialize(self, plat_name=None):
 
     def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
         ld_args.append('/MANIFEST')
-        distutils_MSVCCompiler.manifest_setup_ldargs(self, output_filename,
-                                                     build_temp, ld_args)
+        distutils.msvc9compiler.MSVCCompiler.manifest_setup_ldargs(self,
+                                                                   output_filename,
+                                                                   build_temp, ld_args)
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
index cb9b81a71056..0d28f6b9f674 100644
--- a/numpy/distutils/msvccompiler.py
+++ b/numpy/distutils/msvccompiler.py
@@ -1,16 +1,16 @@
 import os
+import distutils.msvccompiler
 from distutils.msvccompiler import *
-from distutils.msvccompiler import MSVCCompiler as distutils_MSVCCompiler
 
 
-class MSVCCompiler(distutils_MSVCCompiler):
+class MSVCCompiler(distutils.msvccompiler.MSVCCompiler):
     def __init__(self, verbose=0, dry_run=0, force=0):
-        distutils_MSVCCompiler.__init__(self, verbose, dry_run, force)
+        distutils.msvccompiler.MSVCCompiler.__init__(self, verbose, dry_run, force)
 
     def initialize(self, plat_name=None):
         environ_lib = os.getenv('lib')
         environ_include = os.getenv('include')
-        distutils_MSVCCompiler.initialize(self, plat_name)
+        distutils.msvccompiler.MSVCCompiler.initialize(self, plat_name)
         if environ_lib is not None:
             os.environ['lib'] = environ_lib + os.environ['lib']
         if environ_include is not None:

From a3c1ed6dbba1e23bcbe30040bfa3d63e91b2cdab Mon Sep 17 00:00:00 2001
From: jason king
Date: Wed, 16 Sep 2015 23:02:15 +1000
Subject: [PATCH 012/496] DOC: numpy.diff docstring changed as per
 recommendation on https://github.com/numpy/numpy/issues/5900

Slight change for cumsum doco as well, to match.
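
For reference, the behaviour being documented, taken from the `diff`
docstring examples:

    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])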
---
 numpy/core/fromnumeric.py  | 2 +-
 numpy/lib/function_base.py | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 10626fe9fd77..d60f9adb2b28 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2069,7 +2069,7 @@ def cumsum(a, axis=None, dtype=None, out=None):
     trapz : Integration of array values using the composite trapezoidal rule.
 
-    diff : Calculate the n-th order discrete difference along given axis.
+    diff : Calculate the n-th discrete difference along given axis.
 
     Notes
     -----
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 007ff42a45eb..3c941ca5bfd9 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1316,10 +1316,10 @@ def gradient(f, *varargs, **kwargs):
 
 def diff(a, n=1, axis=-1):
     """
-    Calculate the n-th order discrete difference along given axis.
+    Calculate the n-th discrete difference along given axis.
 
-    The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
-    the given axis, higher order differences are calculated by using `diff`
+    The first difference is given by ``out[n] = a[n+1] - a[n]`` along
+    the given axis, higher differences are calculated by using `diff`
     recursively.
 
     Parameters
@@ -1334,8 +1334,8 @@ def diff(a, n=1, axis=-1):
     Returns
     -------
     diff : ndarray
-        The `n` order differences. The shape of the output is the same as `a`
+        The n-th differences. The shape of the output is the same as `a`
         except along `axis` where the dimension is smaller by `n`.
 
     See Also
     --------

From 62e87ab001710bcafa54786a2b3d413f77398066 Mon Sep 17 00:00:00 2001
From: Nick Papior
Date: Thu, 17 Sep 2015 22:00:12 +0000
Subject: [PATCH 013/496] ENH: enabled extra_link_args in OpenBLAS segment

The extra_link_args option is sadly not intrinsically used for many parts
of the system_info code. This commit adds the linking properties stored
when using extra_link_args in the openblas section to bypass any
difficulties in the usage of OpenBLAS. This is especially helpful when
linking against external LAPACK libraries which require -lgfortran and
possibly -lm for correct linking.
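
For example, with a site.cfg along these lines (paths illustrative), the
extra_link_args are now also used for the OpenBLAS link checks:

    [openblas]
    libraries = openblas
    library_dirs = /opt/OpenBLAS/lib
    include_dirs = /opt/OpenBLAS/include
    extra_link_args = -lgfortran -lm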
---
 numpy/distutils/system_info.py | 15 +++++++++++++--
 site.cfg.example               |  2 +-
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 90c05329861f..be94c87047e3 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1703,6 +1703,10 @@ def calc_info(self):
         if info is None:
             return
 
+        # Add extra info for OpenBLAS
+        extra_info = self.calc_extra_info()
+        dict_append(info, **extra_info)
+
         if not self.check_embedded_lapack(info):
             return
 
@@ -1729,13 +1733,19 @@ def check_embedded_lapack(self, info):
         }"""
         src = os.path.join(tmpdir, 'source.c')
         out = os.path.join(tmpdir, 'a.out')
+        # Add the additional "extra" arguments
+        try:
+            extra_args = info['extra_link_args']
+        except KeyError:
+            extra_args = []
         try:
             with open(src, 'wt') as f:
                 f.write(s)
             obj = c.compile([src], output_dir=tmpdir)
             try:
                 c.link_executable(obj, out, libraries=info['libraries'],
-                                  library_dirs=info['library_dirs'])
+                                  library_dirs=info['library_dirs'],
+                                  extra_postargs=extra_args)
                 res = True
             except distutils.ccompiler.LinkError:
                 res = False
@@ -1752,7 +1762,8 @@ def check_embedded_lapack(self, info):
             obj = c.compile([src], output_dir=tmpdir)
             try:
                 c.link_executable(obj, out, libraries=info['libraries'],
-                                  library_dirs=info['library_dirs'])
+                                  library_dirs=info['library_dirs'],
+                                  extra_postargs=extra_args)
                 res = True
             except distutils.ccompiler.LinkError:
                 res = False
diff --git a/site.cfg.example b/site.cfg.example
index 1324a74d3715..64eedb7f9cd2 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -68,7 +68,7 @@
 #   extra_compile_args = -g -ftree-vectorize
 #
 #   extra_link_args
-#       Add additional arguments to when libraries/executables
+#       Add additional arguments when libraries/executables
 #       are linked.
 #       Simple variable with no parsing done.
 #       Provide a single line with all complete flags.

From fe857457d4f3fa268495cb962521769bd4ce1da0 Mon Sep 17 00:00:00 2001
From: Varun Nayyar
Date: Tue, 22 Sep 2015 19:38:34 +1000
Subject: [PATCH 014/496] ENH: Adding support to the range keyword for
 estimation of the optimal number of bins and associated tests

---
 numpy/lib/function_base.py            | 26 +++++++++++++++++++--
 numpy/lib/tests/test_function_base.py | 30 +++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 007ff42a45eb..30aefa0a8a73 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -76,14 +76,24 @@ def iterable(y):
         return 1
 
 
-def _hist_optim_numbins_estimator(a, estimator):
+def _hist_optim_numbins_estimator(a, estimator, data_range=None, data_weights=None):
     """
    A helper function to be called from histogram to deal with estimating optimal number of bins
 
+    a: np.array
+        The data with which to estimate the number of bins
+
     estimator: str
        If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function
        will choose the appropriate estimator and return its estimate for the optimal
        number of bins.
+ + data_range: tuple (min, max) + What range should the data to be binned be restricted to + + data_weights: + weights are not supported, must be left blank or None + """ assert isinstance(estimator, basestring) # private function should not be called otherwise @@ -91,6 +101,17 @@ def _hist_optim_numbins_estimator(a, estimator): if a.size == 0: return 1 + if data_weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + if data_range is not None: + mn, mx = data_range + keep = (a >= mn) + keep &= (a <= mx) + if not np.logical_and.reduce(keep): + a = a[keep] + def sturges(x): """ Sturges Estimator @@ -181,6 +202,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, If `bins` is a string from the list below, `histogram` will use the method chosen to calculate the optimal number of bins (see Notes for more detail on the estimators). For visualisation, we suggest using the 'auto' option. + Weighted data is unsupported in this mode. 'auto' Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance @@ -340,7 +362,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, 'max must be larger than min in range parameter.') if isinstance(bins, basestring): - bins = _hist_optim_numbins_estimator(a, bins) + bins = _hist_optim_numbins_estimator(a, bins, range, weights) # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5e758fb89595..326052f59f00 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1357,6 +1357,36 @@ def test_outlier(self): a, b = np.histogram(outlier_dataset, estimator) assert_equal(len(a), numbins) + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for consistency). + Adding in a 3rd mixture that will then be completely ignored. + All test values have been precomputed and the values shouldn't change + """ + # some basic sanity checking, with some fixed data. Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, 'auto': 7}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, 'auto': 10}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, 'auto': 17}} + + for testlen, expectedResults in basic_test.items(): + # create some sort of non uniform data to test with (2 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen/5 * 2) + x2 = np.linspace(1, 10, testlen/5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range = (-20, 20)) + assert_equal(len(a), numbins, + err_msg="For the {0} estimator with datasize of {1} ".format(estimator, testlen)) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) + class TestHistogramdd(TestCase): From fe857457d4f3fa268495cb962521769bd4ce1da0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 23 Sep 2015 14:03:37 -0600 Subject: [PATCH 015/496] BUG: PEP440 compliant versioning for development releases. The pavement script was not using the new '.dev0+' GIT_REVISION[:7] suffix for development releases. 
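
For illustration (the revision hash is made up), the change produces
PEP 440 compliant development versions:

    # before, not PEP 440 compliant:  1.x.0.dev-f16acvd
    # after:                          1.x.0.dev0+f16acvd
    FULLVERSION += '.dev0+' + GIT_REVISION[:7]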
--- pavement.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pavement.py b/pavement.py index ac857a492372..acceed0ade4a 100644 --- a/pavement.py +++ b/pavement.py @@ -89,7 +89,7 @@ GIT_REVISION = "Unknown" if not setup_py.ISRELEASED: - FULLVERSION += '.dev-' + GIT_REVISION[:7] + FULLVERSION += '.dev0+' + GIT_REVISION[:7] finally: sys.path.pop(0) From 32a6b4547f8f94e481f7d1a6e2656c9fa9c6c62e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 23 Sep 2015 14:52:40 -0600 Subject: [PATCH 016/496] BUG: Add cblasfuncs.c and python_xerbla.c to MANIFEST.in Those files in numpy/core/src/multiarray were only included in source distributions when HAVE_CBLAS was defined, making the distribution contents depend on the local configuration. Closes #6343. --- MANIFEST.in | 3 +++ numpy/core/setup.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 976e283ed301..6f4826478d88 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,6 +12,9 @@ include numpy/random/mtrand/generate_mtrand_c.py recursive-include numpy/random/mtrand *.pyx *.pxd # Add build support that should go in sdist, but not go in bdist/be installed recursive-include numpy/_build_utils * +# Add sdist files whose use depends on local configuration. +include numpy/core/src/multiarray/cblasfuncs.c +include numpy/core/src/multiarray/python_xerbla.c # Adding scons build related files not found by distutils recursive-include numpy/core/code_generators *.py *.txt recursive-include numpy/core *.in *.h diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 6d9926d89e97..361bf90821a6 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -736,6 +736,7 @@ def generate_multiarray_templated_sources(ext, build_dir): join('src', 'multiarray', 'array_assign.h'), join('src', 'multiarray', 'buffer.h'), join('src', 'multiarray', 'calculation.h'), + join('src', 'multiarray', 'cblasfuncs.h'), join('src', 'multiarray', 'common.h'), join('src', 'multiarray', 'convert_datatype.h'), join('src', 'multiarray', 'convert.h'), @@ -839,6 +840,8 @@ def generate_multiarray_templated_sources(ext, build_dir): blas_info = get_info('blas_opt', 0) if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []): extra_info = blas_info + # These files are also in MANIFEST.in so that they are always in + # the source distribution independently of HAVE_CBLAS. multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'), join('src', 'multiarray', 'python_xerbla.c'), ]) From 935c634e5665647187af8545cb94394f9ac9da82 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 23 Sep 2015 19:38:34 -0600 Subject: [PATCH 017/496] TST: Update test__version.py. Added tests for the x.x.dev0+1234567 form together with alpha and beta versions of same. Updates lifted from scipy. 
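
A quick sketch of the comparison semantics the new tests cover:

    >>> from numpy.lib import NumpyVersion
    >>> NumpyVersion('1.9.0.dev0+f16acvda') < '1.9.0'
    True
    >>> NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111'
    True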
---
 numpy/lib/tests/test__version.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index bbafe68eb355..993c9d507091 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -48,6 +48,19 @@ def test_dev_a_b_rc_mixed():
     assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
 
 
+def test_dev0_version():
+    assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+        assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
+
+    assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
+
+
+def test_dev0_a_b_rc_mixed():
+    assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
+    assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
+
+
 def test_raises():
     for ver in ['1.9', '1,9.0', '1.7.x']:
         assert_raises(ValueError, NumpyVersion, ver)

From 763f49df46678aadd1cfebe5d8497ad0b089fe12 Mon Sep 17 00:00:00 2001
From: "Nathaniel J. Smith"
Date: Wed, 23 Sep 2015 20:40:03 -0700
Subject: [PATCH 018/496] DEV: Draft governance document + list of people and
 positions

This is definitely *not* the final version -- it's the version originally
posted to the mailing list, reformatted as ReST. I'll make further changes
on top of this as further commits, in order to preserve the historical
record.
---
 doc/source/dev/governance/governance.rst | 405 +++++++++++++++++++++++
 doc/source/dev/governance/index.rst      |   9 +
 doc/source/dev/governance/people.rst     |  49 +++
 doc/source/dev/index.rst                 |   1 +
 4 files changed, 464 insertions(+)
 create mode 100644 doc/source/dev/governance/governance.rst
 create mode 100644 doc/source/dev/governance/index.rst
 create mode 100644 doc/source/dev/governance/people.rst

diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst
new file mode 100644
index 000000000000..8f06129b5ae7
--- /dev/null
+++ b/doc/source/dev/governance/governance.rst
@@ -0,0 +1,405 @@
+================================================================
+ NumPy project governance and decision-making
+================================================================
+
+[DRAFT, not yet accepted]
+
+The purpose of this document is to formalize the governance process
+used by the NumPy project in both ordinary and extraordinary
+situations, and to clarify how decisions are made and how the various
+elements of our community interact, including the relationship between
+open source collaborative development and work that may be funded by
+for-profit or non-profit entities.
+
+Summary
+=======
+
+NumPy is a community-owned and community-run project. To the maximum
+extent possible, decisions about project direction are made by community
+consensus (but note that "consensus" here has a somewhat technical
+meaning that might not match everyone's expectations -- see below). Some
+members of the community additionally contribute by serving on the NumPy
+steering council, where they are responsible for facilitating the
+establishment of community consensus, for stewarding project resources,
+and -- in extreme cases -- for making project decisions if the normal
+community-based process breaks down.
+
+The Project
+===========
+
+The NumPy Project (The Project) is an open source software project
+affiliated with the 501(c)3 NumFOCUS Foundation.
The goal of The Project +is to develop open source software for array-based computing in Python, +and in particular the ``numpy`` package, along with related software +such as ``f2py`` and the NumPy Sphinx extensions. The Software developed +by The Project is released under the BSD (or similar) open source +license, developed openly and hosted on public GitHub repositories under +the ``numpy`` GitHub organization. + +The Project is developed by a team of distributed developers, called +Contributors. Contributors are individuals who have contributed code, +documentation, designs or other work to the Project. Anyone can be a +Contributor. Contributors can be affiliated with any legal entity or +none. Contributors participate in the project by submitting, reviewing +and discussing GitHub Pull Requests and Issues and participating in open +and public Project discussions on GitHub, mailing lists, and other +channels. The foundation of Project participation is openness and +transparency. + +Here is a list of the current Contributors to the main NumPy repository: + +https://github.com/numpy/numpy/graphs/contributors + +The Project Community consists of all Contributors and Users of the +Project. Contributors work on behalf of and are responsible to the +larger Project Community and we strive to keep the barrier between +Contributors and Users as low as possible. + +The Project is formally affiliated with the 501(c)3 NumFOCUS Foundation +(http://numfocus.org), which serves as its fiscal sponsor, may hold +project trademarks and other intellectual property, helps manage project +donations and acts as a parent legal entity. NumFOCUS is the only legal +entity that has a formal relationship with the project (see +Institutional Partners section below). + +Governance +========== + +This section describes the governance and leadership model of The +Project. + +The foundations of Project governance are: + +- Openness & Transparency +- Active Contribution +- Institutional Neutrality + +Consensus-based decision making by the community +------------------------------------------------ + +Normally, all project decisions will be made by consensus of all +interested Contributors. The primary goal of this approach is to ensure +that the people who are most affected by and involved in any given +change can contribute their knowledge in the confidence that their +voices will be heard, because thoughtful review from a broad community +is the best mechanism we know of for creating high-quality software. + +The mechanism we use to accomplish this goal may be unfamiliar for those +who are not experienced with the cultural norms around free/open-source +software development. We provide a summary here, and highly recommend +that all Contributors additionally read `Chapter 4: Social and Political +Infrastructure `__ +of Karl Fogel's classic *Producing Open Source Software*, and in +particular the section on `Consensus-based +Democracy `__, +for a more detailed discussion. + +In this context, consensus does *not* require: + +- that we wait to solicit everybody's opinion on every change, +- that we ever hold a vote on anything, +- or that everybody is happy or agrees with every decision. + +For us, what consensus means is that we entrust *everyone* with the +right to veto any change if they feel it necessary. While this may sound +like a recipe for obstruction and pain, this is not what happens. 
+Instead, we find that most people take this responsibility seriously,
+and only invoke their veto when they judge that a serious problem is
+being ignored, and that their veto is necessary to protect the project.
+And in practice, it turns out that such vetoes are almost never formally
+invoked, because their mere possibility ensures that Contributors are
+motivated from the start to find some solution that everyone can live
+with -- thus accomplishing our goal of ensuring that all interested
+perspectives are taken into account.
+
+How do we know when consensus has been achieved? In principle, this is
+rather difficult, since consensus is defined by the absence of vetoes,
+which requires us to somehow prove a negative. In practice, we use a
+combination of our best judgement (e.g., a simple and uncontroversial
+bug fix posted on GitHub and reviewed by a core developer is probably
+fine) and best efforts (e.g., all substantive API changes must be posted
+to the mailing list in order to give the broader community a chance to
+catch any problems and suggest improvements; we assume that anyone who
+cares enough about NumPy to invoke their veto right should be on the
+mailing list). If no-one bothers to comment on the mailing list after a
+few days, then it's probably fine. And worst case, if a change is more
+controversial than expected, or a crucial critique is delayed because
+someone was on vacation, then it's no big deal: we apologize for
+misjudging the situation, `back up, and sort things
+out `__.
+
+If one does need to invoke a formal veto, then it should consist of:
+
+- an unambiguous statement that a veto is being invoked,
+- an explanation of why it is being invoked, and
+- a description of what conditions (if any) would convince the vetoer
+  to withdraw their veto.
+
+If all proposals for resolving some issue are vetoed, then the status
+quo wins by default.
+
+In the worst case, if a Contributor is genuinely misusing their veto in
+an obstructive fashion to the detriment of the project, then they can be
+ejected from the project by consensus of the Steering Council -- see
+below.
+
+Steering Council
+----------------
+
+The Project will have a Steering Council that consists of Project
+Contributors who have produced contributions that are substantial in
+quality and quantity, and sustained over at least one year. The overall
+role of the Council is to ensure, with input from the Community, the
+long-term well-being of the project, both technically and as a
+community.
+
+During the everyday project activities, council members participate in
+all discussions, code review and other project activities as peers with
+all other Contributors and the Community. In these everyday activities,
+Council Members do not have any special power or privilege through their
+membership on the Council. However, it is expected that because of the
+quality and quantity of their contributions and their expert knowledge
+of the Project Software and Services that Council Members will provide
+useful guidance, both technical and in terms of project direction, to
+potentially less experienced contributors.
+
+The Steering Council and its Members play a special role in certain
+situations. In particular, the Council may, if necessary:
+
+- Make decisions about the overall scope, vision and direction of the
+  project.
+- Make decisions about strategic collaborations with other
+  organizations or individuals.
+- Make decisions about specific technical issues, features, bugs and
+  pull requests.
They are the primary mechanism of guiding the code
+  review process and merging pull requests.
+- Make decisions about the Services that are run by The Project and
+  manage those Services for the benefit of the Project and Community.
+- Update policy documents such as this one.
+- Make decisions when regular community discussion doesn’t produce
+  consensus on an issue in a reasonable time frame.
+
+However, the Council's primary responsibility is to facilitate the
+ordinary community-based decision making procedure described above. If
+we ever have to step in and formally override the community for the
+health of the Project, then we will do so, but we will consider reaching
+this point to indicate a failure in our leadership.
+
+Council decision making
+~~~~~~~~~~~~~~~~~~~~~~~
+
+If it becomes necessary for the Steering Council to produce a formal
+decision, then they will use a form of the `Apache Foundation voting
+process `__. This is a
+formalized version of consensus, in which +1 votes indicate agreement,
+-1 votes are vetoes (and must be accompanied with a rationale, as
+above), and one can also vote fractionally (e.g. -0.5, +0.5) if one
+wishes to express an opinion without registering a full veto. These
+numeric votes are also often used informally as a way of getting a
+general sense of people's feelings on some issue, and should not
+normally be taken as formal votes. A formal vote only occurs if
+explicitly declared, and if this does occur then the vote should be held
+open for long enough to give all interested Council Members a chance to
+respond -- at least one week.
+
+In practice, we anticipate that for most Steering Council decisions
+(e.g., voting in new members) a more informal process will suffice.
+
+Council membership
+~~~~~~~~~~~~~~~~~~
+
+To become eligible to join the Steering Council, an individual must be a
+Project Contributor who has produced contributions that are substantial
+in quality and quantity, and sustained over at least one year. Potential
+Council Members are nominated by existing Council members and voted upon
+by the existing Council after asking if the potential Member is
+interested and willing to serve in that capacity. The Council will be
+initially formed from the set of existing Core Developers who, as of
+late 2015, have been significantly active over the last year.
+
+When considering potential Members, the Council will look at candidates
+with a comprehensive view of their contributions. This will include but
+is not limited to code, code review, infrastructure work, mailing list
+and chat participation, community help/building, education and outreach,
+design work, etc. We are deliberately not setting arbitrary quantitative
+metrics (like “100 commits in this repo”) to avoid encouraging behavior
+that plays to the metrics rather than the project’s overall well-being.
+We want to encourage a diverse array of backgrounds, viewpoints and
+talents in our team, which is why we explicitly do not define code as
+the sole metric on which council membership will be evaluated.
+
+If a Council member becomes inactive in the project for a period of one
+year, they will be considered for removal from the Council. Before
+removal, the inactive Member will be approached to see if they plan on
+returning to active participation. If not, they will be removed
+immediately upon a Council vote. If they plan on returning to active
+participation soon, they will be given a grace period of one year.
If
+they don’t return to active participation within that time period they
+will be removed by vote of the Council without further grace period. All
+former Council members can be considered for membership again at any
+time in the future, like any other Project Contributor. Retired Council
+members will be listed on the project website, acknowledging the period
+during which they were active in the Council.
+
+The Council reserves the right to eject current Members, if they are
+deemed to be actively harmful to the project’s well-being, and attempts
+at communication and conflict resolution have failed. This requires the
+consensus of the remaining Members.
+
+[We also have to decide on the initial membership for the Council. While
+the above text takes pains to distinguish between "committer" and
+"Council Member", in the past we've pretty much treated them as the
+same. So to keep things simple and deterministic, I propose that we seed
+the Council with everyone who has reviewed/merged a pull request since
+Jan 1, 2014, and move those who haven't used their commit bit in >1.5
+years to the emeritus list. Based on the output of
+
+git log --grep="^Merge pull request" --since 2014-01-01 \| grep Author:
+\| sort -u
+
+I believe this would give us an initial Steering Council of: Sebastian
+Berg, Jaime Fernández del Río, Ralf Gommers, Alex Griffing, Charles
+Harris, Nathaniel Smith, Julian Taylor, and Pauli Virtanen (assuming
+everyone on that list is interested/willing to serve).]
+
+Conflict of interest
+~~~~~~~~~~~~~~~~~~~~
+
+It is expected that the Council Members will be employed at a wide range
+of companies, universities and non-profit organizations. Because of
+this, it is possible that Members will have conflicts of interest. Such
+conflicts of interest include, but are not limited to:
+
+- Financial interests, such as investments, employment or contracting
+  work, outside of The Project that may influence their work on The
+  Project.
+- Access to proprietary information of their employer that could
+  potentially leak into their work with the Project.
+
+All members of the Council shall disclose to the rest of the Council any
+conflict of interest they may have. Members with a conflict of interest
+in a particular issue may participate in Council discussions on that
+issue, but must recuse themselves from voting on the issue.
+
+Private communications of the Council
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Unless specifically required, all Council discussions and activities
+will be public and done in collaboration and discussion with the Project
+Contributors and Community. The Council will have a private mailing list
+that will be used sparingly and only when a specific matter requires
+privacy. When private communications and decisions are needed, the
+Council will do its best to summarize those to the Community after
+eliding personal/private/sensitive information that should not be posted
+to the public internet.
+
+Subcommittees
+~~~~~~~~~~~~~
+
+The Council can create subcommittees that provide leadership and
+guidance for specific aspects of the project. Like the Council as a
+whole, subcommittees should conduct their business in an open and public
+manner unless privacy is specifically called for. Private subcommittee
+communications should happen on the main private mailing list of the
+Council unless specifically called for.
+
+NumFOCUS Subcommittee
+~~~~~~~~~~~~~~~~~~~~~
+
+The Council will maintain one narrowly focused subcommittee to manage
+its interactions with NumFOCUS.
+
+- The NumFOCUS Subcommittee is comprised of 5 persons who manage
+  project funding that comes through NumFOCUS. It is expected that
+  these funds will be spent in a manner that is consistent with the
+  non-profit mission of NumFOCUS and the direction of the Project as
+  determined by the full Council.
+- This Subcommittee shall NOT make decisions about the direction, scope
+  or technical direction of the Project.
+- This Subcommittee will have 5 members, 4 of whom will be current
+  Council Members and 1 of whom will be external to the Steering
+  Council. No more than 2 Subcommittee Members can report to one person
+  through employment or contracting work (including the reportee, i.e.
+  the reportee + 1 is the max). This avoids effective majorities
+  resting on one person.
+
+[Initially, the NumFOCUS subcommittee will consist of: Chuck Harris,
+Ralf Gommers, Nathaniel Smith, and ???? as internal members, and Thomas
+Caswell as external member.]
+
+Institutional Partners and Funding
+==================================
+
+The Steering Council are the primary leadership for the project. No
+outside institution, individual or legal entity has the ability to own,
+control, usurp or influence the project other than by participating in
+the Project as Contributors and Council Members. However, because
+institutions can be an important funding mechanism for the project, it
+is important to formally acknowledge institutional participation in the
+project. These are Institutional Partners.
+
+An Institutional Contributor is any individual Project Contributor who
+contributes to the project as part of their official duties at an
+Institutional Partner. Likewise, an Institutional Council Member is any
+Project Steering Council Member who contributes to the project as part
+of their official duties at an Institutional Partner.
+
+With these definitions, an Institutional Partner is any recognized legal
+entity in the United States or elsewhere that employs at least 1
+Institutional Contributor or Institutional Council Member. Institutional
+Partners can be for-profit or non-profit entities.
+
+Institutions become eligible to become an Institutional Partner by
+employing individuals who actively contribute to The Project as part of
+their official duties. To state this another way, the only way for a
+Partner to influence the project is by actively contributing to the open
+development of the project, in equal terms to any other member of the
+community of Contributors and Council Members. Merely using Project
+Software in institutional context does not allow an entity to become an
+Institutional Partner. Financial gifts do not enable an entity to become
+an Institutional Partner. Once an institution becomes eligible for
+Institutional Partnership, the Steering Council must nominate and
+approve the Partnership.
+
+If an existing Institutional Partner no longer has a contributing
+employee, they will be given a 1 year grace period for remaining
+employees to begin contributing.
+
+An Institutional Partner is free to pursue funding for their work on The
+Project through any legal means. This could involve a non-profit
+organization raising money from private foundations and donors or a
+for-profit company building proprietary products and services that
+leverage Project Software and Services.
Funding acquired by +Institutional Partners to work on The Project is called Institutional +Funding. However, no funding obtained by an Institutional Partner can +override the Steering Council. If a Partner has funding to do NumPy work +and the Council decides to not pursue that work as a project, the +Partner is free to pursue it on their own. However in this situation, +that part of the Partner’s work will not be under the NumPy umbrella and +cannot use the Project trademarks in a way that suggests a formal +relationship. + +Institutional Partner benefits are: + +- Acknowledgement on the NumPy websites, in talks and T-shirts. +- Ability to acknowledge their own funding sources on the NumPy + websites, in talks and T-shirts. +- Ability to influence the project through the participation of their + Council Member. +- Council Members invited to NumPy Developer Meetings. + +Existing Institutional Partners: + +- UC Berkeley (Nathaniel Smith) + +Document history +================ + +[TODO: add a link to the git log for this file] + +Acknowledgements +================ + +Substantial portions of this document were [STRIKEOUT:inspired] stolen +wholesale from the Jupyter/IPython project's governance document, `IPEP +29 `__. diff --git a/doc/source/dev/governance/index.rst b/doc/source/dev/governance/index.rst new file mode 100644 index 000000000000..9a611a2febac --- /dev/null +++ b/doc/source/dev/governance/index.rst @@ -0,0 +1,9 @@ +##################### +Contributing to Numpy +##################### + +.. toctree:: + :maxdepth: 3 + + governance + people diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst new file mode 100644 index 000000000000..6c6ccf305459 --- /dev/null +++ b/doc/source/dev/governance/people.rst @@ -0,0 +1,49 @@ +Current steering council and institutional partners +=================================================== + +[DRAFT, not yet accepted] + +Steering council +---------------- + +* Sebastian Berg + +* Jaime Fernández del Río + +* Ralf Gommers + +* Alex Griffing + +* Charles Harris + +* Nathaniel Smith + +* Julian Taylor + +* Pauli Virtanen + + +Emeritus members +---------------- + +* Travis Oliphant - Project Founder / Emeritus Leader (served: 2001(??)-2012) + + +NumFOCUS Subcommittee +--------------------- + +* Chuck Harris + +* Ralf Gommers + +* Jaime Fernández del Río + +* Nathaniel Smith + +* External member: Thomas Caswell + + +Institutional Partners +---------------------- + +* UC Berkeley (Nathaniel Smith) diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index b0d0ec483046..cb71a3e5cb6a 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -7,5 +7,6 @@ Contributing to Numpy gitwash/index development_environment + governance/index For core developers: see :ref:`development-workflow`. From 02df76120373575c4f33af3dff856d9f5a2c3e24 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 23 Sep 2015 21:39:41 -0700 Subject: [PATCH 019/496] DEV: Governance: delete redundant bits The NumFOCUS subcommittee and Institutional Partners are now listed in a separate file, so delete them from here. We will eventually want to clean up the list of initial members of the steering council, but the details are still under discussion on the mailing list, so I left that section alone for now. 
--- doc/source/dev/governance/governance.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 8f06129b5ae7..6ca03d37bf7a 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -323,10 +323,6 @@ its interactions with NumFOCUS. the reportee + 1 is the max). This avoids effective majorities resting on one person. -[Initially, the NumFOCUS subcommittee will consist of: Chuck Harris, -Ralf Gommers, Nathaniel Smith, and ???? as internal members, and Thomas -Caswell as external member.] - Institutional Partners and Funding ================================== @@ -388,10 +384,6 @@ Institutional Partner benefits are: Council Member. - Council Members invited to NumPy Developer Meetings. -Existing Institutional Partners: - -- UC Berkeley (Nathaniel Smith) - Document history ================ From 81d23fd51ff412f91d5a87e9b5f80e117d837672 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 23 Sep 2015 21:42:18 -0700 Subject: [PATCH 020/496] DEV: Governance: remove link to inaccurate github contributors page See Travis's email here: https://mail.scipy.org/pipermail/numpy-discussion/2015-September/073712.html --- doc/source/dev/governance/governance.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 6ca03d37bf7a..a0d97e274669 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -46,10 +46,6 @@ and public Project discussions on GitHub, mailing lists, and other channels. The foundation of Project participation is openness and transparency. -Here is a list of the current Contributors to the main NumPy repository: - -https://github.com/numpy/numpy/graphs/contributors - The Project Community consists of all Contributors and Users of the Project. Contributors work on behalf of and are responsible to the larger Project Community and we strive to keep the barrier between From 1dfb8d782426eaccdc149b2ffc1b9b83b4714637 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 23 Sep 2015 21:56:18 -0700 Subject: [PATCH 021/496] DEV: Governance: update link to Jupyter/IPython governance doc --- doc/source/dev/governance/governance.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index a0d97e274669..6134e1bfd9c2 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -389,5 +389,5 @@ Acknowledgements ================ Substantial portions of this document were [STRIKEOUT:inspired] stolen -wholesale from the Jupyter/IPython project's governance document, `IPEP -29 `__. +wholesale from the `Jupyter/IPython project's governance document +`_. From 1bb5b29de7d61f160b14d6d5e32aa622793a92a4 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 23 Sep 2015 21:56:51 -0700 Subject: [PATCH 022/496] DEV: Governance: fix pandoc weirdness I used pandoc to convert Markdown to ReST, and for some reason it marked all the links with two underscores instead of the more conventional single underscore. I'm not sure why. 
--- doc/source/dev/governance/governance.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 6134e1bfd9c2..e558eb29c6df 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -84,10 +84,10 @@ The mechanism we use to accomplish this goal may be unfamiliar for those who are not experienced with the cultural norms around free/open-source software development. We provide a summary here, and highly recommend that all Contributors additionally read `Chapter 4: Social and Political -Infrastructure `__ +Infrastructure `_ of Karl Fogel's classic *Producing Open Source Software*, and in particular the section on `Consensus-based -Democracy `__, +Democracy `_, for a more detailed discussion. In this context, consensus does *not* require: @@ -122,7 +122,7 @@ few days, then it's probably fine. And worst case, if a change is more controversial than expected, or a crucial critique is delayed because someone was on vacation, then it's no big deal: we apologize for misjudging the situation, `back up, and sort things -out `__. +out `_. If one does need to invoke a formal veto, then it should consist of: @@ -186,7 +186,7 @@ Council decision making If it becomes necessary for the Steering Council to produce a formal decision, then they will use a form of the `Apache Foundation voting -process `__. This is a +process `_. This is a formalized version of consensus, in which +1 votes indicate agreement, -1 votes are vetoes (and must be accompanied with a rationale, as above), and one can also vote fractionally (e.g. -0.5, +0.5) if one From b354c005ccfe961e793ec4567e9cf6bbdca1b5f2 Mon Sep 17 00:00:00 2001 From: carlkl Date: Thu, 24 Sep 2015 23:46:23 +0200 Subject: [PATCH 023/496] BLD: mingwpy fixes --- numpy/core/setup.py | 5 ++++- numpy/core/src/private/npy_config.h | 5 +++++ numpy/distutils/fcompiler/gnu.py | 13 +++++++------ numpy/distutils/mingw32ccompiler.py | 19 ++++++++++--------- numpy/distutils/misc_util.py | 17 +++++++++++++++-- numpy/distutils/system_info.py | 18 ------------------ 6 files changed, 41 insertions(+), 36 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 6d9926d89e97..68d0a56e8576 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -103,6 +103,8 @@ def win32_checks(deflist): deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') def check_math_capabilities(config, moredefs, mathlibs): + from numpy.distutils.misc_util import mingw32 + def check_func(func_name): return config.check_func(func_name, libraries=mathlibs, decl=True, call=True) @@ -172,7 +174,8 @@ def check_funcs(funcs_name): # C99 functions: float and long double versions check_funcs(C99_FUNCS_SINGLE) - check_funcs(C99_FUNCS_EXTENDED) + if not mingw32(): + check_funcs(C99_FUNCS_EXTENDED) def check_complex(config, mathlibs): priv = [] diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index fa20eb4f38f0..c81f06461252 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -61,6 +61,11 @@ #endif +/* Disable broken mingw-w64 hypot function */ +#if defined(__MINGW32__) +#undef HAVE_HYPOT +#endif + /* Intel C for Windows uses POW for 64 bits longdouble*/ #if defined(_MSC_VER) && defined(__INTEL_COMPILER) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index a7fd3a77f747..9acbe537d9e6 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ 
b/numpy/distutils/fcompiler/gnu.py
@@ -24,7 +24,6 @@ def is_win32():
     return sys.platform == "win32" and platform.architecture()[0] == "32bit"
 
 if is_win64():
-    #_EXTRAFLAGS = ["-fno-leading-underscore"]
     _EXTRAFLAGS = []
 else:
     _EXTRAFLAGS = []
@@ -215,10 +214,12 @@ def get_flags_opt(self):
                 # use -mincoming-stack-boundary=2
                 # due to the change to 16 byte stack alignment since GCC 4.6
                 # but 32 bit Windows ABI defines 4 bytes stack alignment
-                opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2 '
-                       '-mincoming-stack-boundary=2']
+                opt = ['-O2 -march=pentium4 -mtune=generic -mfpmath=sse -msse2'
+                       ' -mlong-double-64 -mincoming-stack-boundary=2'
+                       ' -ffpe-summary=invalid,zero']
             else:
-                opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2']
+                opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2'
+                       ' -mlong-double-64 -ffpe-summary=invalid,zero']
         else:
             opt = ['-O2']
 
@@ -270,11 +271,11 @@ def version_match(self, version_string):
         'version_cmd'  : ["<F90>", "-dumpversion"],
         'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
                           "-fno-second-underscore"] + _EXTRAFLAGS,
-        'compiler_f90' : [None, "-Wall", "-g",
+        'compiler_f90' : [None, "-Wall",
                           "-fno-second-underscore"] + _EXTRAFLAGS,
         'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
                           "-fno-second-underscore"] + _EXTRAFLAGS,
-        'linker_so'    : ["<F90>", "-Wall", "-g"],
+        'linker_so'    : ["<F90>", "-Wall"],
         'archiver'     : ["ar", "-cr"],
         'ranlib'       : ["ranlib"],
         'linker_exe'   : [None, "-Wall"]
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index f72c3bbbbfb5..f1220b36af2b 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -95,9 +95,10 @@ def __init__ (self,
         # Before build with MinGW-W64 generate the python import library
         # with gendef and dlltool according to the MingW-W64 FAQ.
         # Use the MinGW-W64 provided msvc runtime import libraries.
+        # The mingwpy package deploys its own import libraries.
         # Don't call build_import_library() and build_msvcr_library.
-        if 'MinGW-W64' not in str(out_string):
+        if 'MinGW-W64' not in str(out_string) and 'mingwpy' not in str(out_string):
 
             # **changes: eric jones 4/11/01
             # 1. Check for import library on Windows.
Build if it doesn't
@@ -131,10 +132,10 @@
         else:
             # gcc-4 series releases do not support -mno-cygwin option
             self.set_executables(
-                compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
-                         ' -O2 -msse2 -Wall',
-                compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
-                            ' -O2 -msse2 -Wall -Wstrict-prototypes',
+                compiler='gcc -O2 -march=x86-64 -mtune=generic -DMS_WIN64'
+                         ' -msse2 -mlong-double-64 -Wall',
+                compiler_so='gcc -O2 -march=x86-64 -mtune=generic -DMS_WIN64'
+                            ' -msse2 -mlong-double-64 -Wall -Wstrict-prototypes',
                 linker_exe='gcc',
                 linker_so='gcc -shared -Wl,-gc-sections -Wl,-s')
         else:
@@ -158,11 +159,11 @@ def __init__ (self,
                 # build needs '-mincoming-stack-boundary=2' due to ABI
                 # incompatibility to Win32 ABI
                 self.set_executables(
-                    compiler='gcc -O2 -march=core2 -mtune=generic'
-                             ' -mfpmath=sse -msse2'
+                    compiler='gcc -O2 -march=pentium4 -mtune=generic'
+                             ' -mfpmath=sse -msse2 -mlong-double-64'
                              ' -mincoming-stack-boundary=2 -Wall',
-                    compiler_so='gcc -O2 -march=core2 -mtune=generic'
-                                ' -mfpmath=sse -msse2'
+                    compiler_so='gcc -O2 -march=pentium4 -mtune=generic'
+                                ' -mfpmath=sse -msse2 -mlong-double-64'
                                 ' -mincoming-stack-boundary=2 -Wall'
                                 ' -Wstrict-prototypes',
                     linker_exe='g++ ',
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 75d864c5a7bc..f5ef1f9b9838 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -381,9 +381,22 @@ def mingw32():
     """Return true when using mingw32 environment.
     """
     if sys.platform=='win32':
-        if os.environ.get('OSTYPE', '')=='msys':
+        # mingw32 compiler configured in %USERPROFILE%\pydistutils.cfg
+        # or distutils\distutils.cfg
+        from distutils.dist import Distribution
+        _dist = Distribution()
+        _dist.parse_config_files()
+        _bld = _dist.get_option_dict('build')
+        if _bld and 'mingw32' in _bld.get('compiler', ''):
             return True
-        if os.environ.get('MSYSTEM', '')=='MINGW32':
+        # parse setup.py command line: --compiler=mingw32 or -c mingw32
+        elif any('mingw32' in _i for _i in sys.argv) and \
+                any('setup.py' in _i for _i in sys.argv):
+            return True
+        # using msys or msys2 shell
+        elif os.environ.get('OSTYPE', '')=='msys':
+            return True
+        elif os.environ.get('MSYSTEM', '') in ('MINGW32', 'MINGW64'):
             return True
     return False
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 9dd48e2dccef..0da13a7df2b9 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1751,24 +1751,6 @@ def check_embedded_lapack(self, info):
             res = False
         finally:
             shutil.rmtree(tmpdir)
-        if sys.platform == 'win32' and not res:
-            c = distutils.ccompiler.new_compiler(compiler='mingw32')
-            tmpdir = tempfile.mkdtemp()
-            src = os.path.join(tmpdir, 'source.c')
-            out = os.path.join(tmpdir, 'a.out')
-            try:
-                with open(src, 'wt') as f:
-                    f.write(s)
-                obj = c.compile([src], output_dir=tmpdir)
-                try:
-                    c.link_executable(obj, out, libraries=info['libraries'],
-                                      library_dirs=info['library_dirs'],
-                                      extra_postargs=extra_args)
-                    res = True
-                except distutils.ccompiler.LinkError:
-                    res = False
-            finally:
-                shutil.rmtree(tmpdir)
         return res

From 4a9ad17e58a042931798c32b39b137f52abc3aed Mon Sep 17 00:00:00 2001
From: Jonathan Helmus
Date: Thu, 24 Sep 2015 20:35:15 -0500
Subject: [PATCH 024/496] BUG: numpy.ma functions can be called with only
 keyword arguments

numpy.ma.empty, zeros, ones, etc can be called using only keyword
arguments without a positional argument. Previously a single positional
argument was required.
For example: np.ma.zeros(shape=(10, ))

closes #6106
---
 numpy/ma/core.py            |  4 ++--
 numpy/ma/tests/test_core.py | 12 ++++++++++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index ca6698492696..61f0c12a8994 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7536,7 +7536,7 @@ def getdoc(self):
             doc = sig + doc
         return doc
 
-    def __call__(self, a, *args, **params):
+    def __call__(self, *args, **params):
         # Find the common parameters to the call and the definition
         _extras = self._extras
         common_params = set(params).intersection(_extras)
@@ -7544,7 +7544,7 @@ def __call__(self, a, *args, **params):
         for p in common_params:
             _extras[p] = params.pop(p)
         # Get the result
-        result = self._func.__call__(a, *args, **params).view(MaskedArray)
+        result = self._func.__call__(*args, **params).view(MaskedArray)
         if "fill_value" in common_params:
             result.fill_value = _extras.get("fill_value", None)
         if "hardmask" in common_params:
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index d2b984084d15..aa6ce5db9ffe 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1670,6 +1670,18 @@ def test_fillvalue_as_arguments(self):
         a = identity(3, fill_value=0., dtype=complex)
         assert_equal(a.fill_value, 0.)
 
+    def test_shape_argument(self):
+        # Test that shape can be provided as an argument
+        # GH issue 6106
+        a = empty(shape=(3, ))
+        assert_equal(a.shape, (3, ))
+
+        a = ones(shape=(3, ), dtype=float)
+        assert_equal(a.shape, (3, ))
+
+        a = zeros(shape=(3, ), dtype=complex)
+        assert_equal(a.shape, (3, ))
+
     def test_fillvalue_in_view(self):
         # Test the behavior of fill_value in view
 
From 8863ee9a24e866f79653f57154d431246a0e9079 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Fri, 25 Sep 2015 17:41:51 +0200
Subject: [PATCH 025/496] BUG: Add void field at end of dtype.descr to match
 itemsize

dtype.descr returns void fields to explain the padding part of the
dtype. The last void field for the itemsize itself was however not
included.

Closes gh-6359
---
 numpy/core/_internal.py        |  4 ++++
 numpy/core/tests/test_dtype.py | 15 ++++++++++++++-
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 879f4a224541..81f5be4ada6f 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -121,6 +121,10 @@ def _array_descr(descriptor):
             offset += field[0].itemsize
             result.append(tup)
 
+    if descriptor.itemsize > offset:
+        num = descriptor.itemsize - offset
+        result.append(('', '|V%d' % num))
+
     return result
 
 # Build a new array from the information in a pickle.
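For illustration, a sketch of the fixed behaviour using the padded
dtype from gh-6359 (the ``<`` byte order in the output assumes a
little-endian platform):

>>> dt = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
...                'offsets': [0, 8], 'itemsize': 16})
>>> dt.descr  # the last ('', '|V4') is the new trailing void field
[('A', '<f4'), ('', '|V4'), ('B', '<f4'), ('', '|V4')]
>>> np.dtype(dt.descr).itemsize  # descr now round-trips the full itemsize
16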
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 496664622e40..29f2ee7bdd6a 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -535,7 +535,7 @@ def test_empty_string_to_object(self): # Pull request #4722 np.array(["", ""]).astype(object) -class TestDtypeAttributeDeletion(object): +class TestDtypeAttributeDeletion(TestCase): def test_dtype_non_writable_attributes_deletion(self): dt = np.dtype(np.double) @@ -552,6 +552,19 @@ def test_dtype_writable_attributes_deletion(self): for s in attr: assert_raises(AttributeError, delattr, dt, s) + +class TestDtypeAttributes(TestCase): + def test_descr_has_trailing_void(self): + # see gh-6359 + dtype = np.dtype({ + 'names': ['A', 'B'], + 'formats': ['f4', 'f4'], + 'offsets': [0, 8], + 'itemsize': 16}) + new_dtype = np.dtype(dtype.descr) + assert_equal(new_dtype.itemsize, 16) + + class TestDtypeAttributes(TestCase): def test_name_builtin(self): From ae56c58db4207bd11100a9d24c9edf7694e34d67 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 25 Sep 2015 17:54:24 +0200 Subject: [PATCH 026/496] BUG,ENH: allow linalg.cond to work on a stack of matrices This was buggy, because the underlying functions supported it partially but cond was not aware of this. Closes gh-6351 --- numpy/linalg/linalg.py | 14 +++++++------- numpy/linalg/tests/test_linalg.py | 14 ++++++++++++-- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index a2405c180347..f5cb3cb77c47 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1012,9 +1012,9 @@ def eig(a): w : (..., M) array The eigenvalues, each repeated according to its multiplicity. The eigenvalues are not necessarily ordered. The resulting - array will be of complex type, unless the imaginary part is - zero in which case it will be cast to a real type. When `a` - is real the resulting eigenvalues will be real (0 imaginary + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary part) or occur in conjugate pairs v : (..., M, M) array @@ -1382,7 +1382,7 @@ def cond(x, p=None): Parameters ---------- - x : (M, N) array_like + x : (..., M, N) array_like The matrix whose condition number is sought. 
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional Order of the norm: @@ -1451,12 +1451,12 @@ def cond(x, p=None): 0.70710678118654746 """ - x = asarray(x) # in case we have a matrix + x = asarray(x) # in case we have a matrix if p is None: s = svd(x, compute_uv=False) - return s[0]/s[-1] + return s[..., 0]/s[..., -1] else: - return norm(x, p)*norm(inv(x), p) + return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1)) def matrix_rank(M, tol=None): diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index aedcc6a9503e..7c577d86fed2 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -556,7 +556,12 @@ class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase): def do(self, a, b): c = asarray(a) # a might be a matrix s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0] / s[-1], linalg.cond(a), decimal=5) + old_assert_almost_equal( + s[..., 0] / s[..., -1], linalg.cond(a), decimal=5) + + def test_stacked_arrays_explicitly(self): + A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]]) + assert_equal(linalg.cond(A), linalg.cond(A[None, ...])[0]) class TestCond2(LinalgTestCase): @@ -564,7 +569,12 @@ class TestCond2(LinalgTestCase): def do(self, a, b): c = asarray(a) # a might be a matrix s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0] / s[-1], linalg.cond(a, 2), decimal=5) + old_assert_almost_equal( + s[..., 0] / s[..., -1], linalg.cond(a, 2), decimal=5) + + def test_stacked_arrays_explicitly(self): + A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]]) + assert_equal(linalg.cond(A, 2), linalg.cond(A[None, ...], 2)[0]) class TestCondInf(object): From a1a03f2d73d8587d6ddc4cf410b0b9738b33bd28 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Fri, 25 Sep 2015 15:44:27 -0700 Subject: [PATCH 027/496] Document empty(..., object) initialization to None. Behavior goes back at least to 1.6.2. Fixes #6367. --- numpy/add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index ce5ef6d098d9..293005434a8d 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -752,8 +752,8 @@ def luf(lamdaexpr, *args, **kwargs): Returns ------- out : ndarray - Array of uninitialized (arbitrary) data with the given - shape, dtype, and order. + Array of uninitialized (arbitrary) data of the given shape, dtype, and + order. Object arrays will be initialized to None. 
See Also -------- From 89ef1193448297d76951f52efb68436be218fb7d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 27 Sep 2015 15:54:42 +0200 Subject: [PATCH 028/496] TST: Add test that array interface descr and typestr itemsize match --- numpy/core/tests/test_multiarray.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index a2667172c71f..f9ae7c16e610 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5390,6 +5390,16 @@ class ArrayLike(object): assert_equal(np.array(ArrayLike()), 1) +def test_array_interface_itemsize(): + # See gh-6361 + my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], + 'offsets': [0, 8], 'itemsize': 16}) + a = np.ones(10, dtype=my_dtype) + descr_t = np.dtype(a.__array_interface__['descr']) + typestr_t = np.dtype(a.__array_interface__['typestr']) + assert_equal(descr_t.itemsize, typestr_t.itemsize) + + def test_flat_element_deletion(): it = np.ones(3).flat try: From efd18a5455ea2b21eb0e3bd9ca9fab0d08e09526 Mon Sep 17 00:00:00 2001 From: Michael Currie Date: Fri, 17 Apr 2015 11:55:11 -0600 Subject: [PATCH 029/496] BUG: Expanded warning conditions for array_split Zero arrays can also occur with any of the partitions sub_arys[i] induced by array_split, not just the final partition sub_arys[-1]. Modified by seberg. Closes gh-5771 --- numpy/lib/shape_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index 011434dda0a3..481ae96e5877 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -423,7 +423,7 @@ def array_split(ary, indices_or_sections, axis=0): # This "kludge" was introduced here to replace arrays shaped (0, 10) # or similar with an array shaped (0,). # There seems no need for this, so give a FutureWarning to remove later. 
-    if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:
+    if any(arr.size == 0 and arr.ndim != 1 for arr in sub_arys):
         warnings.warn("in the future np.array_split will retain the shape of "
                       "arrays with a zero size, instead of replacing them by "
                       "`array([])`, which always has a shape of (0,).",
From f29c387272a9279f82ab04bbbe1bb68040b6d383 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sun, 27 Sep 2015 16:09:43 +0200
Subject: [PATCH 030/496] TST: Test empty warning for split with manual inputs

---
 numpy/lib/tests/test_shape_base.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index fb9d7f364ee6..9a7fefb8dce8 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -111,6 +111,15 @@ def test_integer_split_2D_rows(self):
         compare_results(res, desired)
         assert_(a.dtype.type is res[-1].dtype.type)
 
+        # Same thing for manual splits:
+        res = assert_warns(FutureWarning, array_split, a, [0, 1, 2], axis=0)
+
+        # After removing the FutureWarning, the last should be zeros((0, 10))
+        desired = [np.array([]), np.array([np.arange(10)]),
+                   np.array([np.arange(10)])]
+        compare_results(res, desired)
+        assert_(a.dtype.type is res[-1].dtype.type)
+
     def test_integer_split_2D_cols(self):
         a = np.array([np.arange(10), np.arange(10)])
         res = array_split(a, 3, axis=-1)
From e06bad5fb0dc377da54412a0d127461f21cf8553 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Sun, 27 Sep 2015 17:27:29 +0200
Subject: [PATCH 031/496] REV: Make sure ravel returns a contiguous array

This is a bit more than it used to do, so it is not a complete revert.
Some of the "weird" cases where a copy was unnecessarily done will now
only be gone with RELAXED_STRIDES_CHECKING.

---
 numpy/core/src/multiarray/shape.c   | 36 +++++++++----------
 numpy/core/tests/test_multiarray.py | 56 +++++++++++++++++------------
 2 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index b679d6d5d5f9..f46f820caaed 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -940,55 +940,51 @@ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order)
             order = NPY_FORTRANORDER;
         }
     }
NPY_FORTRANORDER : NPY_CORDER; + } - if (order != NPY_KEEPORDER) { - return PyArray_Newshape(arr, &newdim, order); + if (order == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(arr)) { + return PyArray_Newshape(arr, &newdim, NPY_CORDER); + } + else if (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(arr)) { + return PyArray_Newshape(arr, &newdim, NPY_FORTRANORDER); } /* For KEEPORDER, check if we can make a flattened view */ - else { + else if (order == NPY_KEEPORDER) { npy_stride_sort_item strideperm[NPY_MAXDIMS]; - npy_intp stride = 0, base_stride = NPY_MIN_INTP; + npy_intp stride; int i, ndim = PyArray_NDIM(arr); PyArray_CreateSortedStridePerm(PyArray_NDIM(arr), PyArray_STRIDES(arr), strideperm); + /* The output array must be contiguous, so the first stride is fixed */ + stride = PyArray_ITEMSIZE(arr); + for (i = ndim-1; i >= 0; --i) { if (PyArray_DIM(arr, strideperm[i].perm) == 1) { /* A size one dimension does not matter */ continue; } - if (base_stride == NPY_MIN_INTP) { - stride = strideperm[i].stride; - base_stride = stride; - } - else if (strideperm[i].stride != stride) { + if (strideperm[i].stride != stride) { break; } stride *= PyArray_DIM(arr, strideperm[i].perm); } -#if NPY_RELAXED_STRIDES_CHECKING == 0 - /* - * For tidyness, cannot be reached with relaxed strides checking - * since the array is guaranteed contiguous (without, not sure...) - */ - if (base_stride == NPY_MIN_INTP) { - base_stride = PyArray_ITEMSIZE(arr); - } -#endif - /* If all the strides matched a contiguous layout, return a view */ if (i < 0) { PyArrayObject *ret; + stride = PyArray_ITEMSIZE(arr); val[0] = PyArray_SIZE(arr); Py_INCREF(PyArray_DESCR(arr)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(arr), PyArray_DESCR(arr), 1, val, - &base_stride, + &stride, PyArray_BYTES(arr), PyArray_FLAGS(arr), (PyObject *)arr); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index a2667172c71f..5a73aa93507a 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2075,41 +2075,37 @@ def test_ravel(self): assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) + # Test simple 1-d copy behaviour: + a = np.arange(10)[::2] + assert_(a.ravel('K').flags.owndata) + assert_(a.ravel('C').flags.owndata) + assert_(a.ravel('F').flags.owndata) + # Not contiguous and 1-sized axis with non matching stride a = np.arange(2**3 * 2)[::2] a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides - assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) - # General case of possible ravel that is not contiguous but - # works and includes a 1-sized axis with non matching stride - a = a.swapaxes(-1, -2) # swap back to C-order - assert_(np.may_share_memory(a.ravel(order='C'), a)) - assert_(np.may_share_memory(a.ravel(order='K'), a)) - - a = a.T # swap all to Fortran order - assert_(np.may_share_memory(a.ravel(order='F'), a)) + # contiguous and 1-sized axis with non matching stride works: + a = np.arange(2**3) + a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) + strides = list(a.strides) + strides[1] = 123 + a.strides = strides assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_equal(a.ravel(order='K'), np.arange(2**3)) - # Test negative strides: + # Test negative strides (not very interesting since non-contiguous): a = np.arange(4)[::-1].reshape(2, 2) - 
assert_(np.may_share_memory(a.ravel(order='C'), a)) - assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_(a.ravel(order='C').flags.owndata) + assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('C'), [3, 2, 1, 0]) assert_equal(a.ravel('K'), [3, 2, 1, 0]) - # Test keeporder with weirdly strided 1-sized dims (1-d first stride) - a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order - strides = list(a.strides) - strides[0] = -12 - strides[-1] = 0 - a.strides = strides - assert_(np.may_share_memory(a.ravel(order='K'), a)) - assert_equal(a.ravel('K'), a.ravel('C')) - # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING): a = np.array([[1]]) a.strides = (123, 432) @@ -2125,7 +2121,7 @@ def test_ravel(self): assert_equal(a.ravel(order), [0]) assert_(np.may_share_memory(a.ravel(order), a)) - #Test that certain non-inplace ravels work right (mostly) for 'K': + # Test that certain non-inplace ravels work right (mostly) for 'K': b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) a = b[..., ::2] assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) @@ -2139,6 +2135,22 @@ def test_ravel(self): assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) + def test_ravel_subclass(self): + class ArraySubclass(np.ndarray): + pass + + a = np.arange(10).view(ArraySubclass) + assert_(isinstance(a.ravel('C'), ArraySubclass)) + assert_(isinstance(a.ravel('F'), ArraySubclass)) + assert_(isinstance(a.ravel('A'), ArraySubclass)) + assert_(isinstance(a.ravel('K'), ArraySubclass)) + + a = np.arange(10)[::2].view(ArraySubclass) + assert_(isinstance(a.ravel('C'), ArraySubclass)) + assert_(isinstance(a.ravel('F'), ArraySubclass)) + assert_(isinstance(a.ravel('A'), ArraySubclass)) + assert_(isinstance(a.ravel('K'), ArraySubclass)) + def test_swapaxes(self): a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) From 26208a21a13432e062e9b268e66a87915974d214 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 27 Sep 2015 18:08:04 +0200 Subject: [PATCH 032/496] DOC: Document behaviour of ravel more clearly --- numpy/core/fromnumeric.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 10626fe9fd77..10f4a98c513c 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1371,7 +1371,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): def ravel(a, order='C'): - """Return a flattened array. + """Return a contiguous flattened array. A 1-D array, containing the elements of the input, is returned. A copy is made only if needed. @@ -1415,6 +1415,7 @@ def ravel(a, order='C'): ndarray.flat : 1-D iterator over an array. ndarray.flatten : 1-D array copy of the elements of an array in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. Notes ----- @@ -1425,6 +1426,9 @@ def ravel(a, order='C'): the index along the last quickest. The opposite holds for column-major, Fortran-style index ordering. + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. + Examples -------- It is equivalent to ``reshape(-1, order=order)``. From 3fd6de4c9492daf401ea1e58ea12c4ef3c8c2b2d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 27 Sep 2015 17:50:12 +0200 Subject: [PATCH 033/496] BUG: Fix vdot for uncontiguous arrays. 
Note that using Newshape also means that less copying is done in principle, because ravel will always return a contiguous array. --- numpy/core/src/multiarray/multiarraymodule.c | 16 ++++++++------ numpy/core/tests/test_multiarray.py | 22 ++++++++++++++++++++ 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 0cf6a7cbbf70..14df0899e806 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -2253,8 +2253,10 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) { int typenum; char *ip1, *ip2, *op; - npy_intp n, stride; + npy_intp n, stride1, stride2; PyObject *op1, *op2; + npy_intp newdimptr[1] = {-1}; + PyArray_Dims newdims = {newdimptr, 1}; PyArrayObject *ap1 = NULL, *ap2 = NULL, *ret = NULL; PyArray_Descr *type; PyArray_DotFunc *vdot; @@ -2278,7 +2280,8 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) Py_DECREF(type); goto fail; } - op1 = PyArray_Ravel(ap1, NPY_CORDER); + + op1 = PyArray_Newshape(ap1, &newdims, NPY_CORDER); if (op1 == NULL) { Py_DECREF(type); goto fail; @@ -2290,7 +2293,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) if (ap2 == NULL) { goto fail; } - op2 = PyArray_Ravel(ap2, NPY_CORDER); + op2 = PyArray_Newshape(ap2, &newdims, NPY_CORDER); if (op2 == NULL) { goto fail; } @@ -2310,7 +2313,8 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) } n = PyArray_DIM(ap1, 0); - stride = type->elsize; + stride1 = PyArray_STRIDE(ap1, 0); + stride2 = PyArray_STRIDE(ap2, 0); ip1 = PyArray_DATA(ap1); ip2 = PyArray_DATA(ap2); op = PyArray_DATA(ret); @@ -2338,11 +2342,11 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args) } if (n < 500) { - vdot(ip1, stride, ip2, stride, op, n, NULL); + vdot(ip1, stride1, ip2, stride2, op, n, NULL); } else { NPY_BEGIN_THREADS_DESCR(type); - vdot(ip1, stride, ip2, stride, op, n, NULL); + vdot(ip1, stride1, ip2, stride2, op, n, NULL); NPY_END_THREADS_DESCR(type); } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index a2667172c71f..44bb9e02f041 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3997,6 +3997,28 @@ def test_vdot_array_order(self): assert_equal(np.vdot(b, a), res) assert_equal(np.vdot(b, b), res) + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + class TestDot(TestCase): def setUp(self): From 649d19ff0ec5bba1fe0e89402d7cc8c4597ea170 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Mon, 28 Sep 2015 11:02:15 -0700 Subject: [PATCH 034/496] FutureWarning for np.full(..., non-float). cf. discussion in #6366. 
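A sketch of the transitional behaviour (the dtype named in the warning
comes from ``np.array(fill_value).dtype``, so ``int64`` here assumes a
platform whose default integer is 64-bit):

>>> np.full(3, 1)  # dtype still defaults to float, but now warns
FutureWarning: in the future, full(..., 1) will return an array of dtype('int64')
array([ 1.,  1.,  1.])
>>> np.full(3, 1, dtype=int)  # an explicit dtype avoids the warning
array([1, 1, 1])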
--- numpy/core/numeric.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 1b7dfca3eac5..5d4464ea71fc 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -258,8 +258,9 @@ def full(shape, fill_value, dtype=None, order='C'): fill_value : scalar Fill value. dtype : data-type, optional - The desired data-type for the array, e.g., `numpy.int8`. Default is - is chosen as `np.array(fill_value).dtype`. + The desired data-type for the array, e.g., `np.int8`. Default + is `float`, but will change to `np.array(fill_value).dtype` in a + future release. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. @@ -290,6 +291,10 @@ def full(shape, fill_value, dtype=None, order='C'): """ a = empty(shape, dtype, order) + if array(fill_value).dtype != a.dtype: + warnings.warn( + "in the future, full(..., {0!r}) will return an array of {1!r}". + format(fill_value, array(fill_value).dtype), FutureWarning) multiarray.copyto(a, fill_value, casting='unsafe') return a From 00edb2b6a96b7189be91d16cb84981a60c5961e8 Mon Sep 17 00:00:00 2001 From: Yash Mehrotra Date: Tue, 29 Sep 2015 01:45:45 +0530 Subject: [PATCH 035/496] MAINT: Deprecated PyObject_Compare in favor of PyObject_RichCompareBool. Fixes #6265 and #6229 --- numpy/core/include/numpy/npy_3kcompat.h | 6 +-- numpy/core/src/multiarray/arraytypes.c.src | 5 +- numpy/core/src/multiarray/multiarraymodule.c | 11 +---- numpy/core/src/umath/loops.c.src | 49 ++++++++++---------- numpy/core/tests/test_umath.py | 16 +++++++ 5 files changed, 46 insertions(+), 41 deletions(-) diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index 72ddaf66b297..cd96697982ba 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -325,7 +325,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) { int v; v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 0) { + if (v == 1) { *cmp = -1; return 1; } @@ -334,7 +334,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) } v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 0) { + if (v == 1) { *cmp = 1; return 1; } @@ -343,7 +343,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) } v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 0) { + if (v == 1) { *cmp = 0; return 1; } diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 68944a1bdd4d..5aa7e61426cc 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -2784,7 +2784,7 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap)) } return 1; } -#if defined(NPY_PY3K) + if (PyObject_RichCompareBool(*ip1, *ip2, Py_LT) == 1) { return -1; } @@ -2794,9 +2794,6 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap)) else { return 0; } -#else - return PyObject_Compare(*ip1, *ip2); -#endif } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index e72c355dc283..5c08aef7776d 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -1443,13 +1443,9 @@ _equivalent_fields(PyObject *field1, PyObject *field2) { if (field1 == NULL || field2 == NULL) { return 0; } -#if defined(NPY_PY3K) + val = PyObject_RichCompareBool(field1, field2, Py_EQ); if (val != 1 || 
PyErr_Occurred()) { -#else - val = PyObject_Compare(field1, field2); - if (val != 0 || PyErr_Occurred()) { -#endif same = 0; } else { @@ -1476,13 +1472,8 @@ _equivalent_subarrays(PyArray_ArrayDescr *sub1, PyArray_ArrayDescr *sub2) return 0; } -#if defined(NPY_PY3K) val = PyObject_RichCompareBool(sub1->shape, sub2->shape, Py_EQ); if (val != 1 || PyErr_Occurred()) { -#else - val = PyObject_Compare(sub1->shape, sub2->shape); - if (val != 0 || PyErr_Occurred()) { -#endif PyErr_Clear(); return 0; } diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 36046d9b80e6..04a3fd0c8091 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -2618,41 +2618,42 @@ OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS NPY_NO_EXPORT void OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { -#if defined(NPY_PY3K) PyObject *zero = PyLong_FromLong(0); + UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject **out = (PyObject **)op1; + PyObject *ret = NULL; int v; - PyObject *ret; - if (PyObject_Cmp(in1 ? in1 : Py_None, zero, &v) == -1) { - return; + + if (in1 == NULL) { + in1 = Py_None; } - ret = PyLong_FromLong(v); - if (PyErr_Occurred()) { - Py_DECREF(zero); - return; + + if ((v = PyObject_RichCompareBool(in1, zero, Py_LT)) == 1) { + ret = PyLong_FromLong(-1); } - Py_XDECREF(*out); - *out = ret; - } - Py_DECREF(zero); -#else - PyObject *zero = PyInt_FromLong(0); - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyInt_FromLong( - PyObject_Compare(in1 ? in1 : Py_None, zero)); - if (PyErr_Occurred()) { - Py_DECREF(zero); - return; + else if (v == 0 && + (v = PyObject_RichCompareBool(in1, zero, Py_GT)) == 1) { + ret = PyLong_FromLong(1); + } + else if (v == 0 && + (v = PyObject_RichCompareBool(in1, zero, Py_EQ)) == 1) { + ret = PyLong_FromLong(0); + } + else if (v == 0) { + /* in1 is NaN */ + PyErr_SetString(PyExc_TypeError, + "unorderable types for comparison"); + } + + if (ret == NULL) { + break; } Py_XDECREF(*out); *out = ret; } - Py_DECREF(zero); -#endif + Py_XDECREF(zero); } /* diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 033fac37da96..ebf8e0380f28 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -935,6 +935,22 @@ def test_sign(self): assert_equal(res, tgt) assert_equal(out, tgt) + def test_sign_dtype_object(self): + # In reference to github issue #6229 + + foo = np.array([-.1, 0, .1]) + a = np.sign(foo.astype(np.object)) + b = np.sign(foo) + + assert_array_equal(a, b) + + def test_sign_dtype_nan_object(self): + # In reference to github issue #6229 + def test_nan(): + foo = np.array([np.nan]) + a = np.sign(foo.astype(np.object)) + + assert_raises(TypeError, test_nan) class TestMinMax(TestCase): def test_minmax_blocked(self): From 032951dc09ae110f3260cf29abf9d584bda262a7 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Mon, 28 Sep 2015 13:27:26 -0700 Subject: [PATCH 036/496] Add tests. --- numpy/core/tests/test_deprecations.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 3e76409c5068..e3aea7efb588 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -374,5 +374,15 @@ def test_simple(self): arr.__getitem__, (slice(None), index)) +class TestFullDefaultDtype: + """np.full defaults to float when dtype is not set. 
In the future, it will
+    use the fill value's dtype.
+    """
+
+    def test_full_default_dtype(self):
+        assert_warns(FutureWarning, np.full, 1, 1)
+        assert_warns(FutureWarning, np.full, 1, None)
+
+
 if __name__ == "__main__":
     run_module_suite()

From 08e8c1415670b029c26ae8ce0585fb9ea0b11e63 Mon Sep 17 00:00:00 2001
From: Ruediger Meier
Date: Sat, 26 Sep 2015 14:01:26 +0200
Subject: [PATCH 037/496] MAINT: corrcoef, memory usage optimization

We calculate sqrt on the small vector rather than on that huge product
matrix and we combine the "outer" product with element-wise division.
So even though we have a slower loop over the rows now ... this code
snippet runs about 3 times faster than before.

However, the speed improvement of the whole function is not really
significant because cov() takes 80-99% of the time (depending on
blas/lapack implementation and number of CPU cores).

More important is that we will save 1/3 of the memory. For example
corrcoef() for a [23k, m] matrix needs 8GB now instead of 12GB.

--- numpy/lib/function_base.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 007ff42a45eb..e649807e37fe 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2349,7 +2349,11 @@ def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
     except ValueError:  # scalar covariance
         # nan if incorrect value (nan, inf, 0), 1 otherwise
         return c / c
-    return c / sqrt(multiply.outer(d, d))
+    d = sqrt(d)
+    # calculate "c / multiply.outer(d, d)" row-wise ... for memory and speed
+    for i in range(0, d.size):
+        c[i,:] /= (d * d[i])
+    return c
 
 
 def blackman(M):

From 09bf9cc62d47bb7d0d738c70a0fbdb9ff2a13985 Mon Sep 17 00:00:00 2001
From: Chris Hogan
Date: Wed, 23 Sep 2015 15:34:49 -0500
Subject: [PATCH 038/496] BUG: Guarantee non-zero is 1 for switch statements

In numpy/core/src/npymath/npy_math.c.src there is a state machine
sequence that assumes signbit returns either a 1 or 0. However, all the
online documentation states that it will return either a 0 or a nonzero
value, which seems to be determined by the OS. These changes allow the
code to work with a zero or a nonzero value.

--- numpy/core/src/npymath/npy_math.c.src | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src
index b7f28bb39be0..7f62810d5b7b 100644
--- a/numpy/core/src/npymath/npy_math.c.src
+++ b/numpy/core/src/npymath/npy_math.c.src
@@ -130,7 +130,7 @@ double npy_atan2(double y, double x)
         return npy_atan(y);
     }
 
-    m = 2 * npy_signbit(x) + npy_signbit(y);
+    m = 2 * (npy_signbit((x)) != 0) + (npy_signbit((y)) != 0);
     if (y == 0.0) {
         switch(m) {
         case 0:

From 899325ec043688c751001662f5e7226e7e92e04e Mon Sep 17 00:00:00 2001
From: "Nathaniel J. Smith"
Date: Thu, 27 Aug 2015 00:02:17 -0700
Subject: [PATCH 039/496] MAINT: cleanup dead code/arguments/fields from ufuncs

The check_return argument and ufunc object field was never used; ditto
for the "new" inner loop selector (which was never implemented), along
with associated typedefs.

Since I was looking at this code anyway trying to figure out which parts
were actually in use, I figured I'd clear up some of the brush to make it
easier next time...
--- doc/release/1.11.0-notes.rst | 10 +++++++ .../reference/c-api.types-and-structures.rst | 7 +---- doc/source/reference/c-api.ufunc.rst | 11 +++---- doc/source/user/c-info.ufunc-tutorial.rst | 7 ++--- numpy/core/include/numpy/ufuncobject.h | 30 ++++--------------- numpy/core/src/umath/ufunc_object.c | 30 ++++++------------- numpy/core/src/umath/umathmodule.c | 1 - 7 files changed, 33 insertions(+), 63 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 9afe6e8664ce..990562604f21 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -35,6 +35,16 @@ Deprecated to error e.g., in reshape, take, and specifying reduce axis. +C API +~~~~~ + +Removed the ``check_return`` and ``inner_loop_selector`` members of +the ``PyUFuncObject`` struct (replacing them with ``reserved`` slots +to preserve struct layout). These were never used for anything, so +it's unlikely that any third-party code is using them either, but we +mention it here for completeness. + + New Features ============ diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst index 35ffc2d1e2d5..d7342bd9e7a8 100644 --- a/doc/source/reference/c-api.types-and-structures.rst +++ b/doc/source/reference/c-api.types-and-structures.rst @@ -681,7 +681,7 @@ PyUFunc_Type PyUFuncGenericFunction *functions; void **data; int ntypes; - int check_return; + int reserved1; const char *name; char *types; const char *doc; @@ -748,11 +748,6 @@ PyUFunc_Type specifies how many different 1-d loops (of the builtin data types) are available. - .. c:member:: int PyUFuncObject.check_return - - Obsolete and unused. However, it is set by the corresponding entry in - the main ufunc creation routine: :c:func:`PyUFunc_FromFuncAndData` (...). - .. c:member:: char *PyUFuncObject.name A string name for the ufunc. This is used dynamically to build diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst index ee1822122cc7..892ccbdc7354 100644 --- a/doc/source/reference/c-api.ufunc.rst +++ b/doc/source/reference/c-api.ufunc.rst @@ -67,7 +67,7 @@ Functions .. c:function:: PyObject* PyUFunc_FromFuncAndData(PyUFuncGenericFunction* func, void** data, char* types, int ntypes, int nin, int nout, int identity, - char* name, char* doc, int check_return) + char* name, char* doc, int unused) Create a new broadcasting universal function from required variables. Each ufunc builds around the notion of an element-by-element @@ -121,15 +121,12 @@ Functions dynamically determined from the object and available when accessing the **__doc__** attribute of the ufunc. - :param check_return: - Unused and present for backwards compatibility of the C-API. A - corresponding *check_return* integer does exist in the ufunc - structure and it does get set with this value when the ufunc - object is created. + :param unused: + Unused and present for backwards compatibility of the C-API. .. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction* func, void** data, char* types, int ntypes, int nin, int nout, int identity, - char* name, char* doc, int check_return, char *signature) + char* name, char* doc, int unused, char *signature) This function is very similar to PyUFunc_FromFuncAndData above, but has an extra *signature* argument, to define generalized universal functions. 
diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst
index db25568b9fe4..109e6adb5cc4 100644
--- a/doc/source/user/c-info.ufunc-tutorial.rst
+++ b/doc/source/user/c-info.ufunc-tutorial.rst
@@ -1060,7 +1060,7 @@ automatically generates a ufunc from a C function with the correct signature.
 
 .. c:function:: PyObject *PyUFunc_FromFuncAndData( PyUFuncGenericFunction* func,
    void** data, char* types, int ntypes, int nin, int nout, int identity,
-   char* name, char* doc, int check_return)
+   char* name, char* doc, int unused)
 
    *func*
 
@@ -1170,10 +1170,9 @@ automatically generates a ufunc from a C function with the correct signature.
       response to ``{ufunc_name}.__doc__``). Do not include the function
       signature or the name as this is generated automatically.
 
-   *check_return*
+   *unused*
 
-      Not presently used, but this integer value does get set in the
-      structure-member of similar name.
+      Unused; kept for compatibility. Just set it to zero.
 
 .. index::
    pair: ufunc; adding new
 
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index a24a0d83774f..1cca64b75b49 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -18,17 +18,6 @@ typedef void (*PyUFuncGenericFunction)
               npy_intp *strides,
               void *innerloopdata);
 
-/*
- * The most generic one-dimensional inner loop for
- * a standard element-wise ufunc. This typedef is also
- * more consistent with the other NumPy function pointer typedefs
- * than PyUFuncGenericFunction.
- */
-typedef void (PyUFunc_StridedInnerLoopFunc)(
-                char **dataptrs, npy_intp *strides,
-                npy_intp count,
-                NpyAuxData *innerloopdata);
-
 /*
  * The most generic one-dimensional inner loop for
  * a masked standard element-wise ufunc. "Masked" here means that it skips
@@ -112,13 +101,6 @@ typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
                         PyUFuncGenericFunction *out_innerloop,
                         void **out_innerloopdata,
                         int *out_needs_api);
-typedef int (PyUFunc_InnerLoopSelectionFunc)(
-                        struct _tagPyUFuncObject *ufunc,
-                        PyArray_Descr **dtypes,
-                        npy_intp *fixed_strides,
-                        PyUFunc_StridedInnerLoopFunc **out_innerloop,
-                        NpyAuxData **out_innerloopdata,
-                        int *out_needs_api);
 typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)(
                         struct _tagPyUFuncObject *ufunc,
                         PyArray_Descr **dtypes,
@@ -148,8 +130,8 @@ typedef struct _tagPyUFuncObject {
         /* The number of elements in 'functions' and 'data' */
         int ntypes;
 
-        /* Does not appear to be used */
-        int check_return;
+        /* Used to be unused field 'check_return' */
+        int reserved1;
 
         /* The name of the ufunc */
         const char *name;
@@ -204,11 +186,11 @@ typedef struct _tagPyUFuncObject {
          */
         PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
         /*
-         * A function which returns an inner loop for the new mechanism
-         * in NumPy 1.7 and later. If provided, this is used, otherwise
-         * if NULL the legacy_inner_loop_selector is used instead.
+         * This was blocked off to be the "new" inner loop selector in 1.7,
+         * but this was never implemented. (This is also why the above
+         * selector is called the "legacy" selector.)
          */
-        PyUFunc_InnerLoopSelectionFunc *inner_loop_selector;
+        void *reserved2;
        /*
         * A function which returns a masked inner loop for the ufunc.
*/ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 7797731015de..4bc9582b4c9a 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -2623,22 +2623,9 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc, else { NPY_UF_DBG_PRINT("Executing legacy inner loop\n"); - if (ufunc->legacy_inner_loop_selector != NULL) { - retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok, - op, dtypes, order, - buffersize, arr_prep, arr_prep_args); - } - else { - /* - * TODO: When this is supported, it should be preferred over - * the legacy_inner_loop_selector - */ - PyErr_SetString(PyExc_RuntimeError, - "usage of the new inner_loop_selector isn't " - "implemented yet"); - retval = -1; - goto fail; - } + retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok, + op, dtypes, order, + buffersize, arr_prep, arr_prep_args); } if (retval < 0) { goto fail; @@ -4480,10 +4467,10 @@ NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, char *types, int ntypes, int nin, int nout, int identity, - const char *name, const char *doc, int check_return) + const char *name, const char *doc, int unused) { return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, - nin, nout, identity, name, doc, check_return, NULL); + nin, nout, identity, name, doc, 0, NULL); } /*UFUNC_API*/ @@ -4492,7 +4479,7 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, char *types, int ntypes, int nin, int nout, int identity, const char *name, const char *doc, - int check_return, const char *signature) + int unused, const char *signature) { PyUFuncObject *ufunc; @@ -4510,6 +4497,9 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, } PyObject_Init((PyObject *)ufunc, &PyUFunc_Type); + ufunc->reserved1 = 0; + ufunc->reserved2 = NULL; + ufunc->nin = nin; ufunc->nout = nout; ufunc->nargs = nin+nout; @@ -4519,7 +4509,6 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, ufunc->data = data; ufunc->types = types; ufunc->ntypes = ntypes; - ufunc->check_return = check_return; ufunc->ptr = NULL; ufunc->obj = NULL; ufunc->userloops=NULL; @@ -4527,7 +4516,6 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, /* Type resolution and inner loop selection functions */ ufunc->type_resolver = &PyUFunc_DefaultTypeResolver; ufunc->legacy_inner_loop_selector = &PyUFunc_DefaultLegacyInnerLoopSelector; - ufunc->inner_loop_selector = NULL; ufunc->masked_inner_loop_selector = &PyUFunc_DefaultMaskedInnerLoopSelector; if (name == NULL) { diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index b1da2aeed768..d19d5b9d2eac 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -123,7 +123,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS self->identity = PyUFunc_None; self->functions = pyfunc_functions; self->ntypes = 1; - self->check_return = 0; /* generalized ufunc */ self->core_enabled = 0; From 1e436a5ad94da01e3771a09861279b07dcff8dc6 Mon Sep 17 00:00:00 2001 From: Ronan Lamy Date: Tue, 29 Sep 2015 17:36:00 +0100 Subject: [PATCH 040/496] MAINT: Simplify numpy/linalg/setup.py Compute the sources required to build extensions lapack_lite and _umath_linalg in a more direct way that doesn't rely on abusing the 'depends' argument to config.add_extension(). 
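As background on the numpy.distutils mechanism relied on here: entries in the
`sources` list may be callables, invoked with the extension object and the
build directory, that return the list of source files to compile. A minimal
sketch of the idiom follows; the module name and the .c file names are
invented purely for illustration and are not part of this patch:

    # Hypothetical setup.py fragment showing a callable source generator.
    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('demo', parent_package, top_path)

        def get_extra_sources(ext, build_dir):
            # Called at build time; decide here which sources to compile.
            return ['demo_fallback.c']  # invented file name

        config.add_extension('demo',
                             sources=['demomodule.c', get_extra_sources])
        return config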
--- numpy/linalg/setup.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 282c3423c93c..4bd57453cab8 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -20,31 +20,31 @@ def configuration(parent_package='',top_path=None): os.path.join(src_dir, 'blas_lite.c'), os.path.join(src_dir, 'dlamch.c'), os.path.join(src_dir, 'f2c_lite.c'), - os.path.join(src_dir, 'f2c.h'), ] + all_sources = config.paths(lapack_lite_src) lapack_info = get_info('lapack_opt', 0) # and {} def get_lapack_lite_sources(ext, build_dir): if not lapack_info: print("### Warning: Using unoptimized lapack ###") - return ext.depends[:-1] + return all_sources else: if sys.platform=='win32': print("### Warning: python_xerbla.c is disabled ###") - return ext.depends[:1] - return ext.depends[:2] + return [] + return [all_sources[0]] config.add_extension('lapack_lite', - sources = [get_lapack_lite_sources], - depends = ['lapack_litemodule.c'] + lapack_lite_src, + sources = ['lapack_litemodule.c', get_lapack_lite_sources], + depends = ['lapack_lite/f2c.h'], extra_info = lapack_info ) # umath_linalg module config.add_extension('_umath_linalg', - sources = [get_lapack_lite_sources], - depends = ['umath_linalg.c.src'] + lapack_lite_src, + sources = ['umath_linalg.c.src', get_lapack_lite_sources], + depends = ['lapack_lite/f2c.h'], extra_info = lapack_info, libraries = ['npymath'], ) From 272bae005bc91fdeddb4c88924aebab5ce67493b Mon Sep 17 00:00:00 2001 From: Ronan Lamy Date: Tue, 29 Sep 2015 17:39:54 +0100 Subject: [PATCH 041/496] STY: Make numpy/linalg/setup.py PEP8 compliant --- numpy/linalg/setup.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 4bd57453cab8..adc8f1784866 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -3,7 +3,7 @@ import os import sys -def configuration(parent_package='',top_path=None): +def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info config = Configuration('linalg', parent_package, top_path) @@ -23,32 +23,33 @@ def configuration(parent_package='',top_path=None): ] all_sources = config.paths(lapack_lite_src) - lapack_info = get_info('lapack_opt', 0) # and {} + lapack_info = get_info('lapack_opt', 0) # and {} + def get_lapack_lite_sources(ext, build_dir): if not lapack_info: print("### Warning: Using unoptimized lapack ###") return all_sources else: - if sys.platform=='win32': + if sys.platform == 'win32': print("### Warning: python_xerbla.c is disabled ###") return [] return [all_sources[0]] - config.add_extension('lapack_lite', - sources = ['lapack_litemodule.c', get_lapack_lite_sources], - depends = ['lapack_lite/f2c.h'], - extra_info = lapack_info - ) + config.add_extension( + 'lapack_lite', + sources=['lapack_litemodule.c', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + ) # umath_linalg module - - config.add_extension('_umath_linalg', - sources = ['umath_linalg.c.src', get_lapack_lite_sources], - depends = ['lapack_lite/f2c.h'], - extra_info = lapack_info, - libraries = ['npymath'], - ) - + config.add_extension( + '_umath_linalg', + sources=['umath_linalg.c.src', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + libraries=['npymath'], + ) return config if __name__ == '__main__': From 
0862e89fb51b2e6fc2dfe74e6166a218b67ff06d Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sun, 13 Sep 2015 00:12:23 -0400 Subject: [PATCH 042/496] ENH: adds lexsort for arrays with object dtype --- doc/release/1.11.0-notes.rst | 6 +++++ numpy/core/src/multiarray/item_selection.c | 31 ++++++++++++++++++---- numpy/core/tests/test_multiarray.py | 16 +++++++++++ 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 990562604f21..891a676842a3 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -68,6 +68,12 @@ Improvements The ``axis`` parameter was added to *np.gradient* for consistency. It allows to specify over which axes the gradient is calculated. +*np.lexsort* now supports arrays with object data-type +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The function now internally calls the generic ``npy_amergesort`` +when the type does not implement a merge-sort kind of ``argsort`` +method. + Changes ======= diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index d3b9a036d491..ec0717bd6fe4 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -1427,9 +1427,10 @@ PyArray_LexSort(PyObject *sort_keys, int axis) goto fail; } } - if (!PyArray_DESCR(mps[i])->f->argsort[NPY_MERGESORT]) { + if (!PyArray_DESCR(mps[i])->f->argsort[NPY_MERGESORT] + && !PyArray_DESCR(mps[i])->f->compare) { PyErr_Format(PyExc_TypeError, - "merge sort not available for item %zd", i); + "item %zd type does not have compare function", i); goto fail; } if (!object @@ -1520,15 +1521,25 @@ PyArray_LexSort(PyObject *sort_keys, int axis) *iptr++ = i; } for (j = 0; j < n; j++) { + int rcode; elsize = PyArray_DESCR(mps[j])->elsize; astride = PyArray_STRIDES(mps[j])[axis]; argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_MERGESORT]; + if(argsort == NULL) { + argsort = npy_amergesort; + } _unaligned_strided_byte_copy(valbuffer, (npy_intp) elsize, its[j]->dataptr, astride, N, elsize); if (swaps[j]) { _strided_byte_swap(valbuffer, (npy_intp) elsize, N, elsize); } - if (argsort(valbuffer, (npy_intp *)indbuffer, N, mps[j]) < 0) { + rcode = argsort(valbuffer, (npy_intp *)indbuffer, N, mps[j]); +#if defined(NPY_PY3K) + if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) + && PyErr_Occurred())) { +#else + if (rcode < 0) { +#endif PyDataMem_FREE(valbuffer); PyDataMem_FREE(indbuffer); free(swaps); @@ -1551,9 +1562,19 @@ PyArray_LexSort(PyObject *sort_keys, int axis) *iptr++ = i; } for (j = 0; j < n; j++) { + int rcode; argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_MERGESORT]; - if (argsort(its[j]->dataptr, (npy_intp *)rit->dataptr, - N, mps[j]) < 0) { + if(argsort == NULL) { + argsort = npy_amergesort; + } + rcode = argsort(its[j]->dataptr, + (npy_intp *)rit->dataptr, N, mps[j]); +#if defined(NPY_PY3K) + if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) + && PyErr_Occurred())) { +#else + if (rcode < 0) { +#endif goto fail; } PyArray_ITER_NEXT(its[j]); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 872f9bde403d..9fd08e0230bf 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3207,6 +3207,22 @@ def test_datetime(self): expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) + def test_object(self): # gh-6312 + a = np.random.choice(10, 1000) + b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 
1000) + + for u in a, b: + left = np.lexsort((u.astype('O'),)) + right = np.argsort(u, kind='mergesort') + assert_array_equal(left, right) + + for u, v in (a, b), (b, a): + idx = np.lexsort((u, v)) + assert_array_equal(idx, np.lexsort((u.astype('O'), v))) + assert_array_equal(idx, np.lexsort((u, v.astype('O')))) + u, v = np.array(u, dtype='object'), np.array(v, dtype='object') + assert_array_equal(idx, np.lexsort((u, v))) + class TestIO(object): """Test tofile, fromfile, tobytes, and fromstring""" From b3dfa8de56c4a892db58284aebe7554e25829c64 Mon Sep 17 00:00:00 2001 From: Jonathan Helmus Date: Fri, 2 Oct 2015 09:36:20 -0500 Subject: [PATCH 043/496] STY: Remove trailing whitespace from numpy/ma/core.py --- numpy/ma/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 61f0c12a8994..43c92f1ac007 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6816,7 +6816,7 @@ def resize(x, new_shape): return result -def rank(obj): +def rank(obj): """ maskedarray version of the numpy function. @@ -6833,7 +6833,7 @@ def rank(obj): rank.__doc__ = np.rank.__doc__ -def ndim(obj): +def ndim(obj): """ maskedarray version of the numpy function. From 7744de5cb61f279a05f5a8b139e5a8a58b4d8c98 Mon Sep 17 00:00:00 2001 From: Jonathan Helmus Date: Fri, 2 Oct 2015 10:13:38 -0500 Subject: [PATCH 044/496] BUG: numpy.ma.round works on zero dimensional arrays numpy.ma.round returns a scalar or np.ma.masked when called with a zero dimensional array. This behavior is consistent with numpy.round. closes #2244 --- numpy/ma/core.py | 8 ++++++-- numpy/ma/tests/test_core.py | 25 +++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 43c92f1ac007..ba9643d4fd83 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5107,8 +5107,12 @@ def round(self, decimals=0, out=None): """ result = self._data.round(decimals=decimals, out=out).view(type(self)) - result._mask = self._mask - result._update_from(self) + if result.ndim: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked # No explicit output: we're done if out is None: return result diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index aa6ce5db9ffe..07aaaacdd534 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -3443,6 +3443,31 @@ def test_round_with_output(self): result = xm.round(decimals=2, out=output) self.assertTrue(result is output) + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + def test_identity(self): a = identity(5) self.assertTrue(isinstance(a, MaskedArray)) From 7c263ac239b6e4c807411986a12aec96e9924e37 Mon Sep 17 00:00:00 2001 From: Lars Buitinck Date: Fri, 2 Oct 2015 23:00:47 +0200 Subject: [PATCH 045/496] DOC: typo: affect --- numpy/lib/function_base.py | 6 +++--- numpy/lib/index_tricks.py | 2 +- numpy/ma/extras.py | 6 +++--- 3 
files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3c941ca5bfd9..f859b1d5d1e9 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -2316,11 +2316,11 @@ def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional - Has no affect, do not use. + Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional - Has no affect, do not use. + Has no effect, do not use. .. deprecated:: 1.10.0 @@ -2342,7 +2342,7 @@ def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no affect and are deprecated', + warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning) c = cov(x, y, rowvar) try: diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index c68bf26340b2..8bcc3fb5389b 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -681,7 +681,7 @@ def fill_diagonal(a, val, wrap=False): wrap : bool For tall matrices in NumPy version up to 1.6.2, the diagonal "wrapped" after N columns. You can have this behavior - with this option. This affect only tall matrices. + with this option. This affects only tall matrices. See also -------- diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index a47c58684885..322303f03646 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1415,7 +1415,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional - Has no affect, do not use. + Has no effect, do not use. .. deprecated:: 1.10.0 allow_masked : bool, optional @@ -1424,7 +1424,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, If False, raises an exception. Because `bias` is deprecated, this argument needs to be treated as keyword only to avoid a warning. ddof : _NoValue, optional - Has no affect, do not use. + Has no effect, do not use. .. deprecated:: 1.10.0 @@ -1440,7 +1440,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ - msg = 'bias and ddof have no affect and are deprecated' + msg = 'bias and ddof have no effect and are deprecated' if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn(msg, DeprecationWarning) From 435af7ce14620b6fc6243dd76779d4d7020b2fb3 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. 
Smith" Date: Sat, 3 Oct 2015 21:47:21 -0700 Subject: [PATCH 046/496] DEV: minor textual cleanups to governance document --- doc/source/dev/governance/governance.rst | 6 +++--- doc/source/dev/governance/people.rst | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index e558eb29c6df..cb0e32825b4f 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -383,11 +383,11 @@ Institutional Partner benefits are: Document history ================ -[TODO: add a link to the git log for this file] +https://github.com/numpy/numpy/commits/master/doc/source/dev/governance/governance.rst Acknowledgements ================ -Substantial portions of this document were [STRIKEOUT:inspired] stolen -wholesale from the `Jupyter/IPython project's governance document +Substantial portions of this document were adapted from the +`Jupyter/IPython project's governance document `_. diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst index 6c6ccf305459..0fcef08b2d6a 100644 --- a/doc/source/dev/governance/people.rst +++ b/doc/source/dev/governance/people.rst @@ -47,3 +47,9 @@ Institutional Partners ---------------------- * UC Berkeley (Nathaniel Smith) + + +Document history +---------------- + +https://github.com/numpy/numpy/commits/master/doc/source/dev/governance/governance.rst From 6823e7a010a95096c11cd4c13dd5aacad4820b7e Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sat, 3 Oct 2015 21:47:49 -0700 Subject: [PATCH 047/496] DEV: Fix dates on Travis's time as leader The 2001 date I had originally was based on the beginning of NumPy's git history -- but further research suggests that this actually runs back into Numeric times, before the NumPy project was founded. New 2005 date is based on Fernando's recollection of the fateful meeting that led to NumPy's creation, and is consistent with Wikipedia's claim that 2006 was the first release. If this is wrong please let me know :-) --- doc/source/dev/governance/people.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst index 0fcef08b2d6a..75be362138e2 100644 --- a/doc/source/dev/governance/people.rst +++ b/doc/source/dev/governance/people.rst @@ -26,7 +26,7 @@ Steering council Emeritus members ---------------- -* Travis Oliphant - Project Founder / Emeritus Leader (served: 2001(??)-2012) +* Travis Oliphant - Project Founder / Emeritus Leader (served: 2005-2012) NumFOCUS Subcommittee From 29b09f86b7956e822751f7da54a998395a2baf76 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sat, 3 Oct 2015 21:51:14 -0700 Subject: [PATCH 048/496] DEV: remove the interstitial notes on the seed steering council Now that this seems to be settled, we don't need this cluttering up the main text. (For the record: the rule listed here is what we actually used.) --- doc/source/dev/governance/governance.rst | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index cb0e32825b4f..84d1ec0be165 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -242,21 +242,6 @@ deemed to be actively harmful to the project’s well-being, and attempts at communication and conflict resolution have failed. This requires the consensus of the remaining Members. 
-[We also have to decide on the initial membership for the Council. While -the above text makes pains to distinguish between "committer" and -"Council Member", in the past we've pretty much treated them as the -same. So to keep things simple and deterministic, I propose that we seed -the Council with everyone who has reviewed/merged a pull request since -Jan 1, 2014, and move those who haven't used their commit bit in >1.5 -years to the emeritus list. Based on the output of - -git log --grep="^Merge pull request" --since 2014-01-01 \| grep Author: -\| sort -u - -I believe this would give us an initial Steering Council of: Sebastian -Berg, Jaime Fernández del Río, Ralf Gommers, Alex Griffing, Charles -Harris, Nathaniel Smith, Julian Taylor, and Pauli Virtanen (assuming -everyone on that list is interested/willing to serve).] Conflict of interest ~~~~~~~~~~~~~~~~~~~~ From 365a7477a18d28c7d0fca002bcca575e928efd54 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sat, 3 Oct 2015 21:53:23 -0700 Subject: [PATCH 049/496] DEV: remove draft markers from governance document In preparation for merging. --- doc/source/dev/governance/governance.rst | 2 -- doc/source/dev/governance/people.rst | 2 -- 2 files changed, 4 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 84d1ec0be165..0690d02cf830 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -2,8 +2,6 @@ NumPy project governance and decision-making ================================================================ -[DRAFT, not yet accepted] - The purpose of this document is to formalize the governance process used by the NumPy project in both ordinary and extraordinary situations, and to clarify how decisions are made and how the various diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst index 75be362138e2..140b52415dfe 100644 --- a/doc/source/dev/governance/people.rst +++ b/doc/source/dev/governance/people.rst @@ -1,8 +1,6 @@ Current steering council and institutional partners =================================================== -[DRAFT, not yet accepted] - Steering council ---------------- From 881849c5385524ceafc462d230960463a01e47a6 Mon Sep 17 00:00:00 2001 From: Lars Buitinck Date: Sun, 4 Oct 2015 13:31:57 +0200 Subject: [PATCH 050/496] ENH: halve the memory requirement of np.cov MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevents allocation of an n²-sized array. XXX For large arrays, multiplying by 1/fact is more than 10% faster than dividing by fact, but that doesn't pass the tests. 
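The saving comes from dividing the product matrix in place rather than
materializing a second n-by-n array for the quotient. A small illustration of
the difference (this is only a sketch of the memory argument, not the library
code itself):

    import numpy as np

    x = np.random.rand(1000, 1000)
    fact = 3.0

    # Two 1000x1000 arrays are alive at once: the product and the quotient.
    a = np.dot(x, x.T) / fact

    # Only one 1000x1000 array is allocated; the division reuses it.
    b = np.dot(x, x.T)
    b /= fact

    assert np.allclose(a, b)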
--- numpy/lib/function_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3c941ca5bfd9..a0c9f12748dd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -2286,7 +2286,9 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None): X_T = X.T else: X_T = (X*w).T - return (dot(X, X_T.conj())/fact).squeeze() + c = dot(X, X_T.conj()) + c /= fact + return c.squeeze() def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): From 11dea9e4701d0b1a6e163e8a70d33d5fc806941a Mon Sep 17 00:00:00 2001 From: Jonathan Helmus Date: Sun, 4 Oct 2015 08:10:47 -0500 Subject: [PATCH 051/496] MAINT: More expressive if statement in np.ma.round --- numpy/ma/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index ba9643d4fd83..38657ab12343 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5107,7 +5107,7 @@ def round(self, decimals=0, out=None): """ result = self._data.round(decimals=decimals, out=out).view(type(self)) - if result.ndim: + if result.ndim > 0: result._mask = self._mask result._update_from(self) elif self._mask: From 13dd162c498391fbfd108f00ebeae060ed438f8a Mon Sep 17 00:00:00 2001 From: Thomas Robitaille Date: Sun, 19 Jul 2015 17:26:10 +0200 Subject: [PATCH 052/496] BUG: Fixed string representation of mvoid with multi-dimensional columns This fixes a bug that caused the string representation of masked structured array rows with multi-dimensional columns to fail (numpy/numpy#6019), and includes a regression test. Since __repr__ suffered from a similar bug, and since previously __repr__ returned the same as __str__ for mvoid, we now set __repr__ to reference the same method as __str__. 
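As a usage illustration, mirroring the regression test added below (the dtype
spelling here is my own shorthand, not the one used in the test):

    import numpy as np

    # One structured record whose field 'a' holds a length-3 subarray,
    # with the middle element masked.
    t_ma = np.ma.masked_array(data=[([1, 2, 3],)],
                              mask=[([False, True, False],)],
                              dtype=[('a', int, (3,))])

    # str() used to raise for such rows; it now shows the masked element,
    # and repr() returns the same text since __repr__ is now __str__:
    print(str(t_ma[0]))   # -> ([1, --, 3],)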
--- numpy/ma/core.py | 32 +++++++------------------------- numpy/ma/tests/test_core.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index c47bcc073792..ff1b528201e1 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5767,33 +5767,15 @@ def __setitem__(self, indx, value): def __str__(self): m = self._mask - if (m is nomask): - return self._data.__str__() - m = tuple(m) - if (not any(m)): + if m is nomask: return self._data.__str__() - r = self._data.tolist() - p = masked_print_option - if not p.enabled(): - p = 'N/A' - else: - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)] - return "(%s)" % ", ".join(r) + printopt = masked_print_option + rdtype = _recursive_make_descr(self._data.dtype, "O") + res = np.array([self._data]).astype(rdtype) + _recursive_printoption(res, self._mask, printopt) + return str(res[0]) - def __repr__(self): - m = self._mask - if (m is nomask): - return self._data.__repr__() - m = tuple(m) - if not any(m): - return self._data.__repr__() - p = masked_print_option - if not p.enabled(): - return self.filled(self.fill_value).__repr__() - p = str(p) - r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)] - return "(%s)" % ", ".join(r) + __repr__ = __str__ def __iter__(self): "Defines an iterator for mvoid" diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index ce6cddac7170..f89e29875be6 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -685,6 +685,42 @@ def test_mvoid_print(self): finally: masked_print_option.set_display(ini_display) + def test_mvoid_multidim_print(self): + + # regression test for gh-6019 + t_ma = masked_array(data = [([1, 2, 3],)], + mask = [([False, True, False],)], + fill_value = ([999999, 999999, 999999],), + dtype = [('a', ' Date: Sun, 4 Oct 2015 20:16:38 +0200 Subject: [PATCH 053/496] Update __init__.py --- numpy/core/__init__.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 41314cee4a12..16dcbe0b15ec 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -6,15 +6,16 @@ # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core import os -envbak = os.environ.copy() -if 'OPENBLAS_MAIN_FREE' not in os.environ: - os.environ['OPENBLAS_MAIN_FREE'] = '1' -if 'GOTOBLAS_MAIN_FREE' not in os.environ: - os.environ['GOTOBLAS_MAIN_FREE'] = '1' +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) from . import multiarray -os.environ.clear() -os.environ.update(envbak) -del envbak +for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added del os from . import umath From 7c1f44fd07917b3a2fac483c2263edd67b490eda Mon Sep 17 00:00:00 2001 From: mtran Date: Fri, 7 Aug 2015 16:02:27 -0700 Subject: [PATCH 054/496] DOC: Add RandomState stability guarantee to RandomState documentation. --- numpy/random/mtrand/mtrand.pyx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 59f9dcd6bcab..fb325d9c1bdb 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -587,6 +587,12 @@ cdef class RandomState: array filled with generated values is returned. 
If `size` is a tuple, then an array with that shape is filled and returned. + * Compatibility Guarantee* + A fixed seed and a fixed series of calls to 'RandomState' methods will + always produce the same results regardless of platform or numpy + version. Small differences in floating point values may occur due to + rounding differences between compilers. + Parameters ---------- seed : {None, int, array_like}, optional From ae2d0bb7c2d227e893195cc3e52477567781e2db Mon Sep 17 00:00:00 2001 From: jason king Date: Sat, 5 Sep 2015 23:01:24 +1000 Subject: [PATCH 055/496] DOC: Update docs for numpy.genfromtxt. Note that a list of strings can be passed as the first parameter. The strings are treated as the lines in a file. Closes #6247 --- doc/source/user/basics.io.genfromtxt.rst | 6 +++--- numpy/lib/npyio.py | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index 11205e555cec..75ba8a11dac4 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -29,9 +29,9 @@ Defining the input The only mandatory argument of :func:`~numpy.genfromtxt` is the source of the data. It can be a string corresponding to the name of a local or remote file, or a file-like object with a :meth:`read` method (such as an -actual file or a :class:`StringIO.StringIO` object). If the argument is -the URL of a remote file, this latter is automatically downloaded in the -current directory. +actual file or a :class:`StringIO.StringIO` object), a list of strings, or +a generator. If the argument is the URL of a remote file, then the file +is automatically downloaded to the current directory. The input file can be a text file or an archive. Currently, the function recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 12052a08ed96..9f90039eb616 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -1271,10 +1271,12 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, Parameters ---------- - fname : file or str - File, filename, or generator to read. If the filename extension is + fname : file, str, list of str + File, filename, list, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. + The strings in a list or strings produced by a generator are treated + as lines. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each @@ -1455,7 +1457,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, fhd = iter(fname) except TypeError: raise TypeError( - "fname must be a string, filehandle, or generator. " + "fname must be a string, filehandle, list of strings, or generator. " "(got %s instead)" % type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, From cad4c90de5292023358e31e82378815720c99c47 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 4 Oct 2015 15:05:37 -0600 Subject: [PATCH 056/496] DOC: Cleanup genfromtxt documentation a bit. 
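A quick sketch of the list-of-strings input mode that these documentation
changes describe (note the caveat above that, on Python 3, strings supplied
this way may need to be byte strings):

    import numpy as np

    # Each string in the list is treated as one line of the input.
    lines = ["1, 2, 3", "4, 5, 6"]
    data = np.genfromtxt(lines, delimiter=",")
    # -> array([[ 1.,  2.,  3.],
    #           [ 4.,  5.,  6.]])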
--- doc/source/user/basics.io.genfromtxt.rst | 22 ++++++++++++----------
 numpy/lib/npyio.py | 15 +++++++--------
 2 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 75ba8a11dac4..5c0e28e6f6b2 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -27,17 +27,19 @@ Defining the input
 ==================
 
 The only mandatory argument of :func:`~numpy.genfromtxt` is the source of
-the data. It can be a string corresponding to the name of a local or
-remote file, or a file-like object with a :meth:`read` method (such as an
-actual file or a :class:`StringIO.StringIO` object), a list of strings, or
-a generator. If the argument is the URL of a remote file, then the file
-is automatically downloaded to the current directory.
-
-The input file can be a text file or an archive. Currently, the function
+the data. It can be a string, a list of strings, or a generator. If a
+single string is provided, it is assumed to be the name of a local or
+remote file, or an open file-like object with a :meth:`read` method, for
+example, a file or :class:`StringIO.StringIO` object. If a list of strings
+or a generator returning strings is provided, each string is treated as one
+line in a file. When the URL of a remote file is passed, the file is
+automatically downloaded to the current directory and opened.
+
+Recognized file types are text files and archives. Currently, the function
 recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of
-the archive is determined by examining the extension of the file: if the
-filename ends with ``'.gz'``, a :class:`gzip` archive is expected; if it
-ends with ``'bz2'``, a :class:`bzip2` archive is assumed.
+the archive is determined from the extension of the file: if the filename
+ends with ``'.gz'``, a :class:`gzip` archive is expected; if it ends with
+``'bz2'``, a :class:`bzip2` archive is assumed.

diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 9f90039eb616..640f4fa32d04 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1271,12 +1271,11 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
 
     Parameters
     ----------
-    fname : file, str, list of str
-        File, filename, list, or generator to read. If the filename extension is
-        `.gz` or `.bz2`, the file is first decompressed. Note that
-        generators must return byte strings in Python 3k.
-        The strings in a list or strings produced by a generator are treated
-        as lines.
+    fname : file, str, list of str, generator
+        File, filename, list, or generator to read. If the filename
+        extension is `.gz` or `.bz2`, the file is first decompressed. Note
+        that generators must return byte strings in Python 3k. The strings
+        in a list or produced by a generator are treated as lines.
     dtype : dtype, optional
         Data type of the resulting array.
         If None, the dtypes will be determined by the contents of each
@@ -1457,8 +1456,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
             fhd = iter(fname)
         except TypeError:
             raise TypeError(
-                "fname must be a string, filehandle, list of strings, or generator. "
-                "(got %s instead)" % type(fname))
+                "fname must be a string, filehandle, list of strings, "
+                "or generator. Got %s instead." % type(fname))
 
     split_line = LineSplitter(delimiter=delimiter, comments=comments,
                               autostrip=autostrip)._handyman

From c74001edb7294cd4eca949162c07844a7b2676a5 Mon Sep 17 00:00:00 2001
From: "Nathaniel J.
Smith" Date: Sun, 4 Oct 2015 17:22:51 -0700 Subject: [PATCH 057/496] DEV: for new council members, clarify order of voting and talking to them Original text seemed to imply that we would always ask the potential new Council Member whether they were interested *before* we actually decided whether to offer them a spot, which could create a sticky situation if someone ever got voted down. Rephrase to make clear that things do not have to occur in this order. --- doc/source/dev/governance/governance.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 0690d02cf830..f9e60c656ca2 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -202,11 +202,12 @@ In practice, we anticipate that for most Steering Council decisions Council membership ~~~~~~~~~~~~~~~~~~ -To become eligible to join the Steering Council, an individual must be a -Project Contributor who has produced contributions that are substantial -in quality and quantity, and sustained over at least one year. Potential -Council Members are nominated by existing Council members and voted upon -by the existing Council after asking if the potential Member is +To become eligible to join the Steering Council, an individual must be +a Project Contributor who has produced contributions that are +substantial in quality and quantity, and sustained over at least one +year. Potential Council Members are nominated by existing Council +members, and become members following consensus of the existing +Council members, and confirmation that the potential Member is interested and willing to serve in that capacity. The Council will be initially formed from the set of existing Core Developers who, as of late 2015, have been significantly active over the last year. From 5c563cca22afb9be958deb98adb566feb4985986 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sun, 4 Oct 2015 17:26:30 -0700 Subject: [PATCH 058/496] DEV: governance: Include links to current membership of each group --- doc/source/dev/governance/governance.rst | 11 +++++++++++ doc/source/dev/governance/people.rst | 2 ++ 2 files changed, 13 insertions(+) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index f9e60c656ca2..2428ad1e69a5 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -202,6 +202,9 @@ In practice, we anticipate that for most Steering Council decisions Council membership ~~~~~~~~~~~~~~~~~~ +A list of current Steering Council Members is maintained at the +page :ref:`governance-people`. + To become eligible to join the Steering Council, an individual must be a Project Contributor who has produced contributions that are substantial in quality and quantity, and sustained over at least one @@ -303,6 +306,10 @@ its interactions with NumFOCUS. the reportee + 1 is the max). This avoids effective majorities resting on one person. +The current membership of the NumFOCUS Subcommittee is listed at the +page :ref:`governance-people`. + + Institutional Partners and Funding ================================== @@ -364,6 +371,10 @@ Institutional Partner benefits are: Council Member. - Council Members invited to NumPy Developer Meetings. +A list of current Institutional Partners is maintained at the page +:ref:`governance-people`. 
+ + Document history ================ diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst index 140b52415dfe..a0f08b57ddf0 100644 --- a/doc/source/dev/governance/people.rst +++ b/doc/source/dev/governance/people.rst @@ -1,3 +1,5 @@ +.. _governance-people: + Current steering council and institutional partners =================================================== From fd953f7e2134a661f44f0f85ed539048b0b09b13 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sun, 4 Oct 2015 17:31:27 -0700 Subject: [PATCH 059/496] DEV: governance: wording tweak --- doc/source/dev/governance/governance.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 2428ad1e69a5..c72f6e99969b 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -267,14 +267,14 @@ issue, but must recuse themselves from voting on the issue. Private communications of the Council ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Unless specifically required, all Council discussions and activities -will be public and done in collaboration and discussion with the Project -Contributors and Community. The Council will have a private mailing list -that will be used sparingly and only when a specific matter requires -privacy. When private communications and decisions are needed, the -Council will do its best to summarize those to the Community after -eliding personal/private/sensitive information that should not be posted -to the public internet. +To the maximum extent possible, Council discussions and activities +will be public and done in collaboration and discussion with the +Project Contributors and Community. The Council will have a private +mailing list that will be used sparingly and only when a specific +matter requires privacy. When private communications and decisions are +needed, the Council will do its best to summarize those to the +Community after eliding personal/private/sensitive information that +should not be posted to the public internet. Subcommittees ~~~~~~~~~~~~~ From 1081a33ff20cbf3d2d1a223ed1bcd8f015ed102a Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Sun, 4 Oct 2015 17:41:03 -0700 Subject: [PATCH 060/496] DEV: attempt to clarify rules for ending an Institutional Partnership --- doc/source/dev/governance/governance.rst | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index c72f6e99969b..2e39379007c8 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -344,9 +344,12 @@ an Institutional Partner. Once an institution becomes eligible for Institutional Partnership, the Steering Council must nominate and approve the Partnership. -If an existing Institutional Partner no longer has a contributing -employee, they will be given a 1 year grace period for remaining -employees to begin contributing. +If at some point an existing Institutional Partner stops having any +contributing employees, then a one year grace period commences. If at +the end of this one year period they continue not to have any +contributing employees, then their Institutional Partnership will +lapse, and resuming it will require going through the normal process +for new Partnerships. An Institutional Partner is free to pursue funding for their work on The Project through any legal means. 
This could involve a non-profit From e9f44ffb5232a298733c4299b6be2b078b34c2a9 Mon Sep 17 00:00:00 2001 From: eulerreich Date: Sun, 4 Oct 2015 22:26:27 -0500 Subject: [PATCH 061/496] typo --- numpy/add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 293005434a8d..a53db35d4d96 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -2108,7 +2108,7 @@ def luf(lamdaexpr, *args, **kwargs): Using the Einstein summation convention, many common multi-dimensional array operations can be represented in a simple fashion. This function - provides a way compute such summations. The best way to understand this + provides a way to compute such summations. The best way to understand this function is to try the examples below, which show how many common NumPy functions can be implemented as calls to `einsum`. From fdd7f04c909b438b052f5ce6d5db06efbc6965f0 Mon Sep 17 00:00:00 2001 From: jason king Date: Mon, 5 Oct 2015 15:53:16 +1100 Subject: [PATCH 062/496] DOC: fixing some minor nitpicks. string to str, can't to cannot. --- numpy/ctypeslib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 6c65860e8d7c..fa1dcad6f124 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -99,10 +99,10 @@ def load_library(libname, loader_path): Parameters ---------- - libname : string + libname : str Name of the library, which can have 'lib' as a prefix, but without an extension. - loader_path : string + loader_path : str Where the library can be found. Returns @@ -114,7 +114,7 @@ def load_library(libname, loader_path): ------ OSError If there is no library with the expected extension, or the - library is defective and can't be loaded. + library is defective and cannot be loaded. """ if ctypes.__version__ < '1.0.1': import warnings From cd212173210a59ff34aa4edd3308bc520ee3e974 Mon Sep 17 00:00:00 2001 From: Lars Buitinck Date: Mon, 5 Oct 2015 07:49:47 +0200 Subject: [PATCH 063/496] ENH: speed up cov by ~10% for large arrays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces n² divisions by one division and n² multiplications. --- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a0c9f12748dd..399e24fe87e1 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -2269,7 +2269,7 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None): # Determine the normalization if w is None: - fact = float(X.shape[1] - ddof) + fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: @@ -2287,7 +2287,7 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None): else: X_T = (X*w).T c = dot(X, X_T.conj()) - c /= fact + c *= 1. / np.float64(fact) return c.squeeze() From b6a8b35d13ce712272824af31d465ca437b9cd0e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 4 Oct 2015 13:18:43 -0600 Subject: [PATCH 064/496] DOC: Update RandomState guarantee to be more explicit. 
--- numpy/random/mtrand/mtrand.pyx | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index fb325d9c1bdb..cd50c496858b 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -587,11 +587,14 @@ cdef class RandomState:
         array filled with generated values is returned. If `size` is a tuple,
         then an array with that shape is filled and returned.
 
-        * Compatibility Guarantee*
-        A fixed seed and a fixed series of calls to 'RandomState' methods will
-        always produce the same results regardless of platform or numpy
-        version. Small differences in floating point values may occur due to
-        rounding differences between compilers.
+        *Compatibility Guarantee*
+        A fixed seed and a fixed series of calls to 'RandomState' methods using
+        the same parameters will always produce the same results up to roundoff
+        error except when the values were incorrect. Incorrect values will be
+        fixed and the NumPy version in which the fix was made will be noted in
+        the relevant docstring. Extension of existing parameter ranges and the
+        addition of new parameters is allowed as long as the previous behavior
+        remains unchanged.
 
         Parameters
         ----------

From 84dc0cb469c47187fd11ba030e2c661cbece9c53 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 5 Oct 2015 11:05:10 -0600
Subject: [PATCH 065/496] BUG: Fix missing np prefix in test_multiarray.py.

Bug introduced in 68e61c2f.

--- numpy/core/tests/test_multiarray.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 183a30b29685..2630f04db2ff 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2559,7 +2559,7 @@ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
 
     def test_out_override(self):
         # regression test for github bug 4753
-        class OutClass(ndarray):
+        class OutClass(np.ndarray):
             def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                 if 'out' in kw:
                     tmp_kw = kw.copy()

From 780c1053d7c938e251abe4abb7b81d2f7034ca04 Mon Sep 17 00:00:00 2001
From: Lars Buitinck
Date: Sat, 3 Oct 2015 14:18:51 +0200
Subject: [PATCH 066/496] TST: new np.corrcoef improved stability

--- numpy/lib/tests/test_function_base.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 5e758fb89595..4516c92488ab 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1597,6 +1597,12 @@ def test_empty(self):
         assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
                            np.array([[np.nan, np.nan], [np.nan, np.nan]]))
 
+    def test_extreme(self):
+        x = [[1e-100, 1e100], [1e100, 1e-100]]
+        with np.errstate(all='raise'):
+            c = corrcoef(x)
+        assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
+
 
 class TestCov(TestCase):
     x1 = np.array([[0, 2], [1, 1], [2, 0]]).T

From 9c99f1d69600983775c7b5917e33db37ab18ea9d Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 5 Oct 2015 22:30:23 -0600
Subject: [PATCH 067/496] DOC: Update 1.10.x release notes.
Version format was incorrect, was x.y.z.dev+githash instead of
x.y.z.dev0+githash

---
 doc/release/1.10.0-notes.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 404e53cd1232..75530db4e354 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -53,7 +53,7 @@ Compatibility notes
 numpy version string
 ~~~~~~~~~~~~~~~~~~~~
 The numpy version string for development builds has been changed from
-``x.y.z.dev-githash`` to ``x.y.z.dev+githash`` (note the +) in order to comply
+``x.y.z.dev-githash`` to ``x.y.z.dev0+githash`` (note the +) in order to comply
 with PEP 440.

 relaxed stride checking

From 6fb2f17975fc1eec548685543d0f2d5e343fd050 Mon Sep 17 00:00:00 2001
From: jason king
Date: Wed, 7 Oct 2015 23:21:46 +1100
Subject: [PATCH 068/496] DOC: fixing beta calculation for numpy.random.gumbel

---
 numpy/random/mtrand/mtrand.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index cd50c496858b..97ea9506e2e8 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -3087,7 +3087,7 @@ cdef class RandomState:
         ...    means.append(a.mean())
         ...    maxima.append(a.max())
         >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)
-        >>> beta = np.std(maxima)*np.pi/np.sqrt(6)
+        >>> beta = np.std(maxima) * np.sqrt(6) / np.pi
         >>> mu = np.mean(maxima) - 0.57721*beta
         >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
         ...          * np.exp(-np.exp(-(bins - mu)/beta)),

From 84187c8232aa9dc89eb5ca4ca3e34319c9006ae9 Mon Sep 17 00:00:00 2001
From: Tobias Megies
Date: Tue, 6 Oct 2015 19:09:28 +0200
Subject: [PATCH 069/496] DOC: fix var. reference in percentile docstring

The argument for the original input array is named `a` but in the
docstring it was at some point referred to as `arr`.

[skip ci]

---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b09dcad15fd4..555d0838645c 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3411,7 +3411,7 @@ def percentile(a, q, axis=None, out=None,
     keepdims : bool, optional
         If this is set to True, the axes which are reduced are left in
         the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
+        the result will broadcast correctly against the original array `a`.

        ..
versionadded:: 1.9.0 From 43d1bf78aa71dd218f7f6849148c31b2de3fe47d Mon Sep 17 00:00:00 2001 From: Colin Jermain Date: Thu, 17 Sep 2015 14:22:56 -0400 Subject: [PATCH 070/496] BUG: only require tell/seek in np.fromfile when buffered --- numpy/core/include/numpy/npy_3kcompat.h | 60 +++++++++++++++++++++---- 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index cd96697982ba..6a11cf960ab6 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -147,8 +147,8 @@ PyUnicode_Concat2(PyObject **left, PyObject *right) static NPY_INLINE FILE* npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) { - int fd, fd2; - PyObject *ret, *os; + int fd, fd2, unbuf; + PyObject *ret, *os, *io, *io_raw; npy_off_t pos; FILE *handle; @@ -193,9 +193,30 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) /* Record the original raw file handle position */ *orig_pos = npy_ftell(handle); if (*orig_pos == -1) { - PyErr_SetString(PyExc_IOError, "obtaining file position failed"); - fclose(handle); - return NULL; + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } } /* Seek raw handle to the Python-side position */ @@ -224,8 +245,8 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) static NPY_INLINE int npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) { - int fd; - PyObject *ret; + int fd, unbuf; + PyObject *ret, *io, *io_raw; npy_off_t position; position = npy_ftell(handle); @@ -241,9 +262,30 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) if (fd == -1) { return -1; } + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { - PyErr_SetString(PyExc_IOError, "seeking file failed"); - return -1; + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } } if (position == -1) { From 94842acb726f47722033052f8ca094481bdf7a90 Mon Sep 17 00:00:00 2001 From: Colin Jermain Date: Sun, 4 Oct 2015 17:47:55 -0400 Subject: [PATCH 071/496] TST: for unbuffered IO without tell/seek --- numpy/core/tests/test_multiarray.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 2630f04db2ff..d47b9f0da926 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3365,6 +3365,19 @@ def test_roundtrip_repr(self): y = np.fromstring(s, sep="@") assert_array_equal(x, y) + def 
test_unbuffered_fromfile(self): + # gh-6246 + self.x.tofile(self.filename) + + def fail(*args, **kwargs): + raise io.IOError('Can not tell or seek') + + f = io.open(self.filename, 'rb', buffering=0) + f.seek = fail + f.tell = fail + y = np.fromfile(self.filename, dtype=self.dtype) + assert_array_equal(y, self.x.flat) + def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, From 02fc99244721145896f8f17ec41cdc64567419ef Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Thu, 8 Oct 2015 01:20:54 +0200 Subject: [PATCH 072/496] ENH: improve worst case of ma.clump_masked The worst case of alternating masked iterated all boundaries and sliced half away, improve this by only iterating the needed half of the boundary index array. --- numpy/ma/extras.py | 37 ++++++++++++++++++----------------- numpy/ma/tests/test_extras.py | 20 +++++++++++++++++++ 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 322303f03646..b4021df631d8 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1797,15 +1797,27 @@ def _ezclump(mask): Returns a series of slices. """ - #def clump_masked(a): if mask.ndim > 1: mask = mask.ravel() idx = (mask[1:] ^ mask[:-1]).nonzero() idx = idx[0] + 1 - slices = [slice(left, right) - for (left, right) in zip(itertools.chain([0], idx), - itertools.chain(idx, [len(mask)]),)] - return slices + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r def clump_unmasked(a): @@ -1844,12 +1856,7 @@ def clump_unmasked(a): mask = getattr(a, '_mask', nomask) if mask is nomask: return [slice(0, a.size)] - slices = _ezclump(mask) - if a[0] is masked: - result = slices[1::2] - else: - result = slices[::2] - return result + return _ezclump(~mask) def clump_masked(a): @@ -1888,13 +1895,7 @@ def clump_masked(a): mask = ma.getmask(a) if mask is nomask: return [] - slices = _ezclump(mask) - if len(slices): - if a[0] is masked: - slices = slices[::2] - else: - slices = slices[1::2] - return slices + return _ezclump(mask) ############################################################################### diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index b6d90299516f..f07083d172e7 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -84,6 +84,22 @@ def test_masked_all_like(self): test = masked_all_like(control) assert_equal(test, control) + def check_clump(self, f): + for i in range(1, 7): + for j in range(2**i): + k = np.arange(i, dtype=int) + ja = np.full(i, j, dtype=int) + a = masked_array(2**k) + a.mask = (ja & (2**k)) != 0 + s = 0 + for sl in f(a): + s += a.data[sl].sum() + if f == clump_unmasked: + assert_equal(a.compressed().sum(), s) + else: + a.mask = ~a.mask + assert_equal(a.compressed().sum(), s) + def test_clump_masked(self): # Test clump_masked a = masked_array(np.arange(10)) @@ -93,6 +109,8 @@ def test_clump_masked(self): control = [slice(0, 3), slice(6, 7), slice(8, 10)] assert_equal(test, control) + self.check_clump(clump_masked) + def test_clump_unmasked(self): # Test clump_unmasked a = masked_array(np.arange(10)) @@ -101,6 +119,8 @@ def test_clump_unmasked(self): control = [slice(3, 6), slice(7, 8), ] assert_equal(test, control) + 
self.check_clump(clump_unmasked) + def test_flatnotmasked_contiguous(self): # Test flatnotmasked_contiguous a = arange(10) From 3aa0637c2688c83cf163c795ca77484fd370c98e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 8 Oct 2015 14:26:48 -0600 Subject: [PATCH 073/496] MAINT: Remove single file compilation support. --- numpy/core/code_generators/genapi.py | 16 ++------------- .../code_generators/generate_numpy_api.py | 6 ------ .../code_generators/generate_ufunc_api.py | 4 ---- numpy/core/include/numpy/_numpyconfig.h.in | 1 - numpy/core/include/numpy/ndarraytypes.h | 6 +----- numpy/core/setup.py | 20 ------------------- numpy/core/src/multiarray/_datetime.h | 5 ----- numpy/core/src/multiarray/arraytypes.h | 2 -- numpy/core/src/multiarray/buffer.h | 4 ---- numpy/core/src/multiarray/conversion_utils.h | 4 ---- .../core/src/multiarray/datetime_busdaycal.h | 4 ---- numpy/core/src/multiarray/descriptor.h | 2 -- numpy/core/src/multiarray/getset.h | 2 -- numpy/core/src/multiarray/mapping.h | 4 ---- numpy/core/src/multiarray/methods.h | 2 -- numpy/core/src/multiarray/number.h | 5 ----- numpy/core/src/multiarray/scalartypes.c.src | 2 -- numpy/core/src/multiarray/scalartypes.h | 13 ------------ numpy/core/src/multiarray/sequence.h | 4 ---- numpy/core/src/multiarray/usertypes.h | 4 ---- numpy/core/src/umath/loops.c.src | 2 -- numpy/core/src/umath/reduction.c | 2 -- numpy/core/src/umath/scalarmath.c.src | 3 +-- numpy/core/src/umath/ufunc_object.c | 3 +-- numpy/core/src/umath/ufunc_type_resolution.c | 2 -- numpy/core/src/umath/umathmodule.c | 2 -- 26 files changed, 5 insertions(+), 119 deletions(-) diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 84bd042f53ae..05166f1e56a1 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -311,11 +311,7 @@ def array_api_define(self): def internal_define(self): astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT PyTypeObject %(type)s; -#else - NPY_NO_EXPORT PyTypeObject %(type)s; -#endif +extern NPY_NO_EXPORT PyTypeObject %(type)s; """ % {'type': self.name} return astr @@ -337,11 +333,7 @@ def array_api_define(self): def internal_define(self): astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - extern NPY_NO_EXPORT %(type)s %(name)s; -#else - NPY_NO_EXPORT %(type)s %(name)s; -#endif +extern NPY_NO_EXPORT %(type)s %(name)s; """ % {'type': self.type, 'name': self.name} return astr @@ -365,11 +357,7 @@ def array_api_define(self): def internal_define(self): astr = """\ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif """ return astr diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py index 415cbf7fcd00..a97564fa40a9 100644 --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -17,15 +17,9 @@ npy_bool obval; } PyBoolScalarObject; -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#else -NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; -NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; -NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; -#endif %s diff --git 
a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py index 7a33004e4244..bb2ad78540b3 100644 --- a/numpy/core/code_generators/generate_ufunc_api.py +++ b/numpy/core/code_generators/generate_ufunc_api.py @@ -11,11 +11,7 @@ h_template = r""" #ifdef _UMATHMODULE -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#else -NPY_NO_EXPORT PyTypeObject PyUFunc_Type; -#endif %s diff --git a/numpy/core/include/numpy/_numpyconfig.h.in b/numpy/core/include/numpy/_numpyconfig.h.in index 20f21c1a74a1..0510ff9b2ee2 100644 --- a/numpy/core/include/numpy/_numpyconfig.h.in +++ b/numpy/core/include/numpy/_numpyconfig.h.in @@ -29,7 +29,6 @@ @DEFINE_NPY_SIZEOF_LONGLONG@ @DEFINE_NPY_SIZEOF_PY_LONG_LONG@ -@DEFINE_NPY_ENABLE_SEPARATE_COMPILATION@ @DEFINE_NPY_RELAXED_STRIDES_CHECKING@ #define NPY_VISIBILITY_HIDDEN @VISIBILITY_HIDDEN@ diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index 8403ee29f001..f1fe89f1a158 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -6,11 +6,7 @@ #include "npy_cpu.h" #include "utils.h" -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN /* Only use thread if configured in config and python supports it */ #if defined WITH_THREAD && !NPY_NO_SMP diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 361bf90821a6..aa9e03e0607d 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -15,8 +15,6 @@ from setup_common import * -# Set to True to enable multiple file compilations (experimental) -ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0") # Set to True to enable relaxed strides checking. This (mostly) means # that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags. 
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0") @@ -444,9 +442,6 @@ def generate_config_h(ext, build_dir): else: PYTHON_HAS_UNICODE_WIDE = False - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1)) - if NPY_RELAXED_STRIDES_CHECKING: moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) @@ -549,9 +544,6 @@ def generate_numpyconfig_h(ext, build_dir): moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) - if ENABLE_SEPARATE_COMPILATION: - moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1)) - if NPY_RELAXED_STRIDES_CHECKING: moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) @@ -850,11 +842,6 @@ def generate_multiarray_templated_sources(ext, build_dir): else: extra_info = {} - if not ENABLE_SEPARATE_COMPILATION: - multiarray_deps.extend(multiarray_src) - multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')] - multiarray_src.append(generate_multiarray_templated_sources) - config.add_extension('multiarray', sources=multiarray_src + [generate_config_h, @@ -922,13 +909,6 @@ def generate_umath_c(ext, build_dir): join(codegen_dir, 'generate_ufunc_api.py'), join('src', 'private', 'ufunc_override.h')] + npymath_sources - if not ENABLE_SEPARATE_COMPILATION: - umath_deps.extend(umath_src) - umath_src = [join('src', 'umath', 'umathmodule_onefile.c')] - umath_src.append(generate_umath_templated_sources) - umath_src.append(join('src', 'umath', 'funcs.inc.src')) - umath_src.append(join('src', 'umath', 'simd.inc.src')) - config.add_extension('umath', sources=umath_src + [generate_config_h, diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h index 8cfd5d8515fc..46e1e453e917 100644 --- a/numpy/core/src/multiarray/_datetime.h +++ b/numpy/core/src/multiarray/_datetime.h @@ -1,13 +1,8 @@ #ifndef _NPY_PRIVATE__DATETIME_H_ #define _NPY_PRIVATE__DATETIME_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS]; extern NPY_NO_EXPORT int _days_per_month_table[2][12]; -#else -NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS]; -NPY_NO_EXPORT int _days_per_month_table[2][12]; -#endif NPY_NO_EXPORT void numpy_pydatetime_import(void); diff --git a/numpy/core/src/multiarray/arraytypes.h b/numpy/core/src/multiarray/arraytypes.h index 15520ce740d1..d1c16cdeac55 100644 --- a/numpy/core/src/multiarray/arraytypes.h +++ b/numpy/core/src/multiarray/arraytypes.h @@ -3,11 +3,9 @@ #include "common.h" -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyArray_Descr LONGLONG_Descr; extern NPY_NO_EXPORT PyArray_Descr LONG_Descr; extern NPY_NO_EXPORT PyArray_Descr INT_Descr; -#endif NPY_NO_EXPORT int set_typeinfo(PyObject *dict); diff --git a/numpy/core/src/multiarray/buffer.h b/numpy/core/src/multiarray/buffer.h index c0a1f8e26016..d2ea01b349fb 100644 --- a/numpy/core/src/multiarray/buffer.h +++ b/numpy/core/src/multiarray/buffer.h @@ -1,11 +1,7 @@ #ifndef _NPY_PRIVATE_BUFFER_H_ #define _NPY_PRIVATE_BUFFER_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyBufferProcs array_as_buffer; -#else -NPY_NO_EXPORT PyBufferProcs array_as_buffer; -#endif NPY_NO_EXPORT void _array_dealloc_buffer_info(PyArrayObject *self); diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h index 59d3120a43e0..cd43f25c38d3 100644 --- a/numpy/core/src/multiarray/conversion_utils.h +++ 
b/numpy/core/src/multiarray/conversion_utils.h @@ -63,10 +63,6 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags); * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; -#else -NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; -#endif #endif diff --git a/numpy/core/src/multiarray/datetime_busdaycal.h b/numpy/core/src/multiarray/datetime_busdaycal.h index 5d7325733f66..cd79d0bb5ed4 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.h +++ b/numpy/core/src/multiarray/datetime_busdaycal.h @@ -26,11 +26,7 @@ typedef struct { npy_bool weekmask[7]; } NpyBusDayCalendar; -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type; -#else -NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type; -#endif /* diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h index 01a77895405e..ff1fc980a394 100644 --- a/numpy/core/src/multiarray/descriptor.h +++ b/numpy/core/src/multiarray/descriptor.h @@ -36,8 +36,6 @@ NPY_NO_EXPORT PyObject * arraydescr_construction_repr(PyArray_Descr *dtype, int includealignflag, int shortrepr); -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT char *_datetime_strings[]; -#endif #endif diff --git a/numpy/core/src/multiarray/getset.h b/numpy/core/src/multiarray/getset.h index 98bd217f726d..4f1209de5a64 100644 --- a/numpy/core/src/multiarray/getset.h +++ b/numpy/core/src/multiarray/getset.h @@ -1,8 +1,6 @@ #ifndef _NPY_ARRAY_GETSET_H_ #define _NPY_ARRAY_GETSET_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; -#endif #endif diff --git a/numpy/core/src/multiarray/mapping.h b/numpy/core/src/multiarray/mapping.h index 40ccabd62894..4e22f79df2c9 100644 --- a/numpy/core/src/multiarray/mapping.h +++ b/numpy/core/src/multiarray/mapping.h @@ -1,11 +1,7 @@ #ifndef _NPY_ARRAYMAPPING_H_ #define _NPY_ARRAYMAPPING_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; -#else -NPY_NO_EXPORT PyMappingMethods array_as_mapping; -#endif /* diff --git a/numpy/core/src/multiarray/methods.h b/numpy/core/src/multiarray/methods.h index fc3b987fad90..7bf87f42d53f 100644 --- a/numpy/core/src/multiarray/methods.h +++ b/numpy/core/src/multiarray/methods.h @@ -1,9 +1,7 @@ #ifndef _NPY_ARRAY_METHODS_H_ #define _NPY_ARRAY_METHODS_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyMethodDef array_methods[]; -#endif NPY_NO_EXPORT const char * npy_casting_to_string(NPY_CASTING casting); diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h index 43f04d1c6b91..0c8355e3170d 100644 --- a/numpy/core/src/multiarray/number.h +++ b/numpy/core/src/multiarray/number.h @@ -39,13 +39,8 @@ typedef struct { PyObject *conjugate; } NumericOps; -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT NumericOps n_ops; extern NPY_NO_EXPORT PyNumberMethods array_as_number; -#else -NPY_NO_EXPORT NumericOps n_ops; -NPY_NO_EXPORT PyNumberMethods array_as_number; -#endif NPY_NO_EXPORT PyObject * array_int(PyArrayObject *v); diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index b5e0fde85096..ee5741ae0315 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -3724,7 +3724,6 @@ NPY_NO_EXPORT PyTypeObject 
Py@NAME@ArrType_Type = { /**end repeat**/ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION /* * This table maps the built-in type numbers to their scalar * type numbers. Note that signed integers are mapped to INTNEG_SCALAR, @@ -3760,7 +3759,6 @@ _npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; */ NPY_NO_EXPORT signed char _npy_type_promotion_table[NPY_NTYPES][NPY_NTYPES]; -#endif NPY_NO_EXPORT void initialize_casting_tables(void) diff --git a/numpy/core/src/multiarray/scalartypes.h b/numpy/core/src/multiarray/scalartypes.h index c9b80f9b3965..b8d6cf83ee9a 100644 --- a/numpy/core/src/multiarray/scalartypes.h +++ b/numpy/core/src/multiarray/scalartypes.h @@ -2,7 +2,6 @@ #define _NPY_SCALARTYPES_H_ /* Internal look-up tables */ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT unsigned char _npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; extern NPY_NO_EXPORT signed char @@ -13,18 +12,6 @@ extern NPY_NO_EXPORT signed char _npy_smallest_type_of_kind_table[NPY_NSCALARKINDS]; extern NPY_NO_EXPORT signed char _npy_next_larger_type_table[NPY_NTYPES]; -#else -NPY_NO_EXPORT unsigned char -_npy_can_cast_safely_table[NPY_NTYPES][NPY_NTYPES]; -NPY_NO_EXPORT signed char -_npy_scalar_kinds_table[NPY_NTYPES]; -NPY_NO_EXPORT signed char -_npy_type_promotion_table[NPY_NTYPES][NPY_NTYPES]; -NPY_NO_EXPORT signed char -_npy_smallest_type_of_kind_table[NPY_NSCALARKINDS]; -NPY_NO_EXPORT signed char -_npy_next_larger_type_table[NPY_NTYPES]; -#endif NPY_NO_EXPORT void initialize_casting_tables(void); diff --git a/numpy/core/src/multiarray/sequence.h b/numpy/core/src/multiarray/sequence.h index 321c0200fce3..b28c50d975ca 100644 --- a/numpy/core/src/multiarray/sequence.h +++ b/numpy/core/src/multiarray/sequence.h @@ -1,10 +1,6 @@ #ifndef _NPY_ARRAY_SEQUENCE_H_ #define _NPY_ARRAY_SEQUENCE_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PySequenceMethods array_as_sequence; -#else -NPY_NO_EXPORT PySequenceMethods array_as_sequence; -#endif #endif diff --git a/numpy/core/src/multiarray/usertypes.h b/numpy/core/src/multiarray/usertypes.h index 51f6a8720cce..b3e386c5c671 100644 --- a/numpy/core/src/multiarray/usertypes.h +++ b/numpy/core/src/multiarray/usertypes.h @@ -1,11 +1,7 @@ #ifndef _NPY_PRIVATE_USERTYPES_H_ #define _NPY_PRIVATE_USERTYPES_H_ -#ifdef NPY_ENABLE_SEPARATE_COMPILATION extern NPY_NO_EXPORT PyArray_Descr **userdescrs; -#else -NPY_NO_EXPORT PyArray_Descr **userdescrs; -#endif NPY_NO_EXPORT void PyArray_InitArrFuncs(PyArray_ArrFuncs *f); diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index a46b9e7a83e6..854c1e17a078 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -6,10 +6,8 @@ #include "Python.h" #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY -#endif #include "numpy/npy_common.h" #include "numpy/arrayobject.h" diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index bb4c0f44e91e..ecd387777cd3 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -13,10 +13,8 @@ #include #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY -#endif #include diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index 010261214b8f..c371a079f654 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -11,10 
+11,9 @@ #include "Python.h" #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY -#endif + #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/arrayscalars.h" diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 4bc9582b4c9a..63ed4f492d2d 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -29,10 +29,9 @@ #include "Python.h" #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION + #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY -#endif #include "npy_pycompat.h" diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 6f4f4123d026..ce9eec4b61b2 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -14,10 +14,8 @@ #include "Python.h" #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY -#endif #include "npy_pycompat.h" diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index d19d5b9d2eac..45accb970787 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -21,9 +21,7 @@ #include "Python.h" #include "npy_config.h" -#ifdef ENABLE_SEPARATE_COMPILATION #define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API -#endif #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" From 3b8d7a49c71166dfb0d828f248d9b217762ab520 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 8 Oct 2015 14:30:23 -0600 Subject: [PATCH 074/496] MAINT: Remove single file c files. These were the files in which all the other files were included for single file builds. --- .../src/multiarray/multiarraymodule_onefile.c | 63 ------------------- numpy/core/src/umath/umathmodule_onefile.c | 7 --- 2 files changed, 70 deletions(-) delete mode 100644 numpy/core/src/multiarray/multiarraymodule_onefile.c delete mode 100644 numpy/core/src/umath/umathmodule_onefile.c diff --git a/numpy/core/src/multiarray/multiarraymodule_onefile.c b/numpy/core/src/multiarray/multiarraymodule_onefile.c deleted file mode 100644 index 3924f3cf47f2..000000000000 --- a/numpy/core/src/multiarray/multiarraymodule_onefile.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * This file includes all the .c files needed for a complete multiarray module. 
- * This is used in the case where separate compilation is not enabled - * - * Note that the order of the includes matters - */ - -#include "common.c" - -#include "scalartypes.c" -#include "scalarapi.c" - -#include "datetime.c" -#include "datetime_strings.c" -#include "datetime_busday.c" -#include "datetime_busdaycal.c" -#include "arraytypes.c" -#include "vdot.c" - -#include "hashdescr.c" -#include "numpyos.c" - -#include "descriptor.c" -#include "flagsobject.c" -#include "alloc.c" -#include "ctors.c" -#include "iterators.c" -#include "mapping.c" -#include "number.c" -#include "getset.c" -#include "sequence.c" -#include "methods.c" -#include "convert_datatype.c" -#include "convert.c" -#include "shape.c" -#include "item_selection.c" -#include "calculation.c" -#include "usertypes.c" -#include "refcount.c" -#include "conversion_utils.c" -#include "buffer.c" - -#include "nditer_constr.c" -#include "nditer_api.c" -#include "nditer_templ.c" -#include "nditer_pywrap.c" -#include "lowlevel_strided_loops.c" -#include "dtype_transfer.c" -#include "einsum.c" -#include "array_assign.c" -#include "array_assign_scalar.c" -#include "array_assign_array.c" -#include "ucsnarrow.c" -#include "arrayobject.c" -#include "numpymemoryview.c" -#include "mem_overlap.c" -#include "multiarraymodule.c" -#include "compiled_base.c" - -#if defined(HAVE_CBLAS) -#include "python_xerbla.c" -#include "cblasfuncs.c" -#endif diff --git a/numpy/core/src/umath/umathmodule_onefile.c b/numpy/core/src/umath/umathmodule_onefile.c deleted file mode 100644 index 3661ad81abb0..000000000000 --- a/numpy/core/src/umath/umathmodule_onefile.c +++ /dev/null @@ -1,7 +0,0 @@ -#include "loops.c" -#include "scalarmath.c" - -#include "ufunc_object.c" -#include "ufunc_type_resolution.c" -#include "reduction.c" -#include "umathmodule.c" From 60ca6346918d4a716f2d81ebeca930fbf4b6c190 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 8 Oct 2015 14:39:50 -0600 Subject: [PATCH 075/496] TST: Remove single file compilation test from travis ci. --- .travis.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5d5c149b4aeb..7074e7ada182 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,7 @@ python: - 2.7 - 3.2 - 3.3 + - 3.4 - 3.5-dev matrix: include: @@ -46,9 +47,7 @@ matrix: - python3-dev - python3-nose - python: 2.7 - env: NPY_SEPARATE_COMPILATION=0 PYTHON_OO=1 - - python: 3.4 - env: NPY_RELAXED_STRIDES_CHECKING=0 + env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1 - python: 2.7 env: USE_WHEEL=1 before_install: From 32862b4002d5206655b4e120c0651ada46c7bd83 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 8 Oct 2015 14:44:59 -0600 Subject: [PATCH 076/496] DOC: Document removal of single file builds in 1.11.0 release notes. --- doc/release/1.11.0-notes.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 891a676842a3..68ee370ee148 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -10,8 +10,8 @@ Highlights Dropped Support: -* Building with Bento is no longer supported and the associated files have - been removed. +* Bento build support and related files have been removed. +* Single file build support and related files have been removed. Future Changes: From c2f00f785cc7046c90709fdd06fbc5ce212b0e0f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 9 Oct 2015 13:43:28 +0200 Subject: [PATCH 077/496] DOC: typo: change NumFocus to NumFOCUS in one place in governance doc. 
--- doc/source/dev/governance/governance.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 2e39379007c8..3b34e0da1cab 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -26,7 +26,7 @@ The Project =========== The NumPy Project (The Project) is an open source software project -affiliated with the 501(c)3 NumFocus Foundation. The goal of The Project +affiliated with the 501(c)3 NumFOCUS Foundation. The goal of The Project is to develop open source software for array-based computing in Python, and in particular the ``numpy`` package, along with related software such as ``f2py`` and the NumPy Sphinx extensions. The Software developed From e2eabf4b262df2728bc1d40ab665a3060a6953d7 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Fri, 9 Oct 2015 12:35:16 -0400 Subject: [PATCH 078/496] DOC: update documentation of sign for complex and nan inputs --- numpy/core/code_generators/ufunc_docstrings.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 068644132847..34ac59984c20 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2799,12 +2799,13 @@ def add_newdoc(place, name, doc): """ Returns an element-wise indication of the sign of a number. - The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. + The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan + is returned for nan inputs. - For complex inputs, the `sign` function returns: - ``-1+0j if x.real < 0, - 1+0j if x.real > 0, - sign(x.imag)+0j if x.real == 0.`` + For complex inputs, the `sign` function returns + ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. + + complex(nan, 0) is returned for complex nan inputs. Parameters ---------- From dc17f218c0f3165931d667594662acebb05fefce Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Fri, 9 Oct 2015 21:51:55 +0200 Subject: [PATCH 079/496] MAINT: remove Wreturn-type warnings from config checks closes gh-6427 --- numpy/distutils/command/autodist.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py index af53c5104585..d5e78963c128 100644 --- a/numpy/distutils/command/autodist.py +++ b/numpy/distutils/command/autodist.py @@ -55,6 +55,7 @@ def check_compiler_gcc4(cmd): #if (! defined __GNUC__) || (__GNUC__ < 4) #error gcc >= 4 required #endif + return 0; } """ return cmd.try_compile(body, None, None) @@ -72,6 +73,7 @@ def check_gcc_function_attribute(cmd, attribute, name): int main() { + return 0; } """ % (attribute, name) return cmd.try_compile(body, None, None) != 0 From eceb60bfad97b48f97c2144c18d41e19cf6baca9 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Fri, 9 Oct 2015 13:49:59 -0700 Subject: [PATCH 080/496] DEV: add CC-0 public domain dedication to governance document We want to allow other projects to steal from us, like we stole from Jupyter/IPython :-). 
This relicensing / public domain dedication is possible because all text here is either by me (and thus copyright me) or else taken from the Jupyter/IPython document, and their document is also under CC-0 as per https://github.com/jupyter/governance/pull/9 --- doc/source/dev/governance/governance.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/dev/governance/governance.rst b/doc/source/dev/governance/governance.rst index 3b34e0da1cab..54e52363c00d 100644 --- a/doc/source/dev/governance/governance.rst +++ b/doc/source/dev/governance/governance.rst @@ -389,3 +389,12 @@ Acknowledgements Substantial portions of this document were adapted from the `Jupyter/IPython project's governance document `_. + +License +======= + +To the extent possible under law, the authors have waived all +copyright and related or neighboring rights to the NumPy project +governance and decision-making document, as per the `CC-0 public +domain dedication / license +`_. From fbf549b9c2912a356dfeb71f3a411874879c9556 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Sat, 10 Oct 2015 00:36:20 +0200 Subject: [PATCH 081/496] BUG: mask nan to 1 in ordered compare msvc2008 32 bit seems to miscompile it otherwise. closes gh-6428 --- numpy/core/src/umath/simd.inc.src | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 7652f18c5306..48c29483ff1c 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -536,11 +536,13 @@ sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4, static NPY_INLINE int sse2_ordered_cmp_@kind@_@TYPE@(const @type@ a, const @type@ b) { + @vtype@ one = @vpre@_set1_@vsuf@(1); @type@ tmp; @vtype@ v = @vpre@_@VOP@_@vsufs@(@vpre@_load_@vsufs@(&a), @vpre@_load_@vsufs@(&b)); + v = @vpre@_and_@vsuf@(v, one); @vpre@_store_@vsufs@(&tmp, v); - return !(tmp == 0.); + return tmp; } static void From a3df126f1e9f39e91d684e5eaad26a844566ea12 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Sat, 10 Oct 2015 01:15:12 +0200 Subject: [PATCH 082/496] BLD: import setuptools to allow compile with VS2008 python2.7 sdk Needed to build numpy with Microsoft Visual C++ Compiler for Python 2.7 Otherwise one gets an Unable to find vcvarsall.bat error SET DISTUTILS_USE_SDK=1 SET MSSdk=1 triggers the same from distutils. 
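A condensed sketch of the guard this adds (same logic as the setup.py diff
below, trimmed for illustration):

    import sys

    # bdist_wheel and the MSVC-for-Python-2.7 SDK both require setuptools
    if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' or
            sys.version_info[0] < 3 and sys.platform == "win32"):
        import setuptools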
--- setup.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 90dcb2419835..8e5c3d04fad5 100755 --- a/setup.py +++ b/setup.py @@ -238,8 +238,12 @@ def setup_package(): FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: - if len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel': - # bdist_wheel needs setuptools + if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' or + sys.version_info[0] < 3 and sys.platform == "win32"): + # bdist_wheel and the MS python2.7 VS sdk needs setuptools + # the latter can also be triggered by (see python issue23246) + # SET DISTUTILS_USE_SDK=1 + # SET MSSdk=1 import setuptools from numpy.distutils.core import setup cwd = os.path.abspath(os.path.dirname(__file__)) From 14761c881934b0fbe1955c49a1d3afd73955d7fb Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Fri, 9 Oct 2015 16:42:07 -0700 Subject: [PATCH 083/496] BLD: do not build exclusively for SSE4.2 processors --- numpy/distutils/intelccompiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index a1f34e304eab..1f49dc4b4d38 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -78,7 +78,7 @@ def initialize(self, plat_name=None): self.lib = self.find_exe('xilib') self.linker = self.find_exe('xilink') self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99', '/QxSSE4.2'] + '/Qstd=c99', '/QaxSSE4.2'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/Qstd=c99', '/Z7', '/D_DEBUG'] From 95be11191f413ae06ab9baff0cf12e9d7978ed36 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Fri, 9 Oct 2015 16:44:39 -0700 Subject: [PATCH 084/496] BLD: do not build exclusively for SSE4.2 processors --- numpy/distutils/fcompiler/intel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index 28624918d6c7..2dd08e744a07 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -205,7 +205,7 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler): version_match = simple_version_match(start='Intel\(R\).*?64,') def get_flags_arch(self): - return ['/QxSSE4.2'] + return ['/QaxSSE4.2'] if __name__ == '__main__': From b53eb5d74dcac7239c41e617cc98f314caeae4c5 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Fri, 9 Oct 2015 16:27:02 -0700 Subject: [PATCH 085/496] BLD: enable SSE2 for 32-bit msvc 9 and 10 compilers --- numpy/distutils/msvccompiler.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py index 0d28f6b9f674..86e9c5cc2807 100644 --- a/numpy/distutils/msvccompiler.py +++ b/numpy/distutils/msvccompiler.py @@ -2,6 +2,8 @@ import distutils.msvccompiler from distutils.msvccompiler import * +from .system_info import platform_bits + class MSVCCompiler(distutils.msvccompiler.MSVCCompiler): def __init__(self, verbose=0, dry_run=0, force=0): @@ -15,3 +17,6 @@ def initialize(self, plat_name=None): os.environ['lib'] = environ_lib + os.environ['lib'] if environ_include is not None: os.environ['include'] = environ_include + os.environ['include'] + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] From cdb1b2b2a90010f6395abc813e27977560c659ba Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 10 Oct 2015 11:34:59 -0600 Subject: [PATCH 
086/496] DOC: Document the reason msvc requires SSE2 on 32 bit platforms. --- numpy/distutils/msvccompiler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py index 86e9c5cc2807..4c3658d5c8fc 100644 --- a/numpy/distutils/msvccompiler.py +++ b/numpy/distutils/msvccompiler.py @@ -18,5 +18,7 @@ def initialize(self, plat_name=None): if environ_include is not None: os.environ['include'] = environ_include + os.environ['include'] if platform_bits == 32: + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. self.compile_options += ['/arch:SSE2'] self.compile_options_debug += ['/arch:SSE2'] From 18ba6812fd9de8c0d7eaf22d0e0288b4ce24e389 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 11 Oct 2015 03:52:01 +0200 Subject: [PATCH 087/496] DOC: add lib.Arrayterator to reference guide. Closes gh-6404. --- doc/source/reference/routines.indexing.rst | 1 + numpy/lib/__init__.py | 2 +- numpy/lib/arrayterator.py | 7 +++---- numpy/lib/info.py | 6 ++++++ 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/doc/source/reference/routines.indexing.rst b/doc/source/reference/routines.indexing.rst index 853d24126cc3..8c3729f21c09 100644 --- a/doc/source/reference/routines.indexing.rst +++ b/doc/source/reference/routines.indexing.rst @@ -61,3 +61,4 @@ Iterating over arrays ndenumerate ndindex flatiter + lib.Arrayterator diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 8c420b0c3301..0606dfbbd4a3 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -21,7 +21,7 @@ from .arraysetops import * from .npyio import * from .financial import * -from .arrayterator import * +from .arrayterator import Arrayterator from .arraypad import * from ._version import * diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py index 4aa977c46894..80b369bd5cf3 100644 --- a/numpy/lib/arrayterator.py +++ b/numpy/lib/arrayterator.py @@ -69,9 +69,8 @@ class Arrayterator(object): Examples -------- - >>> import numpy as np >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) + >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape (3, 4, 5, 6) @@ -149,13 +148,13 @@ def flat(self): See Also -------- - `Arrayterator` + Arrayterator flatiter Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) - >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2) + >>> a_itor = np.lib.Arrayterator(a, 2) >>> for subarr in a_itor.flat: ... if not subarr: diff --git a/numpy/lib/info.py b/numpy/lib/info.py index e004b35a4d2b..ca1e7239756e 100644 --- a/numpy/lib/info.py +++ b/numpy/lib/info.py @@ -109,6 +109,12 @@ polyval Evaluate polynomial at given argument ================ =================== +Iterators +--------- +================ =================== +Arrayterator A buffered iterator for big arrays. +================ =================== + Import Tricks ------------- ================ =================== From 7978f3d422e24d1f92d626d19763e5b87193824e Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Sun, 11 Oct 2015 15:54:48 -0700 Subject: [PATCH 088/496] DEP: Remove warning for `full` when dtype is set. See @rkern's comment in #6382. 
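For reference, the intended behavior after this change, mirroring the
updated tests below (a sketch, not part of the patch):

    import numpy as np

    np.full(1, 1)               # still warns: dtype inferred from fill_value
    np.full(1, None)            # still warns
    np.full(1, 1, dtype=float)  # explicit dtype: no FutureWarning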
---
 numpy/core/numeric.py                 | 6 +++---
 numpy/core/tests/test_deprecations.py | 6 ++++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 5d4464ea71fc..5c0e272398ee 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -291,10 +291,10 @@ def full(shape, fill_value, dtype=None, order='C'):
     """
     a = empty(shape, dtype, order)
-    if array(fill_value).dtype != a.dtype:
+    if dtype is None and array(fill_value).dtype != a.dtype:
         warnings.warn(
-            "in the future, full(..., {0!r}) will return an array of {1!r}".
-            format(fill_value, array(fill_value).dtype), FutureWarning)
+            "in the future, full({0}, {1!r}) will return an array of {2!r}".
+            format(shape, fill_value, array(fill_value).dtype), FutureWarning)
     multiarray.copyto(a, fill_value, casting='unsafe')
     return a

diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e3aea7efb588..e2542195fe4a 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -10,8 +10,9 @@
 import warnings

 import numpy as np
-from numpy.testing import (run_module_suite, assert_raises,
-                           assert_warns, assert_array_equal, assert_)
+from numpy.testing import (
+    run_module_suite, assert_raises, assert_warns, assert_no_warnings,
+    assert_array_equal, assert_)


 class _DeprecationTestCase(object):
@@ -382,6 +383,7 @@ class TestFullDefaultDtype:
     def test_full_default_dtype(self):
         assert_warns(FutureWarning, np.full, 1, 1)
         assert_warns(FutureWarning, np.full, 1, None)
+        assert_no_warnings(np.full, 1, 1, float)


 if __name__ == "__main__":

From 9a9504f45e05619e01e966e526c99bfe211b0c5c Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sun, 11 Oct 2015 16:40:31 -0600
Subject: [PATCH 089/496] DOC: Create 1.10.1 release notes.

[ci skip]

---
 doc/release/1.10.1-notes.rst | 41 ++++++++++++++++++++++++++++++
 doc/source/release.rst       | 1 +
 2 files changed, 42 insertions(+)
 create mode 100644 doc/release/1.10.1-notes.rst

diff --git a/doc/release/1.10.1-notes.rst b/doc/release/1.10.1-notes.rst
new file mode 100644
index 000000000000..9096f6c157ca
--- /dev/null
+++ b/doc/release/1.10.1-notes.rst
@@ -0,0 +1,41 @@
+NumPy 1.10.1 Release Notes
+**************************
+
+This release deals with a few build problems that showed up in 1.10.0. Most
+users would not have seen these problems. The differences are:
+
+* Compiling with msvc9 or msvc10 for 32 bit Windows now requires SSE2.
+  This was the easiest fix for what looked to be some miscompiled code when
+  SSE2 was not used. If you need to compile for 32 bit Windows systems
+  without SSE2 support, mingw32 should still work.
+
+* Make compiling with VS2008 python2.7 SDK easier
+
+* Change Intel compiler options so that code will also be generated to
+  support systems without SSE4.2.
+
+* Some _config test functions needed an explicit integer return in
+  order to avoid the openSUSE rpmlinter erring out.
+
+* We ran into a problem with PyPI not allowing reuse of filenames and a
+  resulting proliferation of *.*.*.postN releases. Not only were the names
+  getting out of hand, some packages were unable to work with the postN
+  suffix.
+
+
+Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+
+Commits:
+
+45a3d84 DEP: Remove warning for `full` when dtype is set.
+0c1a5df BLD: import setuptools to allow compile with VS2008 python2.7 sdk
+04211c6 BUG: mask nan to 1 in ordered compare
+826716f DOC: Document the reason msvc requires SSE2 on 32 bit platforms.
+49fa187 BLD: enable SSE2 for 32-bit msvc 9 and 10 compilers
+dcbc4cc MAINT: remove Wreturn-type warnings from config checks
+d6564cb BLD: do not build exclusively for SSE4.2 processors
+15cb66f BLD: do not build exclusively for SSE4.2 processors
+c38bc08 DOC: fix var. reference in percentile docstring
+78497f4 DOC: Sync 1.10.0-notes.rst in 1.10.x branch with master.
+

diff --git a/doc/source/release.rst b/doc/source/release.rst
index 5ec9eb342689..201d3e77fae4 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,6 +3,7 @@ Release Notes
 *************

 .. include:: ../release/1.11.0-notes.rst
+.. include:: ../release/1.10.1-notes.rst
 .. include:: ../release/1.10.0-notes.rst
 .. include:: ../release/1.9.2-notes.rst
 .. include:: ../release/1.9.1-notes.rst

From 6967f50dd1eb922cfa0e28956baa5a0bdc85331a Mon Sep 17 00:00:00 2001
From: Jonathan Helmus
Date: Thu, 8 Oct 2015 17:40:11 -0500
Subject: [PATCH 090/496] BUG: ma.put expands nomask

Previously when put was used on a MaskedArray with nomask the mask would
be incorrectly set to the mask of the values argument.

closes #6425

---
 numpy/ma/core.py            | 20 +++++++++++---------
 numpy/ma/tests/test_core.py | 14 ++++++++++++++
 2 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 9e8dad08cecb..6984165797e2 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -4399,7 +4399,6 @@ def put(self, indices, values, mode='raise'):
           [7 -- 30]]

         """
-        m = self._mask
         # Hard mask: Get rid of the values/indices that fall on masked data
         if self._hardmask and self._mask is not nomask:
             mask = self._mask[indices]
@@ -4411,16 +4410,19 @@ def put(self, indices, values, mode='raise'):

         self._data.put(indices, values, mode=mode)

-        if m is nomask:
-            m = getmask(values)
+        # short circuit if neither self nor values are masked
+        if self._mask is nomask and getmask(values) is nomask:
+            return
+
+        m = getmaskarray(self).copy()
+
+        if getmask(values) is nomask:
+            m.put(indices, False, mode=mode)
         else:
-            m = m.copy()
-            if getmask(values) is nomask:
-                m.put(indices, False, mode=mode)
-            else:
-                m.put(indices, values._mask, mode=mode)
-            m = make_mask(m, copy=False, shrink=True)
+            m.put(indices, values._mask, mode=mode)
+        m = make_mask(m, copy=False, shrink=True)
         self._mask = m
+        return

     def ids(self):
         """

diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 550f03131000..f832ee60d7a1 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -2671,6 +2671,20 @@ def test_put(self):
         assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
         assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

+    def test_put_nomask(self):
+        # GitHub issue 6425
+        x = zeros(10)
+        z = array([3., -1.], mask=[False, True])
+
+        x.put([1, 2], z)
+        self.assertTrue(x[0] is not masked)
+        assert_equal(x[0], 0)
+        self.assertTrue(x[1] is not masked)
+        assert_equal(x[1], 3)
+        self.assertTrue(x[2] is masked)
+        self.assertTrue(x[3] is not masked)
+        assert_equal(x[3], 0)
+
     def test_put_hardmask(self):
         # Tests put on hardmask
         d = arange(5)

From 33adec24a1403df5c47afe235ac1869a8f489489 Mon Sep 17 00:00:00 2001
From: Stephan Hoyer
Date: Mon, 12 Oct 2015 16:44:00 -0700
Subject: [PATCH 091/496] BUG: fix casting rules for generic datetime64/timedelta64 units

Fixes GH6452

There are two types of datetime64/timedelta64 objects with generic time
units:

* NaT
* unit-less timedelta64 objects

Both of these should be safely castable to any more specific dtype.
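As a quick illustration (a sketch; the new tests in the diff below check
exactly these cases):

    import numpy as np

    # generic (unit-less) types cast safely to any specific unit
    np.can_cast('m8', 'm8[h]', casting='safe')   # True
    np.can_cast('M8', 'M8[h]', casting='safe')   # True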
However, more specific dtypes should not be safely castable to generic
units. Otherwise, the result of `np.datetime64('NaT')` or
`np.timedelta64(1)` is entirely useless, because they can't be used in any
arithmetic operations or comparisons.

This is a regression from NumPy 1.9, where these sorts of operations worked
because the default casting rules with ufuncs were less strict.

---
 numpy/core/src/multiarray/datetime.c | 22 ++++++++++++++--------
 numpy/core/tests/test_datetime.py    | 21 +++++++++++++++++++++
 2 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 9e4e00e9c3fd..264178d30bcf 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -1232,12 +1232,18 @@ datetime_metadata_divides(
 {
     npy_uint64 num1, num2;

-    /* Generic units divide into anything */
-    if (divisor->base == NPY_FR_GENERIC) {
+    /*
+     * Any unit can always divide into generic units. In other words, we
+     * should be able to convert generic units into any more specific unit.
+     */
+    if (dividend->base == NPY_FR_GENERIC) {
         return 1;
     }
-    /* Non-generic units never divide into generic units */
-    else if (dividend->base == NPY_FR_GENERIC) {
+    /*
+     * However, generic units cannot always divide into more specific units.
+     * We cannot safely convert datetimes with units back into generic units.
+     */
+    else if (divisor->base == NPY_FR_GENERIC) {
         return 0;
     }

@@ -1330,7 +1336,7 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit,
          */
         case NPY_SAME_KIND_CASTING:
             if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) {
-                return src_unit == dst_unit;
+                return src_unit == NPY_FR_GENERIC;
             }
             else {
                 return (src_unit <= NPY_FR_D && dst_unit <= NPY_FR_D) ||
@@ -1344,7 +1350,7 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit,
          */
         case NPY_SAFE_CASTING:
             if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) {
-                return src_unit == dst_unit;
+                return src_unit == NPY_FR_GENERIC;
             }
             else {
                 return (src_unit <= dst_unit) &&
@@ -1380,7 +1386,7 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit,
          */
         case NPY_SAME_KIND_CASTING:
             if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) {
-                return src_unit == dst_unit;
+                return src_unit == NPY_FR_GENERIC;
             }
             else {
                 return (src_unit <= NPY_FR_M && dst_unit <= NPY_FR_M) ||
@@ -1394,7 +1400,7 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit,
          */
         case NPY_SAFE_CASTING:
             if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) {
-                return src_unit == dst_unit;
+                return src_unit == NPY_FR_GENERIC;
            }
            else {
                return (src_unit <= dst_unit) &&

diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 5fa28186736c..98da6638d156 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -114,6 +114,27 @@ def test_datetime_casting_rules(self):
         # Can cast safely if the integer multiplier does divide
         assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))

+        # We can always cast types with generic units (corresponding to NaT) to
+        # more specific types
+        assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
+        assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
+        assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
+        assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
+        # but not the other way around
+        assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
+        assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
+        assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
+        assert_(not
np.can_cast('M8[h]', 'M8', casting='safe')) + + def test_compare_generic_nat(self): + # regression tests for GH6452 + assert_equal(np.datetime64('NaT'), + np.datetime64('2000') + np.timedelta64('NaT')) + # nb. we may want to make NaT != NaT true in the future; this test + # verifies the existing behavior (and that it should not warn) + assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us')) + assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT')) + def test_datetime_scalar_construction(self): # Construct with different units assert_equal(np.datetime64('1950-03-12', 'D'), From 034c2e67b111da24a6c8f92578377e65410f42e9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 14 Oct 2015 11:57:06 -0600 Subject: [PATCH 092/496] MAINT: Use Python 3.5 instead of 3.5-dev for travis 3.5 testing. Python 3.5 has been released, so update 3.5 testing version. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7074e7ada182..64ed69aa819d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ python: - 3.2 - 3.3 - 3.4 - - 3.5-dev + - 3.5 matrix: include: - python: 3.3 From 6587854828a31ea873499a171af9b794cd0b8b17 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 14 Oct 2015 23:22:00 +0200 Subject: [PATCH 093/496] REL: update Paver file to ensure sdist contents are OK for releases. --- pavement.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pavement.py b/pavement.py index acceed0ade4a..f4b1b2b1672e 100644 --- a/pavement.py +++ b/pavement.py @@ -549,8 +549,16 @@ def tarball_name(type='gztar'): @task def sdist(options): + # First clean the repo and update submodules (for up-to-date doc html theme + # and Sphinx extensions) + sh('git clean -xdf') + sh('git submodule init') + sh('git submodule update') + # To be sure to bypass paver when building sdist... paver + numpy.distutils # do not play well together. + # Cython is run over all Cython files in setup.py, so generated C files + # will be included. sh('python setup.py sdist --formats=gztar,zip') # Copy the superpack into installers dir From 9a62a26ab9689bbcf8a4eeb0076848ca9c44d4ae Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 14 Oct 2015 23:23:15 +0200 Subject: [PATCH 094/496] REL: remove release.sh because it's not in use anymore. We're using numpy-vendor/fabfile.py instead. --- release.sh | 74 ------------------------------------------------------ 1 file changed, 74 deletions(-) delete mode 100644 release.sh diff --git a/release.sh b/release.sh deleted file mode 100644 index e73f464437a6..000000000000 --- a/release.sh +++ /dev/null @@ -1,74 +0,0 @@ -#! /bin/sh -# Script to build tarballs, windows and OS X installers on OS X - -# Note that we build the corresponding set of OS X binaries to the python.org -# downloads, i.e. two versions for Python 2.7. The Intel 32/64-bit version is -# for OS X 10.6+, the other dmg installers are for 10.3+ and are built on 10.5 - -#--------------- -# Build tarballs -#--------------- -paver sdist - - -#-------------------- -# Build documentation -#-------------------- -# Check we're using the correct g++/c++ for the 32-bit 2.6 version we build for -# the docs and the 64-bit 2.7 dmg installer. -# We do this because for Python 2.6 we use a symlink on the PATH to select -# /usr/bin/g++-4.0, while for Python 2.7 we need the default 4.2 version. 
-export PATH=~/Code/tmp/gpp40temp/:$PATH -gpp="$(g++ --version | grep "4.0")" -if [ -z "$gpp" ]; then - echo "Wrong g++ version, we need 4.0 to compile scipy with Python 2.6" - exit 1 -fi - -# bootstrap needed to ensure we build the docs from the right scipy version -paver bootstrap -source bootstrap/bin/activate - -# build pdf docs -paver pdf - - -#-------------------------------------------------------- -# Build Windows and 64-bit OS X installers (on OS X 10.6) -#-------------------------------------------------------- -export MACOSX_DEPLOYMENT_TARGET=10.6 -# Use GCC 4.2 for 64-bit OS X installer for Python 2.7 -export PATH=~/Code/tmp/gpp42temp/:$PATH -gpp="$(g++ --version | grep "4.2")" -if [ -z "$gpp" ]; then - echo "Wrong g++ version, we need 4.2 for 64-bit binary for Python 2.7" - exit 1 -fi - -paver dmg -p 2.7 # 32/64-bit version - -paver bdist_superpack -p 3.2 -paver bdist_superpack -p 3.1 -paver bdist_superpack -p 2.7 -paver bdist_superpack -p 2.6 - - -#-------------------------------------------- -# Build 32-bit OS X installers (on OS X 10.5) -#-------------------------------------------- -#export MACOSX_DEPLOYMENT_TARGET=10.3 -#paver dmg -p 2.6 -#paver dmg -p 2.7 # 32-bit version - - -paver write_release_and_log - - -#------------------------------------------------------- -# Build basic (no SSE) Windows installers to put on PyPi -#------------------------------------------------------- -paver bdist_wininst_simple -p 2.6 -paver bdist_wininst_simple -p 2.7 -paver bdist_wininst_simple -p 3.1 -paver bdist_wininst_simple -p 3.2 - From 7b438fa90e53abe8b2f0356ec50daed6ab299794 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 14 Oct 2015 23:33:03 +0200 Subject: [PATCH 095/496] TST: raise errors for dev versions and warnings for releases on test runs. This approach is less error prone than switching from "develop" to "release" in maintenance branches by hand. See gh-6461 for details. --- doc/HOWTO_RELEASE.rst.txt | 16 ---------------- numpy/testing/nosetester.py | 22 +++++++++++++--------- 2 files changed, 13 insertions(+), 25 deletions(-) diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index a88e4db47ade..5fed523c1c7b 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -196,16 +196,6 @@ After a date is set, create a new maintenance/x.y.z branch, add new empty release notes for the next version in the master branch and update the Trac Milestones. -Handle test warnings --------------------- -The default behavior of the test suite in the master branch is to report errors -for DeprecationWarnings and RuntimeWarnings that are issued. For a released -version this is not desired. Therefore any known warnings should be solved or -explicitly silenced before making the release branch, then when the branch is -made, the default behavior should be switched to not raise errors. This is -done in the constructor of the NoseTester class in numpy/testing/nosetester.py, -by replacing ``raise_warnings="develop"`` with ``raise_warnings="release"``. - Make sure current trunk builds a package correctly -------------------------------------------------- :: @@ -220,12 +210,6 @@ best to read the pavement.py script. .. note:: The following steps are repeated for the beta(s), release candidates(s) and the final release. -Merge doc wiki edits --------------------- -The edits in the documentation wiki suitable for merging should be merged, -ideally just before making the release branch. How to do this is described in -detail in doc/HOWTO_MERGE_WIKI_DOCS.txt. 
- Check that docs can be built ---------------------------- Do:: diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index faed0e6514b0..95ded8d93ff4 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -10,7 +10,7 @@ import sys import warnings from numpy.compat import basestring -from numpy import ModuleDeprecationWarning +import numpy as np def get_package_name(filepath): @@ -151,7 +151,7 @@ class NoseTester(object): The package to test. If a string, this should be the full path to the package. If None (default), `package` is set to the module from which `NoseTester` is initialized. - raise_warnings : str or sequence of warnings, optional + raise_warnings : None, str or sequence of warnings, optional This specifies which warnings to configure as 'raise' instead of 'warn' during the test execution. Valid strings are: @@ -163,11 +163,10 @@ class NoseTester(object): Notes ----- The default for `raise_warnings` is - ``(DeprecationWarning, RuntimeWarning)`` for the master branch of NumPy, - and ``()`` for maintenance branches and released versions. The purpose - of this switching behavior is to catch as many warnings as possible - during development, but not give problems for packaging of released - versions. + ``(DeprecationWarning, RuntimeWarning)`` for development versions of NumPy, + and ``()`` for released versions. The purpose of this switching behavior + is to catch as many warnings as possible during development, but not give + problems for packaging of released versions. """ # Stuff to exclude from tests. These are from numpy.distutils @@ -177,7 +176,12 @@ class NoseTester(object): 'pyrex_ext', 'swig_ext'] - def __init__(self, package=None, raise_warnings="develop"): + def __init__(self, package=None, raise_warnings=None): + if raise_warnings is None and '.dev0' in np.__version__: + raise_warnings = "develop" + elif raise_warnings is None: + raise_warnings = "release" + package_name = None if package is None: f = sys._getframe(1) @@ -418,7 +422,7 @@ def test(self, label='fast', verbose=1, extra_argv=None, warnings.filterwarnings('ignore', message='Not importing directory') warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - warnings.filterwarnings("ignore", category=ModuleDeprecationWarning) + warnings.filterwarnings("ignore", category=np.ModuleDeprecationWarning) warnings.filterwarnings("ignore", category=FutureWarning) # Filter out boolean '-' deprecation messages. This allows # older versions of scipy to test without a flood of messages. 
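The version-based default introduced in the patch above can be illustrated
with a short sketch. This is illustrative only, not the patched NoseTester
code itself (the real constructor logic is shown in the diff above); the
variable names here are chosen for the example:

    import numpy as np

    # Development builds of NumPy carry a '.dev0' marker in the version
    # string (e.g. '1.11.0.dev0+...'); released versions do not.
    raise_warnings = "develop" if '.dev0' in np.__version__ else "release"
    print(np.__version__, '->', raise_warnings)
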
From 5c0cd1b23e1a6797cf3af04c5c3cfb41312f1750 Mon Sep 17 00:00:00 2001 From: Chris Hogan Date: Thu, 15 Oct 2015 14:09:24 -0500 Subject: [PATCH 096/496] BUG: Fix Intel compiler flags for OS X build --- numpy/distutils/intelccompiler.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index 1f49dc4b4d38..20c6d2ba413a 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -20,12 +20,17 @@ def __init__(self, verbose=0, dry_run=0, force=0): self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' '-fomit-frame-pointer -openmp') compiler = self.cc_exe + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' -shared -shared-intel') + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') class IntelItaniumCCompiler(IntelCCompiler): @@ -51,12 +56,17 @@ def __init__(self, verbose=0, dry_run=0, force=0): self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 ' '-fomit-frame-pointer -openmp -xSSE4.2') compiler = self.cc_exe + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' -shared -shared-intel') + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') if platform.system() == 'Windows': From 07c66c8ea72181954f4fcf9f62af677d22eec639 Mon Sep 17 00:00:00 2001 From: Samuel St-Jean Date: Fri, 16 Oct 2015 11:33:51 +0200 Subject: [PATCH 097/496] Fixed a typo in np.inner doc --- numpy/add_newdocs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index a53db35d4d96..b00e229c3e4f 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -1228,7 +1228,7 @@ def luf(lamdaexpr, *args, **kwargs): Parameters ---------- a, b : array_like - If `a` and `b` are nonscalar, their last dimensions of must match. + If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- From e149fac25eefcc93c821ea5cca261aa55abd1f7f Mon Sep 17 00:00:00 2001 From: Gabi Davar Date: Mon, 13 Apr 2015 20:33:26 +0300 Subject: [PATCH 098/496] LIBPATH with spaces is now supported Python 2.7+ and Win32 --- numpy/distutils/ccompiler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index a61d21029a75..b5970d76fe08 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -617,8 +617,8 @@ def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): # Also fix up the various compiler modules, which do # from distutils.ccompiler import gen_lib_options # Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.'+_cc+'compiler') +for _cc in ['msvc9', 'msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.' 
+ _cc + 'compiler')
     if _m is not None:
         setattr(_m, 'gen_lib_options', gen_lib_options)

From f8edf9e397808bfa4db94dab12fd564080239d92 Mon Sep 17 00:00:00 2001
From: Alexander Heger
Date: Sat, 12 Sep 2015 13:26:01 +1000
Subject: [PATCH 099/496] BUG: Allow nested use of parameters for array dimensions in f2py.

Nested use of parameters in specifying dimensions caused problems. For
example

    parameter (i=7)
    common buf(nvar*(nvar+1) * (n + 1))

This fix was suggested by Pearu on GitHub.

Closes #5877.
---
 numpy/f2py/crackfortran.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 8c024734233d..f392f946ced4 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -2624,11 +2624,12 @@ def analyzevars(block):
                 if d in params:
                     d = str(params[d])
                 for p in list(params.keys()):
-                    m = re.match(
-                        r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', d, re.I)
-                    if m:
+                    re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
+                    m = re_1.match(d)
+                    while m:
                         d = m.group('before') + \
                             str(params[p]) + m.group('after')
+                        m = re_1.match(d)
                     if d == star:
                         dl = [star]
                     else:

From 81bdad0401275890d31bf2edc274fdb8e9ef950a Mon Sep 17 00:00:00 2001
From: Alexander Heger
Date: Sat, 12 Sep 2015 12:30:52 +1000
Subject: [PATCH 100/496] BUG: allow extension of common blocks in numpy.f2py

Lack of this feature resulted in the generation of incorrect *.pyf
files. For example, the pyf file created by:

    subroutine sub3 (some arguments)
    real a, b, c, d
    common /coeff/ a, b
    common /coeff/ c, d
    return
    end

should contain both common statements and the declaration of all four
variables a, b, c, and d.

Closes #5876.
---
 doc/source/f2py/signature-file.rst | 13 +++++++------
 numpy/f2py/crackfortran.py         |  7 ++-----
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst
index cfc35ebda1d8..54b8736f12e3 100644
--- a/doc/source/f2py/signature-file.rst
+++ b/doc/source/f2py/signature-file.rst
@@ -178,12 +178,13 @@ Common block statements:
   := [ ( ) ] [ , ]

-  One ``python module`` block should not contain two or more
-  ``common`` blocks with the same name. Otherwise, the latter ones are
-  ignored. The types of variables in ```` are defined
-  using ````. Note that the corresponding
-  ```` may contain array specifications;
-  then you don't need to specify these in ````.
+  If a ``python module`` block contains two or more ``common`` blocks
+  with the same name, the variables from the additional declarations
+  are appened. The types of variables in ```` are
+  defined using ````. Note that the
+  corresponding ```` may contain array
+  specifications; then you don't need to specify these in
+  ````.

 Other statements:
   The ```` part refers to any other Fortran language

diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 8c024734233d..6f52a9a610e7 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1372,11 +1372,8 @@ def analyzeline(m, case, line):
         if 'common' in groupcache[groupcounter]:
             commonkey = groupcache[groupcounter]['common']
         for c in cl:
-            if c[0] in commonkey:
-                outmess(
-                    'analyzeline: previously defined common block encountered.
Skipping.\n') - continue - commonkey[c[0]] = [] + if c[0] not in commonkey: + commonkey[c[0]] = [] for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: if i: commonkey[c[0]].append(i) From b1ce387fd259b63530ebe5390ad528bac304075f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 15 Oct 2015 16:51:22 -0600 Subject: [PATCH 101/496] MAINT: Spellcheck doc/source/f2py/signature-file.rst. --- doc/source/f2py/signature-file.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index 54b8736f12e3..a8924712ff17 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -21,7 +21,7 @@ scanning Fortran codes and writing a signature file, F2PY lowers all cases automatically except in multiline blocks or when ``--no-lower`` option is used. -The syntax of signature files is overvied below. +The syntax of signature files is presented below. Python module block ===================== @@ -180,7 +180,7 @@ Common block statements: If a ``python module`` block contains two or more ``common`` blocks with the same name, the variables from the additional declarations - are appened. The types of variables in ```` are + are appended. The types of variables in ```` are defined using ````. Note that the corresponding ```` may contain array specifications; then you don't need to specify these in @@ -401,8 +401,8 @@ The following attributes are used by F2PY: a C function. This is because the concepts of Fortran- and C contiguity overlap in one-dimensional cases. - If ``intent(c)`` is used as an statement but without entity - declaration list, then F2PY adds ``intent(c)`` attibute to all + If ``intent(c)`` is used as a statement but without an entity + declaration list, then F2PY adds the ``intent(c)`` attribute to all arguments. Also, when wrapping C functions, one must use ``intent(c)`` @@ -597,7 +597,7 @@ A C expression may contain: ``shape(,)`` Returns the ````-th dimension of an array ````. ``len()`` - Returns the lenght of an array ````. + Returns the length of an array ````. ``size()`` Returns the size of an array ````. ``slen()`` From 3a816a4db9b498eb64eb837fdcca0fa8ddbe063e Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Sat, 17 Oct 2015 14:00:36 -0400 Subject: [PATCH 102/496] BUG: recarrays viewed as subarrays don't convert to np.record type Record array views were updated in #5943 to return np.record dtype where possible, but forgot about the case of sub-arrays. That's fixed here, so accessing subarray fields by attribute or index works sensibly, as well as viewing a record array as a subarray dtype, and printing subarrays. This also happens to fix #6459, since it affects the same lines. Fixes #6497 #6459 --- numpy/core/records.py | 30 +++++++++++++++++++----------- numpy/core/tests/test_records.py | 23 +++++++++++++++++++++++ 2 files changed, 42 insertions(+), 11 deletions(-) diff --git a/numpy/core/records.py b/numpy/core/records.py index 4a995533a544..4ce3fe98a273 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -448,12 +448,14 @@ def __getattribute__(self, attr): # At this point obj will always be a recarray, since (see # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is - # non-structured, convert it to an ndarray. If obj is structured leave - # it as a recarray, but make sure to convert to the same dtype.type (eg - # to preserve numpy.record type if present), since nested structured - # fields do not inherit type. 
+ # non-structured, convert it to an ndarray. Then if obj is structured + # with void type convert it to the same dtype.type (eg to preserve + # numpy.record type if present), since nested structured fields do not + # inherit type. Don't do this for non-void structures though. if obj.dtype.fields: - return obj.view(dtype=(self.dtype.type, obj.dtype.fields)) + if issubclass(obj.dtype.type, nt.void): + return obj.view(dtype=(self.dtype.type, obj.dtype)) + return obj else: return obj.view(ndarray) @@ -463,8 +465,9 @@ def __getattribute__(self, attr): # Thus, you can't create attributes on-the-fly that are field names. def __setattr__(self, attr, val): - # Automatically convert (void) dtypes to records. - if attr == 'dtype' and issubclass(val.type, nt.void): + # Automatically convert (void) structured types to records + # (but not non-void structures, subarrays, or non-structured voids) + if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields: val = sb.dtype((record, val)) newattr = attr not in self.__dict__ @@ -499,7 +502,9 @@ def __getitem__(self, indx): # we might also be returning a single element if isinstance(obj, ndarray): if obj.dtype.fields: - return obj.view(dtype=(self.dtype.type, obj.dtype.fields)) + if issubclass(obj.dtype.type, nt.void): + return obj.view(dtype=(self.dtype.type, obj.dtype)) + return obj else: return obj.view(type=ndarray) else: @@ -519,11 +524,14 @@ def __repr__(self): # If this is a full record array (has numpy.record dtype), # or if it has a scalar (non-void) dtype with no records, # represent it using the rec.array function. Since rec.array - # converts dtype to a numpy.record for us, use only dtype.descr, - # not repr(dtype). + # converts dtype to a numpy.record for us, convert back + # to non-record before printing + plain_dtype = self.dtype + if plain_dtype.type is record: + plain_dtype = sb.dtype((nt.void, plain_dtype)) lf = '\n'+' '*len("rec.array(") return ('rec.array(%s, %sdtype=%s)' % - (lst, lf, repr(self.dtype.descr))) + (lst, lf, plain_dtype)) else: # otherwise represent it using np.array plus a view # This should only happen if the user is playing diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 7a18f295bb1a..290bc4fa7fe1 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -121,6 +121,23 @@ def test_recarray_views(self): assert_equal(type(rv), np.recarray) assert_equal(rv.dtype.type, np.record) + # check that accessing nested structures keep record type, but + # not for subarrays, non-void structures, non-structured voids + test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)), + ('d', ('i8', 'i4,i4'))] + r = np.rec.array([((1,1), b'11111111', [1,1], 1), + ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype) + assert_equal(r.a.dtype.type, np.record) + assert_equal(r.b.dtype.type, np.void) + assert_equal(r.c.dtype.type, np.float32) + assert_equal(r.d.dtype.type, np.int64) + # check the same, but for views + r = np.rec.array(np.ones(4, dtype='i4,i4')) + assert_equal(r.view('f4,f4').dtype.type, np.record) + assert_equal(r.view(('i4',2)).dtype.type, np.int32) + assert_equal(r.view('V8').dtype.type, np.void) + assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64) + #check that we can undo the view arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')] for arr in arrs: @@ -135,6 +152,12 @@ def test_recarray_repr(self): a = np.array(np.ones(4, dtype='f8')) assert_(repr(np.rec.array(a)).startswith('rec.array')) + # check that the 'np.record' part of the dtype 
isn't shown + a = np.rec.array(np.ones(3, dtype='i4,i4')) + assert_equal(repr(a).find('numpy.record'), -1) + a = np.rec.array(np.ones(3, dtype='i4')) + assert_(repr(a).find('dtype=int32') != -1) + def test_recarray_from_names(self): ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), From 0b83a90f5e6157ced66792c3de6f68436ee96e05 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 18 Oct 2015 14:24:00 +0200 Subject: [PATCH 103/496] REL: add "make upload" command for built docs, update "make dist". Changes to "make dist" for building docs: - remove .chm related line, we don't distribute those on docs.scipy.org - only include needed pdf files in dist.tar.gz [ci skip] --- doc/Makefile | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index d8c1ab918bf6..b52933e14dfa 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -32,6 +32,7 @@ help: @echo " linkcheck to check all external links for integrity" @echo " dist PYVER=... to make a distribution-ready tree" @echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs" + @echo " upload USERNAME=... RELEASE=... to upload built docs to docs.scipy.org" clean: -rm -rf build/* source/reference/generated @@ -59,6 +60,7 @@ gitwash-update: INSTALL_DIR = $(CURDIR)/build/inst-dist/ INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages +UPLOAD_DIR=/srv/docs_scipy_org/doc/numpy-$(RELEASE) DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" SPHINXOPTS="$(SPHINXOPTS)" @@ -72,8 +74,8 @@ real-dist: dist-build html html-scipyorg -rm -rf build/dist cp -r build/html-scipyorg build/dist cd build/html && zip -9r ../dist/numpy-html.zip . - cp build/latex/numpy-*.pdf build/dist - -zip build/dist/numpy-chm.zip build/htmlhelp/numpy.chm + cp build/latex/numpy-ref.pdf build/dist + cp build/latex/numpy-user.pdf build/dist cd build/dist && tar czf ../dist.tar.gz * chmod ug=rwX,o=rX -R build/dist find build/dist -type d -print0 | xargs -0r chmod g+s @@ -84,6 +86,21 @@ dist-build: install -d $(subst :, ,$(INSTALL_PPH)) $(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg +upload: + # SSH must be correctly configured for this to work. + # Assumes that ``make dist`` was already run + # Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1`` + ssh $(USERNAME)@new.scipy.org mkdir $(UPLOAD_DIR) + scp build/dist.tar.gz $(USERNAME)@new.scipy.org:$(UPLOAD_DIR) + ssh $(USERNAME)@new.scipy.org tar xvC $(UPLOAD_DIR) \ + -zf $(UPLOAD_DIR)/dist.tar.gz + ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-ref.pdf \ + $(UPLOAD_DIR)/numpy-ref-$(RELEASE).pdf + ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-user.pdf \ + $(UPLOAD_DIR)/numpy-user-$(RELEASE).pdf + ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \ + $(UPLOAD_DIR)/numpy-html-$(RELEASE).zip + ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh #------------------------------------------------------------------------------ # Basic Sphinx generation rules for different formats From 037d2840ce0763bec31715e792fb30001bc80ed5 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 17 Oct 2015 19:07:51 -0600 Subject: [PATCH 104/496] DOC: Mention that default casting for inplace operations has changed. 
Mention in the compatibility section that default casting has changed from 'unsafe' to 'same_kind' for inplace operations. Closes #6498. --- doc/release/1.10.0-notes.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst index 75530db4e354..0341d2a6aad5 100644 --- a/doc/release/1.10.0-notes.rst +++ b/doc/release/1.10.0-notes.rst @@ -50,6 +50,16 @@ See below for more details on these changes. Compatibility notes =================== +Default casting rule change +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Default casting for inplace operations has changed to ``'same_kind'``. For +instance, if n is an array of integers, and f is an array of floats, then +``n += f`` will result in a ``TypeError``, whereas in previous Numpy +versions the floats would be silently cast to ints. In the unlikely case +that the example code is not an actual bug, it can be updated in a backward +compatible way by rewriting it as ``np.add(n, f, out=n, casting='unsafe')``. +The old ``'unsafe'`` default has been deprecated since Numpy 1.7. + numpy version string ~~~~~~~~~~~~~~~~~~~~ The numpy version string for development builds has been changed from From b3ce7a68b94c066bc3ffc55bc68fdb0c367fb143 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Wed, 12 Aug 2015 10:52:11 -0400 Subject: [PATCH 105/496] MAINT: Speedup field access by removing unneeded safety checks (1/3) Bypass unneeded "view" safety-checks in `array_subscript` and `array_assign_subscript`, by avoiding use of `PyArray_View`. --- numpy/core/_internal.py | 54 ++--- numpy/core/src/multiarray/arraytypes.c.src | 37 +-- numpy/core/src/multiarray/common.c | 33 +++ numpy/core/src/multiarray/common.h | 12 + numpy/core/src/multiarray/mapping.c | 250 ++++++++++++++++----- 5 files changed, 254 insertions(+), 132 deletions(-) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 81f5be4ada6f..3ddc2c64d890 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -288,55 +288,23 @@ def _newnames(datatype, order): return tuple(list(order) + nameslist) raise ValueError("unsupported order value: %s" % (order,)) -def _index_fields(ary, names): - """ Given a structured array and a sequence of field names - construct new array with just those fields. +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. Parameters ---------- ary : ndarray - Structured array being subscripted - names : string or list of strings - Either a single field name, or a list of field names + Structured array from which to remove padding bytes Returns ------- - sub_ary : ndarray - If `names` is a single field name, the return value is identical to - ary.getfield, a writeable view into `ary`. If `names` is a list of - field names the return value is a copy of `ary` containing only those - fields. This is planned to return a view in the future. - - Raises - ------ - ValueError - If `ary` does not contain a field given in `names`. 
- + ary_copy : ndarray + Copy of ary with padding bytes removed """ dt = ary.dtype - - #use getfield to index a single field - if isinstance(names, basestring): - try: - return ary.getfield(dt.fields[names][0], dt.fields[names][1]) - except KeyError: - raise ValueError("no field of name %s" % names) - - for name in names: - if name not in dt.fields: - raise ValueError("no field of name %s" % name) - - formats = [dt.fields[name][0] for name in names] - offsets = [dt.fields[name][1] for name in names] - - view_dtype = {'names': names, 'formats': formats, - 'offsets': offsets, 'itemsize': dt.itemsize} - - # return copy for now (future plan to return ary.view(dtype=view_dtype)) - copy_dtype = {'names': view_dtype['names'], - 'formats': view_dtype['formats']} - return array(ary.view(dtype=view_dtype), dtype=copy_dtype, copy=True) - + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) def _get_all_field_offsets(dtype, base_offset=0): """ Returns the types and offsets of all fields in a (possibly structured) @@ -478,6 +446,12 @@ def _view_is_safe(oldtype, newtype): If the new type is incompatible with the old type. """ + + # if the types are equivalent, there is no problem. + # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + new_fields = _get_all_field_offsets(newtype) new_size = newtype.itemsize diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 5aa7e61426cc..060f250980c7 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -621,31 +621,6 @@ OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) /* VOID */ -/* unpack tuple of dtype->fields (descr, offset, title[not-needed]) */ -static int -unpack_field(PyObject * value, PyArray_Descr ** descr, npy_intp * offset) -{ - PyObject * off; - if (PyTuple_GET_SIZE(value) < 2) { - return -1; - } - *descr = (PyArray_Descr *)PyTuple_GET_ITEM(value, 0); - off = PyTuple_GET_ITEM(value, 1); - - if (PyInt_Check(off)) { - *offset = PyInt_AsSsize_t(off); - } - else if (PyLong_Check(off)) { - *offset = PyLong_AsSsize_t(off); - } - else { - return -1; - } - - return 0; -} - - static PyObject * VOID_getitem(void *input, void *vap) { @@ -674,7 +649,7 @@ VOID_getitem(void *input, void *vap) PyArray_Descr *new; key = PyTuple_GET_ITEM(names, i); tup = PyDict_GetItem(descr->fields, key); - if (unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field(tup, &new, &offset) < 0) { Py_DECREF(ret); ((PyArrayObject_fields *)ap)->descr = descr; return NULL; @@ -811,7 +786,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) npy_intp offset; key = PyTuple_GET_ITEM(names, i); tup = PyDict_GetItem(descr->fields, key); - if (unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field(tup, &new, &offset) < 0) { ((PyArrayObject_fields *)ap)->descr = descr; return -1; } @@ -2178,7 +2153,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, if (NPY_TITLE_KEY(key, value)) { continue; } - if (unpack_field(value, &new, &offset) < 0) { + if (_unpack_field(value, &new, &offset) < 0) { ((PyArrayObject_fields *)arr)->descr = descr; return; } @@ -2247,7 +2222,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) if (NPY_TITLE_KEY(key, value)) { continue; } - if (unpack_field(value, &new, &offset) < 0) { + if (_unpack_field(value, &new, &offset) < 0) { ((PyArrayObject_fields *)arr)->descr = 
descr; return; } @@ -2560,7 +2535,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) if (NPY_TITLE_KEY(key, value)) { continue; } - if (unpack_field(value, &new, &offset) < 0) { + if (_unpack_field(value, &new, &offset) < 0) { PyErr_Clear(); continue; } @@ -2876,7 +2851,7 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) npy_intp offset; key = PyTuple_GET_ITEM(names, i); tup = PyDict_GetItem(descr->fields, key); - if (unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field(tup, &new, &offset) < 0) { goto finish; } /* descr is the only field checked by compare or copyswap */ diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 3352c35297b8..6a353f355cbd 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -876,3 +876,36 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j) Py_XDECREF(shape1_i); Py_XDECREF(shape2_j); } + +/** + * unpack tuple of dtype->fields (descr, offset, title[not-needed]) + * + * @param "value" should be the tuple. + * + * @return "descr" will be set to the field's dtype + * @return "offset" will be set to the field's offset + * + * returns -1 on failure, 0 on success. + */ +NPY_NO_EXPORT int +_unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) +{ + PyObject * off; + if (PyTuple_GET_SIZE(value) < 2) { + return -1; + } + *descr = (PyArray_Descr *)PyTuple_GET_ITEM(value, 0); + off = PyTuple_GET_ITEM(value, 1); + + if (PyInt_Check(off)) { + *offset = PyInt_AsSsize_t(off); + } + else if (PyLong_Check(off)) { + *offset = PyLong_AsSsize_t(off); + } + else { + return -1; + } + + return 0; +} diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 11993829f687..8f64aff74ff2 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -75,6 +75,18 @@ convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending); NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); +/** + * unpack tuple of dtype->fields (descr, offset, title[not-needed]) + * + * @param "value" should be the tuple. + * + * @return "descr" will be set to the field's dtype + * @return "offset" will be set to the field's offset + * + * returns -1 on failure, 0 on success. + */ +NPY_NO_EXPORT int +_unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); /* * Returns -1 and sets an exception if *index is an invalid index for diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 42a12db14adf..44de1cbf266e 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1250,51 +1250,190 @@ array_subscript_asarray(PyArrayObject *self, PyObject *op) return PyArray_EnsureAnyArray(array_subscript(self, op)); } +/* + * Attempts to subscript an array using a field name or list of field names. + * + * If an error occurred, return 0 and set view to NULL. If the subscript is not + * a string or list of strings, return -1 and set view to NULL. Otherwise + * return 0 and set view to point to a new view into arr for the given fields. 
+ */ NPY_NO_EXPORT int -obj_is_string_or_stringlist(PyObject *op) +_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) { + *view = NULL; + + /* first check for a single field name */ #if defined(NPY_PY3K) - if (PyUnicode_Check(op)) { + if (PyUnicode_Check(ind)) { #else - if (PyString_Check(op) || PyUnicode_Check(op)) { + if (PyString_Check(ind) || PyUnicode_Check(ind)) { #endif - return 1; + PyObject *tup; + PyArray_Descr *fieldtype; + npy_intp offset; + + /* get the field offset and dtype */ + tup = PyDict_GetItem(PyArray_DESCR(arr)->fields, ind); + if (tup == NULL){ + PyObject *errmsg = PyUString_FromString("no field of name "); + PyUString_Concat(&errmsg, ind); + PyErr_SetObject(PyExc_ValueError, errmsg); + Py_DECREF(errmsg); + return 0; + } + if (_unpack_field(tup, &fieldtype, &offset) < 0) { + return 0; + } + + /* view the array at the new offset+dtype */ + Py_INCREF(fieldtype); + *view = (PyArrayObject*)PyArray_NewFromDescr( + Py_TYPE(arr), + fieldtype, + PyArray_NDIM(arr), + PyArray_SHAPE(arr), + PyArray_STRIDES(arr), + PyArray_DATA(arr) + offset, + PyArray_FLAGS(arr), + (PyObject *)arr); + if (*view == NULL) { + return 0; + } + Py_INCREF(arr); + if (PyArray_SetBaseObject(*view, (PyObject *)arr) < 0) { + Py_DECREF(*view); + *view = NULL; + } + return 0; } - else if (PySequence_Check(op) && !PyTuple_Check(op)) { + /* next check for a list of field names */ + else if (PySequence_Check(ind) && !PyTuple_Check(ind)) { int seqlen, i; - PyObject *obj = NULL; - seqlen = PySequence_Size(op); + PyObject *name = NULL, *tup; + PyObject *fields, *names; + PyArray_Descr *view_dtype; + + /* variables needed to make a copy, to remove in the future */ + static PyObject *copyfunc = NULL; + PyObject *viewcopy; + + seqlen = PySequence_Size(ind); - /* quit if we come across a 0-d array (seqlen==-1) or a 0-len array */ + /* quit if have a 0-d array (seqlen==-1) or a 0-len array */ if (seqlen == -1) { PyErr_Clear(); - return 0; + return -1; } if (seqlen == 0) { + return -1; + } + + fields = PyDict_New(); + if (fields == NULL) { + return 0; + } + names = PyTuple_New(seqlen); + if (names == NULL) { + Py_DECREF(fields); return 0; } for (i = 0; i < seqlen; i++) { - obj = PySequence_GetItem(op, i); - if (obj == NULL) { - /* only happens for strange sequence objects. 
Silently fail */ + name = PySequence_GetItem(ind, i); + if (name == NULL) { + /* only happens for strange sequence objects */ PyErr_Clear(); - return 0; + Py_DECREF(fields); + Py_DECREF(names); + return -1; } #if defined(NPY_PY3K) - if (!PyUnicode_Check(obj)) { + if (!PyUnicode_Check(name)) { #else - if (!PyString_Check(obj) && !PyUnicode_Check(obj)) { + if (!PyString_Check(name) && !PyUnicode_Check(name)) { #endif - Py_DECREF(obj); + Py_DECREF(name); + Py_DECREF(fields); + Py_DECREF(names); + return -1; + } + + tup = PyDict_GetItem(PyArray_DESCR(arr)->fields, name); + if (tup == NULL){ + PyObject *errmsg = PyUString_FromString("no field of name "); + PyUString_ConcatAndDel(&errmsg, name); + PyErr_SetObject(PyExc_ValueError, errmsg); + Py_DECREF(errmsg); + Py_DECREF(fields); + Py_DECREF(names); return 0; } - Py_DECREF(obj); + if (PyDict_SetItem(fields, name, tup) < 0) { + Py_DECREF(name); + Py_DECREF(fields); + Py_DECREF(names); + return 0; + } + if (PyTuple_SetItem(names, i, name) < 0) { + Py_DECREF(fields); + Py_DECREF(names); + return 0; + } + } + + view_dtype = PyArray_DescrNewFromType(NPY_VOID); + if (view_dtype == NULL) { + Py_DECREF(fields); + Py_DECREF(names); + return 0; + } + view_dtype->elsize = PyArray_DESCR(arr)->elsize; + view_dtype->names = names; + view_dtype->fields = fields; + view_dtype->flags = PyArray_DESCR(arr)->flags; + + *view = (PyArrayObject*)PyArray_NewFromDescr( + Py_TYPE(arr), + view_dtype, + PyArray_NDIM(arr), + PyArray_SHAPE(arr), + PyArray_STRIDES(arr), + PyArray_DATA(arr), + PyArray_FLAGS(arr), + (PyObject *)arr); + if (*view == NULL) { + return 0; + } + Py_INCREF(arr); + if (PyArray_SetBaseObject(*view, (PyObject *)arr) < 0) { + Py_DECREF(*view); + *view = NULL; + return 0; + } + + /* + * Return copy for now (future plan to return the view above). All the + * following code in this block can then be replaced by "return 0;" + */ + npy_cache_import("numpy.core._internal", "_copy_fields", ©func); + if (copyfunc == NULL) { + Py_DECREF(*view); + *view = NULL; + return 0; + } + + viewcopy = PyObject_CallFunction(copyfunc, "O", *view); + if (viewcopy == NULL) { + Py_DECREF(*view); + *view = NULL; + return 0; } - return 1; + Py_DECREF(*view); + *view = (PyArrayObject*)viewcopy; + return 0; } - return 0; + return -1; } /* @@ -1318,25 +1457,20 @@ array_subscript(PyArrayObject *self, PyObject *op) PyArrayMapIterObject * mit = NULL; /* return fields if op is a string index */ - if (PyDataType_HASFIELDS(PyArray_DESCR(self)) && - obj_is_string_or_stringlist(op)) { - PyObject *obj; - static PyObject *indexfunc = NULL; - npy_cache_import("numpy.core._internal", "_index_fields", &indexfunc); - if (indexfunc == NULL) { - return NULL; - } - - obj = PyObject_CallFunction(indexfunc, "OO", self, op); - if (obj == NULL) { - return NULL; - } + if (PyDataType_HASFIELDS(PyArray_DESCR(self))) { + PyArrayObject *view; + int ret = _get_field_view(self, op, &view); + if (ret == 0){ + if (view == NULL) { + return NULL; + } - /* warn if writing to a copy. copies will have no base */ - if (PyArray_BASE((PyArrayObject*)obj) == NULL) { - PyArray_ENABLEFLAGS((PyArrayObject*)obj, NPY_ARRAY_WARN_ON_WRITE); + /* warn if writing to a copy. 
copies will have no base */ + if (PyArray_BASE(view) == NULL) { + PyArray_ENABLEFLAGS(view, NPY_ARRAY_WARN_ON_WRITE); + } + return (PyObject*)view; } - return obj; } /* Prepare the indices */ @@ -1671,37 +1805,31 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) } /* field access */ - if (PyDataType_HASFIELDS(PyArray_DESCR(self)) && - obj_is_string_or_stringlist(ind)) { - PyObject *obj; - static PyObject *indexfunc = NULL; + if (PyDataType_HASFIELDS(PyArray_DESCR(self))){ + PyArrayObject *view; + int ret = _get_field_view(self, ind, &view); + if (ret == 0){ #if defined(NPY_PY3K) - if (!PyUnicode_Check(ind)) { + if (!PyUnicode_Check(ind)) { #else - if (!PyString_Check(ind) && !PyUnicode_Check(ind)) { + if (!PyString_Check(ind) && !PyUnicode_Check(ind)) { #endif - PyErr_SetString(PyExc_ValueError, - "multi-field assignment is not supported"); - } - - npy_cache_import("numpy.core._internal", "_index_fields", &indexfunc); - if (indexfunc == NULL) { - return -1; - } - - obj = PyObject_CallFunction(indexfunc, "OO", self, ind); - if (obj == NULL) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "multi-field assignment is not supported"); + return -1; + } - if (PyArray_CopyObject((PyArrayObject*)obj, op) < 0) { - Py_DECREF(obj); - return -1; + if (view == NULL) { + return -1; + } + if (PyArray_CopyObject(view, op) < 0) { + Py_DECREF(view); + return -1; + } + Py_DECREF(view); + return 0; } - Py_DECREF(obj); - - return 0; } /* Prepare the indices */ From 9d1a7c9fd4b911e7dfb741e2f5e2702e5c7df267 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 14 Aug 2015 19:31:51 -0400 Subject: [PATCH 106/496] MAINT: Speedup field access by removing unneeded safety checks (2/3) Bypass unneeded "view" safety checks in voidtype_ subscript/assignment methods, by falling back to ndarray methods which skip the checks. --- numpy/core/src/multiarray/scalartypes.c.src | 117 +++++++++++--------- numpy/core/tests/test_multiarray.py | 4 +- 2 files changed, 66 insertions(+), 55 deletions(-) diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index ee5741ae0315..1bd5b22d2124 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1681,13 +1681,15 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) * b['x'][0] = arange(3) # uses ndarray setitem * * Ndarray's setfield would try to broadcast the lhs. Instead we use - * ndarray getfield to get the field safely, then setitem to set the value - * without broadcast. Note we also want subarrays to be set properly, ie + * ndarray getfield to get the field safely, then setitem with an empty + * tuple to set the value without broadcast. Note we also want subarrays to + * be set properly, ie * * a = np.zeros(1, dtype=[('x', 'i', 5)]) * a[0]['x'] = 1 * - * sets all values to 1. Setitem does this. + * sets all values to 1. "getfield + setitem with empty tuple" takes + * care of both object arrays and subarrays. */ PyObject *getfield_args, *value, *arr, *meth, *arr_field, *emptytuple; @@ -1726,15 +1728,15 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) return NULL; } - /* 2. Fill the resulting array using setitem */ + /* 2. Assign the value using setitem with empty tuple. 
*/ emptytuple = PyTuple_New(0); if (PyObject_SetItem(arr_field, emptytuple, value) < 0) { Py_DECREF(arr_field); Py_DECREF(emptytuple); return NULL; } - Py_DECREF(arr_field); Py_DECREF(emptytuple); + Py_DECREF(arr_field); Py_RETURN_NONE; } @@ -2157,11 +2159,14 @@ voidtype_length(PyVoidScalarObject *self) } } +static PyObject * +voidtype_subscript(PyVoidScalarObject *self, PyObject *ind); + static PyObject * voidtype_item(PyVoidScalarObject *self, Py_ssize_t n) { npy_intp m; - PyObject *flist=NULL, *fieldind, *fieldparam, *fieldinfo, *ret; + PyObject *flist=NULL; if (!(PyDataType_HASFIELDS(self->descr))) { PyErr_SetString(PyExc_IndexError, @@ -2177,22 +2182,16 @@ voidtype_item(PyVoidScalarObject *self, Py_ssize_t n) PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); return NULL; } - /* no error checking needed: descr->names is well structured */ - fieldind = PyTuple_GET_ITEM(flist, n); - fieldparam = PyDict_GetItem(self->descr->fields, fieldind); - fieldinfo = PyTuple_GetSlice(fieldparam, 0, 2); - ret = voidtype_getfield(self, fieldinfo, NULL); - Py_DECREF(fieldinfo); - return ret; -} + return voidtype_subscript(self, PyTuple_GetItem(flist, n)); +} /* get field by name or number */ static PyObject * voidtype_subscript(PyVoidScalarObject *self, PyObject *ind) { npy_intp n; - PyObject *ret, *fieldinfo, *fieldparam; + PyObject *ret, *args; if (!(PyDataType_HASFIELDS(self->descr))) { PyErr_SetString(PyExc_IndexError, @@ -2205,14 +2204,9 @@ voidtype_subscript(PyVoidScalarObject *self, PyObject *ind) #else if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { #endif - /* look up in fields */ - fieldparam = PyDict_GetItem(self->descr->fields, ind); - if (!fieldparam) { - goto fail; - } - fieldinfo = PyTuple_GetSlice(fieldparam, 0, 2); - ret = voidtype_getfield(self, fieldinfo, NULL); - Py_DECREF(fieldinfo); + args = Py_BuildValue("(O)", ind); + ret = gentype_generic_method((PyObject *)self, args, NULL, "__getitem__"); + Py_DECREF(args); return ret; } @@ -2228,12 +2222,14 @@ fail: return NULL; } +static int +voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val); + static int voidtype_ass_item(PyVoidScalarObject *self, Py_ssize_t n, PyObject *val) { npy_intp m; - PyObject *flist=NULL, *fieldinfo, *newtup; - PyObject *res; + PyObject *flist=NULL; if (!(PyDataType_HASFIELDS(self->descr))) { PyErr_SetString(PyExc_IndexError, @@ -2247,24 +2243,11 @@ voidtype_ass_item(PyVoidScalarObject *self, Py_ssize_t n, PyObject *val) n += m; } if (n < 0 || n >= m) { - goto fail; - } - fieldinfo = PyDict_GetItem(self->descr->fields, - PyTuple_GET_ITEM(flist, n)); - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) { + PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); return -1; } - Py_DECREF(res); - return 0; -fail: - PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); - return -1; + return voidtype_ass_subscript(self, PyTuple_GetItem(flist, n), val); } static int @@ -2272,8 +2255,7 @@ voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val) { npy_intp n; char *msg = "invalid index"; - PyObject *fieldinfo, *newtup; - PyObject *res; + PyObject *args; if (!PyDataType_HASFIELDS(self->descr)) { PyErr_SetString(PyExc_IndexError, @@ -2292,20 +2274,49 @@ voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val) #else if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { #endif - /* look up in 
fields */ - fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) { - goto fail; + /* + * Much like in voidtype_setfield, we cannot simply use ndarray's + * __setitem__ since assignment to void scalars should not broadcast + * the lhs. Instead we get a view through __getitem__ and then assign + * the value using setitem with an empty tuple (which treats both + * object arrays and subarrays properly). + * + * Also we do not want to use voidtype_setfield here, since we do + * not need to do the (slow) view safety checks, since we already + * know the dtype/offset are safe. + */ + + PyObject *arr, *arr_field, *meth, *emptytuple; + + /* 1. Convert to 0-d array and use getitem */ + arr = PyArray_FromScalar((PyObject*)self, NULL); + if (arr == NULL) { + return -1; + } + meth = PyObject_GetAttrString(arr, "__getitem__"); + if (meth == NULL) { + Py_DECREF(arr); + return -1; } - newtup = Py_BuildValue("(OOO)", val, - PyTuple_GET_ITEM(fieldinfo, 0), - PyTuple_GET_ITEM(fieldinfo, 1)); - res = voidtype_setfield(self, newtup, NULL); - Py_DECREF(newtup); - if (!res) { + args = Py_BuildValue("(O)", ind); + arr_field = PyObject_CallObject(meth, args); + Py_DECREF(meth); + Py_DECREF(arr); + Py_DECREF(args); + + if(arr_field == NULL){ return -1; } - Py_DECREF(res); + + /* 2. Assign the value using setitem with empty tuple. */ + emptytuple = PyTuple_New(0); + if (PyObject_SetItem(arr_field, emptytuple, val) < 0) { + Py_DECREF(arr_field); + Py_DECREF(emptytuple); + return -1; + } + Py_DECREF(emptytuple); + Py_DECREF(arr_field); return 0; } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index d47b9f0da926..85b0e5519ee8 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3754,8 +3754,8 @@ def test_field_names(self): b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield - assert_raises(IndexError, b[0].__setitem__, fnn, 1) - assert_raises(IndexError, b[0].__getitem__, fnn) + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') From 37382ac4be4139710476aa24cb5ad77fbac70728 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 14 Aug 2015 03:08:37 -0400 Subject: [PATCH 107/496] MAINT: Speedup field access by removing unneeded safety checks (3/3) Skip safety-checks in views as long as neither old or new dtypes of view may have objects. --- numpy/core/src/multiarray/common.c | 18 ++++++++++++++++++ numpy/core/src/multiarray/common.h | 9 +++++++++ numpy/core/src/multiarray/getset.c | 24 ++++++++++++++---------- numpy/core/src/multiarray/methods.c | 25 ++++++++++++++----------- 4 files changed, 55 insertions(+), 21 deletions(-) diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 6a353f355cbd..1948b8b61f4f 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -909,3 +909,21 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) return 0; } + +/* + * check whether arrays with datatype dtype might have object fields. This will + * only happen for structured dtypes (which may have hidden objects even if the + * HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type + * is either of these. 
+ */ +NPY_NO_EXPORT int +_may_have_objects(PyArray_Descr *dtype) +{ + PyArray_Descr *base = dtype; + if (PyDataType_HASSUBARRAY(dtype)) { + base = dtype->subarray->base; + } + + return (PyDataType_HASFIELDS(base) || + PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) ); +} diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 8f64aff74ff2..5e14b80a71ca 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -88,6 +88,15 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); +/* + * check whether arrays with datatype dtype might have object fields. This will + * only happen for structured dtypes (which may have hidden objects even if the + * HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type + * is either of these. + */ +NPY_NO_EXPORT int +_may_have_objects(PyArray_Descr *dtype); + /* * Returns -1 and sets an exception if *index is an invalid index for * an array of size max_item, otherwise adjusts it in place to be diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c index 5147b9735dc9..549ea333ae5e 100644 --- a/numpy/core/src/multiarray/getset.c +++ b/numpy/core/src/multiarray/getset.c @@ -438,10 +438,6 @@ array_descr_set(PyArrayObject *self, PyObject *arg) PyObject *safe; static PyObject *checkfunc = NULL; - npy_cache_import("numpy.core._internal", "_view_is_safe", &checkfunc); - if (checkfunc == NULL) { - return -1; - } if (arg == NULL) { PyErr_SetString(PyExc_AttributeError, @@ -456,13 +452,21 @@ array_descr_set(PyArrayObject *self, PyObject *arg) return -1; } - /* check that we are not reinterpreting memory containing Objects */ - safe = PyObject_CallFunction(checkfunc, "OO", PyArray_DESCR(self), newtype); - if (safe == NULL) { - Py_DECREF(newtype); - return -1; + /* check that we are not reinterpreting memory containing Objects. */ + if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { + npy_cache_import("numpy.core._internal", "_view_is_safe", &checkfunc); + if (checkfunc == NULL) { + return -1; + } + + safe = PyObject_CallFunction(checkfunc, "OO", + PyArray_DESCR(self), newtype); + if (safe == NULL) { + Py_DECREF(newtype); + return -1; + } + Py_DECREF(safe); } - Py_DECREF(safe); if (newtype->elsize == 0) { /* Allow a void view */ diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index fd329cb8ce17..84d4e2c9e2fa 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -362,19 +362,22 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) PyObject *safe; static PyObject *checkfunc = NULL; - npy_cache_import("numpy.core._internal", "_getfield_is_safe", &checkfunc); - if (checkfunc == NULL) { - return NULL; - } + /* check that we are not reinterpreting memory containing Objects. 
*/ + if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { + npy_cache_import("numpy.core._internal", "_getfield_is_safe", + &checkfunc); + if (checkfunc == NULL) { + return NULL; + } - /* check that we are not reinterpreting memory containing Objects */ - /* only returns True or raises */ - safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self), - typed, offset); - if (safe == NULL) { - return NULL; + /* only returns True or raises */ + safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self), + typed, offset); + if (safe == NULL) { + return NULL; + } + Py_DECREF(safe); } - Py_DECREF(safe); ret = PyArray_NewFromDescr(Py_TYPE(self), typed, From 8cf5b506d2d3da833b09e8bbbe874db6f9c5e809 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Tue, 13 Oct 2015 20:21:03 +0300 Subject: [PATCH 108/496] PERF: add 0d structured indexing benchmark --- benchmarks/benchmarks/bench_indexing.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 4f2482ef825f..d6dc4edf0039 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -47,3 +47,26 @@ def time_mmap_slicing(self): def time_mmap_fancy_indexing(self): for i in range(1000): self.fp[self.indexes] + + +class IndexingStructured0D(Benchmark): + def setup(self): + self.dt = np.dtype([('a', 'f4', 256)]) + + self.A = np.zeros((), self.dt) + self.B = self.A.copy() + + self.a = np.zeros(1, self.dt)[0] + self.b = self.a.copy() + + def time_array_slice(self): + self.B['a'][:] = self.A['a'] + + def time_array_all(self): + self.B['a'] = self.A['a'] + + def time_scalar_slice(self): + self.b['a'][:] = self.a['a'] + + def time_scalar_all(self): + self.b['a'] = self.a['a'] From d75b44a32bc13bd457f756b6fdeebd8f12b21799 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 19 Oct 2015 07:52:39 +0200 Subject: [PATCH 109/496] DOC: update HOWTO_BUILD_DOCS and HOWTO_RELEASE for release doc builds. [ci skip] --- doc/HOWTO_BUILD_DOCS.rst.txt | 11 +++++++--- doc/HOWTO_RELEASE.rst.txt | 40 ++++++++++++++++-------------------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/doc/HOWTO_BUILD_DOCS.rst.txt b/doc/HOWTO_BUILD_DOCS.rst.txt index 79d76fb7d129..8107aaa81012 100644 --- a/doc/HOWTO_BUILD_DOCS.rst.txt +++ b/doc/HOWTO_BUILD_DOCS.rst.txt @@ -57,7 +57,7 @@ To build the PDF documentation, do instead:: You will need to have Latex installed for this. -In addition to the above, you can also do:: +Instead of the above, you can also do:: make dist @@ -65,14 +65,19 @@ which will rebuild Numpy, install it to a temporary location, and build the documentation in all formats. This will most likely again only work on Unix platforms. +The documentation for Numpy distributed at http://docs.scipy.org in html and +pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details on +how to update http://docs.scipy.org. + .. _Matplotlib: http://matplotlib.org/ +.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt Sphinx extensions ----------------- Numpy's documentation uses several custom extensions to Sphinx. These -are shipped in the ``sphinxext/`` directory, and are automatically -enabled when building Numpy's documentation. +are shipped in the ``sphinxext/`` directory (as git submodules, as discussed +above), and are automatically enabled when building Numpy's documentation. 
If you want to make use of these extensions in third-party projects, they are available on PyPi_ as the numpydoc_ package. diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index 5fed523c1c7b..b77a6c25c8e0 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -32,7 +32,7 @@ Doc wiki Release Scripts --------------- -* https://github.com/certik/numpy-vendor +* https://github.com/numpy/numpy-vendor Supported platforms and versions @@ -217,7 +217,9 @@ Do:: cd doc/ make dist -to check that the documentation is in a buildable state. +to check that the documentation is in a buildable state. See +doc/HOWTO_BUILD_DOCS.rst.txt for more details and for how to update +http://docs.scipy.org. Check deprecations ------------------ @@ -292,9 +294,11 @@ Now, set ``release=True`` in setup.py, then :: git commit -m "REL: Release." setup.py - git tag + git tag -s git push origin +Note: ``git tag -s`` creates a signed tag - make sure your PGP key is public. + Apply patch to fix bogus strides -------------------------------- NPY_RELAXED_STRIDE_CHECKING was made the default in Numpy 1.10 and bogus @@ -336,32 +340,24 @@ works. Update docs.scipy.org --------------------- -Do the following (or ask the doc people to take care of it): - -Rebuild and upload documentation: +All documentation for a release can be updated on http://docs.scipy.org/ with: -- ``cd numpy/doc`` -- ``make dist`` -- Check that the built documentation is OK. -- ``touch output-is-fine`` -- ``make upload UPLOAD_TARGET=USERNAME@docs.scipy.org:/home/docserver/www-root/doc/numpy-1.5.x/`` + make dist + make upload USERNAME= RELEASE=1.11.0 -where USERNAME should be replaced by your account on -``docs.scipy.org``, and ``numpy-1.5.x`` by the version number of the -*release series*. For instance, for Numpy 1.5.1, it should be -``numpy-1.5.x`` and for Numpy 2.0.0 ``numpy-2.0.x``. +Note that ```` must have SSH credentials on the server. If you don't +have those, ask someone who does (the list currently includes @rgommers, +@juliantaylor and @pv). -Rebuild and upload ``docs.scipy.org`` front page, if the release -series is a new one. The front page sources are located in the Scipy -repository: +Also rebuild and upload ``docs.scipy.org`` front page, if the release +series is a new one. The front page sources have their own repo: +https://github.com/scipy/docs.scipy.org. Do the following: -- ``cd scipy/doc/frontpage`` -- Edit ``_templates/indexcontent.html`` to add links to the new release series. +- Update ``index.rst`` for the new version. - ``make dist`` - Check that the built documentation is OK. 
From b57edaf29a731837f4b39cb68f21693fdccb940f Mon Sep 17 00:00:00 2001
From: Ryan Grout
Date: Tue, 13 Oct 2015 16:52:56 -0500
Subject: [PATCH 112/496] BUG: fix AttributeError in numpy/distutils.

Corrects an AttributeError on Windows in some cases caused by #6185

---
 numpy/distutils/ccompiler.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index b5970d76fe08..ad235ed1997b 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -385,10 +385,11 @@ def CCompiler_customize(self, dist, need_cxx=0):
             a, b = 'cc', 'c++'
             self.compiler_cxx = [self.compiler[0].replace(a, b)]\
                                 + self.compiler[1:]
-    elif not self.compiler_cxx:
+    else:
         if hasattr(self, 'compiler'):
             log.warn("#### %s #######" % (self.compiler,))
-        log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)
+        if not hasattr(self, 'compiler_cxx'):
+            log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
     return

 replace_method(CCompiler, 'customize', CCompiler_customize)

From b94546d7426627c33686ddbf72bd69b8ddd4afed Mon Sep 17 00:00:00 2001
From: Jaime Fernandez
Date: Mon, 19 Oct 2015 19:18:57 -0700
Subject: [PATCH 113/496] DOC: fixed typo in arrays.classes.rst

---
 doc/source/reference/arrays.classes.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 1fa84c6c4ed5..5716f45621d3 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -24,7 +24,7 @@ subclass of an ndarray, then :func:`asanyarray` can be used to allow
 subclasses to propagate more cleanly through your subroutine. In
 principal a subclass could redefine any aspect of the array and
 therefore, under strict guidelines, :func:`asanyarray` would rarely be
-useful. However, most subclasses of the arrayobject will not
+useful. However, most subclasses of the array object will not
 redefine certain aspects of the array object such as the buffer
 interface, or the attributes of the array. One important example,
 however, of why your subroutine may not be able to handle an arbitrary
@@ -47,10 +47,10 @@ Numpy provides several hooks that classes can customize:
    override behavior of Numpy's ufuncs.
This works quite similarly to Python's ``__mul__`` and other binary operation routines. - - *ufunc* is the ufunc object that was called. + - *ufunc* is the ufunc object that was called. - *method* is a string indicating which Ufunc method was called (one of ``"__call__"``, ``"reduce"``, ``"reduceat"``, - ``"accumulate"``, ``"outer"``, ``"inner"``). + ``"accumulate"``, ``"outer"``, ``"inner"``). - *i* is the index of *self* in *inputs*. - *inputs* is a tuple of the input arguments to the ``ufunc`` - *kwargs* is a dictionary containing the optional input arguments From 7dd3f1455127b70239ed6a20da2fc3aca496437b Mon Sep 17 00:00:00 2001 From: Nicolas Calle Date: Mon, 19 Oct 2015 19:37:47 -0700 Subject: [PATCH 114/496] DOC: Fixed a typo at line 289 at c-api.array.rst Fixes #6521 --- doc/source/reference/c-api.array.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst index 1dfd7d8f0be8..a3fb5803b57e 100644 --- a/doc/source/reference/c-api.array.rst +++ b/doc/source/reference/c-api.array.rst @@ -286,7 +286,7 @@ From scratch This function steals a reference to *descr* if it is not NULL. Create a new array with the provided data-type descriptor, *descr* - , of the shape deteremined by *nd* and *dims*. + , of the shape determined by *nd* and *dims*. .. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val) From 051ac58c4193bf8244b4ea3db012b783a65af1c7 Mon Sep 17 00:00:00 2001 From: Yifan Li Date: Mon, 19 Oct 2015 20:15:42 -0700 Subject: [PATCH 115/496] DOC: fixed #6525 --- doc/source/reference/c-api.array.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst index a3fb5803b57e..32dace29f247 100644 --- a/doc/source/reference/c-api.array.rst +++ b/doc/source/reference/c-api.array.rst @@ -1829,7 +1829,7 @@ Item selection and manipulation *self* would be preserved. No checking is done on whether or not self is in ascending order. - The *side* argument indicates whther the index returned should be that of + The *side* argument indicates whether the index returned should be that of the first suitable location (if :c:data:`NPY_SEARCHLEFT`) or of the last (if :c:data:`NPY_SEARCHRIGHT`). @@ -1913,8 +1913,8 @@ Calculation The out argument specifies where to place the result. If out is NULL, then the output array is created, otherwise the output is placed in out which must be the correct size and type. A new - reference to the ouput array is always returned even when out - is not NULL. The caller of the routine has the responsability + reference to the output array is always returned even when out + is not NULL. The caller of the routine has the responsibility to ``DECREF`` out if not NULL or a memory-leak will occur. .. c:function:: PyObject* PyArray_Max(PyArrayObject* self, int axis, PyArrayObject* out) @@ -2040,7 +2040,7 @@ Array Functions The address to any Python object. This Python object will be replaced with an equivalent well-behaved, C-style contiguous, ndarray of the - given data type specifice by the last two arguments. Be sure that + given data type specified by the last two arguments. Be sure that stealing a reference in this way to the input object is justified. :param ptr: @@ -2110,7 +2110,7 @@ Array Functions .. 
versionadded:: 1.6

-   Applies the einstein summation convention to the array operands
+   Applies the Einstein summation convention to the array operands
    provided, returning a new array or placing the result in *out*.
    The string in *subscripts* is a comma separated list of index
    letters. The number of operands is in *nop*, and *op_in* is an
@@ -2773,7 +2773,7 @@ to.
    interface to a variable with members that detail the object's use
    of its chunk of memory. The *buf* variable is a pointer to a
    structure with base, ptr, len, and flags members. The
-   :c:type:`PyArray_Chunk` structure is binary compatibile with the
+   :c:type:`PyArray_Chunk` structure is binary compatible with the
    Python's buffer object (through its len member on 32-bit platforms
    and its ptr member on 64-bit platforms or in Python 2.5). On
    return, the base member is set to *obj* (or its base if *obj* is
@@ -2905,7 +2905,7 @@ the C-API is needed then some additional steps must be taken.
    :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the
    C-API (*e.g.* myextension_ARRAY_API). This must be done **before**
    including the numpy/arrayobject.h file. In the module
-   intialization routine you call ``import_array`` (). In addition,
+   initialization routine you call ``import_array`` (). In addition,
    in the files that do not have the module initialization
    sub_routine define :c:macro:`NO_IMPORT_ARRAY` prior to including
    numpy/arrayobject.h.
@@ -3070,7 +3070,7 @@ These macros are only meaningful if :c:data:`NPY_ALLOW_THREADS`
 evaluates True during compilation of the extension module. Otherwise,
 these macros are equivalent to whitespace. Python uses a single Global
 Interpreter Lock (GIL) for each Python process so that only a single
-thread may excecute at a time (even on multi-cpu machines). When
+thread may execute at a time (even on multi-cpu machines). When
 calling out to a compiled function that may take time to compute (and
 does not have side-effects for other threads like updated global
 variables), the GIL should be released so that other Python threads

From 5fd1707ea9576b5a4efff5866a8e8d36f36ef6e9 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 20 Oct 2015 08:31:52 -0600
Subject: [PATCH 116/496] Revert "Merge pull request #6354 from mingwpy/mingwpy"

Revert mingwpy modifications to distutils. They are causing problems for
non-Windows builds and it is better to wait until mingwpy is further along.

This reverts commit cda64938eb150b1af6677db4754da3be5fd7e483, reversing
changes made to 8cb3ec6ab804f594daf553e53e7cf7478656bebd.
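In practical terms, this revert returns ``numpy.distutils.misc_util.mingw32()``
to a plain environment-variable check. Condensed from the diff below into a
standalone sketch::

    import os
    import sys

    def mingw32():
        """Return True when running in an MSYS/MinGW32 build shell."""
        if sys.platform == 'win32':
            if os.environ.get('OSTYPE', '') == 'msys':
                return True
            if os.environ.get('MSYSTEM', '') == 'MINGW32':
                return True
        return False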
--- numpy/core/setup.py | 5 +---- numpy/core/src/private/npy_config.h | 5 ----- numpy/distutils/fcompiler/gnu.py | 13 ++++++------- numpy/distutils/mingw32ccompiler.py | 19 +++++++++---------- numpy/distutils/misc_util.py | 17 ++--------------- numpy/distutils/system_info.py | 18 ++++++++++++++++++ 6 files changed, 36 insertions(+), 41 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 2a1ae5f6226b..aa9e03e0607d 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -101,8 +101,6 @@ def win32_checks(deflist): deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') def check_math_capabilities(config, moredefs, mathlibs): - from numpy.distutils.misc_util import mingw32 - def check_func(func_name): return config.check_func(func_name, libraries=mathlibs, decl=True, call=True) @@ -172,8 +170,7 @@ def check_funcs(funcs_name): # C99 functions: float and long double versions check_funcs(C99_FUNCS_SINGLE) - if not mingw32(): - check_funcs(C99_FUNCS_EXTENDED) + check_funcs(C99_FUNCS_EXTENDED) def check_complex(config, mathlibs): priv = [] diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index c81f06461252..fa20eb4f38f0 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -61,11 +61,6 @@ #endif -/* Disable broken mingw-w64 hypot function */ -#if defined(__MINGW32__) -#undef HAVE_HYPOT -#endif - /* Intel C for Windows uses POW for 64 bits longdouble*/ #if defined(_MSC_VER) && defined(__INTEL_COMPILER) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 9acbe537d9e6..a7fd3a77f747 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -24,6 +24,7 @@ def is_win32(): return sys.platform == "win32" and platform.architecture()[0] == "32bit" if is_win64(): + #_EXTRAFLAGS = ["-fno-leading-underscore"] _EXTRAFLAGS = [] else: _EXTRAFLAGS = [] @@ -214,12 +215,10 @@ def get_flags_opt(self): # use -mincoming-stack-boundary=2 # due to the change to 16 byte stack alignment since GCC 4.6 # but 32 bit Windows ABI defines 4 bytes stack alignment - opt = ['-O2 -march=pentium4 -mtune=generic -mfpmath=sse -msse2' - ' -mlong-double-64 -mincoming-stack-boundary=2' - ' -ffpe-summary=invalid,zero'] + opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2 ' + '-mincoming-stack-boundary=2'] else: - opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2' - ' -mlong-double-64 -ffpe-summary=invalid,zero'] + opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2'] else: opt = ['-O2'] @@ -271,11 +270,11 @@ def version_match(self, version_string): 'version_cmd' : ["", "-dumpversion"], 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", "-fno-second-underscore"] + _EXTRAFLAGS, - 'compiler_f90' : [None, "-Wall", + 'compiler_f90' : [None, "-Wall", "-g", "-fno-second-underscore"] + _EXTRAFLAGS, 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", "-fno-second-underscore"] + _EXTRAFLAGS, - 'linker_so' : ["", "-Wall"], + 'linker_so' : ["", "-Wall", "-g"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-Wall"] diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index f1220b36af2b..f72c3bbbbfb5 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -95,10 +95,9 @@ def __init__ (self, # Before build with MinGW-W64 generate the python import library # with gendef and dlltool according to the MingW-W64 FAQ. 
# Use the MinGW-W64 provided msvc runtime import libraries. - # The mingwpy package deploys it's own import libraries. # Don't call build_import_library() and build_msvcr_library. - if 'MinGW-W64' not in str(out_string) and 'mingwpy' not in str(out_string): + if 'MinGW-W64' not in str(out_string): # **changes: eric jones 4/11/01 # 1. Check for import library on Windows. Build if it doesn't @@ -132,10 +131,10 @@ def __init__ (self, else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables( - compiler='gcc -O2 -march=x86-64 -mtune=generic -DMS_WIN64' - ' -msse2 -mlong-double-64 -Wall', - compiler_so='gcc -O2 -march=x86-64 -mtune=generic -DMS_WIN64' - ' -msse2 -mlong-double-64 -Wall -Wstrict-prototypes', + compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64' + ' -O2 -msse2 -Wall', + compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64' + ' -O2 -msse2 -Wall -Wstrict-prototypes', linker_exe='gcc', linker_so='gcc -shared -Wl,-gc-sections -Wl,-s') else: @@ -159,11 +158,11 @@ def __init__ (self, # build needs '-mincoming-stack-boundary=2' due to ABI # incompatibility to Win32 ABI self.set_executables( - compiler='gcc -O2 -march=pentium4 -mtune=generic' - ' -mfpmath=sse -msse2 -mlong-double-64' + compiler='gcc -O2 -march=core2 -mtune=generic' + ' -mfpmath=sse -msse2' ' -mincoming-stack-boundary=2 -Wall', - compiler_so='gcc -O2 -march=pentium4 -mtune=generic' - ' -mfpmath=sse -msse2 -mlong-double-64' + compiler_so='gcc -O2 -march=core2 -mtune=generic' + ' -mfpmath=sse -msse2' ' -mincoming-stack-boundary=2 -Wall' ' -Wstrict-prototypes', linker_exe='g++ ', diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index f5ef1f9b9838..75d864c5a7bc 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -381,22 +381,9 @@ def mingw32(): """Return true when using mingw32 environment. 
""" if sys.platform=='win32': - # mingw32 compiler configured in %USERPROFILE%\pydistutils.cfg - # or distutils\distutils.cfg - from distutils.dist import Distribution - _dist = Distribution() - _dist.parse_config_files() - _bld = _dist.get_option_dict('build') - if _bld and 'mingw32' in _bld.get('compiler'): + if os.environ.get('OSTYPE', '')=='msys': return True - # parse setup.py command line: --compiler=mingw32 or -c mingw32 - elif (_i for _i in sys.argv if 'mingw32' in _i) and \ - (_i for _i in sys.argv if ('setup.py') in _i): - return True - # using msys or msys2 shell - elif os.environ.get('OSTYPE', '')=='msys': - return True - elif os.environ.get('MSYSTEM', '') in ('MINGW32', 'MINGW64'): + if os.environ.get('MSYSTEM', '')=='MINGW32': return True return False diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 0da13a7df2b9..9dd48e2dccef 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1751,6 +1751,24 @@ def check_embedded_lapack(self, info): res = False finally: shutil.rmtree(tmpdir) + if sys.platform == 'win32' and not res: + c = distutils.ccompiler.new_compiler(compiler='mingw32') + tmpdir = tempfile.mkdtemp() + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + try: + with open(src, 'wt') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) return res From fdcdc5656d0ede894a084f630d134de49b9e7328 Mon Sep 17 00:00:00 2001 From: Alain Date: Tue, 20 Oct 2015 07:40:03 -0700 Subject: [PATCH 117/496] DOC: typo in arrays.interface.rst #6516 --- doc/source/reference/arrays.interface.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index db640b2d146e..da946c9ed2a1 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -310,7 +310,7 @@ Differences with Array interface (Version 2) ============================================ The version 2 interface was very similar. The differences were -largely asthetic. In particular: +largely aesthetic. In particular: 1. The PyArrayInterface structure had no descr member at the end (and therefore no flag ARR_HAS_DESCR) From a08437e12d6b6796f4b0beda49bce9adc37db63d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 Oct 2015 08:59:26 -0600 Subject: [PATCH 118/496] Revert "Merge pull request #5614 from charris/cleanup-gh-5587" Revert mingwpy modifications to distutils. They are causing problems for non-windows builds and it is better to wait until mingypy is further along. This reverts commit 96abd32de241864ee97f30357234cbc9a96c43ae, reversing changes made to 06af9918f6bf03b8d818ec834f9fb48db57d1489. --- INSTALL.txt | 32 ---------- numpy/core/src/multiarray/multiarraymodule.c | 9 +++ numpy/distutils/fcompiler/gnu.py | 32 +++------- numpy/distutils/mingw32ccompiler.py | 67 +++++++------------- numpy/distutils/system_info.py | 18 ------ numpy/lib/tests/test_function_base.py | 2 +- setup.py | 1 - 7 files changed, 42 insertions(+), 119 deletions(-) diff --git a/INSTALL.txt b/INSTALL.txt index 12fb47d445c2..6339cbb8746d 100644 --- a/INSTALL.txt +++ b/INSTALL.txt @@ -152,38 +152,6 @@ is broken). gcc 4.4 will hopefully be able to run natively. 
This is the only tested way to get a numpy with a FULL blas/lapack (scipy does not work because of C++). -Carl Kleffner's mingw-w64 toolchain -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Carl Kleffner has been working on mingw-w64 / OpenBLAS support and has put -together toolchains for that option. The toolchains are available at -https://bitbucket.org/carlkl/mingw-w64-for-python/downloads. The site.cfg -should be configured like so: - - [openblas] - libraries = openblaspy - library_dirs = /lib - include_dirs = /include - -The libopenblaspy.dll from /bin must be copied to numpy/core -before the build. For this mingw-w64 toolchain manual creation of the python -import libs is necessary, i.e.: - - gendef python2.7.dll - dlltool -D python27.dll -d python27.def -l libpython27.dll.a - move libpython27.dll.a libs\libpython27.dll.a - -For python-2.6 up to python 3.2 use -https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_win32_vc90.tar.xz -or -https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_amd64_vc90.tar.xz - -For python-3.3 and python-3.4 use -https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_win32_vc100.tar.xz -or -https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_amd64_vc100.tar.xz - - MS compilers ------------ diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 2c694f936c6a..10c22ae5a5e1 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -4524,6 +4524,15 @@ PyMODINIT_FUNC initmultiarray(void) { goto err; } +#if defined(MS_WIN64) && defined(__GNUC__) + PyErr_WarnEx(PyExc_Warning, + "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ + "and only available for \n" \ + "testing. You are advised not to use it for production. \n\n" \ + "CRASHES ARE TO BE EXPECTED - PLEASE REPORT THEM TO NUMPY DEVELOPERS", + 1); +#endif + /* Initialize access to the PyDateTime API */ numpy_pydatetime_import(); diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index a7fd3a77f747..37be0800d487 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -20,8 +20,6 @@ # XXX: handle cross compilation def is_win64(): return sys.platform == "win32" and platform.architecture()[0] == "64bit" -def is_win32(): - return sys.platform == "win32" and platform.architecture()[0] == "32bit" if is_win64(): #_EXTRAFLAGS = ["-fno-leading-underscore"] @@ -138,7 +136,7 @@ def get_flags_linker_so(self): opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: - opt.append("-shared -Wl,-gc-sections -Wl,-s") + opt.append("-shared") if sys.platform.startswith('sunos'): # SunOS often has dynamically loaded symbols defined in the # static library libg2c.a The linker doesn't like this. To @@ -210,18 +208,9 @@ def get_flags_opt(self): # With this compiler version building Fortran BLAS/LAPACK # with -O3 caused failures in lib.lapack heevr,syevr tests. 
opt = ['-O2'] - elif v and v >= '4.6.0': - if is_win32(): - # use -mincoming-stack-boundary=2 - # due to the change to 16 byte stack alignment since GCC 4.6 - # but 32 bit Windows ABI defines 4 bytes stack alignment - opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2 ' - '-mincoming-stack-boundary=2'] - else: - opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2'] else: - opt = ['-O2'] - + opt = ['-O3'] + opt.append('-funroll-loops') return opt def _c_arch_flags(self): @@ -361,7 +350,10 @@ def get_target(self): return "" def get_flags_opt(self): - return GnuFCompiler.get_flags_opt(self) + if is_win64(): + return ['-O0'] + else: + return GnuFCompiler.get_flags_opt(self) def _can_target(cmd, arch): """Return true if the architecture supports the -arch flag""" @@ -386,13 +378,9 @@ def _can_target(cmd, arch): from distutils import log log.set_verbosity(2) - try: - compiler = GnuFCompiler() - compiler.customize() - print(compiler.get_version()) - except Exception: - msg = get_exception() - print(msg) + compiler = GnuFCompiler() + compiler.customize() + print(compiler.get_version()) try: compiler = Gnu95FCompiler() diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index f72c3bbbbfb5..d22a2818eb14 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -87,30 +87,17 @@ def __init__ (self, elif self.linker_dll == 'gcc': self.linker = 'g++' - p = subprocess.Popen(['gcc', '--version'], shell=True, - stdout=subprocess.PIPE) - out_string = p.stdout.read() - p.stdout.close() - - # Before build with MinGW-W64 generate the python import library - # with gendef and dlltool according to the MingW-W64 FAQ. - # Use the MinGW-W64 provided msvc runtime import libraries. - # Don't call build_import_library() and build_msvcr_library. - - if 'MinGW-W64' not in str(out_string): - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't - # exist. - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it - # doesn't exist. - msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
+ msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') # Define the MSVC version as hint for MinGW msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr')) @@ -131,12 +118,10 @@ def __init__ (self, else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables( - compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64' - ' -O2 -msse2 -Wall', - compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64' - ' -O2 -msse2 -Wall -Wstrict-prototypes', - linker_exe='gcc', - linker_so='gcc -shared -Wl,-gc-sections -Wl,-s') + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') else: if self.gcc_version <= "3.0.0": self.set_executables( @@ -154,21 +139,13 @@ def __init__ (self, linker_exe='g++ -mno-cygwin', linker_so='g++ -mno-cygwin -shared') else: - # gcc-4 series releases do not support -mno-cygwin option i686 - # build needs '-mincoming-stack-boundary=2' due to ABI - # incompatibility to Win32 ABI - self.set_executables( - compiler='gcc -O2 -march=core2 -mtune=generic' - ' -mfpmath=sse -msse2' - ' -mincoming-stack-boundary=2 -Wall', - compiler_so='gcc -O2 -march=core2 -mtune=generic' - ' -mfpmath=sse -msse2' - ' -mincoming-stack-boundary=2 -Wall' - ' -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared -Wl,-gc-sections -Wl,-s') - # added for python2.3 support we can't pass it through set_executables - # because pre 2.2 would fail + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables(compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] # Maybe we should also append -mthreads, but then the finished dlls diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 9dd48e2dccef..0da13a7df2b9 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1751,24 +1751,6 @@ def check_embedded_lapack(self, info): res = False finally: shutil.rmtree(tmpdir) - if sys.platform == 'win32' and not res: - c = distutils.ccompiler.new_compiler(compiler='mingw32') - tmpdir = tempfile.mkdtemp() - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - try: - with open(src, 'wt') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) return res diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4516c92488ab..3a4ffd74d1e9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -732,7 +732,7 @@ def test_ufunc(self): args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) r1 = f(args) r2 = np.cos(args) - assert_array_almost_equal(r1, r2) + assert_array_equal(r1, r2) def test_keywords(self): diff --git a/setup.py b/setup.py index 8e5c3d04fad5..8f4a9895173e 100755 --- a/setup.py +++ b/setup.py @@ -221,7 +221,6 @@ def setup_package(): 
         platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
         test_suite='nose.collector',
         cmdclass={"sdist": sdist_checked},
-        package_data={'numpy.core': ['libopenblaspy.dll']},
     )

     # Run build

From 25203e6e8b47b3bf08304273815d6a4b900778f3 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 20 Oct 2015 09:33:15 -0600
Subject: [PATCH 119/496] MAINT: Restore test fix that was reverted.

Use assert_array_almost_equal instead of assert_array_equal when comparing
against python.math functions.

---
 numpy/lib/tests/test_function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 3a4ffd74d1e9..4516c92488ab 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -732,7 +732,7 @@ def test_ufunc(self):
         args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
         r1 = f(args)
         r2 = np.cos(args)
-        assert_array_equal(r1, r2)
+        assert_array_almost_equal(r1, r2)

     def test_keywords(self):

From 3edf1a43987be276459e01c4afa7bbc6c74e7144 Mon Sep 17 00:00:00 2001
From: Jonathan Helmus
Date: Tue, 20 Oct 2015 20:35:43 -0500
Subject: [PATCH 120/496] BUG: scalar argument to ma.atleast_* return arrays

The np.ma.atleast_1d, np.ma.atleast_2d, np.ma.atleast_3d and np.ma.diagflat
functions return arrays when given a scalar in the same manner as their
non-ma counterparts. Previously these functions would return None.
Additionally, the np.ma vstack, row_stack, hstack, column_stack, dstack,
and hsplit functions now raise an exception when given a scalar argument.

closes #3367
---
 numpy/ma/extras.py            |  4 ++++
 numpy/ma/tests/test_extras.py | 28 ++++++++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index b4021df631d8..ae4e0cee568e 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -270,6 +270,10 @@ def __call__(self, *args, **params):
             _d = func(tuple([np.asarray(a) for a in x]), **params)
             _m = func(tuple([getmaskarray(a) for a in x]), **params)
             return masked_array(_d, mask=_m)
+        else:
+            _d = func(np.asarray(x), **params)
+            _m = func(getmaskarray(x), **params)
+            return masked_array(_d, mask=_m)
     else:
         arrays = []
         args = list(args)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index f07083d172e7..c41c629fc475 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -23,11 +23,12 @@
     nomask, ones, zeros, count
     )
 from numpy.ma.extras import (
-    atleast_2d, mr_, dot, polyfit, cov, corrcoef, median, average, unique,
-    setxor1d, setdiff1d, union1d, intersect1d, in1d, ediff1d,
-    apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
+    atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef,
+    median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d,
+    ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
     mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
-    notmasked_contiguous, notmasked_edges, masked_all, masked_all_like
+    notmasked_contiguous, notmasked_edges, masked_all, masked_all_like,
+    diagflat
     )
 import numpy.ma.extras as mae

@@ -1147,6 +1148,25 @@ def test_atleast2d(self):
         assert_equal(a.shape, (3,))
         assert_equal(a.mask.shape, a.data.shape)

+    def test_shape_scalar(self):
+        # the atleast and diagflat function should work with scalars
+        # GitHub issue #3367
+        b = atleast_1d(1.0)
+        assert_equal(b.shape, (1, ))
+        assert_equal(b.mask.shape, b.data.shape)
+
+        b = atleast_2d(1.0)
+        assert_equal(b.shape, (1, 1))
+        assert_equal(b.mask.shape, b.data.shape)
+
+        b = atleast_3d(1.0)
+        assert_equal(b.shape, (1, 1, 1))
+        assert_equal(b.mask.shape, b.data.shape)
+
+        b = diagflat(1.0)
+        assert_equal(b.shape, (1, 1))
+        assert_equal(b.mask.shape, b.data.shape)
+

 if __name__ == "__main__":
     run_module_suite()
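A quick sketch of the behavior this patch establishes (illustrative only; the
tests added above assert the same thing)::

    import numpy as np

    # A scalar now comes back as a masked array with a full-size mask,
    # mirroring the shapes returned by the plain numpy atleast_* functions.
    b = np.ma.atleast_2d(1.0)
    print(b.shape)       # (1, 1)
    print(b.mask.shape)  # (1, 1), the same as b.data.shape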
From 8da9c711446a5f2dc5b7ec400358ccdd208055fc Mon Sep 17 00:00:00 2001
From: Jonathan Helmus
Date: Tue, 20 Oct 2015 21:09:08 -0500
Subject: [PATCH 121/496] BUG: ma.masked_values does not shrink mask if
 requested

When called with the shrink parameter set to False, np.ma.masked_values
will create a False filled array mask and not shrink the mask. Previously
the mask would be shrunk to a single False scalar.

closes #2674
---
 numpy/ma/core.py            | 4 ++--
 numpy/ma/tests/test_core.py | 5 +++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 6984165797e2..4ea52d0ab849 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -1544,7 +1544,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
           dtype=[('man', '|b1'), ('mouse', '|b1')])

     """
-    if m is nomask:
+    if m is nomask and shrink:
         return nomask
     elif isinstance(m, ndarray):
         # We won't return after this point to make sure we can shrink the mask
@@ -2247,7 +2247,7 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
     else:
         condition = umath.equal(xnew, value)
         mask = nomask
-    mask = mask_or(mask, make_mask(condition, shrink=shrink))
+    mask = mask_or(mask, make_mask(condition, shrink=shrink), shrink=shrink)
     return masked_array(xnew, mask=mask, copy=copy, fill_value=value)

diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index f832ee60d7a1..0a98212540ce 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1145,6 +1145,11 @@ def test_noshrinking(self):
         a /= 1.
         assert_equal(a.mask, [0, 0, 0])

+    def test_noshink_on_creation(self):
+        # Check that the mask is not shrunk on array creation when not wanted
+        a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
+        assert_equal(a.mask, [0, 0, 0])
+
     def test_mod(self):
         # Tests mod
         (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
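Sketched usage of the fix (matches the regression test above)::

    import numpy as np

    # With shrink=False the mask stays a full boolean array instead of
    # collapsing to the scalar False.
    a = np.ma.masked_values([1.0, 2.5, 3.1], 1.5, shrink=False)
    print(a.mask)  # [False False False]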
From f7d3de79c39621398250fba4e9749cbb445f907a Mon Sep 17 00:00:00 2001
From: Sumith
Date: Tue, 20 Oct 2015 19:31:32 +0530
Subject: [PATCH 122/496] MAINT: Remove version.py.in.

This file was used by Bento. This was left over and is no longer being used.

---
 numpy/version.py.in | 8 --------
 1 file changed, 8 deletions(-)
 delete mode 100644 numpy/version.py.in

diff --git a/numpy/version.py.in b/numpy/version.py.in
deleted file mode 100644
index e466c1ede7f8..000000000000
--- a/numpy/version.py.in
+++ /dev/null
@@ -1,8 +0,0 @@
-short_version = $VERSION
-version = $VERSION
-full_version = $FULL_VERSION
-git_revision = $GIT_REVISION
-release = $IS_RELEASED
-
-if not release:
-    version = full_version

From f549adf7728061e81558ecb7f3d2fe8b32a40602 Mon Sep 17 00:00:00 2001
From: Sumith
Date: Wed, 21 Oct 2015 15:27:10 +0530
Subject: [PATCH 123/496] MAINT: Remove __config__.py.in.

This file was used by Bento. This was left over and is no longer being used.

---
 numpy/__config__.py.in | 2 --
 1 file changed, 2 deletions(-)
 delete mode 100644 numpy/__config__.py.in

diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in
deleted file mode 100644
index 3190d21b4c10..000000000000
--- a/numpy/__config__.py.in
+++ /dev/null
@@ -1,2 +0,0 @@
-def show():
-    pass

From 5caf4c932e43c47d73fad761e3257bb0d4551cc2 Mon Sep 17 00:00:00 2001
From: Ethan Kruse
Date: Mon, 19 Oct 2015 13:29:01 -0700
Subject: [PATCH 124/496] BUG: Make median work for empty arrays (issue #6462)

np.median([]) returns NaN. Fixes bug/regression that raised an IndexError.
Added tests to ensure continued support of empty arrays.

---
 numpy/lib/function_base.py            |  2 +-
 numpy/lib/tests/test_function_base.py | 28 +++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 555d0838645c..fef69dff3207 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3339,7 +3339,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):
         indexer[axis] = slice(index-1, index+1)

     # Check if the array contains any nan's
-    if np.issubdtype(a.dtype, np.inexact):
+    if np.issubdtype(a.dtype, np.inexact) and sz > 0:
         # warn and return nans like mean would
         rout = mean(part[indexer], axis=axis, out=out)
         part = np.rollaxis(part, axis, part.ndim)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 4516c92488ab..cc53c2b8ebaa 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2597,6 +2597,34 @@ def test_nan_behavior(self):
             assert_equal(np.median(a, (0, 2)), b)
             assert_equal(len(w), 1)

+    def test_empty(self):
+        # empty arrays
+        a = np.array([], dtype=float)
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_equal(np.median(a), np.nan)
+            assert_(w[0].category is RuntimeWarning)
+
+        # multiple dimensions
+        a = np.array([], dtype=float, ndmin=3)
+        # no axis
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_equal(np.median(a), np.nan)
+            assert_(w[0].category is RuntimeWarning)
+
+        # axis 0 and 1
+        b = np.array([], dtype=float, ndmin=2)
+        assert_equal(np.median(a, axis=0), b)
+        assert_equal(np.median(a, axis=1), b)
+
+        # axis 2
+        b = np.array(np.nan, dtype=float, ndmin=2)
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_equal(np.median(a, axis=2), b)
+            assert_(w[0].category is RuntimeWarning)
+
     def test_object(self):
         o = np.arange(7.)
         assert_(type(np.median(o.astype(object))), float)
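The new behavior in brief (an illustrative snippet; the added tests above
cover the same cases)::

    import warnings

    import numpy as np

    # Median of an empty float array now returns nan and emits a
    # RuntimeWarning, like np.mean, instead of raising IndexError.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        print(np.median(np.array([], dtype=float)))  # nan
        print(w[0].category.__name__)                # RuntimeWarning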
From f994f2a7569b35810e35c96897680f9b38f291f7 Mon Sep 17 00:00:00 2001
From: lzkelley
Date: Wed, 21 Oct 2015 22:17:00 -0400
Subject: [PATCH 125/496] DOC: clarify usage of 'argsort' return value.

In response to Ticket #4724, explain that the 'index_array' returned by
'argsort' can only be used to (directly) sort a one-dimensional input array.

---
 numpy/core/fromnumeric.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 7ba3645127b7..0fc572cb6c81 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -848,7 +848,7 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
     -------
     index_array : ndarray, int
         Array of indices that sort `a` along the specified axis.
-        In other words, ``a[index_array]`` yields a sorted `a`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.

     See Also
     --------

From b94546d7426627c33686ddbf72bd69b8ddd4afed Mon Sep 17 00:00:00 2001
From: alex
Date: Wed, 21 Oct 2015 17:32:51 -0400
Subject: [PATCH 126/496] BUG: fix inner() by copying if needed to enforce
 contiguity

---
 numpy/core/src/multiarray/cblasfuncs.c | 22 ++++++++++++++++++++--
 numpy/core/tests/test_multiarray.py    | 16 ++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index 9c7c26725706..67f325ba1577 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -674,8 +674,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
  *
  * This is for use by PyArray_InnerProduct. It is assumed on entry that the
  * arrays ap1 and ap2 have a common data type given by typenum that is
- * float, double, cfloat, or cdouble and have dimension <= 2, and have the
- * contiguous flag set. The * __numpy_ufunc__ nonsense is also assumed to
+ * float, double, cfloat, or cdouble and have dimension <= 2.
+ * The * __numpy_ufunc__ nonsense is also assumed to
 * have been taken care of.
 */

@@ -689,6 +689,24 @@ cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2)
     npy_intp dimensions[NPY_MAXDIMS];
     PyTypeObject *subtype;

+    /* assure contiguous arrays */
+    if (!PyArray_IS_C_CONTIGUOUS(ap1)) {
+        PyObject *op1 = PyArray_NewCopy(ap1, NPY_CORDER);
+        Py_DECREF(ap1);
+        ap1 = (PyArrayObject *)op1;
+        if (ap1 == NULL) {
+            goto fail;
+        }
+    }
+    if (!PyArray_IS_C_CONTIGUOUS(ap2)) {
+        PyObject *op2 = PyArray_NewCopy(ap2, NPY_CORDER);
+        Py_DECREF(ap2);
+        ap2 = (PyArrayObject *)op2;
+        if (ap2 == NULL) {
+            goto fail;
+        }
+    }
+
     if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
         /* One of ap1 or ap2 is a scalar */
         if (PyArray_NDIM(ap1) == 0) {
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 85b0e5519ee8..41b54a18b7f8 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4728,6 +4728,22 @@ def test_vecself(self):
         p = np.inner(a, a)
         assert_almost_equal(p, 0, decimal=14)

+    def test_inner_product_with_various_contiguities(self):
+        # github issue 6532
+        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+            # check an inner product involving a matrix transpose
+            A = np.array([[1, 2], [3, 4]], dtype=dt)
+            B = np.array([[1, 3], [2, 4]], dtype=dt)
+            C = np.array([1, 1], dtype=dt)
+            desired = np.array([4, 6], dtype=dt)
+            assert_equal(np.inner(A.T, C), desired)
+            assert_equal(np.inner(B, C), desired)
+            # check an inner product involving an aliased and reversed view
+            a = np.arange(5).astype(dt)
+            b = a[::-1]
+            desired = np.array(10, dtype=dt).item()
+            assert_equal(np.inner(b, a), desired)
+

 class TestSummarization(TestCase):
     def test_1d(self):

From 9fa0dba63ac76a43a157e07c2a43f4678355653e Mon Sep 17 00:00:00 2001
From: Stephan Hoyer
Date: Sat, 24 Oct 2015 12:01:16 -0700
Subject: [PATCH 127/496] BUG: error in broadcast_arrays with as_strided array

Fixes GH6491

---
 numpy/lib/stride_tricks.py            | 9 ++++++---
 numpy/lib/tests/test_stride_tricks.py | 8 ++++++++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 416776ff4751..f4b43a5a92d8 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -62,11 +62,14 @@ def _broadcast_to(array, shape, subok, readonly):
     if any(size < 0 for size in
shape): raise ValueError('all elements of broadcast shape must be non-' 'negative') + needs_writeable = not readonly and array.flags.writeable + extras = ['reduce_ok'] if needs_writeable else [] + op_flag = 'readwrite' if needs_writeable else 'readonly' broadcast = np.nditer( - (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'], - op_flags=['readonly'], itershape=shape, order='C').itviews[0] + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=[op_flag], itershape=shape, order='C').itviews[0] result = _maybe_view_as_subclass(array, broadcast) - if not readonly and array.flags.writeable: + if needs_writeable and not result.flags.writeable: result.flags.writeable = True return result diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index aad0695be91e..06e659002322 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -390,6 +390,14 @@ def test_writeable(): _, result = broadcast_arrays(0, original) assert_equal(result.flags.writeable, False) + # regresssion test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + def test_reference_types(): input_array = np.array('a', dtype=object) From 8d77f250d87073697f88fcb7f697cc68bea09d85 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 25 Oct 2015 00:16:41 +0200 Subject: [PATCH 128/496] MAINT: minor update to "make upload" doc build command. Ensure that http://docs.scipy.org/doc/numpy/reference/ also has the content of the latest release. [ci skip] --- doc/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/Makefile b/doc/Makefile index b52933e14dfa..47f191374583 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -100,6 +100,8 @@ upload: $(UPLOAD_DIR)/numpy-user-$(RELEASE).pdf ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \ $(UPLOAD_DIR)/numpy-html-$(RELEASE).zip + ssh $(USERNAME)@new.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz + ssh $(USERNAME)@new.scipy.org cp -r $(UPLOAD_DIR)/* /srv/docs_scipy_org/doc/numpy ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh #------------------------------------------------------------------------------ From 9d63530e0b7d0a880a0f49713c48ef6f0c11b315 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 25 Oct 2015 13:57:37 +0200 Subject: [PATCH 129/496] DOC: remove placeholders and incompleteness warnings Neither are useful, and will discourage both reading and editing of the material. 
--- doc/source/user/howtofind.rst | 7 ------- doc/source/user/index.rst | 14 -------------- doc/source/user/misc.rst | 2 -- doc/source/user/performance.rst | 5 ----- numpy/doc/howtofind.py | 10 ---------- numpy/doc/io.py | 10 ---------- numpy/doc/jargon.py | 10 ---------- numpy/doc/methods_vs_functions.py | 10 ---------- numpy/doc/performance.py | 10 ---------- 9 files changed, 78 deletions(-) delete mode 100644 doc/source/user/howtofind.rst delete mode 100644 doc/source/user/performance.rst delete mode 100644 numpy/doc/howtofind.py delete mode 100644 numpy/doc/io.py delete mode 100644 numpy/doc/jargon.py delete mode 100644 numpy/doc/methods_vs_functions.py delete mode 100644 numpy/doc/performance.py diff --git a/doc/source/user/howtofind.rst b/doc/source/user/howtofind.rst deleted file mode 100644 index 00ed5daa70ab..000000000000 --- a/doc/source/user/howtofind.rst +++ /dev/null @@ -1,7 +0,0 @@ -************************* -How to find documentation -************************* - -.. seealso:: :ref:`Numpy-specific help functions ` - -.. automodule:: numpy.doc.howtofind diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 022efcaeb4ed..6c0a2e929d95 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -9,20 +9,6 @@ explains how to install and make use of the most important features of NumPy. For detailed reference documentation of the functions and classes contained in the package, see the :ref:`reference`. -.. warning:: - - This "User Guide" is still a work in progress; some of the material - is not organized, and several aspects of NumPy are not yet covered - sufficient detail. We are an open source community continually - working to improve the documentation and eagerly encourage interested - parties to contribute. For information on how to do so, please visit - the NumPy `doc wiki `_. - - More documentation for NumPy can be found on the `numpy.org - `__ website. - - Thanks! - .. toctree:: :maxdepth: 2 diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 0e1807f3f900..c10aea48668e 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -3,5 +3,3 @@ Miscellaneous ************* .. automodule:: numpy.doc.misc - -.. automodule:: numpy.doc.methods_vs_functions diff --git a/doc/source/user/performance.rst b/doc/source/user/performance.rst deleted file mode 100644 index 59f8a2edc9ea..000000000000 --- a/doc/source/user/performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -*********** -Performance -*********** - -.. automodule:: numpy.doc.performance diff --git a/numpy/doc/howtofind.py b/numpy/doc/howtofind.py deleted file mode 100644 index e080d263a279..000000000000 --- a/numpy/doc/howtofind.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -================= -How to Find Stuff -================= - -How to find things in NumPy. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy/doc/io.py b/numpy/doc/io.py deleted file mode 100644 index e45bfc9b3211..000000000000 --- a/numpy/doc/io.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -========= -Array I/O -========= - -Placeholder for array I/O documentation. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy/doc/jargon.py b/numpy/doc/jargon.py deleted file mode 100644 index 3fcbc7d23f2f..000000000000 --- a/numpy/doc/jargon.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -====== -Jargon -====== - -Placeholder for computer science, engineering and other jargon. 
- -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy/doc/methods_vs_functions.py b/numpy/doc/methods_vs_functions.py deleted file mode 100644 index 4149000bc80a..000000000000 --- a/numpy/doc/methods_vs_functions.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -===================== -Methods vs. Functions -===================== - -Placeholder for Methods vs. Functions documentation. - -""" -from __future__ import division, absolute_import, print_function diff --git a/numpy/doc/performance.py b/numpy/doc/performance.py deleted file mode 100644 index b0c158bf33c2..000000000000 --- a/numpy/doc/performance.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - -=========== -Performance -=========== - -Placeholder for Improving Performance documentation. - -""" -from __future__ import division, absolute_import, print_function From 3f6ffa8b7062c7023705361540b6efbe192289c7 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 25 Oct 2015 16:20:51 +0200 Subject: [PATCH 130/496] DOC: reorganize user guide a bit + import "tentative numpy tutorial" from wiki The user guide was missing a quick tutorial --- the basics.* stuff is somewhat too complex already. The "building numpy" instructions also should not be "introductory material". --- doc/source/reference/routines.rst | 2 + doc/source/user/basics.rst | 2 +- doc/source/user/building.rst | 143 +++ doc/source/user/index.rst | 7 +- doc/source/user/install.rst | 197 +--- doc/source/user/introduction.rst | 10 - doc/source/user/quickstart.rst | 1414 +++++++++++++++++++++++++++++ doc/source/user/setting-up.rst | 9 + 8 files changed, 1580 insertions(+), 204 deletions(-) create mode 100644 doc/source/user/building.rst delete mode 100644 doc/source/user/introduction.rst create mode 100644 doc/source/user/quickstart.rst create mode 100644 doc/source/user/setting-up.rst diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst index c2f091d83ea4..a9e80480b870 100644 --- a/doc/source/reference/routines.rst +++ b/doc/source/reference/routines.rst @@ -1,3 +1,5 @@ +.. _routines: + ******** Routines ******** diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst index bbc3ab174627..1d91cc55cd64 100644 --- a/doc/source/user/basics.rst +++ b/doc/source/user/basics.rst @@ -3,7 +3,7 @@ Numpy basics ************ .. toctree:: - :maxdepth: 2 + :maxdepth: 1 basics.types basics.creation diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst new file mode 100644 index 000000000000..c5f8fea1fa40 --- /dev/null +++ b/doc/source/user/building.rst @@ -0,0 +1,143 @@ +.. _building-from-source: + +Building from source +==================== + +A general overview of building NumPy from source is given here, with detailed +instructions for specific platforms given seperately. + +Prerequisites +------------- + +Building NumPy requires the following software installed: + +1) Python 2.6.x, 2.7.x, 3.2.x or newer + + On Debian and derivatives (Ubuntu): python, python-dev (or python3-dev) + + On Windows: the official python installer at + `www.python.org `_ is enough + + Make sure that the Python package distutils is installed before + continuing. For example, in Debian GNU/Linux, installing python-dev + also installs distutils. + + Python must also be compiled with the zlib module enabled. This is + practically always the case with pre-packaged Pythons. + +2) Compilers + + To build any extension modules for Python, you'll need a C compiler. 
+ Various NumPy modules use FORTRAN 77 libraries, so you'll also need a + FORTRAN 77 compiler installed. + + Note that NumPy is developed mainly using GNU compilers. Compilers from + other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland, + Lahey, HP, IBM, Microsoft are only supported in the form of community + feedback, and may not work out of the box. GCC 4.x (and later) compilers + are recommended. + +3) Linear Algebra libraries + + NumPy does not require any external linear algebra libraries to be + installed. However, if these are available, NumPy's setup script can detect + them and use them for building. A number of different LAPACK library setups + can be used, including optimized LAPACK libraries such as ATLAS, MKL or the + Accelerate/vecLib framework on OS X. + +Basic Installation +------------------ + +To install NumPy run:: + + python setup.py install + +To perform an in-place build that can be run from the source folder run:: + + python setup.py build_ext --inplace + +The NumPy build system uses ``distutils`` and ``numpy.distutils``. +``setuptools`` is only used when building via ``pip`` or with ``python +setupegg.py``. Using ``virtualenv`` should work as expected. + +*Note: for build instructions to do development work on NumPy itself, see +:ref:`development-environment`*. + +.. _parallel-builds: + +Parallel builds +~~~~~~~~~~~~~~~ + +From NumPy 1.10.0 on it's also possible to do a parallel build with:: + + python setup.py build -j 4 install --prefix $HOME/.local + +This will compile numpy on 4 CPUs and install it into the specified prefix. +to perform a parallel in-place build, run:: + + python setup.py build_ext --inplace -j 4 + +The number of build jobs can also be specified via the environment variable +``NPY_NUM_BUILD_JOBS``. + + +FORTRAN ABI mismatch +-------------------- + +The two most popular open source fortran compilers are g77 and gfortran. +Unfortunately, they are not ABI compatible, which means that concretely you +should avoid mixing libraries built with one with another. In particular, if +your blas/lapack/atlas is built with g77, you *must* use g77 when building +numpy and scipy; on the contrary, if your atlas is built with gfortran, you +*must* build numpy/scipy with gfortran. This applies for most other cases +where different FORTRAN compilers might have been used. + +Choosing the fortran compiler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To build with g77:: + + python setup.py build --fcompiler=gnu + +To build with gfortran:: + + python setup.py build --fcompiler=gnu95 + +For more information see:: + + python setup.py build --help-fcompiler + +How to check the ABI of blas/lapack/atlas +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One relatively simple and reliable way to check for the compiler used to build +a library is to use ldd on the library. If libg2c.so is a dependency, this +means that g77 has been used. If libgfortran.so is a a dependency, gfortran +has been used. If both are dependencies, this means both have been used, which +is almost always a very bad idea. + +Disabling ATLAS and other accelerated libraries +----------------------------------------------- + +Usage of ATLAS and other accelerated libraries in Numpy can be disabled +via:: + + BLAS=None LAPACK=None ATLAS=None python setup.py build + + +Supplying additional compiler flags +----------------------------------- + +Additional compiler flags can be supplied by setting the ``OPT``, +``FOPT`` (for Fortran), and ``CC`` environment variables. 
+ + +Building with ATLAS support +--------------------------- + +Ubuntu +~~~~~~ + +You can install the necessary package for optimized ATLAS with this command:: + + sudo apt-get install libatlas-base-dev diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 6c0a2e929d95..9f45b68d668a 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -10,10 +10,11 @@ NumPy. For detailed reference documentation of the functions and classes contained in the package, see the :ref:`reference`. .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - introduction + setting-up + quickstart basics - performance misc + building c-info diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst index dcf20498c132..ebb6bce623fa 100644 --- a/doc/source/user/install.rst +++ b/doc/source/user/install.rst @@ -1,194 +1,11 @@ -***************************** -Building and installing NumPy -***************************** - -Binary installers -================= +**************** +Installing NumPy +**************** In most use cases the best way to install NumPy on your system is by using an -installable binary package for your operating system. - -Windows -------- - -Good solutions for Windows are, `Enthought Canopy -`_, `Anaconda -`_ (which both provide binary installers -for Windows, OS X and Linux) and `Python (x, y) `_. -Both of these packages include Python, NumPy and many additional packages. - -A lightweight alternative is to download the Python -installer from `www.python.org `_ and the NumPy -installer for your Python version from the Sourceforge `download site --superpack-win32.exe /arch nosse - -or ``sse2`` or ``sse3`` instead of ``nosse``. - -Linux ------ - -All major distributions provide packages for NumPy. These are usually -reasonably up-to-date, but sometimes lag behind the most recent NumPy release. - -Mac OS X --------- - -Universal binary installers for NumPy are available from the `download site -`_ -this will give you a binary install (from the wheel packages) compatible with -at python.org Python, Homebrew and MacPorts:: - - pip install numpy - - -.. _building-from-source: - -Building from source -==================== - -A general overview of building NumPy from source is given here, with detailed -instructions for specific platforms given seperately. - -Prerequisites -------------- - -Building NumPy requires the following software installed: - -1) Python 2.6.x, 2.7.x, 3.2.x or newer - - On Debian and derivatives (Ubuntu): python, python-dev (or python3-dev) - - On Windows: the official python installer at - `www.python.org `_ is enough - - Make sure that the Python package distutils is installed before - continuing. For example, in Debian GNU/Linux, installing python-dev - also installs distutils. - - Python must also be compiled with the zlib module enabled. This is - practically always the case with pre-packaged Pythons. - -2) Compilers - - To build any extension modules for Python, you'll need a C compiler. - Various NumPy modules use FORTRAN 77 libraries, so you'll also need a - FORTRAN 77 compiler installed. - - Note that NumPy is developed mainly using GNU compilers. Compilers from - other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland, - Lahey, HP, IBM, Microsoft are only supported in the form of community - feedback, and may not work out of the box. GCC 4.x (and later) compilers - are recommended. - -3) Linear Algebra libraries - - NumPy does not require any external linear algebra libraries to be - installed. 
However, if these are available, NumPy's setup script can detect - them and use them for building. A number of different LAPACK library setups - can be used, including optimized LAPACK libraries such as ATLAS, MKL or the - Accelerate/vecLib framework on OS X. - -Basic Installation ------------------- - -To install NumPy run:: - - python setup.py install - -To perform an in-place build that can be run from the source folder run:: - - python setup.py build_ext --inplace - -The NumPy build system uses ``distutils`` and ``numpy.distutils``. -``setuptools`` is only used when building via ``pip`` or with ``python -setupegg.py``. Using ``virtualenv`` should work as expected. - -*Note: for build instructions to do development work on NumPy itself, see -:ref:`development-environment`*. - -.. _parallel-builds: - -Parallel builds -~~~~~~~~~~~~~~~ - -From NumPy 1.10.0 on it's also possible to do a parallel build with:: - - python setup.py build -j 4 install --prefix $HOME/.local - -This will compile numpy on 4 CPUs and install it into the specified prefix. -to perform a parallel in-place build, run:: - - python setup.py build_ext --inplace -j 4 - -The number of build jobs can also be specified via the environment variable -``NPY_NUM_BUILD_JOBS``. - - -FORTRAN ABI mismatch --------------------- - -The two most popular open source fortran compilers are g77 and gfortran. -Unfortunately, they are not ABI compatible, which means that concretely you -should avoid mixing libraries built with one with another. In particular, if -your blas/lapack/atlas is built with g77, you *must* use g77 when building -numpy and scipy; on the contrary, if your atlas is built with gfortran, you -*must* build numpy/scipy with gfortran. This applies for most other cases -where different FORTRAN compilers might have been used. - -Choosing the fortran compiler -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To build with g77:: - - python setup.py build --fcompiler=gnu - -To build with gfortran:: - - python setup.py build --fcompiler=gnu95 - -For more information see:: - - python setup.py build --help-fcompiler - -How to check the ABI of blas/lapack/atlas -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One relatively simple and reliable way to check for the compiler used to build -a library is to use ldd on the library. If libg2c.so is a dependency, this -means that g77 has been used. If libgfortran.so is a a dependency, gfortran -has been used. If both are dependencies, this means both have been used, which -is almost always a very bad idea. - -Disabling ATLAS and other accelerated libraries ------------------------------------------------ - -Usage of ATLAS and other accelerated libraries in Numpy can be disabled -via:: - - BLAS=None LAPACK=None ATLAS=None python setup.py build - - -Supplying additional compiler flags ------------------------------------ - -Additional compiler flags can be supplied by setting the ``OPT``, -``FOPT`` (for Fortran), and ``CC`` environment variables. - - -Building with ATLAS support ---------------------------- - -Ubuntu -~~~~~~ - -You can install the necessary package for optimized ATLAS with this command:: +pre-built package for your operating system. - sudo apt-get install libatlas-base-dev +Please see http://scipy.org/install.html for links to available options. +For instructions on building for source package, see +:doc:`building`. This information is useful mainly for advanced users. 
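+
+For example, if ``pip`` is available on your system, the following command is
+usually enough to install the latest released version (one option among those
+listed on the page above)::
+
+    pip install numpy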
diff --git a/doc/source/user/introduction.rst b/doc/source/user/introduction.rst deleted file mode 100644 index d29c13b3076b..000000000000 --- a/doc/source/user/introduction.rst +++ /dev/null @@ -1,10 +0,0 @@ -************ -Introduction -************ - - -.. toctree:: - - whatisnumpy - install - howtofind diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst new file mode 100644 index 000000000000..b636b5984b93 --- /dev/null +++ b/doc/source/user/quickstart.rst @@ -0,0 +1,1414 @@ +=================== +Quickstart tutorial +=================== + +.. currentmodule:: numpy + +.. testsetup:: + + import numpy as np + np.random.seed(1) + +Prerequisites +============= + +Before reading this tutorial you should know a bit of Python. If you +would like to refresh your memory, take a look at the `Python +tutorial `__. + +If you wish to work the examples in this tutorial, you must also have +some software installed on your computer. Please see +http://scipy.org/install.html for instructions. + +The Basics +========== + +NumPy's main object is the homogeneous multidimensional array. It is a +table of elements (usually numbers), all of the same type, indexed by a +tuple of positive integers. In Numpy dimensions are called *axes*. The +number of axes is *rank*. + +For example, the coordinates of a point in 3D space ``[1, 2, 1]`` is an +array of rank 1, because it has one axis. That axis has a length of 3. +In example pictured below, the array has rank 2 (it is 2-dimensional). +The first dimension (axis) has a length of 2, the second dimension has a +length of 3. + +:: + + [[ 1., 0., 0.], + [ 0., 1., 2.]] + +Numpy's array class is called ``ndarray``. It is also known by the alias +``array``. Note that ``numpy.array`` is not the same as the Standard +Python Library class ``array.array``, which only handles one-dimensional +arrays and offers less functionality. The more important attributes of +an ``ndarray`` object are: + +ndarray.ndim + the number of axes (dimensions) of the array. In the Python world, + the number of dimensions is referred to as *rank*. +ndarray.shape + the dimensions of the array. This is a tuple of integers indicating + the size of the array in each dimension. For a matrix with *n* rows + and *m* columns, ``shape`` will be ``(n,m)``. The length of the + ``shape`` tuple is therefore the rank, or number of dimensions, + ``ndim``. +ndarray.size + the total number of elements of the array. This is equal to the + product of the elements of ``shape``. +ndarray.dtype + an object describing the type of the elements in the array. One can + create or specify dtype's using standard Python types. Additionally + NumPy provides types of its own. numpy.int32, numpy.int16, and + numpy.float64 are some examples. +ndarray.itemsize + the size in bytes of each element of the array. For example, an + array of elements of type ``float64`` has ``itemsize`` 8 (=64/8), + while one of type ``complex32`` has ``itemsize`` 4 (=32/8). It is + equivalent to ``ndarray.dtype.itemsize``. +ndarray.data + the buffer containing the actual elements of the array. Normally, we + won't need to use this attribute because we will access the elements + in an array using indexing facilities. 
+
+An example
+----------
+
+    >>> import numpy as np
+    >>> a = np.arange(15).reshape(3, 5)
+    >>> a
+    array([[ 0,  1,  2,  3,  4],
+           [ 5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14]])
+    >>> a.shape
+    (3, 5)
+    >>> a.ndim
+    2
+    >>> a.dtype.name
+    'int64'
+    >>> a.itemsize
+    8
+    >>> a.size
+    15
+    >>> type(a)
+    <type 'numpy.ndarray'>
+    >>> b = np.array([6, 7, 8])
+    >>> b
+    array([6, 7, 8])
+    >>> type(b)
+    <type 'numpy.ndarray'>
+
+
+Array Creation
+--------------
+
+There are several ways to create arrays.
+
+For example, you can create an array from a regular Python list or tuple
+using the ``array`` function. The type of the resulting array is deduced
+from the type of the elements in the sequences.
+
+    >>> import numpy as np
+    >>> a = np.array([2,3,4])
+    >>> a
+    array([2, 3, 4])
+    >>> a.dtype
+    dtype('int64')
+    >>> b = np.array([1.2, 3.5, 5.1])
+    >>> b.dtype
+    dtype('float64')
+
+A frequent error consists in calling ``array`` with multiple numeric
+arguments, rather than providing a single list of numbers as an
+argument.
+
+::
+
+    >>> a = np.array(1,2,3,4)    # WRONG
+    >>> a = np.array([1,2,3,4])  # RIGHT
+
+``array`` transforms sequences of sequences into two-dimensional arrays,
+sequences of sequences of sequences into three-dimensional arrays, and
+so on.
+
+    >>> b = np.array([(1.5,2,3), (4,5,6)])
+    >>> b
+    array([[ 1.5,  2. ,  3. ],
+           [ 4. ,  5. ,  6. ]])
+
+The type of the array can also be explicitly specified at creation time:
+
+::
+
+    >>> c = np.array( [ [1,2], [3,4] ], dtype=complex )
+    >>> c
+    array([[ 1.+0.j,  2.+0.j],
+           [ 3.+0.j,  4.+0.j]])
+
+Often, the elements of an array are originally unknown, but its size is
+known. Hence, NumPy offers several functions to create
+arrays with initial placeholder content. These minimize the necessity of
+growing arrays, an expensive operation.
+
+The function ``zeros`` creates an array full of zeros, the function
+``ones`` creates an array full of ones, and the function ``empty``
+creates an array whose initial content is random and depends on the
+state of the memory. By default, the dtype of the created array is
+``float64``.
+
+    >>> np.zeros( (3,4) )
+    array([[ 0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.]])
+    >>> np.ones( (2,3,4), dtype=np.int16 )        # dtype can also be specified
+    array([[[ 1, 1, 1, 1],
+            [ 1, 1, 1, 1],
+            [ 1, 1, 1, 1]],
+           [[ 1, 1, 1, 1],
+            [ 1, 1, 1, 1],
+            [ 1, 1, 1, 1]]], dtype=int16)
+    >>> np.empty( (2,3) )                         # uninitialized, output may vary
+    array([[  3.73603959e-262,   6.02658058e-154,   6.55490914e-260],
+           [  5.30498948e-313,   3.14673309e-307,   1.00000000e+000]])
+
+To create sequences of numbers, NumPy provides a function analogous to
+``range`` that returns arrays instead of lists.
+
+    >>> np.arange( 10, 30, 5 )
+    array([10, 15, 20, 25])
+    >>> np.arange( 0, 2, 0.3 )                 # it accepts float arguments
+    array([ 0. ,  0.3,  0.6,  0.9,  1.2,  1.5,  1.8])
+
+When ``arange`` is used with floating point arguments, it is generally
+not possible to predict the number of elements obtained, due to the
+finite floating point precision. For this reason, it is usually better
+to use the function ``linspace`` that receives as an argument the number
+of elements that we want, instead of the step:
+
+    >>> from numpy import pi
+    >>> np.linspace( 0, 2, 9 )                 # 9 numbers from 0 to 2
+    array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ,  1.25,  1.5 ,  1.75,  2.  ])
+    >>> x = np.linspace( 0, 2*pi, 100 )        # useful to evaluate function at lots of points
+    >>> f = np.sin(x)
+
+.. 
seealso:: + `array`, + `zeros`, + `zeros_like`, + `ones`, + `ones_like`, + `empty`, + `empty_like`, + `arange`, + `linspace`, + `numpy.random.rand`, + `numpy.random.randn`, + `fromfunction`, + `fromfile` + +Printing Arrays +--------------- + +When you print an array, NumPy displays it in a similar way to nested +lists, but with the following layout: + +- the last axis is printed from left to right, +- the second-to-last is printed from top to bottom, +- the rest are also printed from top to bottom, with each slice + separated from the next by an empty line. + +One-dimensional arrays are then printed as rows, bidimensionals as +matrices and tridimensionals as lists of matrices. + + >>> a = np.arange(6) # 1d array + >>> print(a) + [0 1 2 3 4 5] + >>> + >>> b = np.arange(12).reshape(4,3) # 2d array + >>> print(b) + [[ 0 1 2] + [ 3 4 5] + [ 6 7 8] + [ 9 10 11]] + >>> + >>> c = np.arange(24).reshape(2,3,4) # 3d array + >>> print(c) + [[[ 0 1 2 3] + [ 4 5 6 7] + [ 8 9 10 11]] + [[12 13 14 15] + [16 17 18 19] + [20 21 22 23]]] + +See :ref:`below ` to get +more details on ``reshape``. + +If an array is too large to be printed, NumPy automatically skips the +central part of the array and only prints the corners: + + >>> print(np.arange(10000)) + [ 0 1 2 ..., 9997 9998 9999] + >>> + >>> print(np.arange(10000).reshape(100,100)) + [[ 0 1 2 ..., 97 98 99] + [ 100 101 102 ..., 197 198 199] + [ 200 201 202 ..., 297 298 299] + ..., + [9700 9701 9702 ..., 9797 9798 9799] + [9800 9801 9802 ..., 9897 9898 9899] + [9900 9901 9902 ..., 9997 9998 9999]] + +To disable this behaviour and force NumPy to print the entire array, you +can change the printing options using ``set_printoptions``. + +:: + + >>> np.set_printoptions(threshold='nan') + + +Basic Operations +---------------- + +Arithmetic operators on arrays apply *elementwise*. A new array is +created and filled with the result. + + >>> a = np.array( [20,30,40,50] ) + >>> b = np.arange( 4 ) + >>> b + array([0, 1, 2, 3]) + >>> c = a-b + >>> c + array([20, 29, 38, 47]) + >>> b**2 + array([0, 1, 4, 9]) + >>> 10*np.sin(a) + array([ 9.12945251, -9.88031624, 7.4511316 , -2.62374854]) + >>> a<35 + array([ True, True, False, False], dtype=bool) + +Unlike in many matrix languages, the product operator ``*`` operates +elementwise in NumPy arrays. The matrix product can be performed using +the ``dot`` function or method: + + >>> A = np.array( [[1,1], + ... [0,1]] ) + >>> B = np.array( [[2,0], + ... [3,4]] ) + >>> A*B # elementwise product + array([[2, 0], + [0, 4]]) + >>> A.dot(B) # matrix product + array([[5, 4], + [3, 4]]) + >>> np.dot(A, B) # another matrix product + array([[5, 4], + [3, 4]]) + +Some operations, such as ``+=`` and ``*=``, act in place to modify an +existing array rather than create a new one. + + >>> a = np.ones((2,3), dtype=int) + >>> b = np.random.random((2,3)) + >>> a *= 3 + >>> a + array([[3, 3, 3], + [3, 3, 3]]) + >>> b += a + >>> b + array([[ 3.417022 , 3.72032449, 3.00011437], + [ 3.30233257, 3.14675589, 3.09233859]]) + >>> a += b # b is not automatically converted to integer type + Traceback (most recent call last): + ... + TypeError: Cannot cast ufunc add output from dtype('float64') to dtype('int64') with casting rule 'same_kind' + +When operating with arrays of different types, the type of the resulting +array corresponds to the more general or precise one (a behavior known +as upcasting). + + >>> a = np.ones(3, dtype=np.int32) + >>> b = np.linspace(0,pi,3) + >>> b.dtype.name + 'float64' + >>> c = a+b + >>> c + array([ 1. 
, 2.57079633, 4.14159265]) + >>> c.dtype.name + 'float64' + >>> d = np.exp(c*1j) + >>> d + array([ 0.54030231+0.84147098j, -0.84147098+0.54030231j, + -0.54030231-0.84147098j]) + >>> d.dtype.name + 'complex128' + +Many unary operations, such as computing the sum of all the elements in +the array, are implemented as methods of the ``ndarray`` class. + + >>> a = np.random.random((2,3)) + >>> a + array([[ 0.18626021, 0.34556073, 0.39676747], + [ 0.53881673, 0.41919451, 0.6852195 ]]) + >>> a.sum() + 2.5718191614547998 + >>> a.min() + 0.1862602113776709 + >>> a.max() + 0.6852195003967595 + +By default, these operations apply to the array as though it were a list +of numbers, regardless of its shape. However, by specifying the ``axis`` +parameter you can apply an operation along the specified axis of an +array: + + >>> b = np.arange(12).reshape(3,4) + >>> b + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> + >>> b.sum(axis=0) # sum of each column + array([12, 15, 18, 21]) + >>> + >>> b.min(axis=1) # min of each row + array([0, 4, 8]) + >>> + >>> b.cumsum(axis=1) # cumulative sum along each row + array([[ 0, 1, 3, 6], + [ 4, 9, 15, 22], + [ 8, 17, 27, 38]]) + + +Universal Functions +------------------- + +NumPy provides familiar mathematical functions such as sin, cos, and +exp. In NumPy, these are called "universal +functions"(\ ``ufunc``). Within NumPy, these functions +operate elementwise on an array, producing an array as output. + + >>> B = np.arange(3) + >>> B + array([0, 1, 2]) + >>> np.exp(B) + array([ 1. , 2.71828183, 7.3890561 ]) + >>> np.sqrt(B) + array([ 0. , 1. , 1.41421356]) + >>> C = np.array([2., -1., 4.]) + >>> np.add(B, C) + array([ 2., 0., 6.]) + +.. seealso:: + + `all`, + `any`, + `apply_along_axis`, + `argmax`, + `argmin`, + `argsort`, + `average`, + `bincount`, + `ceil`, + `clip`, + `conj`, + `corrcoef`, + `cov`, + `cross`, + `cumprod`, + `cumsum`, + `diff`, + `dot`, + `floor`, + `inner`, + `inv`, + `lexsort`, + `max`, + `maximum`, + `mean`, + `median`, + `min`, + `minimum`, + `nonzero`, + `outer`, + `prod`, + `re`, + `round`, + `sort`, + `std`, + `sum`, + `trace`, + `transpose`, + `var`, + `vdot`, + `vectorize`, + `where` + +Indexing, Slicing and Iterating +------------------------------- + +**One-dimensional** arrays can be indexed, sliced and iterated over, +much like +`lists `__ +and other Python sequences. + + >>> a = np.arange(10)**3 + >>> a + array([ 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]) + >>> a[2] + 8 + >>> a[2:5] + array([ 8, 27, 64]) + >>> a[:6:2] = -1000 # equivalent to a[0:6:2] = -1000; from start to position 6, exclusive, set every 2nd element to -1000 + >>> a + array([-1000, 1, -1000, 27, -1000, 125, 216, 343, 512, 729]) + >>> a[ : :-1] # reversed a + array([ 729, 512, 343, 216, 125, -1000, 27, -1000, 1, -1000]) + >>> for i in a: + ... print(i**(1/3.)) + ... + nan + 1.0 + nan + 3.0 + nan + 5.0 + 6.0 + 7.0 + 8.0 + 9.0 + +**Multidimensional** arrays can have one index per axis. These indices +are given in a tuple separated by commas: + + >>> def f(x,y): + ... return 10*x+y + ... 
+ >>> b = np.fromfunction(f,(5,4),dtype=int) + >>> b + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23], + [30, 31, 32, 33], + [40, 41, 42, 43]]) + >>> b[2,3] + 23 + >>> b[0:5, 1] # each row in the second column of b + array([ 1, 11, 21, 31, 41]) + >>> b[ : ,1] # equivalent to the previous example + array([ 1, 11, 21, 31, 41]) + >>> b[1:3, : ] # each column in the second and third row of b + array([[10, 11, 12, 13], + [20, 21, 22, 23]]) + +When fewer indices are provided than the number of axes, the missing +indices are considered complete slices\ ``:`` + + >>> b[-1] # the last row. Equivalent to b[-1,:] + array([40, 41, 42, 43]) + +The expression within brackets in ``b[i]`` is treated as an ``i`` +followed by as many instances of ``:`` as needed to represent the +remaining axes. NumPy also allows you to write this using dots as +``b[i,...]``. + +The **dots** (``...``) represent as many colons as needed to produce a +complete indexing tuple. For example, if ``x`` is a rank 5 array (i.e., +it has 5 axes), then + +- ``x[1,2,...]`` is equivalent to ``x[1,2,:,:,:]``, +- ``x[...,3]`` to ``x[:,:,:,:,3]`` and +- ``x[4,...,5,:]`` to ``x[4,:,:,5,:]``. + + >>> c = np.array( [[[ 0, 1, 2], # a 3D array (two stacked 2D arrays) + ... [ 10, 12, 13]], + ... [[100,101,102], + ... [110,112,113]]]) + >>> c.shape + (2, 2, 3) + >>> c[1,...] # same as c[1,:,:] or c[1] + array([[100, 101, 102], + [110, 112, 113]]) + >>> c[...,2] # same as c[:,:,2] + array([[ 2, 13], + [102, 113]]) + +**Iterating** over multidimensional arrays is done with respect to the +first axis: + + >>> for row in b: + ... print(row) + ... + [0 1 2 3] + [10 11 12 13] + [20 21 22 23] + [30 31 32 33] + [40 41 42 43] + +However, if one wants to perform an operation on each element in the +array, one can use the ``flat`` attribute which is an +`iterator `__ +over all the elements of the array: + + >>> for element in b.flat: + ... print(element) + ... + 0 + 1 + 2 + 3 + 10 + 11 + 12 + 13 + 20 + 21 + 22 + 23 + 30 + 31 + 32 + 33 + 40 + 41 + 42 + 43 + +.. seealso:: + + :ref:`basics.indexing`, + :ref:`arrays.indexing` (reference), + `newaxis`, + `ndenumerate`, + `indices` + +.. _quickstart.shape-manipulation: + +Shape Manipulation +================== + +Changing the shape of an array +------------------------------ + +An array has a shape given by the number of elements along each axis: + + >>> a = np.floor(10*np.random.random((3,4))) + >>> a + array([[ 2., 8., 0., 6.], + [ 4., 5., 1., 1.], + [ 8., 9., 3., 6.]]) + >>> a.shape + (3, 4) + +The shape of an array can be changed with various commands: + + >>> a.ravel() # flatten the array + array([ 2., 8., 0., 6., 4., 5., 1., 1., 8., 9., 3., 6.]) + >>> a.shape = (6, 2) + >>> a.T + array([[ 2., 0., 4., 1., 8., 3.], + [ 8., 6., 5., 1., 9., 6.]]) + +The order of the elements in the array resulting from ravel() is +normally "C-style", that is, the rightmost index "changes the fastest", +so the element after a[0,0] is a[0,1]. If the array is reshaped to some +other shape, again the array is treated as "C-style". Numpy normally +creates arrays stored in this order, so ravel() will usually not need to +copy its argument, but if the array was made by taking slices of another +array or created with unusual options, it may need to be copied. The +functions ravel() and reshape() can also be instructed, using an +optional argument, to use FORTRAN-style arrays, in which the leftmost +index changes the fastest. 
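+
+For example, using the array ``a`` from above (which now has shape (6, 2)),
+a FORTRAN-style flattening can be requested through the optional ``order``
+argument (a small illustration added here):
+
+    >>> a.ravel(order='F')        # leftmost index changes the fastest
+    array([ 2.,  0.,  4.,  1.,  8.,  3.,  8.,  6.,  5.,  1.,  9.,  6.])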
+
+The `reshape` function returns its
+argument with a modified shape, whereas the
+`ndarray.resize` method modifies the array
+itself:
+
+    >>> a
+    array([[ 2.,  8.],
+           [ 0.,  6.],
+           [ 4.,  5.],
+           [ 1.,  1.],
+           [ 8.,  9.],
+           [ 3.,  6.]])
+    >>> a.resize((2,6))
+    >>> a
+    array([[ 2.,  8.,  0.,  6.,  4.,  5.],
+           [ 1.,  1.,  8.,  9.,  3.,  6.]])
+
+If a dimension is given as -1 in a reshaping operation, the other
+dimensions are automatically calculated:
+
+    >>> a.reshape(3,-1)
+    array([[ 2.,  8.,  0.,  6.],
+           [ 4.,  5.,  1.,  1.],
+           [ 8.,  9.,  3.,  6.]])
+
+.. seealso::
+
+   `ndarray.shape`,
+   `reshape`,
+   `resize`,
+   `ravel`
+
+Stacking together different arrays
+----------------------------------
+
+Several arrays can be stacked together along different axes:
+
+    >>> a = np.floor(10*np.random.random((2,2)))
+    >>> a
+    array([[ 8.,  8.],
+           [ 0.,  0.]])
+    >>> b = np.floor(10*np.random.random((2,2)))
+    >>> b
+    array([[ 1.,  8.],
+           [ 0.,  4.]])
+    >>> np.vstack((a,b))
+    array([[ 8.,  8.],
+           [ 0.,  0.],
+           [ 1.,  8.],
+           [ 0.,  4.]])
+    >>> np.hstack((a,b))
+    array([[ 8.,  8.,  1.,  8.],
+           [ 0.,  0.,  0.,  4.]])
+
+The function `column_stack`
+stacks 1D arrays as columns into a 2D array. It is equivalent to
+`hstack` only for 2D arrays:
+
+    >>> from numpy import newaxis
+    >>> np.column_stack((a,b))   # With 2D arrays
+    array([[ 8.,  8.,  1.,  8.],
+           [ 0.,  0.,  0.,  4.]])
+    >>> a = np.array([4.,2.])
+    >>> b = np.array([2.,8.])
+    >>> a[:,newaxis]             # This allows us to have a 2D column vector
+    array([[ 4.],
+           [ 2.]])
+    >>> np.column_stack((a[:,newaxis],b[:,newaxis]))
+    array([[ 4.,  2.],
+           [ 2.,  8.]])
+    >>> np.vstack((a[:,newaxis],b[:,newaxis])) # The behavior of vstack is different
+    array([[ 4.],
+           [ 2.],
+           [ 2.],
+           [ 8.]])
+
+For arrays with more than two dimensions,
+`hstack` stacks along their second
+axes, `vstack` stacks along their
+first axes, and `concatenate`
+allows for an optional argument giving the number of the axis along
+which the concatenation should happen.
+
+**Note**
+
+In complex cases, `r_` and
+`c_` are useful for creating arrays
+by stacking numbers along one axis. They allow the use of range literals
+(":"):
+
+    >>> np.r_[1:4,0,4]
+    array([1, 2, 3, 0, 4])
+
+When used with arrays as arguments,
+`r_` and
+`c_` are similar to
+`vstack` and
+`hstack` in their default behavior,
+but allow for an optional argument giving the number of the axis along
+which to concatenate.
+
+.. seealso::
+
+   `hstack`,
+   `vstack`,
+   `column_stack`,
+   `concatenate`,
+   `c_`,
+   `r_`
+
+Splitting one array into several smaller ones
+---------------------------------------------
+
+Using `hsplit`, you can split an
+array along its horizontal axis, either by specifying the number of
+equally shaped arrays to return, or by specifying the columns after
+which the division should occur:
+
+    >>> a = np.floor(10*np.random.random((2,12)))
+    >>> a
+    array([[ 9.,  5.,  6.,  3.,  6.,  8.,  0.,  7.,  9.,  7.,  2.,  7.],
+           [ 1.,  4.,  9.,  2.,  2.,  1.,  0.,  6.,  2.,  2.,  4.,  0.]])
+    >>> np.hsplit(a,3)   # Split a into 3
+    [array([[ 9.,  5.,  6.,  3.],
+           [ 1.,  4.,  9.,  2.]]), array([[ 6.,  8.,  0.,  7.],
+           [ 2.,  1.,  0.,  6.]]), array([[ 9.,  7.,  2.,  7.],
+           [ 2.,  2.,  4.,  0.]])]
+    >>> np.hsplit(a,(3,4))   # Split a after the third and the fourth column
+    [array([[ 9.,  5.,  6.],
+           [ 1.,  4.,  9.]]), array([[ 3.],
+           [ 2.]]), array([[ 6.,  8.,  0.,  7.,  9.,  7.,  2.,  7.],
+           [ 2.,  1.,  0.,  6.,  2.,  2.,  4.,  0.]])]
+
+`vsplit` splits along the vertical
+axis, and `array_split` allows
+one to specify along which axis to split.
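+
+For example (a small added illustration), `array_split` with the default
+axis splits a 1D array into pieces that need not be of equal size:
+
+    >>> x = np.arange(8.0)
+    >>> np.array_split(x, 3)     # split x into 3 parts, as equal as possible
+    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]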
+ +Copies and Views +================ + +When operating and manipulating arrays, their data is sometimes copied +into a new array and sometimes not. This is often a source of confusion +for beginners. There are three cases: + +No Copy at All +-------------- + +Simple assignments make no copy of array objects or of their data. + + >>> a = np.arange(12) + >>> b = a # no new object is created + >>> b is a # a and b are two names for the same ndarray object + True + >>> b.shape = 3,4 # changes the shape of a + >>> a.shape + (3, 4) + +Python passes mutable objects as references, so function calls make no +copy. + + >>> def f(x): + ... print(id(x)) + ... + >>> id(a) # id is a unique identifier of an object + 148293216 + >>> f(a) + 148293216 + +View or Shallow Copy +-------------------- + +Different array objects can share the same data. The ``view`` method +creates a new array object that looks at the same data. + + >>> c = a.view() + >>> c is a + False + >>> c.base is a # c is a view of the data owned by a + True + >>> c.flags.owndata + False + >>> + >>> c.shape = 2,6 # a's shape doesn't change + >>> a.shape + (3, 4) + >>> c[0,4] = 1234 # a's data changes + >>> a + array([[ 0, 1, 2, 3], + [1234, 5, 6, 7], + [ 8, 9, 10, 11]]) + +Slicing an array returns a view of it: + + >>> s = a[ : , 1:3] # spaces added for clarity; could also be written "s = a[:,1:3]" + >>> s[:] = 10 # s[:] is a view of s. Note the difference between s=10 and s[:]=10 + >>> a + array([[ 0, 10, 10, 3], + [1234, 10, 10, 7], + [ 8, 10, 10, 11]]) + + +Deep Copy +--------- + +The ``copy`` method makes a complete copy of the array and its data. + + + >>> d = a.copy() # a new array object with new data is created + >>> d is a + False + >>> d.base is a # d doesn't share anything with a + False + >>> d[0,0] = 9999 + >>> a + array([[ 0, 10, 10, 3], + [1234, 10, 10, 7], + [ 8, 10, 10, 11]]) + + +Functions and Methods Overview +------------------------------ + +Here is a list of some useful NumPy functions and methods names +ordered in categories. See :ref:`routines` for the full list. + +Array Creation + `arange`, + `array`, + `copy`, + `empty`, + `empty_like`, + `eye`, + `fromfile`, + `fromfunction`, + `identity`, + `linspace`, + `logspace`, + `mgrid`, + `ogrid`, + `ones`, + `ones_like`, + `r`, + `zeros`, + `zeros_like` +Conversions + `ndarray.astype`, + `atleast_1d`, + `atleast_2d`, + `atleast_3d`, + `mat` +Manipulations + `array_split`, + `column_stack`, + `concatenate`, + `diagonal`, + `dsplit`, + `dstack`, + `hsplit`, + `hstack`, + `ndarray.item`, + `newaxis`, + `ravel`, + `repeat`, + `reshape`, + `resize`, + `squeeze`, + `swapaxes`, + `take`, + `transpose`, + `vsplit`, + `vstack` +Questions + `all`, + `any`, + `nonzero`, + `where` +Ordering + `argmax`, + `argmin`, + `argsort`, + `max`, + `min`, + `ptp`, + `searchsorted`, + `sort` +Operations + `choose`, + `compress`, + `cumprod`, + `cumsum`, + `inner`, + `ndarray.fill`, + `imag`, + `prod`, + `put`, + `putmask`, + `real`, + `sum` +Basic Statistics + `cov`, + `mean`, + `std`, + `var` +Basic Linear Algebra + `cross`, + `dot`, + `outer`, + `linalg.svd`, + `vdot` + +Less Basic +========== + +Broadcasting rules +------------------ + +Broadcasting allows universal functions to deal in a meaningful way with +inputs that do not have exactly the same shape. + +The first rule of broadcasting is that if all input arrays do not have +the same number of dimensions, a "1" will be repeatedly prepended to the +shapes of the smaller arrays until all the arrays have the same number +of dimensions. 
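+
+For example (an illustration added here), when a (2,3) array is combined
+with a 1D array of shape (3,), the 1D array is first treated as if its
+shape were (1,3); the second rule below then stretches it along the first
+axis:
+
+    >>> a = np.ones((2,3))
+    >>> b = np.array([10., 20., 30.])      # shape (3,) acts like (1,3)
+    >>> a + b
+    array([[ 11.,  21.,  31.],
+           [ 11.,  21.,  31.]])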
+ +The second rule of broadcasting ensures that arrays with a size of 1 +along a particular dimension act as if they had the size of the array +with the largest shape along that dimension. The value of the array +element is assumed to be the same along that dimension for the +"broadcast" array. + +After application of the broadcasting rules, the sizes of all arrays +must match. More details can be found in :doc:`basics.broadcasting`. + +Fancy indexing and index tricks +=============================== + +NumPy offers more indexing facilities than regular Python sequences. In +addition to indexing by integers and slices, as we saw before, arrays +can be indexed by arrays of integers and arrays of booleans. + +Indexing with Arrays of Indices +------------------------------- + + >>> a = np.arange(12)**2 # the first 12 square numbers + >>> i = np.array( [ 1,1,3,8,5 ] ) # an array of indices + >>> a[i] # the elements of a at the positions i + array([ 1, 1, 9, 64, 25]) + >>> + >>> j = np.array( [ [ 3, 4], [ 9, 7 ] ] ) # a bidimensional array of indices + >>> a[j] # the same shape as j + array([[ 9, 16], + [81, 49]]) + +When the indexed array ``a`` is multidimensional, a single array of +indices refers to the first dimension of ``a``. The following example +shows this behavior by converting an image of labels into a color image +using a palette. + + >>> palette = np.array( [ [0,0,0], # black + ... [255,0,0], # red + ... [0,255,0], # green + ... [0,0,255], # blue + ... [255,255,255] ] ) # white + >>> image = np.array( [ [ 0, 1, 2, 0 ], # each value corresponds to a color in the palette + ... [ 0, 3, 4, 0 ] ] ) + >>> palette[image] # the (2,4,3) color image + array([[[ 0, 0, 0], + [255, 0, 0], + [ 0, 255, 0], + [ 0, 0, 0]], + [[ 0, 0, 0], + [ 0, 0, 255], + [255, 255, 255], + [ 0, 0, 0]]]) + +We can also give indexes for more than one dimension. The arrays of +indices for each dimension must have the same shape. + + >>> a = np.arange(12).reshape(3,4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> i = np.array( [ [0,1], # indices for the first dim of a + ... [1,2] ] ) + >>> j = np.array( [ [2,1], # indices for the second dim + ... [3,3] ] ) + >>> + >>> a[i,j] # i and j must have equal shape + array([[ 2, 5], + [ 7, 11]]) + >>> + >>> a[i,2] + array([[ 2, 6], + [ 6, 10]]) + >>> + >>> a[:,j] # i.e., a[ : , j] + array([[[ 2, 1], + [ 3, 3]], + [[ 6, 5], + [ 7, 7]], + [[10, 9], + [11, 11]]]) + +Naturally, we can put ``i`` and ``j`` in a sequence (say a list) and +then do the indexing with the list. + + >>> l = [i,j] + >>> a[l] # equivalent to a[i,j] + array([[ 2, 5], + [ 7, 11]]) + +However, we can not do this by putting ``i`` and ``j`` into an array, +because this array will be interpreted as indexing the first dimension +of a. + + >>> s = np.array( [i,j] ) + >>> a[s] # not what we want + Traceback (most recent call last): + File "", line 1, in ? + IndexError: index (3) out of range (0<=index<=2) in dimension 0 + >>> + >>> a[tuple(s)] # same as a[i,j] + array([[ 2, 5], + [ 7, 11]]) + +Another common use of indexing with arrays is the search of the maximum +value of time-dependent series : + + >>> time = np.linspace(20, 145, 5) # time scale + >>> data = np.sin(np.arange(20)).reshape(5,4) # 4 time-dependent series + >>> time + array([ 20. , 51.25, 82.5 , 113.75, 145. ]) + >>> data + array([[ 0. 
,  0.84147098,  0.90929743,  0.14112001],
+           [-0.7568025 , -0.95892427, -0.2794155 ,  0.6569866 ],
+           [ 0.98935825,  0.41211849, -0.54402111, -0.99999021],
+           [-0.53657292,  0.42016704,  0.99060736,  0.65028784],
+           [-0.28790332, -0.96139749, -0.75098725,  0.14987721]])
+    >>>
+    >>> ind = data.argmax(axis=0)      # index of the maxima for each series
+    >>> ind
+    array([2, 0, 3, 1])
+    >>>
+    >>> time_max = time[ ind]          # times corresponding to the maxima
+    >>>
+    >>> data_max = data[ind, range(data.shape[1])]   # => data[ind[0],0], data[ind[1],1]...
+    >>>
+    >>> time_max
+    array([  82.5 ,   20.  ,  113.75,   51.25])
+    >>> data_max
+    array([ 0.98935825,  0.84147098,  0.99060736,  0.6569866 ])
+    >>>
+    >>> np.all(data_max == data.max(axis=0))
+    True
+
+You can also use indexing with arrays as a target to assign to:
+
+    >>> a = np.arange(5)
+    >>> a
+    array([0, 1, 2, 3, 4])
+    >>> a[[1,3,4]] = 0
+    >>> a
+    array([0, 0, 2, 0, 0])
+
+However, when the list of indices contains repetitions, the assignment
+is done several times, leaving behind the last value:
+
+    >>> a = np.arange(5)
+    >>> a[[0,0,2]]=[1,2,3]
+    >>> a
+    array([2, 1, 3, 3, 4])
+
+This is reasonable enough, but watch out if you want to use Python's
+``+=`` construct, as it may not do what you expect:
+
+    >>> a = np.arange(5)
+    >>> a[[0,0,2]]+=1
+    >>> a
+    array([1, 1, 3, 3, 4])
+
+Even though 0 occurs twice in the list of indices, the 0th element is
+only incremented once. This is because Python requires "a+=1" to be
+equivalent to "a=a+1".
+
+Indexing with Boolean Arrays
+----------------------------
+
+When we index arrays with arrays of (integer) indices we are providing
+the list of indices to pick. With boolean indices the approach is
+different; we explicitly choose which items in the array we want and
+which ones we don't.
+
+The most natural way one can think of for boolean indexing is to use
+boolean arrays that have *the same shape* as the original array:
+
+    >>> a = np.arange(12).reshape(3,4)
+    >>> b = a > 4
+    >>> b                             # b is a boolean with a's shape
+    array([[False, False, False, False],
+           [False,  True,  True,  True],
+           [ True,  True,  True,  True]], dtype=bool)
+    >>> a[b]                          # 1d array with the selected elements
+    array([ 5,  6,  7,  8,  9, 10, 11])
+
+This property can be very useful in assignments:
+
+    >>> a[b] = 0                      # All elements of 'a' higher than 4 become 0
+    >>> a
+    array([[0, 1, 2, 3],
+           [4, 0, 0, 0],
+           [0, 0, 0, 0]])
+
+You can look at the following
+example to see
+how to use boolean indexing to generate an image of the `Mandelbrot
+set <http://en.wikipedia.org/wiki/Mandelbrot_set>`__:
+
+.. plot::
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> def mandelbrot( h,w, maxit=20 ):
+    ...     """Returns an image of the Mandelbrot fractal of size (h,w)."""
+    ...     y,x = np.ogrid[ -1.4:1.4:h*1j, -2:0.8:w*1j ]
+    ...     c = x+y*1j
+    ...     z = c
+    ...     divtime = maxit + np.zeros(z.shape, dtype=int)
+    ...
+    ...     for i in range(maxit):
+    ...         z = z**2 + c
+    ...         diverge = z*np.conj(z) > 2**2         # who is diverging
+    ...         div_now = diverge & (divtime==maxit)  # who is diverging now
+    ...         divtime[div_now] = i                  # note when
+    ...         z[diverge] = 2                        # avoid diverging too much
+    ...
+    ...     return divtime
+    >>> plt.imshow(mandelbrot(400,400))
+    >>> plt.show()
+
+The second way of indexing with booleans is more similar to integer
+indexing; for each dimension of the array we give a 1D boolean array
+selecting the slices we want.
+ + >>> a = np.arange(12).reshape(3,4) + >>> b1 = np.array([False,True,True]) # first dim selection + >>> b2 = np.array([True,False,True,False]) # second dim selection + >>> + >>> a[b1,:] # selecting rows + array([[ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> + >>> a[b1] # same thing + array([[ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> + >>> a[:,b2] # selecting columns + array([[ 0, 2], + [ 4, 6], + [ 8, 10]]) + >>> + >>> a[b1,b2] # a weird thing to do + array([ 4, 10]) + +Note that the length of the 1D boolean array must coincide with the +length of the dimension (or axis) you want to slice. In the previous +example, ``b1`` is a 1-rank array with length 3 (the number of *rows* in +``a``), and ``b2`` (of length 4) is suitable to index the 2nd rank +(columns) of ``a``. + +The ix_() function +------------------- + +The `ix_` function can be used to combine different vectors so as to +obtain the result for each n-uplet. For example, if you want to compute +all the a+b\*c for all the triplets taken from each of the vectors a, b +and c: + + >>> a = np.array([2,3,4,5]) + >>> b = np.array([8,5,4]) + >>> c = np.array([5,4,6,8,3]) + >>> ax,bx,cx = np.ix_(a,b,c) + >>> ax + array([[[2]], + [[3]], + [[4]], + [[5]]]) + >>> bx + array([[[8], + [5], + [4]]]) + >>> cx + array([[[5, 4, 6, 8, 3]]]) + >>> ax.shape, bx.shape, cx.shape + ((4, 1, 1), (1, 3, 1), (1, 1, 5)) + >>> result = ax+bx*cx + >>> result + array([[[42, 34, 50, 66, 26], + [27, 22, 32, 42, 17], + [22, 18, 26, 34, 14]], + [[43, 35, 51, 67, 27], + [28, 23, 33, 43, 18], + [23, 19, 27, 35, 15]], + [[44, 36, 52, 68, 28], + [29, 24, 34, 44, 19], + [24, 20, 28, 36, 16]], + [[45, 37, 53, 69, 29], + [30, 25, 35, 45, 20], + [25, 21, 29, 37, 17]]]) + >>> result[3,2,4] + 17 + >>> a[3]+b[2]*c[4] + 17 + +You could also implement the reduce as follows: + + >>> def ufunc_reduce(ufct, *vectors): + ... vs = np.ix_(*vectors) + ... r = ufct.identity + ... for v in vs: + ... r = ufct(r,v) + ... return r + +and then use it as: + + >>> ufunc_reduce(np.add,a,b,c) + array([[[15, 14, 16, 18, 13], + [12, 11, 13, 15, 10], + [11, 10, 12, 14, 9]], + [[16, 15, 17, 19, 14], + [13, 12, 14, 16, 11], + [12, 11, 13, 15, 10]], + [[17, 16, 18, 20, 15], + [14, 13, 15, 17, 12], + [13, 12, 14, 16, 11]], + [[18, 17, 19, 21, 16], + [15, 14, 16, 18, 13], + [14, 13, 15, 17, 12]]]) + +The advantage of this version of reduce compared to the normal +ufunc.reduce is that it makes use of the `Broadcasting +Rules `__ +in order to avoid creating an argument array the size of the output +times the number of vectors. + +Indexing with strings +--------------------- + +See `RecordArrays `__. + +Linear Algebra +============== + +Work in progress. Basic linear algebra to be included here. + +Simple Array Operations +----------------------- + +See linalg.py in numpy folder for more. + + >>> import numpy as np + >>> a = np.array([[1.0, 2.0], [3.0, 4.0]]) + >>> print(a) + [[ 1. 2.] + [ 3. 4.]] + + >>> a.transpose() + array([[ 1., 3.], + [ 2., 4.]]) + + >>> np.linalg.inv(a) + array([[-2. , 1. 
], + [ 1.5, -0.5]]) + + >>> u = np.eye(2) # unit 2x2 matrix; "eye" represents "I" + >>> u + array([[ 1., 0.], + [ 0., 1.]]) + >>> j = np.array([[0.0, -1.0], [1.0, 0.0]]) + + >>> np.dot (j, j) # matrix product + array([[-1., 0.], + [ 0., -1.]]) + + >>> np.trace(u) # trace + 2.0 + + >>> y = np.array([[5.], [7.]]) + >>> np.linalg.solve(a, y) + array([[-3.], + [ 4.]]) + + >>> np.linalg.eig(j) + (array([ 0.+1.j, 0.-1.j]), array([[ 0.70710678+0.j , 0.70710678-0.j ], + [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])) + +:: + + Parameters: + square matrix + Returns + The eigenvalues, each repeated according to its multiplicity. + The normalized (unit "length") eigenvectors, such that the + column ``v[:,i]`` is the eigenvector corresponding to the + eigenvalue ``w[i]`` . + +Tricks and Tips +=============== + +Here we give a list of short and useful tips. + +"Automatic" Reshaping +--------------------- + +To change the dimensions of an array, you can omit one of the sizes +which will then be deduced automatically: + + >>> a = np.arange(30) + >>> a.shape = 2,-1,3 # -1 means "whatever is needed" + >>> a.shape + (2, 5, 3) + >>> a + array([[[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8], + [ 9, 10, 11], + [12, 13, 14]], + [[15, 16, 17], + [18, 19, 20], + [21, 22, 23], + [24, 25, 26], + [27, 28, 29]]]) + +Vector Stacking +--------------- + +How do we construct a 2D array from a list of equally-sized row vectors? +In MATLAB this is quite easy: if ``x`` and ``y`` are two vectors of the +same length you only need do ``m=[x;y]``. In NumPy this works via the +functions ``column_stack``, ``dstack``, ``hstack`` and ``vstack``, +depending on the dimension in which the stacking is to be done. For +example: + +:: + + x = np.arange(0,10,2) # x=([0,2,4,6,8]) + y = np.arange(5) # y=([0,1,2,3,4]) + m = np.vstack([x,y]) # m=([[0,2,4,6,8], + # [0,1,2,3,4]]) + xy = np.hstack([x,y]) # xy =([0,2,4,6,8,0,1,2,3,4]) + +The logic behind those functions in more than two dimensions can be +strange. + +.. seealso:: + + :doc:`numpy-for-matlab-users` + +Histograms +---------- + +The NumPy ``histogram`` function applied to an array returns a pair of +vectors: the histogram of the array and the vector of bins. Beware: +``matplotlib`` also has a function to build histograms (called ``hist``, +as in Matlab) that differs from the one in NumPy. The main difference is +that ``pylab.hist`` plots the histogram automatically, while +``numpy.histogram`` only generates the data. + +.. plot:: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> # Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2 + >>> mu, sigma = 2, 0.5 + >>> v = np.random.normal(mu,sigma,10000) + >>> # Plot a normalized histogram with 50 bins + >>> plt.hist(v, bins=50, normed=1) # matplotlib version (plot) + >>> plt.show() + >>> # Compute the histogram with numpy and then plot it + >>> (n, bins) = np.histogram(v, bins=50, normed=True) # NumPy version (no plot) + >>> plt.plot(.5*(bins[1:]+bins[:-1]), n) + >>> plt.show() + + +Further reading +=============== + +- The `Python tutorial `__ +- :ref:`reference` +- `SciPy Tutorial `__ +- `SciPy Lecture Notes `__ +- A `matlab, R, IDL, NumPy/SciPy dictionary `__ diff --git a/doc/source/user/setting-up.rst b/doc/source/user/setting-up.rst new file mode 100644 index 000000000000..f70dacf82d62 --- /dev/null +++ b/doc/source/user/setting-up.rst @@ -0,0 +1,9 @@ +********** +Setting up +********** + +.. 
toctree:: + :maxdepth: 1 + + whatisnumpy + install From 4b1f1243fb2b2eb35adf50a26b0c4d2bcc3a7709 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 25 Oct 2015 16:24:06 +0200 Subject: [PATCH 131/496] DOC: import "numpy for matlab users" from the wiki Might be useful to someone. Excised too opinionated parts and replaced most links to specific software with the topical software page links. --- doc/source/user/index.rst | 1 + doc/source/user/numpy-for-matlab-users.rst | 695 +++++++++++++++++++++ 2 files changed, 696 insertions(+) create mode 100644 doc/source/user/numpy-for-matlab-users.rst diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 9f45b68d668a..a45fec9ecb6d 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -16,5 +16,6 @@ classes contained in the package, see the :ref:`reference`. quickstart basics misc + numpy-for-matlab-users building c-info diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst new file mode 100644 index 000000000000..2b8f4374955e --- /dev/null +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -0,0 +1,695 @@ +.. _numpy-for-matlab-users: + +====================== +Numpy for Matlab users +====================== + +Introduction +============ + +MATLAB® and NumPy/SciPy have a lot in common. But +there are many differences. NumPy and SciPy were created to do numerical +and scientific computing in the most natural way with Python, not to be +MATLAB® clones. This page is intended to be a place to collect wisdom +about the differences, mostly for the purpose of helping proficient +MATLAB® users become proficient NumPy and SciPy users. + +.. raw:: html + + + +Some Key Differences +==================== + +.. list-table:: + + * - In MATLAB®, the basic data type is a multidimensional array of double precision floating point numbers. Most expressions take such arrays and return such arrays. Operations on the 2-D instances of these arrays are designed to act more or less like matrix operations in linear algebra. + - In NumPy the basic type is a multidimensional ``array``. Operations on these arrays in all dimensionalities including 2D are elementwise operations. However, there is a special ``matrix`` type for doing linear algebra, which is just a subclass of the ``array`` class. Operations on matrix-class arrays are linear algebra operations. + + * - MATLAB® uses 1 (one) based indexing. The initial element of a sequence is found using a(1). + :ref:`See note INDEXING ` + - Python uses 0 (zero) based indexing. The initial element of a sequence is found using a[0]. + + * - MATLAB®'s scripting language was created for doing linear algebra. The syntax for basic matrix operations is nice and clean, but the API for adding GUIs and making full-fledged applications is more or less an afterthought. + - NumPy is based on Python, which was designed from the outset to be an excellent general-purpose programming language. While Matlab's syntax for some array manipulations is more compact than NumPy's, NumPy (by virtue of being an add-on to Python) can do many things that Matlab just cannot, for instance subclassing the main array type to do both array and matrix math cleanly. + + * - In MATLAB®, arrays have pass-by-value semantics, with a lazy copy-on-write scheme to prevent actually creating copies until they are actually needed. Slice operations copy parts of the array. + - In NumPy arrays have pass-by-reference semantics. Slice operations are views into an array. + + +'array' or 'matrix'? 
Which should I use?
+========================================
+
+NumPy provides, in addition to ``np.ndarray``, an additional matrix type
+that you may see used in some existing code. Which one should you use?
+
+Short answer
+------------
+
+**Use arrays**.
+
+- They are the standard vector/matrix/tensor type of numpy. Many numpy
+  functions return arrays, not matrices.
+- There is a clear distinction between element-wise operations and
+  linear algebra operations.
+- You can have standard vectors or row/column vectors if you like.
+
+The only disadvantage of using the array type is that you will have to
+use ``dot`` instead of ``*`` to multiply (reduce) two tensors (scalar
+product, matrix vector multiplication etc.).
+
+Long answer
+-----------
+
+Numpy contains both an ``array`` class and a ``matrix`` class. The
+``array`` class is intended to be a general-purpose n-dimensional array
+for many kinds of numerical computing, while ``matrix`` is intended to
+facilitate linear algebra computations specifically. In practice there
+are only a handful of key differences between the two.
+
+- Operator ``*``, ``dot()``, and ``multiply()``:
+
+  - For ``array``, **'``*``\ ' means element-wise multiplication**,
+    and the ``dot()`` function is used for matrix multiplication.
+  - For ``matrix``, **'``*``\ ' means matrix multiplication**, and the
+    ``multiply()`` function is used for element-wise multiplication.
+
+- Handling of vectors (rank-1 arrays)
+
+  - For ``array``, the **vector shapes 1xN, Nx1, and N are all
+    different things**. Operations like ``A[:,1]`` return a rank-1
+    array of shape N, not a rank-2 array of shape Nx1. Transpose on a
+    rank-1 ``array`` does nothing.
+  - For ``matrix``, **rank-1 arrays are always upconverted to 1xN or
+    Nx1 matrices** (row or column vectors). ``A[:,1]`` returns a
+    rank-2 matrix of shape Nx1.
+
+- Handling of higher-rank arrays (rank > 2)
+
+  - ``array`` objects **can have rank > 2**.
+  - ``matrix`` objects **always have exactly rank 2**.
+
+- Convenience attributes
+
+  - ``array`` **has a .T attribute**, which returns the transpose of
+    the data.
+  - ``matrix`` **also has .H, .I, and .A attributes**, which return
+    the conjugate transpose, inverse, and ``asarray()`` of the matrix,
+    respectively.
+
+- Convenience constructor
+
+  - The ``array`` constructor **takes (nested) Python sequences as
+    initializers**. As in, ``array([[1,2,3],[4,5,6]])``.
+  - The ``matrix`` constructor additionally **takes a convenient
+    string initializer**. As in ``matrix("[1 2 3; 4 5 6]")``.
+
+There are pros and cons to using both:
+
+- ``array``
+
+  - ``:)`` You can treat rank-1 arrays as *either* row or column
+    vectors. ``dot(A,v)`` treats ``v`` as a column vector, while
+    ``dot(v,A)`` treats ``v`` as a row vector. This can save you
+    having to type a lot of transposes.
+  - ``<:(`` Having to use the ``dot()`` function for matrix-multiply is
+    messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``.
+  - ``:)`` Element-wise multiplication is easy: ``A*B``.
+  - ``:)`` ``array`` is the "default" NumPy type, so it gets the most
+    testing, and is the type most likely to be returned by 3rd party
+    code that uses NumPy.
+  - ``:)`` Is quite at home handling data of any rank.
+  - ``:)`` Closer in semantics to tensor algebra, if you are familiar
+    with that.
+  - ``:)`` *All* operations (``*``, ``/``, ``+``, ``**`` etc.) are
+    elementwise
+
+- ``matrix``
+
+  - ``:\\`` Behavior is more like that of MATLAB® matrices.
+  - ``<:(`` Maximum of rank-2. 
To hold rank-3 data you need ``array`` or
+    perhaps a Python list of ``matrix``.
+  - ``<:(`` Minimum of rank-2. You cannot have vectors. They must be
+    cast as single-column or single-row matrices.
+  - ``<:(`` Since ``array`` is the default in NumPy, some functions may
+    return an ``array`` even if you give them a ``matrix`` as an
+    argument. This shouldn't happen with NumPy functions (if it does
+    it's a bug), but 3rd party code based on NumPy may not honor type
+    preservation like NumPy does.
+  - ``:)`` ``A*B`` is matrix multiplication, so more convenient for
+    linear algebra.
+  - ``<:(`` Element-wise multiplication requires calling a function,
+    ``multiply(A,B)``.
+  - ``<:(`` The use of operator overloading is a bit illogical: ``*``
+    does not work elementwise but ``/`` does.
+
+The ``array`` is thus much more advisable to use.
+
+Facilities for Matrix Users
+===========================
+
+Numpy has some features that facilitate the use of the ``matrix`` type,
+which hopefully make things easier for Matlab converts.
+
+- A ``matlib`` module has been added that contains matrix versions of
+  common array constructors like ``ones()``, ``zeros()``, ``empty()``,
+  ``eye()``, ``rand()``, ``repmat()``, etc. Normally these functions
+  return ``array``\ s, but the ``matlib`` versions return ``matrix``
+  objects.
+- ``mat`` has been changed to be a synonym for ``asmatrix``, rather
+  than ``matrix``, thus making it a concise way to convert an ``array``
+  to a ``matrix`` without copying the data.
+- Some top-level functions have been removed. For example
+  ``numpy.rand()`` now needs to be accessed as ``numpy.random.rand()``.
+  Or use the ``rand()`` from the ``matlib`` module. But the
+  "numpythonic" way is to use ``numpy.random.random()``, which takes a
+  tuple for the shape, like other numpy functions.
+
+Table of Rough MATLAB-NumPy Equivalents
+=======================================
+
+The table below gives rough equivalents for some common MATLAB®
+expressions. **These are not exact equivalents**, but rather should be
+taken as hints to get you going in the right direction. For more detail
+read the built-in documentation on the NumPy functions.
+
+Some care is necessary when writing functions that take arrays or
+matrices as arguments --- if you are expecting an ``array`` and are
+given a ``matrix``, or vice versa, then '\*' (multiplication) will give
+you unexpected results. You can convert back and forth between arrays
+and matrices using
+
+- ``asarray``: always returns an object of type ``array``
+- ``asmatrix`` or ``mat``: always returns an object of type
+  ``matrix``
+- ``asanyarray``: always returns an ``array`` object or a subclass
+  derived from it, depending on the input. For instance if you pass in
+  a ``matrix`` it returns a ``matrix``.
+
+These functions all accept both arrays and matrices (among other things
+like Python lists), and thus are useful when writing functions that
+should accept any array-like object.
+
+In the table below, it is assumed that you have executed the following
+commands in Python:
+
+::
+
+    from numpy import *
+    import scipy.linalg
+
+Also assume below that if the Notes talk about "matrix" that the
+arguments are rank 2 entities.
+
+General Purpose Equivalents
+---------------------------
+
+.. 
list-table:: + :header-rows: 1 + + * - **MATLAB** + - **numpy** + - **Notes** + * - ``help func`` + - ``info(func)`` or ``help(func)`` or ``func?`` (in Ipython) + - get help on the function *func* + * - ``which func`` + - `see note HELP `__ + - find out where *func* is defined + * - ``type func`` + - ``source(func)`` or ``func??`` (in Ipython) + - print source for *func* (if not a native function) + * - ``a && b`` + - ``a and b`` + - short-circuiting logical AND operator (Python native operator); scalar arguments only + * - ``a || b`` + - ``a or b`` + - short-circuiting logical OR operator (Python native operator); scalar arguments only + * - ``1*i``, ``1*j``, ``1i``, ``1j`` + - ``1j`` + - complex numbers + * - ``eps`` + - ``np.spacing(1)`` + - Distance between 1 and the nearest floating point number + * - ``ode45`` + - ``scipy.integrate.ode(f).set_integrator('dopri5')`` + - integrate an ODE with Runge-Kutta 4,5 + * - ``ode15s`` + - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=15)`` + - integrate an ODE with BDF method + +Linear Algebra Equivalents +-------------------------- + +.. list-table:: + :header-rows: 1 + + * - MATLAB + - NumPy + - Notes + + * - ``ndims(a)`` + - ``ndim(a)`` or ``a.ndim`` + - get the number of dimensions of a (tensor rank) + + * - ``numel(a)`` + - ``size(a)`` or ``a.size`` + - get the number of elements of an array + + * - ``size(a)`` + - ``shape(a)`` or ``a.shape`` + - get the "size" of the matrix + + * - ``size(a,n)`` + - ``a.shape[n-1]`` + - get the number of elements of the n-th dimension of array a. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING `) + + * - ``[ 1 2 3; 4 5 6 ]`` + - ``array([[1.,2.,3.], [4.,5.,6.]])`` + - 2x3 matrix literal + + * - ``[ a b; c d ]`` + - ``vstack([hstack([a,b]), hstack([c,d])])`` or + ``bmat('a b; c d').A`` + - construct a matrix from blocks a,b,c, and d + + * - ``a(end)`` + - ``a[-1]`` + - access last element in the 1xn matrix ``a`` + + * - ``a(2,5)`` + - ``a[1,4]`` + - access element in second row, fifth column + + * - ``a(2,:)`` + - ``a[1]`` or ``a[1,:]`` + - entire second row of ``a`` + + * - ``a(1:5,:)`` + - ``a[0:5]`` or ``a[:5]`` or ``a[0:5,:]`` + - the first five rows of ``a`` + + * - ``a(end-4:end,:)`` + - ``a[-5:]`` + - the last five rows of ``a`` + + * - ``a(1:3,5:9)`` + - ``a[0:3][:,4:9]`` + - rows one to three and columns five to nine of ``a``. This gives read-only access. + + * - ``a([2,4,5],[1,3])`` + - ``a[ix_([1,3,4],[0,2])]`` + - rows 2,4 and 5 and columns 1 and 3. This allows the matrix to be modified, and doesn't require a regular slice. 
+
+  * - ``a(3:2:21,:)``
+    - ``a[ 2:21:2,:]``
+    - every other row of ``a``, starting with the third and going to the twenty-first
+
+  * - ``a(1:2:end,:)``
+    - ``a[ ::2,:]``
+    - every other row of ``a``, starting with the first
+
+  * - ``a(end:-1:1,:)`` or ``flipud(a)``
+    - ``a[ ::-1,:]``
+    - ``a`` with rows in reverse order
+
+  * - ``a([1:end 1],:)``
+    - ``a[r_[:len(a),0]]``
+    - ``a`` with copy of the first row appended to the end
+
+  * - ``a.'``
+    - ``a.transpose()`` or ``a.T``
+    - transpose of ``a``
+
+  * - ``a'``
+    - ``a.conj().transpose()`` or ``a.conj().T``
+    - conjugate transpose of ``a``
+
+  * - ``a * b``
+    - ``a.dot(b)``
+    - matrix multiply
+
+  * - ``a .* b``
+    - ``a * b``
+    - element-wise multiply
+
+  * - ``a./b``
+    - ``a/b``
+    - element-wise divide
+
+  * - ``a.^3``
+    - ``a**3``
+    - element-wise exponentiation
+
+  * - ``(a>0.5)``
+    - ``(a>0.5)``
+    - matrix whose i,jth element is (a_ij > 0.5)
+
+  * - ``find(a>0.5)``
+    - ``nonzero(a>0.5)``
+    - find the indices where (a > 0.5)
+
+  * - ``a(:,find(v>0.5))``
+    - ``a[:,nonzero(v>0.5)[0]]``
+    - extract the columns of a where vector v > 0.5
+
+  * - ``a(:,find(v>0.5))``
+    - ``a[:,v.T>0.5]``
+    - extract the columns of a where column vector v > 0.5
+
+  * - ``a(a<0.5)=0``
+    - ``a[a<0.5]=0``
+    - a with elements less than 0.5 zeroed out
+
+  * - ``a .* (a>0.5)``
+    - ``a * (a>0.5)``
+    - a with elements less than 0.5 zeroed out
+
+  * - ``a(:) = 3``
+    - ``a[:] = 3``
+    - set all values to the same scalar value
+
+  * - ``y=x``
+    - ``y = x.copy()``
+    - numpy assigns by reference
+
+  * - ``y=x(2,:)``
+    - ``y = x[1,:].copy()``
+    - numpy slices are by reference
+
+  * - ``y=x(:)``
+    - ``y = x.flatten(1)``
+    - turn array into vector (note that this forces a copy)
+
+  * - ``1:10``
+    - ``arange(1.,11.)`` or ``r_[1.:11.]`` or ``r_[1:10:10j]``
+    - create an increasing vector (see note :ref:`RANGES <numpy-for-matlab-users.notes>`)
+
+  * - ``0:9``
+    - ``arange(10.)`` or ``r_[:10.]`` or ``r_[:9:10j]``
+    - create an increasing vector (see note :ref:`RANGES <numpy-for-matlab-users.notes>`)
+
+  * - ``[1:10]'``
+    - ``arange(1.,11.)[:, newaxis]``
+    - create a column vector
+
+  * - ``zeros(3,4)``
+    - ``zeros((3,4))``
+    - 3x4 rank-2 array full of 64-bit floating point zeros
+
+  * - ``zeros(3,4,5)``
+    - ``zeros((3,4,5))``
+    - 3x4x5 rank-3 array full of 64-bit floating point zeros
+
+  * - ``ones(3,4)``
+    - ``ones((3,4))``
+    - 3x4 rank-2 array full of 64-bit floating point ones
+
+  * - ``eye(3)``
+    - ``eye(3)``
+    - 3x3 identity matrix
+
+  * - ``diag(a)``
+    - ``diag(a)``
+    - vector of diagonal elements of a
+
+  * - ``diag(a,0)``
+    - ``diag(a,0)``
+    - square diagonal matrix whose nonzero values are the elements of a
+
+  * - ``rand(3,4)``
+    - ``random.rand(3,4)``
+    - random 3x4 matrix
+
+  * - ``linspace(1,3,4)``
+    - ``linspace(1,3,4)``
+    - 4 equally spaced samples between 1 and 3, inclusive
+
+  * - ``[x,y]=meshgrid(0:8,0:5)``
+    - ``mgrid[0:9.,0:6.]`` or ``meshgrid(r_[0:9.],r_[0:6.])``
+    - two 2D arrays: one of x values, the other of y values
+
+  * -
+    - ``ogrid[0:9.,0:6.]`` or ``ix_(r_[0:9.],r_[0:6.])``
+    - the best way to eval functions on a grid
+
+  * - ``[x,y]=meshgrid([1,2,4],[2,4,5])``
+    - ``meshgrid([1,2,4],[2,4,5])``
+    -
+
+  * -
+    - ``ix_([1,2,4],[2,4,5])``
+    - the best way to eval functions on a grid
+
+  * - ``repmat(a, m, n)``
+    - ``tile(a, (m, n))``
+    - create m by n copies of a
+
+  * - ``[a b]``
+    - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or ``column_stack((a,b))`` or ``c_[a,b]``
+    - concatenate columns of ``a`` and ``b``
+
+  * - ``[a; b]``
+    - ``concatenate((a,b))`` or ``vstack((a,b))`` or ``r_[a,b]``
+    - concatenate 
rows of a and b
+
+  * - ``max(max(a))``
+    - ``a.max()``
+    - maximum element of a (with ndims(a)<=2 for matlab)
+
+  * - ``max(a)``
+    - ``a.max(0)``
+    - maximum element of each column of matrix a
+
+  * - ``max(a,[],2)``
+    - ``a.max(1)``
+    - maximum element of each row of matrix a
+
+  * - ``max(a,b)``
+    - ``maximum(a, b)``
+    - compares a and b element-wise, and returns the maximum value from each pair
+
+  * - ``norm(v)``
+    - ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)``
+    - L2 norm of vector v
+
+  * - ``a & b``
+    - ``logical_and(a,b)``
+    - element-by-element AND operator (Numpy ufunc) :ref:`See note LOGICOPS <numpy-for-matlab-users.notes>`
+
+  * - ``a | b``
+    - ``logical_or(a,b)``
+    - element-by-element OR operator (Numpy ufunc) :ref:`See note LOGICOPS <numpy-for-matlab-users.notes>`
+
+  * - ``bitand(a,b)``
+    - ``a & b``
+    - bitwise AND operator (Python native and Numpy ufunc)
+
+  * - ``bitor(a,b)``
+    - ``a | b``
+    - bitwise OR operator (Python native and Numpy ufunc)
+
+  * - ``inv(a)``
+    - ``linalg.inv(a)``
+    - inverse of square matrix a
+
+  * - ``pinv(a)``
+    - ``linalg.pinv(a)``
+    - pseudo-inverse of matrix a
+
+  * - ``rank(a)``
+    - ``linalg.matrix_rank(a)``
+    - rank of a matrix a
+
+  * - ``a\b``
+    - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise
+    - solution of a x = b for x
+
+  * - ``b/a``
+    - Solve a.T x.T = b.T instead
+    - solution of x a = b for x
+
+  * - ``[U,S,V]=svd(a)``
+    - ``U, S, Vh = linalg.svd(a), V = Vh.T``
+    - singular value decomposition of a
+
+  * - ``chol(a)``
+    - ``linalg.cholesky(a).T``
+    - cholesky factorization of a matrix (chol(a) in matlab returns an upper triangular matrix, but linalg.cholesky(a) returns a lower triangular matrix)
+
+  * - ``[V,D]=eig(a)``
+    - ``D,V = linalg.eig(a)``
+    - eigenvalues and eigenvectors of a
+
+  * - ``[V,D]=eig(a,b)``
+    - ``D,V = scipy.linalg.eig(a,b)``
+    - eigenvalues and eigenvectors of a, b
+
+  * - ``[V,D]=eigs(a,k)``
+    -
+    - find the k largest eigenvalues and eigenvectors of a
+
+  * - ``[Q,R,P]=qr(a,0)``
+    - ``Q,R = scipy.linalg.qr(a)``
+    - QR decomposition
+
+  * - ``[L,U,P]=lu(a)``
+    - ``L,U = scipy.linalg.lu(a)`` or ``LU,P=scipy.linalg.lu_factor(a)``
+    - LU decomposition (note: P(Matlab) == transpose(P(numpy)) )
+
+  * - ``conjgrad``
+    - ``scipy.sparse.linalg.cg``
+    - Conjugate gradients solver
+
+  * - ``fft(a)``
+    - ``fft(a)``
+    - Fourier transform of a
+
+  * - ``ifft(a)``
+    - ``ifft(a)``
+    - inverse Fourier transform of a
+
+  * - ``sort(a)``
+    - ``sort(a)`` or ``a.sort()``
+    - sort the matrix
+
+  * - ``[b,I] = sortrows(a,i)``
+    - ``I = argsort(a[:,i]), b=a[I,:]``
+    - sort the rows of the matrix
+
+  * - ``regress(y,X)``
+    - ``linalg.lstsq(X,y)``
+    - multilinear regression
+
+  * - ``decimate(x, q)``
+    - ``scipy.signal.resample(x, len(x)/q)``
+    - downsample with low-pass filtering
+
+  * - ``unique(a)``
+    - ``unique(a)``
+    -
+
+  * - ``squeeze(a)``
+    - ``a.squeeze()``
+    -
+
+.. _numpy-for-matlab-users.notes:
+
+Notes
+=====
+
+\ **Submatrix**: Assignment to a submatrix can be done with lists of
+indexes using the ``ix_`` command. E.g., for 2d array ``a``, one might
+do: ``ind=[1,3]; a[np.ix_(ind,ind)]+=100``.
+
+\ **HELP**: There is no direct equivalent of MATLAB's ``which`` command,
+but the commands ``help`` and ``source`` will usually list the filename
+where the function is located. Python also has an ``inspect`` module (do
+``import inspect``) which provides a ``getfile`` that often works.
+
+\ **INDEXING**: MATLAB® uses one based indexing, so the initial element
+of a sequence has index 1. 
Python uses zero based indexing, so the
+initial element of a sequence has index 0. Confusion and flamewars arise
+because each has advantages and disadvantages. One based indexing is
+consistent with common human language usage, where the "first" element
+of a sequence has index 1. Zero based indexing `simplifies
+indexing `__.
+See also `a text by prof.dr. Edsger W.
+Dijkstra `__.
+
+\ **RANGES**: In MATLAB®, ``0:5`` can be used as both a range literal
+and a 'slice' index (inside parentheses); however, in Python, constructs
+like ``0:5`` can *only* be used as a slice index (inside square
+brackets). Thus the somewhat quirky ``r_`` object was created to allow
+numpy to have a similarly terse range construction mechanism. Note that
+``r_`` is not called like a function or a constructor, but rather
+*indexed* using square brackets, which allows the use of Python's slice
+syntax in the arguments.
+
+\ **LOGICOPS**: & or \| in Numpy is bitwise AND/OR, while in Matlab &
+and \| are logical AND/OR. The distinction is easy to miss: the two can
+appear to work the same, but there are important differences. Where you
+would have used Matlab's & or \| operators, you should use the Numpy ufuncs
+logical\_and/logical\_or. The notable differences between Matlab's and
+Numpy's & and \| operators are:
+
+- Non-logical {0,1} inputs: Numpy's output is the bitwise AND of the
+  inputs. Matlab treats any non-zero value as 1 and returns the logical
+  AND. For example (3 & 4) in Numpy is 0, while in Matlab both 3 and 4
+  are considered logical true and (3 & 4) returns 1.
+- Precedence: Numpy's & operator is higher precedence than logical
+  operators like < and >; Matlab's is the reverse.
+
+If you know you have boolean arguments, you can get away with using
+Numpy's bitwise operators, but be careful with parentheses, like this: z
+= (x > 1) & (x < 2). The absence of Numpy operator forms of logical\_and
+and logical\_or is an unfortunate consequence of Python's design.
+
+**RESHAPE and LINEAR INDEXING**: Matlab always allows multi-dimensional
+arrays to be accessed using scalar or linear indices; Numpy does not.
+Linear indices are common in Matlab programs, e.g. find() on a matrix
+returns them, whereas Numpy's nonzero returns a tuple of per-axis index
+arrays instead. When converting
+Matlab code it might be necessary to first reshape a matrix to a linear
+sequence, perform some indexing operations and then reshape back. As
+reshape (usually) produces views onto the same storage, it should be
+possible to do this fairly efficiently. Note that the scan order used by
+reshape in Numpy defaults to the 'C' order, whereas Matlab uses the
+Fortran order. If you are simply converting to a linear sequence and
+back this doesn't matter. But if you are converting reshapes from Matlab
+code which relies on the scan order, then this Matlab code: z =
+reshape(x,3,4); should become z = x.reshape(3,4,order='F').copy() in
+Numpy.
+
+Customizing Your Environment
+============================
+
+In MATLAB® the main tool available to you for customizing the
+environment is to modify the search path with the locations of your
+favorite functions. You can put such customizations into a startup
+script that MATLAB will run on startup.
+
+NumPy, or rather Python, has similar facilities.
+
+- To modify your Python search path to include the locations of your
+  own modules, define the ``PYTHONPATH`` environment variable.
+- To have a particular script file executed when the interactive Python
+  interpreter is started, define the ``PYTHONSTARTUP`` environment
+  variable to contain the name of your startup script.
+
+Unlike MATLAB®, where anything on your path can be called immediately,
+with Python you need to first do an 'import' statement to make functions
+in a particular file accessible.
+
+For example, you might make a startup script that looks like this (Note:
+this is just an example, not a statement of "best practices"):
+
+::
+
+    # Make all numpy available via shorter 'num' prefix
+    import numpy as num
+    # Make all matlib functions accessible at the top level via M.func()
+    import numpy.matlib as M
+    # Make some matlib functions accessible directly at the top level via, e.g. rand(3,3)
+    from numpy.matlib import rand,zeros,ones,empty,eye
+    # Define a Hermitian function
+    def hermitian(A, **kwargs):
+        return num.transpose(A,**kwargs).conj()
+    # Make some shortcuts for transpose, hermitian:
+    #    num.transpose(A) --> T(A)
+    #    hermitian(A) --> H(A)
+    T = num.transpose
+    H = hermitian
+
+Links
+=====
+
+See http://mathesaurus.sf.net/ for another MATLAB®/NumPy
+cross-reference.
+
+An extensive list of tools for scientific work with python can be
+found in the `topical software page `__.
+
+MATLAB® and SimuLink® are registered trademarks of The MathWorks.

From 69e1fb6a9f97c93648442d99ab383d44c60d9d6d Mon Sep 17 00:00:00 2001
From: Yash Mehrotra
Date: Mon, 26 Oct 2015 09:49:40 +0530
Subject: [PATCH 132/496] BUG: Fixed partition errors on empty input. Closes
 #6530

---
 numpy/core/src/multiarray/item_selection.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index ec0717bd6fe4..64fa70b6da07 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -809,7 +809,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
     PyArrayIterObject *it;
     npy_intp size;

-    int ret = -1;
+    int ret = 0;

     NPY_BEGIN_THREADS_DEF;

@@ -829,6 +829,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
     if (needcopy) {
         buffer = PyDataMem_NEW(N * elsize);
         if (buffer == NULL) {
+            ret = -1;
             goto fail;
         }
     }
@@ -947,7 +948,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
     PyArrayIterObject *it, *rit;
     npy_intp size;

-    int ret = -1;
+    int ret = 0;

     NPY_BEGIN_THREADS_DEF;

@@ -969,6 +970,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
     it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis);
     rit = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)rop, &axis);
     if (it == NULL || rit == NULL) {
+        ret = -1;
         goto fail;
     }
     size = it->size;
@@ -978,6 +980,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
     if (needcopy) {
         valbuffer = PyDataMem_NEW(N * elsize);
         if (valbuffer == NULL) {
+            ret = -1;
             goto fail;
         }
     }
@@ -985,6 +988,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
     if (needidxbuffer) {
         idxbuffer = (npy_intp *)PyDataMem_NEW(N * sizeof(npy_intp));
         if (idxbuffer == NULL) {
+            ret = -1;
             goto fail;
         }
     }

From 83d5f9a331543fa0748708972a5e6b7c5dcbcb03 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 26 Oct 2015 14:59:57 -0600
Subject: [PATCH 133/496] BUG: Revert some import * fixes in f2py.
The files

    * capi_maps.py
    * crackfortran.py
    * f90mod_rules.py

previously used `from .auxfuncs import *` and also called `eval` without
an explicit environment. An attempt to use explicit imports led to
errors, and because static code analysis is not sufficient to determine
what functions need to be imported, it is safest to continue using
`import *` pending a major refactoring of f2py. Closes #6563.

---
 numpy/f2py/capi_maps.py    | 16 +++++-----------
 numpy/f2py/crackfortran.py | 11 +++++------
 numpy/f2py/f90mod_rules.py | 10 +++++-----
 3 files changed, 15 insertions(+), 22 deletions(-)

diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 11cd47702e42..6e5293cc8599 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -22,20 +22,14 @@
 import re
 import os
 import sys
-from .auxfuncs import (
-    debugcapi, dictappend, errmess, gentitle, getcallprotoargument,
-    getcallstatement, getfortranname, getpymethoddef, getrestdoc,
-    getusercode, getusercode1, hasinitvalue, hasnote, hasresultnote,
-    isarray, iscomplex, iscomplexarray, iscomplexfunction, isexternal,
-    isfunction, isintent_aux, isintent_callback, isintent_dict,
-    isintent_hide, isintent_in, isintent_inout, isintent_out, ismodule,
-    isoptional, isrequired, isscalar, isstring, isstringarray,
-    isstringfunction, issubroutine, l_and, l_not, l_or, outmess
-)
-
 from .crackfortran import markoutercomma
 from . import cb_rules

+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
 __all__ = [
     'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', 'getarrdocsign',
     'getinit', 'sign2map', 'routsign2map', 'modsign2map',

diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 6146e5098414..9f8c8962a2f3 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -149,12 +149,11 @@
 import platform

 from . import __version__
-from .auxfuncs import (
-    errmess, hascommon, isdouble, iscomplex, isexternal, isinteger,
-    isintent_aux, isintent_c, isintent_callback, isintent_in,
-    isintent_inout, isintent_inplace, islogical, isoptional, isscalar,
-    isstring, isstringarray, l_or, show
-)
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *

 f2py_version = __version__.version

diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py
index 88d661369564..ec3a248397f4 100644
--- a/numpy/f2py/f90mod_rules.py
+++ b/numpy/f2py/f90mod_rules.py
@@ -21,15 +21,15 @@
 import numpy as np

-from .auxfuncs import (
-    applyrules, dictappend, hasbody, hasnote, isallocatable, isfunction,
-    isintent_hide, ismodule, isprivate, isroutine, isstringarray, l_or,
-    outmess
-)
 from . import capi_maps
 from . import func2subr
 from .crackfortran import undo_rmbadname, undo_rmbadname1

+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
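+# (Illustrative only: f2py builds expression strings such as
+# "isintent_hide(var)" and passes them to eval, so the auxfuncs names it
+# needs are only known at run time; this example expression is a
+# hypothetical sketch, not taken from this diff.)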
+from .auxfuncs import * + options = {} From 39743700bd6c4f5c891a99a76b65243c53374827 Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Tue, 22 Sep 2015 16:28:35 -0700 Subject: [PATCH 134/496] DOC: update HOWTO_RELEASE document An update of release notes from experience of releasing 1.9.3, with responses to comments by Chuck and Ralf. --- doc/HOWTO_RELEASE.rst.txt | 203 +++++++++++++++++++++++++++++--------- 1 file changed, 157 insertions(+), 46 deletions(-) diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt index b77a6c25c8e0..ee05981fc11f 100644 --- a/doc/HOWTO_RELEASE.rst.txt +++ b/doc/HOWTO_RELEASE.rst.txt @@ -1,10 +1,12 @@ This file gives an overview of what is necessary to build binary releases for -NumPy on OS X. Windows binaries are built here using Wine, they can of course -also be built on Windows itself. Building OS X binaries on another platform is -not possible. +NumPy. Windows binaries are built here using Wine, they can of course also be +built on Windows itself. Building OS X binaries on another platform is not +possible, but our current OSX binary build procedure uses travis-ci virtual +machines running OSX. Current build and release info ============================== + The current info on building and releasing NumPy and SciPy is scattered in several places. It should be summarized in one place, updated and where necessary described in more detail. The sections below list all places where @@ -34,24 +36,35 @@ Release Scripts --------------- * https://github.com/numpy/numpy-vendor - Supported platforms and versions ================================ -Python 2.6-2.7 and >=3.2 are the currently supported versions on all platforms. + +Python 2.6-2.7 and >=3.2 are the currently supported versions when building +from source. We test numpy against all these versions every time we merge +code to trunk. Binary installers may be available for a subset of these +versions (see below). OS X ---- -OS X versions >= 10.5 are supported. Note that there are currently still -issues with compiling on 10.7, due to Apple moving to gcc-llvm. -Only the Python from `python.org `_ is supported. Binaries -do *not* support Apple Python. + +Python 2.7 and >=3.3 are the versions for which we provide binary installers. +OS X versions >= 10.6 are supported. We build binary wheels for OSX that are +compatible with Python.org Python, system Python, homebrew and macports - see +this `OSX wheel building summary +`_ for details. Windows ------- -Windows XP, Vista and 7 are supported. + +32-bit Python 2.7, 3.3, 3.4 are the versions for which we provide binary +installers. Windows XP, Vista and 7 are supported. Our current windows mingw +toolchain is not able to build 64-bit binaries of numpy. We are hoping to +update to a `mingw-w64 toolchain +`_ soon. Linux ----- + Many distributions include NumPy. Building from source is also relatively straightforward. Only tarballs are created for Linux, no specific binary installers are provided (yet). @@ -61,28 +74,24 @@ BSD / Solaris No binaries are provided, but succesful builds on Solaris and BSD have been reported. - Tool chain ========== + Compilers --------- + The same gcc version is used as the one with which Python itself is built on each platform. At the moment this means: -* OS X uses gcc-4.0 (since that is what Python itself is built with) up to - Python 2.6. Python 2.7 comes in two flavors; the 32-bit version is built with - gcc-4.0 and the 64-bit version with gcc-4.2. 
The "release.sh" script
-  sets environment variables to pick the right compiler.
-  All binaries should be built on OS X 10.5, with the exception of the 64-bit
-  Python 2.7 one which should be built on 10.6.
+* OS X builds on travis currently use `clang`. It appears that binary wheels
+  for OSX >= 10.6 can be safely built from OSX 10.9 when building against
+  the Python from the Python.org installers.
 * Windows builds use MinGW 3.4.5. Updating this to a more recent MinGW with
   GCC 4.x is desired, but there are still practical difficulties in building
   the binary installers.

-Cython is not needed for building the binaries, because generated C files from
-Cython sources are checked in at the moment. It is worth keeping an eye on what
-Cython versions have been used to generate all current C files, it should be
-the same and most recent version (0.16 as of now).
+You will need Cython for building the binaries. Cython compiles the ``.pyx``
+files in the numpy distribution to ``.c`` files.

 Fortran: on OS X gfortran from `this site `_ is used. On Windows g77
 (included in MinGW) is the current default, in the future
@@ -93,13 +102,6 @@ Python
 * Python(s) from `python.org `_
 * virtualenv
 * paver
-* bdist_mpkg from https://github.com/rgommers/bdist_mpkg (has a necessary
-  patch, don't use the unsupported version on PyPi).
-
-Python itself should be installed multiple times - each version a binary is
-built for should be installed. The other dependencies only have to be installed
-for the default Python version on the system. The same applies to the doc-build
-dependencies below.

 Building docs
 -------------
@@ -113,7 +115,7 @@ Wine
 For building Windows binaries on OS X Wine can be used. In Wine the following
 needs to be installed:

-* Python 2.6-2.7 and 3.2
+* Python 2.6-2.7 and 3.3
 * MakeNsis
 * CpuId plugin for MakeNsis : this can be found in the NumPy source tree under
   tools/win32build/cpucaps and has to be built with MinGW (see SConstruct file in
@@ -167,22 +169,27 @@ What is released

 Binaries
 --------
-Windows binaries in "superpack" form for Python 2.6/2.7/3.2/3.3.
-A superpack contains three builds, for SSE2, SSE3 and no SSE.
-OS X binaries are made in dmg format, targeting only the Python from
-`python.org `_
+
+Windows binary installers in "superpack" form for Python 2.7/3.3/3.4. A
+superpack contains three builds, for SSE2, SSE3 and no SSE.
+
+Wheels
+------
+
+OSX wheels built via travis-ci : see `build OSX wheels`_.
+
+.. _build OSX wheels: https://github.com/MacPython/numpy-wheels

 Other
 -----
+
 * Release Notes
 * Changelog

 Source distribution
 -------------------
-A source release in both .zip and .tar.gz formats is released.
+
+We build source releases in both .zip and .tar.gz formats.

 Release process
 ===============
@@ -200,6 +207,7 @@ Make sure current trunk builds a package correctly
 --------------------------------------------------

 ::
+
   git clean -fxd
   python setup.py bdist
   python setup.py sdist
@@ -270,8 +278,12 @@ updated for a major release.

 Check the release notes
 -----------------------
-Check that the release notes are up-to-date, and mention at least the
-following:
+
+Check that the release notes are up-to-date.
+
+Write or update the release notes in a file named for the release, such as
+``doc/release/1.11.0-notes.rst``.
+
+Mention at least the following:

 - major new features
 - deprecated and removed features
@@ -289,15 +301,55 @@
 Identify the commit hash of the release, e.g. 1b2e1d63ff.
 ::
   git co 1b2e1d63ff # gives warning about detached head

+First, change/check the following variables in ``pavement.py`` depending on the
+release version::
+
+    RELEASE_NOTES = 'doc/release/1.7.0-notes.rst'
+    LOG_START = 'v1.6.0'
+    LOG_END = 'maintenance/1.7.x'
+
+Do any other changes. When you are ready to release, do the following
+changes::
+
+    diff --git a/setup.py b/setup.py
+    index b1f53e3..8b36dbe 100755
+    --- a/setup.py
+    +++ b/setup.py
+    @@ -57,7 +57,7 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-
+     MAJOR = 1
+     MINOR = 7
+     MICRO = 0
+    -ISRELEASED = False
+    +ISRELEASED = True
+     VERSION = '%d.%d.%drc1' % (MAJOR, MINOR, MICRO)
+
+     # Return the git revision as a string

-    git commit -m "REL: Release." setup.py
+And make sure the ``VERSION`` variable is set properly.
+
+Now you can make the release commit and tag. We recommend you don't push
+the commit or tag immediately, just in case you need to do more cleanup. We
+prefer to defer the push of the tag until we're confident this is the exact
+form of the released code (see: :ref:`push-tag-and-commit`)::
+
+    git commit -s -m "REL: Release." setup.py
     git tag -s
-    git push origin

-Note: ``git tag -s`` creates a signed tag - make sure your PGP key is public.
+The ``-s`` flag makes a PGP (usually GPG) signed tag. Please do sign the
+release tags.
+
+The release tag should have the release number in the annotation (tag
+message). Unfortunately the name of a tag can be changed without breaking the
+signature, the contents of the message cannot.
+
+See : https://github.com/scipy/scipy/issues/4919 for a discussion of signing
+release tags, and http://keyring.debian.org/creating-key.html for instructions
+on creating a GPG key if you do not have one.
+
+To make your key more readily identifiable as you, consider sending your key
+to public keyservers, with a command such as::
+
+    gpg --send-keys 

 Apply patch to fix bogus strides
 --------------------------------
@@ -314,8 +366,34 @@ Increment the release number in setup.py. Release candidates should have "rc1"

 Also create a new version hash in cversions.txt and a corresponding version
 define NPY_x_y_API_VERSION in numpyconfig.h

+Trigger the OSX builds on travis
+--------------------------------
+
+See `build OSX wheels`_.
+
+You may need to check the ``.travis.yml`` file of the
+https://github.com/MacPython/numpy-wheels repository.
+
+Make sure that the release tag has been pushed, and that the ``.travis.yml``
+is set thusly::
+
+    - NP_COMMIT=latest-tag  # comment out to build version in submodule
+
+Trigger a build by doing an empty (or otherwise) commit to the repository::
+
+    cd /path/to/numpy-wheels
+    git commit --allow-empty
+    git push
+
+The wheels, once built, appear in http://wheels.scipy.org

 Make the release
 ----------------
+
+Build the changelog and notes for upload with::
+
+    paver write_release_and_log
+
 The tar-files and binary releases for distribution should be uploaded to
 SourceForge, together with the Release Notes and the Changelog. Uploading can
 be done through a web interface or, more efficiently, through scp/sftp/rsync as
@@ -327,19 +405,41 @@ For example::

 Update PyPi
 -----------
+
 The final release (not betas or release candidates) should be uploaded to
 PyPi.
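+A quick sanity check of the built files before uploading does not hurt; a
+minimal sketch (the version number below is a placeholder, not a real
+file name)::
+
+    $ tar tzf dist/numpy-X.Y.Z.tar.gz | head   # inspect the tarball contents
+    $ pip install dist/numpy-X.Y.Z.tar.gz      # test install in a scratch virtualenv
+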
There are two ways to update PyPi, the first one is::

-    $ python setup.py sdist upload
+    $ git clean -fxd  # to be safe
+    $ python setup.py sdist --formats=gztar,zip  # to check
+    # python setup.py sdist --formats=gztar,zip upload --sign

-and the second one is to upload the PKG_INFO file inside the sdist dir in the
+This will ask for your PGP key passphrase, in order to sign the built source
+packages.
+
+The second way is to upload the PKG_INFO file inside the sdist dir in the
 web interface of PyPi. The source tarball can also be uploaded through this
-interface. A simple binary installer for windows, created with
-``bdist_wininst``, should also be uploaded to PyPi so ``easy_install numpy``
-works.
+interface.
+
+To push the travis-ci OSX wheels up to pypi see :
+https://github.com/MacPython/numpy-wheels#uploading-the-built-wheels-to-pypi
+
+.. _push-tag-and-commit:
+
+Push the release tag and commit
+-------------------------------
+
+Finally, now that you are confident this tag correctly defines the source code
+you released, you can push the tag and release commit up to github::
+
+    git push  # Push release commit
+    git push upstream  # Push tag named
+
+where ``upstream`` points to the main https://github.com/numpy/numpy.git
+repository.

 Update docs.scipy.org
 ---------------------
+
 All documentation for a release can be updated on http://docs.scipy.org/ with:

     make dist
@@ -361,11 +461,16 @@ https://github.com/scipy/docs.scipy.org. Do the following:

 Update scipy.org
 ----------------
+
 A release announcement with a link to the download site should be placed in
 the sidebar of the front page of scipy.org.

+Updates to scipy.org should be made via a PR at
+https://github.com/scipy/scipy.org. The file that needs modification is
+``www/index.rst``. Search for ``News``.
+
 Announce to the lists
 ---------------------
+
 The release should be announced on the mailing lists of NumPy and SciPy, to
 python-announce, and possibly also those of Matplotlib,IPython and/or Pygame.

@@ -374,6 +479,12 @@ During the beta/RC phase an explicit request for testing the binaries with
 several other libraries (SciPy/Matplotlib/Pygame) should be posted on the
 mailing list.

+Announce to Linux Weekly News
+-----------------------------
+
+Email the editor of LWN to let them know of the release. Directions at:
+https://lwn.net/op/FAQ.lwn#contact
+
 After the final release
 -----------------------
 After the final release is announced, a few administrative tasks are left to be

From 086d42d8b315cacf04ccaf4a805dc2fc7c137fee Mon Sep 17 00:00:00 2001
From: Allan Haldane
Date: Sun, 25 Oct 2015 23:41:11 -0400
Subject: [PATCH 135/496] TST: Remove tests of view safety checks (see next
 commit)

Remove unit tests for the view safety checks, which are to be reverted
in the next commit.
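
For reference, the removed tests exercised the private checker roughly as
follows (a sketch distilled from the deleted code below, not new behavior):

    import numpy as np
    from numpy.core._internal import _view_is_safe
    _view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))  # was allowed
    _view_is_safe(np.dtype('O,O,p'), np.dtype('O,p'))  # raised TypeError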
--- numpy/core/tests/test_multiarray.py | 83 ---------------------------- numpy/lib/tests/test_recfunctions.py | 10 ---- 2 files changed, 93 deletions(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 85b0e5519ee8..116348667088 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -5829,89 +5829,6 @@ def test_collections_hashable(self): x = np.array([]) self.assertFalse(isinstance(x, collections.Hashable)) -from numpy.core._internal import _view_is_safe - -class TestObjViewSafetyFuncs(TestCase): - def test_view_safety(self): - psize = np.dtype('p').itemsize - - # creates dtype but with extra character code - for missing 'p' fields - def mtype(s): - n, offset, fields = 0, 0, [] - for c in s.split(','): # subarrays won't work - if c != '-': - fields.append(('f{0}'.format(n), c, offset)) - n += 1 - offset += np.dtype(c).itemsize if c != '-' else psize - - names, formats, offsets = zip(*fields) - return np.dtype({'names': names, 'formats': formats, - 'offsets': offsets, 'itemsize': offset}) - - # test nonequal itemsizes with objects: - # these should succeed: - _view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p')) - _view_is_safe(np.dtype('O,O'), np.dtype('O,O,O')) - # these should fail: - assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O')) - assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p')) - assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O')) - - # test nonequal itemsizes with missing fields: - # these should succeed: - _view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p')) - _view_is_safe(np.dtype('p,p'), np.dtype('p,p,p')) - # these should fail: - assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p')) - assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-')) - assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p')) - - # scans through positions at which we can view a type - def scanView(d1, otype): - goodpos = [] - for shift in range(d1.itemsize - np.dtype(otype).itemsize+1): - d2 = np.dtype({'names': ['f0'], 'formats': [otype], - 'offsets': [shift], 'itemsize': d1.itemsize}) - try: - _view_is_safe(d1, d2) - except TypeError: - pass - else: - goodpos.append(shift) - return goodpos - - # test partial overlap with object field - assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'), - [0] + list(range(2*psize, 3*psize+1))) - assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'), - [psize, 4*psize, 5*psize]) - - # test partial overlap with missing field - assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'), - [0] + list(range(2*psize, 3*psize+1))) - - # test nested structures with objects: - nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')]) - assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize]) - assert_equal(scanView(nestedO, 'O'), [2*psize]) - - # test nested structures with missing fields: - nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))]) - assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize]) - - # test subarrays with objects - subarrayO = np.dtype('p,(2,3)O,p') - assert_equal(scanView(subarrayO, 'p'), [0, 7*psize]) - assert_equal(scanView(subarrayO, 'O'), - list(range(psize, 6*psize+1, psize))) - - #test dtype with overlapping fields - overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'], - 'formats': ['p', 'p', 'p', 'p'], - 'offsets': [0, 1, 3*psize-1, 3*psize], - 'itemsize': 4*psize}) - assert_equal(scanView(overlapped, 'p'), [0, 1, 
3*psize-1, 3*psize]) - class TestArrayPriority(TestCase): # This will go away when __array_priority__ is settled, meanwhile diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 09cc29dc114c..699a04716d69 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -720,15 +720,5 @@ def test_append_to_objects(self): dtype=[('A', object), ('B', float), ('C', int)]) assert_equal(test, control) - def test_append_with_objects(self): - "Test append_fields when the appended data contains objects" - obj = self.data['obj'] - x = np.array([(10, 1.), (20, 2.)], dtype=[('A', int), ('B', float)]) - y = np.array([obj, obj], dtype=object) - test = append_fields(x, 'C', data=y, dtypes=object, usemask=False) - control = np.array([(10, 1.0, obj), (20, 2.0, obj)], - dtype=[('A', int), ('B', float), ('C', object)]) - assert_equal(test, control) - if __name__ == '__main__': run_module_suite() From 796a5f87c11fdc3a345fca5448b27a02856c2e4d Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Sun, 25 Oct 2015 23:29:06 -0400 Subject: [PATCH 136/496] BUG: revert view safety checks Because of slowdowns caused by the view safety checks introduced in #5548 they are removed here for 1.10. The plan is to reintroduce a better version of the checks in 1.11. --- numpy/core/_internal.py | 137 ++++------------------------------------ 1 file changed, 13 insertions(+), 124 deletions(-) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 3ddc2c64d890..47c933411123 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -306,94 +306,6 @@ def _copy_fields(ary): 'formats': [dt.fields[name][0] for name in dt.names]} return array(ary, dtype=copy_dtype, copy=True) -def _get_all_field_offsets(dtype, base_offset=0): - """ Returns the types and offsets of all fields in a (possibly structured) - data type, including nested fields and subarrays. - - Parameters - ---------- - dtype : data-type - Data type to extract fields from. - base_offset : int, optional - Additional offset to add to all field offsets. - - Returns - ------- - fields : list of (data-type, int) pairs - A flat list of (dtype, byte offset) pairs. - - """ - fields = [] - if dtype.fields is not None: - for name in dtype.names: - sub_dtype = dtype.fields[name][0] - sub_offset = dtype.fields[name][1] + base_offset - fields.extend(_get_all_field_offsets(sub_dtype, sub_offset)) - else: - if dtype.shape: - sub_offsets = _get_all_field_offsets(dtype.base, base_offset) - count = 1 - for dim in dtype.shape: - count *= dim - fields.extend((typ, off + dtype.base.itemsize*j) - for j in range(count) for (typ, off) in sub_offsets) - else: - fields.append((dtype, base_offset)) - return fields - -def _check_field_overlap(new_fields, old_fields): - """ Perform object memory overlap tests for two data-types (see - _view_is_safe). - - This function checks that new fields only access memory contained in old - fields, and that non-object fields are not interpreted as objects and vice - versa. - - Parameters - ---------- - new_fields : list of (data-type, int) pairs - Flat list of (dtype, byte offset) pairs for the new data type, as - returned by _get_all_field_offsets. - old_fields: list of (data-type, int) pairs - Flat list of (dtype, byte offset) pairs for the old data type, as - returned by _get_all_field_offsets. 
- - Raises - ------ - TypeError - If the new fields are incompatible with the old fields - - """ - - #first go byte by byte and check we do not access bytes not in old_fields - new_bytes = set() - for tp, off in new_fields: - new_bytes.update(set(range(off, off+tp.itemsize))) - old_bytes = set() - for tp, off in old_fields: - old_bytes.update(set(range(off, off+tp.itemsize))) - if new_bytes.difference(old_bytes): - raise TypeError("view would access data parent array doesn't own") - - #next check that we do not interpret non-Objects as Objects, and vv - obj_offsets = [off for (tp, off) in old_fields if tp.type is object_] - obj_size = dtype(object_).itemsize - - for fld_dtype, fld_offset in new_fields: - if fld_dtype.type is object_: - # check we do not create object views where - # there are no objects. - if fld_offset not in obj_offsets: - raise TypeError("cannot view non-Object data as Object type") - else: - # next check we do not create non-object views - # where there are already objects. - # see validate_object_field_overlap for a similar computation. - for obj_offset in obj_offsets: - if (fld_offset < obj_offset + obj_size and - obj_offset < fld_offset + fld_dtype.itemsize): - raise TypeError("cannot view Object as non-Object type") - def _getfield_is_safe(oldtype, newtype, offset): """ Checks safety of getfield for object arrays. @@ -415,10 +327,16 @@ def _getfield_is_safe(oldtype, newtype, offset): If the field access is invalid """ - new_fields = _get_all_field_offsets(newtype, offset) - old_fields = _get_all_field_offsets(oldtype) - # raises if there is a problem - _check_field_overlap(new_fields, old_fields) + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return def _view_is_safe(oldtype, newtype): """ Checks safety of a view involving object arrays, for example when @@ -426,13 +344,6 @@ def _view_is_safe(oldtype, newtype): np.zeros(10, dtype=oldtype).view(newtype) - We need to check that - 1) No memory that is not an object will be interpreted as a object, - 2) No memory containing an object will be interpreted as an arbitrary type. - Both cases can cause segfaults, eg in the case the view is written to. - Strategy here is to also disallow views where newtype has any field in a - place oldtype doesn't. - Parameters ---------- oldtype : data-type @@ -452,31 +363,9 @@ def _view_is_safe(oldtype, newtype): if oldtype == newtype: return - new_fields = _get_all_field_offsets(newtype) - new_size = newtype.itemsize - - old_fields = _get_all_field_offsets(oldtype) - old_size = oldtype.itemsize - - # if the itemsizes are not equal, we need to check that all the - # 'tiled positions' of the object match up. Here, we allow - # for arbirary itemsizes (even those possibly disallowed - # due to stride/data length issues). 
- if old_size == new_size: - new_num = old_num = 1 - else: - gcd_new_old = _gcd(new_size, old_size) - new_num = old_size // gcd_new_old - old_num = new_size // gcd_new_old - - # get position of fields within the tiling - new_fieldtile = [(tp, off + new_size*j) - for j in range(new_num) for (tp, off) in new_fields] - old_fieldtile = [(tp, off + old_size*j) - for j in range(old_num) for (tp, off) in old_fields] - - # raises if there is a problem - _check_field_overlap(new_fieldtile, old_fieldtile) + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for object array.") + return # Given a string containing a PEP 3118 format specifier, # construct a Numpy dtype From 773437cb73eba2b41ac78dd6e6d9d13f34e65e98 Mon Sep 17 00:00:00 2001 From: Yash Mehrotra Date: Tue, 27 Oct 2015 13:55:38 +0530 Subject: [PATCH 137/496] TST: Added tests for empty partition and argpartition --- numpy/core/tests/test_item_selection.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py index f3e7701d404f..5e9cadd8f3cb 100644 --- a/numpy/core/tests/test_item_selection.py +++ b/numpy/core/tests/test_item_selection.py @@ -68,6 +68,24 @@ def test_unicode_mode(self): k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.take, 5, mode=k) + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) + if __name__ == "__main__": run_module_suite() From 4d9bf8a3aa2f2bfe37e1e32fb036fafa156a6d38 Mon Sep 17 00:00:00 2001 From: Yash Mehrotra Date: Tue, 27 Oct 2015 14:40:52 +0530 Subject: [PATCH 138/496] TST: Added regression test empty percentile, in ref to #6530 and #6553 --- numpy/core/tests/test_regression.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 3aba71463428..ac34cfa5343b 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2173,5 +2173,9 @@ def test_leak_in_structured_dtype_comparison(self): after = sys.getrefcount(a) assert_equal(before, after) + def test_empty_percentile(self): + # gh-6530 / gh-6553 + assert_array_equal(np.percentile(np.arange(10), []), np.array([])) + if __name__ == "__main__": run_module_suite() From 02b42d4995f6afe596951447ba58a1a43883778f Mon Sep 17 00:00:00 2001 From: Wendell Smith Date: Wed, 28 Oct 2015 18:30:27 -0400 Subject: [PATCH 139/496] BUG: Fix for #6569, allowing build_ext --inplace --- numpy/testing/nosetester.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 95ded8d93ff4..c9c6d10f02b6 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -177,7 +177,8 @@ class NoseTester(object): 'swig_ext'] def __init__(self, package=None, raise_warnings=None): - if raise_warnings is None and '.dev0' in np.__version__: + if raise_warnings is None and ( + not hasattr(np, '__version__') or '.dev0' in np.__version__): raise_warnings = "develop" elif raise_warnings is None: raise_warnings = 
"release" From 0befb0b3444804b8a212ef069c4c3f141d19750b Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 28 Oct 2015 00:53:58 -0700 Subject: [PATCH 140/496] TST: attempt to make test_load_refcount deterministic Use a different strategy to detect whether np.load creates cycles. Fixes gh-6571, I hope. --- numpy/lib/tests/test_io.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index f4ce67805766..af904e96a404 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1905,12 +1905,17 @@ def test_load_refcount(): np.savez(f, [1, 2, 3]) f.seek(0) - gc.collect() - n_before = len(gc.get_objects()) - np.load(f) - n_after = len(gc.get_objects()) - - assert_equal(n_before, n_after) + assert_(gc.isenabled()) + gc.disable() + try: + gc.collect() + np.load(f) + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created by np.load + n_objects_in_cycles = gc.collect() + finally: + gc.enable() + assert_equal(n_objects_in_cycles, 0) if __name__ == "__main__": run_module_suite() From cc2a27284fa9bd9ff8cc1397633e1daa470689ba Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Thu, 29 Oct 2015 20:01:31 +0200 Subject: [PATCH 141/496] MAINT: fix mistake in doc upload rule [ci skip] --- doc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/Makefile b/doc/Makefile index 47f191374583..063ab0db83bc 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -101,7 +101,7 @@ upload: ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \ $(UPLOAD_DIR)/numpy-html-$(RELEASE).zip ssh $(USERNAME)@new.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz - ssh $(USERNAME)@new.scipy.org cp -r $(UPLOAD_DIR)/* /srv/docs_scipy_org/doc/numpy + ssh $(USERNAME)@new.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh #------------------------------------------------------------------------------ From 8c688bdd51eddd20f146a135c7ee3e0b9a9ffd8a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 29 Oct 2015 21:15:57 +0100 Subject: [PATCH 142/496] MAINT: remove useless files with outdated info from repo root and doc/. [ci skip] --- COMPATIBILITY | 59 ------------------------------- DEV_README.txt | 18 ---------- TEST_COMMIT | 18 ---------- doc/HOWTO_MERGE_WIKI_DOCS.rst.txt | 49 ------------------------- 4 files changed, 144 deletions(-) delete mode 100644 COMPATIBILITY delete mode 100644 DEV_README.txt delete mode 100644 TEST_COMMIT delete mode 100644 doc/HOWTO_MERGE_WIKI_DOCS.rst.txt diff --git a/COMPATIBILITY b/COMPATIBILITY deleted file mode 100644 index d2cd3cd27594..000000000000 --- a/COMPATIBILITY +++ /dev/null @@ -1,59 +0,0 @@ - - -X.flat returns an indexable 1-D iterator (mostly similar to an array -but always 1-d) --- only has .copy and .__array__ attributes of an array!!! - -.typecode() --> .dtype.char - -.iscontiguous() --> .flags['CONTIGUOUS'] or .flags.contiguous - -.byteswapped() -> .byteswap() - -.itemsize() -> .itemsize - -.toscalar() -> .item() - -If you used typecode characters: - -'c' -> 'S1' or 'c' -'b' -> 'B' -'1' -> 'b' -'s' -> 'h' -'w' -> 'H' -'u' -> 'I' - - -C -level - -some API calls that used to take PyObject * now take PyArrayObject * -(this should only cause warnings during compile and not actual problems). 
- PyArray_Take - -These commands now return a buffer that must be freed once it is used -using PyMemData_FREE(ptr); - -a->descr->zero --> PyArray_Zero(a) -a->descr->one --> PyArray_One(a) - -Numeric/arrayobject.h --> numpy/oldnumeric.h - - -# These will actually work and are defines for PyArray_BYTE, -# but you really should change it in your code -PyArray_CHAR --> PyArray_CHAR - (or PyArray_STRING which is more flexible) -PyArray_SBYTE --> PyArray_BYTE - -Any uses of character codes will need adjusting.... -use PyArray_XXXLTR where XXX is the name of the type. - - -If you used function pointers directly (why did you do that?), -the arguments have changed. Everything that was an int is now an intp. -Also, arrayobjects should be passed in at the end. - -a->descr->cast[i](fromdata, fromstep, todata, tostep, n) -a->descr->cast[i](fromdata, todata, n, PyArrayObject *in, PyArrayObject *out) - anything but single-stepping is not supported by this function - use the PyArray_CastXXXX functions. - diff --git a/DEV_README.txt b/DEV_README.txt deleted file mode 100644 index 7dc8bceed498..000000000000 --- a/DEV_README.txt +++ /dev/null @@ -1,18 +0,0 @@ -Thank you for your willingness to help make NumPy the best array system -available. - -We have a few simple rules: - - * try hard to keep the Git repository in a buildable state and to not - indiscriminately muck with what others have contributed. - - * Simple changes (including bug fixes) and obvious improvements are - always welcome. Changes that fundamentally change behavior need - discussion on numpy-discussions@scipy.org before anything is - done. - - * Please add meaningful comments when you check changes in. These - comments form the basis of the change-log. - - * Add unit tests to exercise new code, and regression tests - whenever you fix a bug. diff --git a/TEST_COMMIT b/TEST_COMMIT deleted file mode 100644 index ca662401bd0e..000000000000 --- a/TEST_COMMIT +++ /dev/null @@ -1,18 +0,0 @@ -oliphant: yes -stefanv: yes -rkern: yes -pearu: yes -fperez: yes -chanley: yes -cookedm: yes -swalton: yes -eric: yes -charris: no -fonnesbeck: no -afayolle: no -dubois: no -sasha: yes -tim_hochberg: yes -jarrod.millman: yes -ariver: 2010-01-14 20:02:18 -rgommers: test build bot v3 diff --git a/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt b/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt deleted file mode 100644 index 3431d28b0a11..000000000000 --- a/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt +++ /dev/null @@ -1,49 +0,0 @@ -======================================== -Merging documentation back from Doc-Wiki -======================================== - -This document describes how to merge back docstring edits from the pydocweb -wiki (at http://docs.scipy.org/doc/) to NumPy/SciPy trunk. - -Basic steps ------------ -It works like this, both for NumPy and SciPy: - -1. Go to http://docs.scipy.org/scipy/patch/ and log in. -2. Click on "Select OK to apply" -3. Click on "Generate patch" -4. Select all the text in the browser and save as a patch. -5. Check the patch file for errors etc., edit if necessary. - Especially browse through the changes in example codes. - - .. warning:: - - The examples in the documentation will be run eg. on user's computers - eventually, and we do a very limited screening of the edits on the wiki. - Hence, before committing things to SVN, you absolutely **MUST** read - through all changes to the examples (``>>>`` lines, ``plot::``, and - ``doctest::``) and check that they don't try to do anything silly and - dangerous. - -6. 
Apply patch (typically ``patch -p1 < newdocs.patch`` from base numpy dir).
-   This may ask you to specify location of a few files by hand, though.
-7. Run tests to see if something is broken
-8. Commit
-
-Errors in patch file
---------------------
-
-Note that it is necessary to check the generated patch before trying
-to apply. If there are errors they are noted at the top of the
-file. There are two known reasons for errors:
-
-* If the error message is "source location for docstring is not
-  known", then the function usually needs to get handled with
-  ``add_newdoc()`` in numpy/add_newdocs.py.
-
-  This may also be a sign that the docstring is generated and assigned
-  by some automatic means, in which case the generation system may
-  need to be revised.
-
-* If there are other messages, this may indicate a bug in the
-  patch generation itself.

From 8363b2e39a4c7a8a023f3a46fc41c7a6c90b1f53 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 27 Oct 2015 15:31:13 -0600
Subject: [PATCH 143/496] DOC: Release notes for Numpy 1.10.2. [ci skip]

---
 doc/release/1.10.2-notes.rst | 60 ++++++++++++++++++++++++++++++++++++
 doc/source/release.rst       |  1 +
 2 files changed, 61 insertions(+)
 create mode 100644 doc/release/1.10.2-notes.rst

diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
new file mode 100644
index 000000000000..9a694435137c
--- /dev/null
+++ b/doc/release/1.10.2-notes.rst
@@ -0,0 +1,60 @@
+NumPy 1.10.2 Release Notes
+**************************
+
+This release deals with a number of bugs that turned up in 1.10.1 and
+adds various build and release improvements.
+
+Numpy 1.10.2 supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+Issues Fixed
+============
+
+* gh-6563 Intent(out) broken in recent versions of f2py.
+* gh-6530 The partition function errors out on empty input.
+* gh-6498 Mention change in default casting rule in 1.10 release notes.
+* gh-6497 Failure of reduce operation on recarrays.
+* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
+* gh-6491 Error in broadcasting stride_tricks array.
+* gh-6467 Performance regression for record array access.
+* gh-6462 Median of empty array produces IndexError.
+
+Merged PRs
+==========
+
+The following PRs in master have been backported to 1.10.2
+
+* gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest.
+* gh-6208 MAINT: Speedup field access by removing unneeded safety checks.
+* gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure.
+* gh-6470 BUG: Fix AttributeError in numpy distutils.
+* gh-6472 MAINT: Use Python 3.5 instead of 3.5-dev for travis 3.5 testing.
+* gh-6474 REL: Update Paver script for sdist and auto-switch test warnings.
+* gh-6478 BUG: Fix Intel compiler flags for OS X build.
+* gh-6481 MAINT: LIBPATH with spaces is now supported Python 2.7+ and Win32.
+* gh-6487 BUG: Allow nested use of parameters in definition of arrays in f2py.
+* gh-6488 BUG: Extend common blocks rather than overwriting in f2py.
+* gh-6499 DOC: Mention that default casting for inplace operations has changed.
+* gh-6500 BUG: Recarrays viewed as subarrays don't convert to np.record type.
+* gh-6501 REL: Add "make upload" command for built docs, update "make dist".
+* gh-6526 BUG: Fix use of __doc__ in setup.py for -OO mode.
+* gh-6527 BUG: Fix the IndexError when taking the median of an empty array.
+* gh-6537 BUG: Make ma.atleast_* with scalar argument return arrays.
+* gh-6538 BUG: Fix ma.masked_values does not shrink mask if requested.
+* gh-6546 BUG: Fix inner product regression for non-contiguous arrays. +* gh-6553 BUG: Fix partition and argpartition error for empty input. +* gh-6556 BUG: Error in broadcast_arrays with as_strided array. +* gh-6558 MAINT: Minor update to "make upload" doc build command. +* gh-6562 BUG: Disable view safety checks in recarray. +* gh-6567 BUG: Revert some import * fixes in f2py. +* gh-6577 BUG: Fix for #6569, allowing build_ext --inplace +* gh-6579 MAINT: Fix mistake in doc upload rule + +The following PR reverted initial work for mingwpy. + +* gh-6536 BUG: revert gh-5614 to fix non-windows build problems + +Notes +===== +A bug in the Numpy 1.10.1 release resulted in exceptions being raised for +``RuntimeWarning`` and ``DeprecationWarning`` in projects depending on Numpy. +That has been fixed. diff --git a/doc/source/release.rst b/doc/source/release.rst index 201d3e77fae4..9e908dd98446 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -3,6 +3,7 @@ Release Notes ************* .. include:: ../release/1.11.0-notes.rst +.. include:: ../release/1.10.2-notes.rst .. include:: ../release/1.10.1-notes.rst .. include:: ../release/1.10.0-notes.rst .. include:: ../release/1.9.2-notes.rst From 3ae925fc92a779b61c36d2d9bf52cc57eda15d02 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 30 Oct 2015 02:13:50 +0100 Subject: [PATCH 144/496] BUG: fix MANIFEST.in for removal of a file in gh-8047. --- MANIFEST.in | 1 - 1 file changed, 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 6f4826478d88..56d40efbf1fd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,7 +4,6 @@ # data, etc files to distribution. Avoid using MANIFEST.in for that. # include MANIFEST.in -include COMPATIBILITY include *.txt include setupegg.py include site.cfg.example From db85edeb9326d4ea736e41baf23ae8edfdd6f84c Mon Sep 17 00:00:00 2001 From: Jonathan Helmus Date: Thu, 29 Oct 2015 20:53:26 -0500 Subject: [PATCH 145/496] BUG: immutable _arraymethod function in ma.core Replace the _arraymethod class in ma.core with a function factory which returns class method wrappers around basic array methods. These methods are bound to the MaskedArray instance and are immutable. Previously _arraymethod was a class which would incorrectly operate on the MaskedArray object which last accessed the particular named function. closes #5247 --- numpy/ma/core.py | 67 ++++++++++++------------------------- numpy/ma/tests/test_core.py | 8 +++++ 2 files changed, 30 insertions(+), 45 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 4ea52d0ab849..7d9acbd1ca85 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2467,25 +2467,18 @@ def flatten_sequence(iterable): return out -class _arraymethod(object): +def _arraymethod(funcname, onmask=True): """ - Define a wrapper for basic array methods. + Return a class method wrapper around a basic array method. - Upon call, returns a masked array, where the new ``_data`` array is - the output of the corresponding method called on the original - ``_data``. + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. If `onmask` is True, the new mask is the output of the method called on the initial mask. Otherwise, the new mask is just a reference to the initial mask. - Attributes - ---------- - _onmask : bool - Holds the `onmask` parameter. - obj : object - The object calling `_arraymethod`. 
-
-
     Parameters
     ----------
     funcname : str
         The name of the function to wrap.
     onmask : bool
         Whether the mask must be processed also (True) or left
         alone (False). Default is True. Make available as `_onmask`
         attribute.

-    """
-
-    def __init__(self, funcname, onmask=True):
-        self.__name__ = funcname
-        self._onmask = onmask
-        self.obj = None
-        self.__doc__ = self.getdoc()
-
-    def getdoc(self):
-        "Return the doc of the function (from the doc of the method)."
-        methdoc = getattr(ndarray, self.__name__, None) or \
-            getattr(np, self.__name__, None)
-        if methdoc is not None:
-            return methdoc.__doc__
-
-    def __get__(self, obj, objtype=None):
-        self.obj = obj
-        return self
+    Returns
+    -------
+    method : instancemethod
+        Class method wrapper of the specified basic array method.

-    def __call__(self, *args, **params):
-        methodname = self.__name__
-        instance = self.obj
-        # Fallback : if the instance has not been initialized, use the first
-        # arg
-        if instance is None:
-            args = list(args)
-            instance = args.pop(0)
-        data = instance._data
-        mask = instance._mask
-        cls = type(instance)
-        result = getattr(data, methodname)(*args, **params).view(cls)
-        result._update_from(instance)
+    """
+    def wrapped_method(self, *args, **params):
+        result = getattr(self._data, funcname)(*args, **params)
+        result = result.view(type(self))
+        result._update_from(self)
+        mask = self._mask
         if result.ndim:
-            if not self._onmask:
+            if not onmask:
                 result.__setmask__(mask)
             elif mask is not nomask:
-                result.__setmask__(getattr(mask, methodname)(*args, **params))
+                result.__setmask__(getattr(mask, funcname)(*args, **params))
         else:
             if mask.ndim and (not mask.dtype.names and mask.all()):
                 return masked
         return result

+    methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+    if methdoc is not None:
+        wrapped_method.__doc__ = methdoc.__doc__
+    wrapped_method.__name__ = funcname
+    return wrapped_method

 class MaskedIterator(object):

diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 0a98212540ce..70c1ee12c01c 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -401,6 +401,14 @@ def test_copy(self):
         assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
         assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)

+    def test_copy_immutable(self):
+        # Tests that the copy method is immutable, GitHub issue #5247
+        a = np.ma.array([1, 2, 3])
+        b = np.ma.array([4, 5, 6])
+        a_copy_method = a.copy
+        b.copy
+        assert_equal(a_copy_method(), [1, 2, 3])
+
     def test_deepcopy(self):
         from copy import deepcopy
         a = array([0, 1, 2], mask=[False, True, False])

From f0c9703bd4bbc8d31c6ef5d94c141ec766f33dbc Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 30 Oct 2015 13:27:51 -0600
Subject: [PATCH 146/496] DEP: Remove FutureWarning from np.lib.split and go to
 future.

Previously an empty array resulting from split was always 1-D. In Numpy
1.9 a FutureWarning was raised to notify users that it was planned to
preserve the dimensions of empty arrays in a future numpy release. This
removes the FutureWarning and implements preservation of dimensions.

Note that there was a bug in numpy 1.9 and the dimensions of empty
arrays were already preserved in some cases and no warning was issued.
This PR fixes that inconsistency by preserving the dimensions in all
cases rather than fixing the bug, as the dimension preserving behavior
was already depended on by some users. See the discussion in gh-6575
about this change.
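
An illustrative sketch of the new behavior (the shapes below are a worked
example consistent with the updated tests, not output copied from them):

    >>> import numpy as np
    >>> np.array_split(np.ones((2, 10)), 3)[-1].shape   # was (0,) before
    (0, 10)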
--- numpy/lib/shape_base.py | 11 +---------- numpy/lib/tests/test_shape_base.py | 30 ++++++++++++------------------ 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index b2beef0a8944..615cf88f4660 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -421,18 +421,9 @@ def array_split(ary, indices_or_sections, axis=0): end = div_points[i + 1] sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) - # This "kludge" was introduced here to replace arrays shaped (0, 10) - # or similar with an array shaped (0,). - # There seems no need for this, so give a FutureWarning to remove later. - if any(arr.size == 0 and arr.ndim != 1 for arr in sub_arys): - warnings.warn("in the future np.array_split will retain the shape of " - "arrays with a zero size, instead of replacing them by " - "`array([])`, which always has a shape of (0,).", - FutureWarning) - sub_arys = _replace_zero_by_x_arrays(sub_arys) - return sub_arys + def split(ary,indices_or_sections,axis=0): """ Split an array into multiple sub-arrays. diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 8ab72b9f938b..3f05f80c0b29 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -103,21 +103,17 @@ def test_integer_split(self): def test_integer_split_2D_rows(self): a = np.array([np.arange(10), np.arange(10)]) - res = assert_warns(FutureWarning, array_split, a, 3, axis=0) - - # After removing the FutureWarning, the last should be zeros((0, 10)) - desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), - np.array([])] - compare_results(res, desired) + res = array_split(a, 3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) # Same thing for manual splits: - res = assert_warns(FutureWarning, array_split, a, [0, 1, 2], axis=0) - - # After removing the FutureWarning, the last should be zeros((0, 10)) - desired = [np.array([]), np.array([np.arange(10)]), - np.array([np.arange(10)])] - compare_results(res, desired) + res = array_split(a, [0, 1, 2], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) def test_integer_split_2D_cols(self): @@ -132,12 +128,10 @@ def test_integer_split_2D_default(self): """ This will fail if we change default axis """ a = np.array([np.arange(10), np.arange(10)]) - res = assert_warns(FutureWarning, array_split, a, 3) - - # After removing the FutureWarning, the last should be zeros((0, 10)) - desired = [np.array([np.arange(10)]), np.array([np.arange(10)]), - np.array([])] - compare_results(res, desired) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) # perhaps should check higher dimensions From 260474171a7e37043b566d33a9ca0f02aa53fbe8 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 30 Oct 2015 14:06:39 -0600 Subject: [PATCH 147/496] DOC: Update 1.10.0 release notes for future changes to np.lib.split. Note that empty array resulting from a split will preserve dimensions starting in Numpy 1.11. 
--- doc/release/1.10.0-notes.rst | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst index 0341d2a6aad5..e753707d47e0 100644 --- a/doc/release/1.10.0-notes.rst +++ b/doc/release/1.10.0-notes.rst @@ -20,7 +20,8 @@ Highlights * Addition of `nanprod` to the set of nanfunctions. * Support for the '@' operator in Python 3.5. -Dropped Support: +Dropped Support +=============== * The _dotblas module has been removed. CBLAS Support is now in Multiarray. @@ -35,15 +36,22 @@ Dropped Support: * Keywords ``skiprows`` and ``missing`` removed from np.genfromtxt. * Keyword ``old_behavior`` removed from np.correlate. -Future Changes: +Future Changes +============== * In array comparisons like ``arr1 == arr2``, many corner cases involving strings or structured dtypes that used to return scalars now issue ``FutureWarning`` or ``DeprecationWarning``, and in the future will be change to either perform elementwise comparisons or raise an error. -* The SafeEval class will be removed. -* The alterdot and restoredot functions will be removed. +* In ``np.lib.split`` an empty array in the result always had dimension + ``(0,)`` no matter the dimensions of the array being split. In Numpy 1.11 + that behavior will be changed so that the dimensions will be preserved. A + ``FutureWarning`` for this change has been in place since Numpy 1.9 but, + due to a bug, sometimes no warning was raised and the dimensions were + already preserved. +* The SafeEval class will be removed in Numpy 1.11. +* The alterdot and restoredot functions will be removed in Numpy 1.11. See below for more details on these changes. From 5dbe32630ccf22874c654dce68eb1635136287a4 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 30 Oct 2015 18:50:22 -0400 Subject: [PATCH 148/496] DOC: Fix an argument in 'Numpy for Matlab Users'. I've seen at least three occurrences (a couple on stackoverflow, and one private email) of someone using the argument 'order=15' in a call to the 'set_integrator' method of 'scipy.integrate.ode'. Presumably this is because of the suggestion in the "NumPy for Matlab Users" guide to replace 'ode15s' with 'scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=15)'. The stiff solver in 'vode'--presumably the solver of interest for someone using 'ode15s'--has a maximum order of 5, and the non-stiff solver has maximum order of 12. (The maximum order of 'ode15s' is also 5; the '15' in the name refers to the variable order ranging from 1 to 5.) --- doc/source/user/numpy-for-matlab-users.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 2b8f4374955e..be0b2cbd9ecf 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -237,7 +237,7 @@ General Purpose Equivalents - ``scipy.integrate.ode(f).set_integrator('dopri5')`` - integrate an ODE with Runge-Kutta 4,5 * - ``ode15s`` - - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=15)`` + - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=5)`` - integrate an ODE with BDF method Linear Algebra Equivalents From 93c8e0e6689b04aa143009a6b52dae22cc18ed8b Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 30 Oct 2015 22:11:31 -0400 Subject: [PATCH 149/496] DOC: A bit of copy-editing of the 'NumPy for Matlab Users' guide. 
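To make the corrected ``ode15s`` equivalent from patch 148 above
concrete, here is a minimal usage sketch (the stiff ODE used is an
arbitrary illustration, not taken from the guide):

    >>> from scipy.integrate import ode
    >>> def f(t, y):
    ...     return -50.0 * (y - 1.0)  # an arbitrary stiff test problem
    >>> r = ode(f).set_integrator('vode', method='bdf', order=5)
    >>> r = r.set_initial_value(0.0, 0.0)
    >>> y1 = r.integrate(1.0)

With ``method='bdf'`` the 'vode' integrator supports orders 1 through 5
only, which is why the previously suggested ``order=15`` was wrong.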
--- doc/source/user/numpy-for-matlab-users.rst | 68 +++++++++++----------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index be0b2cbd9ecf..d94233a2ed2f 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -42,7 +42,7 @@ Some Key Differences 'array' or 'matrix'? Which should I use? ======================================== -Numpy provides, in addition to `np.ndarray`` an additional matrix type +Numpy provides, in addition to ``np.ndarray``, an additional matrix type that you may see used in some existing code. Which one to use? Short answer @@ -51,7 +51,7 @@ Short answer **Use arrays**. - They are the standard vector/matrix/tensor type of numpy. Many numpy - function return arrays, not matrices. + functions return arrays, not matrices. - There is a clear distinction between element-wise operations and linear algebra operations. - You can have standard vectors or row/column vectors if you like. @@ -123,7 +123,7 @@ There are pros and cons to using both: - ``:)`` Is quite at home handling data of any rank. - ``:)`` Closer in semantics to tensor algebra, if you are familiar with that. - - ``:)`` *All* operations (``*``, ``/``, ``+``, ```` etc.) are + - ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are elementwise - ``matrix`` @@ -159,7 +159,7 @@ which hopefully make things easier for Matlab converts. return ``array``\ s, but the ``matlib`` versions return ``matrix`` objects. - ``mat`` has been changed to be a synonym for ``asmatrix``, rather - than ``matrix``, thus making it concise way to convert an ``array`` + than ``matrix``, thus making it a concise way to convert an ``array`` to a ``matrix`` without copying the data. - Some top-level functions have been removed. For example ``numpy.rand()`` now needs to be accessed as ``numpy.random.rand()``. @@ -252,7 +252,7 @@ Linear Algebra Equivalents * - ``ndims(a)`` - ``ndim(a)`` or ``a.ndim`` - - get the number of dimensions of a (tensor rank) + - get the number of dimensions of ``a`` (tensor rank) * - ``numel(a)`` - ``size(a)`` or ``a.size`` @@ -264,7 +264,7 @@ Linear Algebra Equivalents * - ``size(a,n)`` - ``a.shape[n-1]`` - - get the number of elements of the n-th dimension of array a. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING `) + - get the number of elements of the n-th dimension of array ``a``. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING `) * - ``[ 1 2 3; 4 5 6 ]`` - ``array([[1.,2.,3.], [4.,5.,6.]])`` @@ -273,7 +273,7 @@ Linear Algebra Equivalents * - ``[ a b; c d ]`` - ``vstack([hstack([a,b]), hstack([c,d])])`` or ``bmat('a b; c d').A`` - - construct a matrix from blocks a,b,c, and d + - construct a matrix from blocks ``a``, ``b``, ``c``, and ``d`` * - ``a(end)`` - ``a[-1]`` @@ -345,27 +345,29 @@ Linear Algebra Equivalents * - ``(a>0.5)`` - ``(a>0.5)`` - - matrix whose i,jth element is (a_ij > 0.5) + - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is + an array of 0s and 1s. The NumPy result is an array of the boolean + values ``False`` and ``True``. 
* - ``find(a>0.5)`` - ``nonzero(a>0.5)`` - - find the indices where (a > 0.5) + - find the indices where (``a`` > 0.5) * - ``a(:,find(v>0.5))`` - ``a[:,nonzero(v>0.5)[0]]`` - - extract the columms of a where vector v > 0.5 + - extract the columms of ``a`` where vector v > 0.5 * - ``a(:,find(v>0.5))`` - ``a[:,v.T>0.5]`` - - extract the columms of a where column vector v > 0.5 + - extract the columms of ``a`` where column vector v > 0.5 * - ``a(a<0.5)=0`` - ``a[a<0.5]=0`` - - a with elements less than 0.5 zeroed out + - ``a`` with elements less than 0.5 zeroed out * - ``a .* (a>0.5)`` - ``a * (a>0.5)`` - - a with elements less than 0.5 zeroed out + - ``a`` with elements less than 0.5 zeroed out * - ``a(:) = 3`` - ``a[:] = 3`` @@ -380,7 +382,7 @@ Linear Algebra Equivalents - numpy slices are by reference * - ``y=x(:)`` - - ``y = x.flatten(1)`` + - ``y = x.flatten()`` - turn array into vector (note that this forces a copy) * - ``1:10`` @@ -413,11 +415,11 @@ Linear Algebra Equivalents * - ``diag(a)`` - ``diag(a)`` - - vector of diagonal elements of a + - vector of diagonal elements of ``a`` * - ``diag(a,0)`` - ``diag(a,0)`` - - square diagonal matrix whose nonzero values are the elements of a + - square diagonal matrix whose nonzero values are the elements of ``a`` * - ``rand(3,4)`` - ``random.rand(3,4)`` @@ -445,7 +447,7 @@ Linear Algebra Equivalents * - ``repmat(a, m, n)`` - ``tile(a, (m, n))`` - - create m by n copies of a + - create m by n copies of ``a`` * - ``[a b]`` - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or ``column_stack((a,b))`` or ``c_[a,b]`` @@ -453,27 +455,27 @@ Linear Algebra Equivalents * - ``[a; b]`` - ``concatenate((a,b))`` or ``vstack((a,b))`` or ``r_[a,b]`` - - concatenate rows of a and b + - concatenate rows of ``a`` and ``b`` * - ``max(max(a))`` - ``a.max()`` - - maximum element of a (with ndims(a)<=2 for matlab) + - maximum element of ``a`` (with ndims(a)<=2 for matlab) * - ``max(a)`` - ``a.max(0)`` - - maximum element of each column of matrix a + - maximum element of each column of matrix ``a`` * - ``max(a,[],2)`` - ``a.max(1)`` - - maximum element of each row of matrix a + - maximum element of each row of matrix ``a`` * - ``max(a,b)`` - ``maximum(a, b)`` - - compares a and b element-wise, and returns the maximum value from each pair + - compares ``a`` and ``b`` element-wise, and returns the maximum value from each pair * - ``norm(v)`` - ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)`` - - L2 norm of vector v + - L2 norm of vector ``v`` * - ``a & b`` - ``logical_and(a,b)`` @@ -493,15 +495,15 @@ Linear Algebra Equivalents * - ``inv(a)`` - ``linalg.inv(a)`` - - inverse of square matrix a + - inverse of square matrix ``a`` * - ``pinv(a)`` - ``linalg.pinv(a)`` - - pseudo-inverse of matrix a + - pseudo-inverse of matrix ``a`` * - ``rank(a)`` - ``linalg.matrix_rank(a)`` - - rank of a matrix a + - rank of a matrix ``a`` * - ``a\b`` - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise @@ -513,23 +515,23 @@ Linear Algebra Equivalents * - ``[U,S,V]=svd(a)`` - ``U, S, Vh = linalg.svd(a), V = Vh.T`` - - singular value decomposition of a + - singular value decomposition of ``a`` * - ``chol(a)`` - ``linalg.cholesky(a).T`` - - cholesky factorization of a matrix (chol(a) in matlab returns an upper triangular matrix, but linalg.cholesky(a) returns a lower triangular matrix) + - cholesky factorization of a matrix (``chol(a)`` in matlab returns an upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower triangular matrix) * - ``[V,D]=eig(a)`` - ``D,V = 
linalg.eig(a)`` - - eigenvalues and eigenvectors of a + - eigenvalues and eigenvectors of ``a`` * - ``[V,D]=eig(a,b)`` - ``V,D = np.linalg.eig(a,b)`` - - eigenvalues and eigenvectors of a,b + - eigenvalues and eigenvectors of ``a``, ``b`` * - ``[V,D]=eigs(a,k)`` - - - find the k largest eigenvalues and eigenvectors of a + - find the ``k`` largest eigenvalues and eigenvectors of ``a`` * - ``[Q,R,P]=qr(a,0)`` - ``Q,R = scipy.linalg.qr(a)`` @@ -545,11 +547,11 @@ Linear Algebra Equivalents * - ``fft(a)`` - ``fft(a)`` - - Fourier transform of a + - Fourier transform of ``a`` * - ``ifft(a)`` - ``ifft(a)`` - - inverse Fourier transform of a + - inverse Fourier transform of ``a`` * - ``sort(a)`` - ``sort(a)`` or ``a.sort()`` From 90a8ea3338ae076223485def2fbc5527cadcf84a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 31 Oct 2015 11:02:49 -0600 Subject: [PATCH 150/496] DOC: Update 1.11.0 release notes with np.lib.split changes. Note that the FutureWarning has been removed and that empty arrays resulting from np.lib.split will preserve dimensions instead of always having shape (0,). --- doc/release/1.11.0-notes.rst | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 68ee370ee148..f8d3d4dbfe31 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -8,20 +8,22 @@ Highlights ========== -Dropped Support: +Dropped Support +=============== * Bento build support and related files have been removed. * Single file build support and related files have been removed. -Future Changes: +Future Changes +============== Compatibility notes =================== -Deprecated to error -~~~~~~~~~~~~~~~~~~~ +DeprecationWarning to error +~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Indexing with floats raises IndexError, e.g., a[0, 0.0]. @@ -34,6 +36,15 @@ Deprecated to error * Non-integers used as index values raise TypeError, e.g., in reshape, take, and specifying reduce axis. +FutureWarning to changed behavior +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* In ``np.lib.split`` an empty array in the result always had dimension + ``(0,)`` no matter the dimensions of the array being split. This + has been changed so that the dimensions will be preserved. A + ``FutureWarning`` for this change has been in place since Numpy 1.9 but, + due to a bug, sometimes no warning was raised and the dimensions were + already preserved. C API ~~~~~ From f2bc8bac509623723ae0be0b008b03b6cb602d81 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 30 Oct 2015 12:55:57 -0600 Subject: [PATCH 151/496] DOC: Update 1.10.2 release notes. [ci skip] --- doc/release/1.10.2-notes.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index 9a694435137c..8a2a827d7b89 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -51,7 +51,8 @@ The following PRs in master have been backported to 1.10.2 The following PR reverted initial work for mingwpy. -* gh-6536 BUG: revert gh-5614 to fix non-windows build problems +* gh-6536 BUG: Revert gh-5614 to fix non-windows build problems +* gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays. Notes ===== From b9ef15c4ece1e74d315289584fe33e98bd0c5633 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 31 Oct 2015 11:52:09 -0600 Subject: [PATCH 152/496] BUG: Fix use of PyArray_ISFORTRAN in numpy.i. 
PyArray_ISFORTRAN was used to implement array_is_fortran in numpy.i when
what was wanted was PyArray_IS_F_CONTIGUOUS. The difference is that
PyArray_ISFORTRAN will return False if the array is c_contiguous. Prior
to relaxed stride checking this did not matter, but currently arrays
with ndim > 1 may be both C and Fortran contiguous, and that results in
errors when PyArray_ISFORTRAN is mistakenly used to check for Fortran
contiguity.
---
 tools/swig/numpy.i | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index 2ddc11de7a40..11fcd42fe0b1 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -96,7 +96,7 @@
 %#endif
 %#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
 %#define array_is_native(a)     (PyArray_ISNOTSWAPPED((PyArrayObject*)a))
-%#define array_is_fortran(a)    (PyArray_ISFORTRAN((PyArrayObject*)a))
+%#define array_is_fortran(a)    (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a))
 }
 
 /**********************************************************************/

From d07e84c499aaaa04b0723bf80e7c41aba7b5d51c Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sat, 31 Oct 2015 12:21:14 -0600
Subject: [PATCH 153/496] DOC: Update documentation for isfortran.

Clarify that isfortran checks if an array is both Fortran contiguous
and *not* C contiguous. The current documentation suggests that it only
checks if the array is F contiguous, but that is not the case.
---
 numpy/core/numeric.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 5c0e272398ee..4350e123f2f1 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -696,8 +696,12 @@ def require(a, dtype=None, requirements=None):
 
 def isfortran(a):
     """
-    Returns True if array is arranged in Fortran-order in memory
-    and not C-order.
+    Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+    This function is obsolete and, because of changes due to relaxed stride
+    checking, its return value for the same array may differ for versions
+    of Numpy >= 1.10 and previous versions. If you only want to check if an
+    array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
 
     Parameters
     ----------
From 34051eb5427869a62bee6809a8d8c89ba52a7600 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sun, 1 Nov 2015 17:46:27 -0700
Subject: [PATCH 154/496] DOC: Update 1.10.2 release notes to mention fix to numpy.i

---
 doc/release/1.10.2-notes.rst | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index 8a2a827d7b89..930e55ea7d67 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -6,9 +6,22 @@ adds various build and release improvements.
 
 Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.
 
+
+Compatibility notes
+===================
+
+fix swig bug in ``numpy.i``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
+using PyArray_ISFORTRAN to check for Fortran contiguity instead of
+PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
+updated numpy.i
+
+
 Issues Fixed
 ============
 
+* gh-6590 Fortran Array problem in numpy 1.10.
 * gh-6563 Intent(out) broken in recent versions of f2py.
 * gh-6530 The partition function errors out on empty input.
 * gh-6498 Mention change in default casting rule in 1.10 release notes.
@@ -47,11 +60,16 @@ The following PRs in master have been backported to 1.10.2 * gh-6562 BUG: Disable view safety checks in recarray. * gh-6567 BUG: Revert some import * fixes in f2py. * gh-6577 BUG: Fix for #6569, allowing build_ext --inplace -* gh-6579 MAINT: Fix mistake in doc upload rule +* gh-6579 MAINT: Fix mistake in doc upload rule. +* gh-6596 BUG: Fix swig for relaxed stride checking. The following PR reverted initial work for mingwpy. * gh-6536 BUG: Revert gh-5614 to fix non-windows build problems + +And the this PR reverted a fix for np.lib.split that undid some behavior +that will be standard in 1.11. + * gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays. Notes From 6056be2e00dd7fa7543fb9d11901be095ac28e82 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 1 Nov 2015 18:53:24 -0700 Subject: [PATCH 155/496] DOC: Sync 1.10.2 release notes in master with 1.10.x. [ci skip] --- doc/release/1.10.2-notes.rst | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index 930e55ea7d67..e008012c1b28 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -21,15 +21,16 @@ updated numpy.i Issues Fixed ============ -* gh-6590 Fortran Array problem in numpy 1.10. -* gh-6563 Intent(out) broken in recent versions of f2py. -* gh-6530 The partition function errors out on empty input. -* gh-6498 Mention change in default casting rule in 1.10 release notes. -* gh-6497 Failure of reduce operation on recarrays. -* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran. -* gh-6491 Error in broadcasting stride_tricks array. -* gh-6467 Performance regression for record array access. * gh-6462 Median of empty array produces IndexError. +* gh-6467 Performance regression for record array access. +* gh-6491 Error in broadcasting stride_tricks array. +* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran. +* gh-6497 Failure of reduce operation on recarrays. +* gh-6498 Mention change in default casting rule in 1.10 release notes. +* gh-6530 The partition function errors out on empty input. +* gh-6563 Intent(out) broken in recent versions of f2py. +* gh-6575 BUG: Split produces empty arrays with wrong number of dimensions +* gh-6590 Fortran Array problem in numpy 1.10. Merged PRs ========== @@ -63,12 +64,14 @@ The following PRs in master have been backported to 1.10.2 * gh-6579 MAINT: Fix mistake in doc upload rule. * gh-6596 BUG: Fix swig for relaxed stride checking. -The following PR reverted initial work for mingwpy. +Initial support for mingwpy was reverted as it was causing problems for +non-windows builds. * gh-6536 BUG: Revert gh-5614 to fix non-windows build problems -And the this PR reverted a fix for np.lib.split that undid some behavior -that will be standard in 1.11. +A fix for np.lib.split was reverted because it resulted in "fixing" +behavior will be present in the Numpy 1.11 and was already present in +Numpy 1.9. See the discussion of the issue at gh-6575 for clarification. * gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays. From 6663d40f051bc64802dea09f66fd2a016c9c87e9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 1 Nov 2015 19:16:10 -0700 Subject: [PATCH 156/496] DOC: Update 1.10.2 release notes. 
[ci skip] --- doc/release/1.10.2-notes.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index e008012c1b28..d508e7bc2f5f 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -60,9 +60,11 @@ The following PRs in master have been backported to 1.10.2 * gh-6558 MAINT: Minor update to "make upload" doc build command. * gh-6562 BUG: Disable view safety checks in recarray. * gh-6567 BUG: Revert some import * fixes in f2py. +* gh-6574 DOC: Release notes for Numpy 1.10.2. * gh-6577 BUG: Fix for #6569, allowing build_ext --inplace * gh-6579 MAINT: Fix mistake in doc upload rule. * gh-6596 BUG: Fix swig for relaxed stride checking. +* gh-6606 DOC: Update 1.10.2 release notes. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. From 04e40fc5c21335f296009f40f4599b66dc099e6d Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 2 Nov 2015 13:25:28 +0000 Subject: [PATCH 157/496] MAINT: random: allow nonc==0 in noncentral_chisquare. Noncentral chi-square reduces to a central chi-square, so just defer to that. --- numpy/random/mtrand/distributions.c | 3 +++ numpy/random/mtrand/mtrand.pyx | 8 ++++---- numpy/random/tests/test_random.py | 7 +++++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c index f5ee6d8c1960..39004178da06 100644 --- a/numpy/random/mtrand/distributions.c +++ b/numpy/random/mtrand/distributions.c @@ -231,6 +231,9 @@ double rk_chisquare(rk_state *state, double df) double rk_noncentral_chisquare(rk_state *state, double df, double nonc) { + if (nonc == 0){ + return rk_chisquare(state, df); + } if(1 < df) { const double Chi2 = rk_chisquare(state, df - 1); diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 97ea9506e2e8..f8ae8d71be19 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -2219,7 +2219,7 @@ cdef class RandomState: Degrees of freedom, should be > 0 as of Numpy 1.10, should be > 1 for earlier versions. nonc : float - Non-centrality, should be > 0. + Non-centrality, should be non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. 
Default is None, in which case a @@ -2285,8 +2285,8 @@ cdef class RandomState: if not PyErr_Occurred(): if fdf <= 0: raise ValueError("df <= 0") - if fnonc <= 0: - raise ValueError("nonc <= 0") + if fnonc < 0: + raise ValueError("nonc < 0") return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, size, fdf, fnonc, self.lock) @@ -2296,7 +2296,7 @@ cdef class RandomState: ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(odf, 0.0)): raise ValueError("df <= 0") - if np.any(np.less_equal(ononc, 0.0)): + if np.any(np.less(ononc, 0.0)): raise ValueError("nonc < 0") return cont2_array(self.internal_state, rk_noncentral_chisquare, size, odf, ononc, self.lock) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 596c218a2eb4..ab7f90d82c54 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -504,6 +504,13 @@ def test_noncentral_chisquare(self): [ 0.332334982684171 , 0.15451287602753125]]) np.testing.assert_array_almost_equal(actual, desired, decimal=14) + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + np.testing.assert_array_almost_equal(actual, desired, decimal=14) + def test_noncentral_f(self): np.random.seed(self.seed) actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, From 6a09c85cae64417dec7d2e757bfe034673acd23d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 2 Nov 2015 09:44:55 -0700 Subject: [PATCH 158/496] BUG: Add choice and dirichlet to numpy.random.__all__. Closes #6602. --- doc/release/1.10.2-notes.rst | 2 ++ numpy/random/info.py | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index d508e7bc2f5f..efd57d9278ec 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -31,6 +31,7 @@ Issues Fixed * gh-6563 Intent(out) broken in recent versions of f2py. * gh-6575 BUG: Split produces empty arrays with wrong number of dimensions * gh-6590 Fortran Array problem in numpy 1.10. +* gh-6602 Random __all__ missing choice and dirichlet. Merged PRs ========== @@ -65,6 +66,7 @@ The following PRs in master have been backported to 1.10.2 * gh-6579 MAINT: Fix mistake in doc upload rule. * gh-6596 BUG: Fix swig for relaxed stride checking. * gh-6606 DOC: Update 1.10.2 release notes. +* gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. diff --git a/numpy/random/info.py b/numpy/random/info.py index 396e623815a8..be9c8d9bd286 100644 --- a/numpy/random/info.py +++ b/numpy/random/info.py @@ -13,6 +13,8 @@ permutation Randomly permute a sequence / generate a random sequence. shuffle Randomly permute a sequence in place. seed Seed the random number generator. +choice Random sample from 1-D array. + ==================== ========================================================= ==================== ========================================================= @@ -91,6 +93,8 @@ 'binomial', 'bytes', 'chisquare', + 'choice', + 'dirichlet', 'exponential', 'f', 'gamma', From cbfd9370ee273ef8f7694d1c689649e98d8ee08b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 3 Nov 2015 09:15:12 -0700 Subject: [PATCH 159/496] BUG: Fix swig make_fortran function. 
The function was calling PyArray_FromArray with NPY_FORTRANORDER
instead of NPY_ARRAY_F_CONTIGUOUS. The first is of type NPY_ORDER and
the second is a flag.

Closes #6618.

[ci skip]
---
 doc/release/1.10.2-notes.rst | 2 ++
 tools/swig/numpy.i           | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index efd57d9278ec..31a793420036 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -32,6 +32,7 @@ Issues Fixed
 * gh-6575 BUG: Split produces empty arrays with wrong number of dimensions
 * gh-6590 Fortran Array problem in numpy 1.10.
 * gh-6602 Random __all__ missing choice and dirichlet.
+* gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i
 
 Merged PRs
 ==========
@@ -67,6 +68,7 @@ The following PRs in master have been backported to 1.10.2
 * gh-6596 BUG: Fix swig for relaxed stride checking.
 * gh-6606 DOC: Update 1.10.2 release notes.
 * gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__.
+* gh-6621 BUG: Fix swig make_fortran function.
 
 Initial support for mingwpy was reverted as it was causing problems for
 non-windows builds.
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index 11fcd42fe0b1..67a519e6d038 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -295,7 +295,7 @@
       Py_INCREF(array_descr(ary));
       result = (PyArrayObject*) PyArray_FromArray(ary,
                                                   array_descr(ary),
-                                                  NPY_FORTRANORDER);
+                                                  NPY_ARRAY_F_CONTIGUOUS);
       *is_new_object = 1;
     }
   return result;
From 8028a7715da5d6c9b2c5586a3055fdb0ba81dab7 Mon Sep 17 00:00:00 2001
From: Julien Lhermitte
Date: Wed, 4 Nov 2015 17:35:42 -0500
Subject: [PATCH 160/496] added extra line in the tile help doc to outline a general repeat, commonly used

---
 numpy/lib/shape_base.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 615cf88f4660..c2b06775ab28 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -837,6 +837,12 @@ def tile(A, reps):
            [1, 2],
            [3, 4]])
 
+    >>> c = np.array([1,2,3,4])
+    >>> np.tile(c,(4,1))
+    array([[1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4]])
     """
     try:
         tup = tuple(reps)
From 7036842f828fa41f95c333812bccccb3ae469f91 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 4 Nov 2015 15:37:22 -0700
Subject: [PATCH 161/496] BUG: Make allclose return python bool.

The function was returning an ndarray subtype when at least one of the
arguments was a subtype.

Closes #6475.
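The added test below exercises this with a minimal ndarray subclass;
interactively the fixed behavior looks like this (a sketch mirroring
that test):

    >>> import numpy as np
    >>> class Foo(np.ndarray):
    ...     pass
    >>> a = np.array([1.0, 2.0]).view(Foo)
    >>> isinstance(np.allclose(a, a), bool)
    True

Previously the result came back wrapped in the subclass (a ``memmap``
in the gh-6475 report), which is surprising for a yes/no predicate.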
--- numpy/core/numeric.py | 3 ++- numpy/core/tests/test_numeric.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 4350e123f2f1..2ece2ce8dc04 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2278,7 +2278,8 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): True """ - return all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return bool(res) def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 2fa8593b9b5a..f5c22392a414 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1471,6 +1471,16 @@ def test_equalnan(self): x = np.array([1.0, np.nan]) assert_(np.allclose(x, x, equal_nan=True)) + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + class TestIsclose(object): rtol = 1e-5 From a2435ae57ae21fd547ba7f2114301094680f6cee Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 4 Nov 2015 15:46:53 -0700 Subject: [PATCH 162/496] DOC: Update 1.10.2 release notes for gh-6475 fix. --- doc/release/1.10.2-notes.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index 31a793420036..4a62be2daeca 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -33,6 +33,7 @@ Issues Fixed * gh-6590 Fortran Array problem in numpy 1.10. * gh-6602 Random __all__ missing choice and dirichlet. * gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i +* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap. Merged PRs ========== @@ -69,6 +70,7 @@ The following PRs in master have been backported to 1.10.2 * gh-6606 DOC: Update 1.10.2 release notes. * gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__. * gh-6621 BUG: Fix swig make_fortran function. +* gh-6628 BUG: Make allclose return python bool. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. From 4ac1740eaefa89b495171d8a5862ae2d1cacc5da Mon Sep 17 00:00:00 2001 From: Julien Lhermitte Date: Thu, 5 Nov 2015 22:42:51 -0500 Subject: [PATCH 163/496] added more text --- numpy/lib/shape_base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index c2b06775ab28..ffbe56721660 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -799,6 +799,9 @@ def tile(A, reps): Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2). + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + Parameters ---------- A : array_like @@ -814,6 +817,7 @@ def tile(A, reps): See Also -------- repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape Examples -------- From c06726daa4d57893bc6132c3f311bd14ec6dc110 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 6 Nov 2015 12:32:39 -0500 Subject: [PATCH 164/496] ENH: make recarray.getitem return a recarray recarray.__getitem__ should return a recarray when the returned value had structured type (it's documented to do so). 
Fixes #6641 --- numpy/core/records.py | 1 + numpy/core/tests/test_records.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/numpy/core/records.py b/numpy/core/records.py index 4ce3fe98a273..b0775538478b 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -502,6 +502,7 @@ def __getitem__(self, indx): # we might also be returning a single element if isinstance(obj, ndarray): if obj.dtype.fields: + obj = obj.view(recarray) if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 290bc4fa7fe1..e0f0a3a8ff51 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -121,6 +121,14 @@ def test_recarray_views(self): assert_equal(type(rv), np.recarray) assert_equal(rv.dtype.type, np.record) + #check that getitem also preserves np.recarray and np.record + r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), + ('c', 'i4,i4')])) + assert_equal(r['c'].dtype.type, np.record) + assert_equal(type(r['c']), np.recarray) + assert_equal(r[['a', 'b']].dtype.type, np.record) + assert_equal(type(r[['a', 'b']]), np.recarray) + # check that accessing nested structures keep record type, but # not for subarrays, non-void structures, non-structured voids test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)), From 81e50a3416061726ccdef6033652a282ac4f01c4 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Fri, 6 Nov 2015 12:04:15 -0500 Subject: [PATCH 165/496] BUG: Fix memleak in _convert_from_dict Fixes a memleak introduced in #5920, where PyDict_GetItemString was replaced by PyMapping_GetItemString which returns a new ref. Fixes #6636 --- numpy/core/src/multiarray/descriptor.c | 31 +++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index 05397228ef09..83cd64bdca79 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -36,6 +36,19 @@ static PyObject *typeDict = NULL; /* Must be explicitly loaded */ static PyArray_Descr * _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag); + +/* + * Returns value of PyMapping_GetItemString but as a borrowed reference instead + * of a new reference. + */ +static PyObject * +Borrowed_PyMapping_GetItemString(PyObject *o, char *key) +{ + PyObject *ret = PyMapping_GetItemString(o, key); + Py_XDECREF(ret); + return ret; +} + /* * Creates a dtype object from ctypes inputs. * @@ -952,17 +965,19 @@ _convert_from_dict(PyObject *obj, int align) if (fields == NULL) { return (PyArray_Descr *)PyErr_NoMemory(); } - /* Use PyMapping_GetItemString to support dictproxy objects as well */ - names = PyMapping_GetItemString(obj, "names"); - descrs = PyMapping_GetItemString(obj, "formats"); + /* + * Use PyMapping_GetItemString to support dictproxy objects as well. 
+ */ + names = Borrowed_PyMapping_GetItemString(obj, "names"); + descrs = Borrowed_PyMapping_GetItemString(obj, "formats"); if (!names || !descrs) { Py_DECREF(fields); PyErr_Clear(); return _use_fields_dict(obj, align); } n = PyObject_Length(names); - offsets = PyMapping_GetItemString(obj, "offsets"); - titles = PyMapping_GetItemString(obj, "titles"); + offsets = Borrowed_PyMapping_GetItemString(obj, "offsets"); + titles = Borrowed_PyMapping_GetItemString(obj, "titles"); if (!offsets || !titles) { PyErr_Clear(); } @@ -980,7 +995,7 @@ _convert_from_dict(PyObject *obj, int align) * If a property 'aligned' is in the dict, it overrides the align flag * to be True if it not already true. */ - tmp = PyMapping_GetItemString(obj, "aligned"); + tmp = Borrowed_PyMapping_GetItemString(obj, "aligned"); if (tmp == NULL) { PyErr_Clear(); } else { @@ -1154,7 +1169,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Override the itemsize if provided */ - tmp = PyMapping_GetItemString(obj, "itemsize"); + tmp = Borrowed_PyMapping_GetItemString(obj, "itemsize"); if (tmp == NULL) { PyErr_Clear(); } else { @@ -1186,7 +1201,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Add the metadata if provided */ - metadata = PyMapping_GetItemString(obj, "metadata"); + metadata = Borrowed_PyMapping_GetItemString(obj, "metadata"); if (metadata == NULL) { PyErr_Clear(); From bb0d990e88a2120df2150e097dc17795474591e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20L=C3=A9one?= Date: Sat, 7 Nov 2015 11:53:08 +0100 Subject: [PATCH 166/496] MAINT: list litteral --- numpy/_build_utils/waf.py | 5 +---- numpy/distutils/mingw32ccompiler.py | 4 +--- numpy/distutils/npy_pkg_config.py | 3 +-- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/numpy/_build_utils/waf.py b/numpy/_build_utils/waf.py index 263640d9eef1..f1b6990bbabc 100644 --- a/numpy/_build_utils/waf.py +++ b/numpy/_build_utils/waf.py @@ -268,10 +268,7 @@ def check_type_size(conf, type_name, expected_sizes=None, **kw): @waflib.Configure.conf def check_functions_at_once(self, funcs, **kw): - header = [] - header = ['#ifdef __cplusplus'] - header.append('extern "C" {') - header.append('#endif') + header = ['#ifdef __cplusplus', 'extern "C" {', '#endif'] for f in funcs: header.append("\tchar %s();" % f) # Handle MSVC intrinsics: force MS compiler to make a function diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index d22a2818eb14..111653a8294d 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -244,9 +244,7 @@ def find_python_dll(): # - find it in python main dir # - in system32, # - ortherwise (Sxs), I don't know how to get it. 
- lib_dirs = [] - lib_dirs.append(sys.prefix) - lib_dirs.append(os.path.join(sys.prefix, 'lib')) + lib_dirs = [sys.prefix, os.path.join(sys.prefix, 'lib')] try: lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) except KeyError: diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py index 6156439e1f26..1c801fd9c069 100644 --- a/numpy/distutils/npy_pkg_config.py +++ b/numpy/distutils/npy_pkg_config.py @@ -141,8 +141,7 @@ def libs(self, section="default"): return _escape_backslash(val) def __str__(self): - m = ['Name: %s' % self.name] - m.append('Description: %s' % self.description) + m = ['Name: %s' % self.name, 'Description: %s' % self.description] if self.requires: m.append('Requires:') else: From a572f1b3e6e3ab4469e714098d9cea57361d52b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20L=C3=A9one?= Date: Sat, 7 Nov 2015 11:50:12 +0100 Subject: [PATCH 167/496] MAINT: Remove useless semicolon --- numpy/core/code_generators/generate_umath.py | 2 +- numpy/linalg/linalg.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index e3c4fbaa2149..b3d8f43ae00b 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -1006,7 +1006,7 @@ def make_code(funcdict, filename): %s } """ % (filename, code1, code2, code3) - return code; + return code if __name__ == "__main__": diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index f5cb3cb77c47..2e969727b93c 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1622,7 +1622,7 @@ def pinv(a, rcond=1e-15 ): if s[i] > cutoff: s[i] = 1./s[i] else: - s[i] = 0.; + s[i] = 0. res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u))) return wrap(res) From def49ba8fa3fe252c26c210d7730ed5c3a42c60e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20L=C3=A9one?= Date: Sat, 7 Nov 2015 12:00:36 +0100 Subject: [PATCH 168/496] MAINT: Dictionary litteral --- numpy/core/setup.py | 15 ++++----------- numpy/core/tests/test_item_selection.py | 7 +++---- numpy/core/tests/test_umath.py | 18 ++++++++---------- numpy/distutils/system_info.py | 16 +++++++--------- numpy/distutils/tests/test_system_info.py | 17 ++++++++--------- numpy/f2py/capi_maps.py | 6 ++---- numpy/lib/format.py | 3 +-- 7 files changed, 33 insertions(+), 49 deletions(-) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index aa9e03e0607d..2e9e277af01c 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -257,17 +257,10 @@ def check_types(config_cmd, ext, build_dir): # Expected size (in number of bytes) for each type. This is an # optimization: those are only hints, and an exhaustive search for the size # is done if the hints are wrong. 
- expected = {} - expected['short'] = [2] - expected['int'] = [4] - expected['long'] = [8, 4] - expected['float'] = [4] - expected['double'] = [8] - expected['long double'] = [16, 12, 8] - expected['Py_intptr_t'] = [8, 4] - expected['PY_LONG_LONG'] = [8] - expected['long long'] = [8] - expected['off_t'] = [8, 4] + expected = {'short': [2], 'int': [4], 'long': [8, 4], + 'float': [4], 'double': [8], 'long double': [16, 12, 8], + 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8], + 'off_t': [8, 4]} # Check we have the python header (-dev* packages on Linux) result = config_cmd.check_header('Python.h') diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py index 5e9cadd8f3cb..ddce20fe9a9f 100644 --- a/numpy/core/tests/test_item_selection.py +++ b/numpy/core/tests/test_item_selection.py @@ -18,10 +18,9 @@ def test_simple(self): index_arrays = [np.empty(0, dtype=np.intp), np.empty(tuple(), dtype=np.intp), np.empty((1, 1), dtype=np.intp)] - real_indices = {} - real_indices['raise'] = {-1:1, 4:IndexError} - real_indices['wrap'] = {-1:1, 4:0} - real_indices['clip'] = {-1:0, 4:1} + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} # Currently all types but object, use the same function generation. # So it should not be necessary to test all. However test also a non # refcounted struct on top of object. diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index ebf8e0380f28..541ad974b367 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1836,16 +1836,14 @@ def test_spacing_gfortran(): # WRITE(*,*) spacing(1000._SGL) # WRITE(*,*) spacing(10500._SGL) # END PROGRAM - ref = {} - ref[np.float64] = [1.69406589450860068E-021, - 2.22044604925031308E-016, - 1.13686837721616030E-013, - 1.81898940354585648E-012] - ref[np.float32] = [ - 9.09494702E-13, - 1.19209290E-07, - 6.10351563E-05, - 9.76562500E-04] + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} for dt, dec_ in zip([np.float32, np.float64], (10, 20)): x = np.array([1e-5, 1, 1000, 10500], dtype=dt) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 0da13a7df2b9..a0c6f44f79a8 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -470,15 +470,13 @@ def __init__(self, ): self.__class__.info = {} self.local_prefixes = [] - defaults = {} - defaults['library_dirs'] = os.pathsep.join(default_lib_dirs) - defaults['include_dirs'] = os.pathsep.join(default_include_dirs) - defaults['runtime_library_dirs'] = os.pathsep.join(default_runtime_dirs) - defaults['rpath'] = '' - defaults['src_dirs'] = os.pathsep.join(default_src_dirs) - defaults['search_static_first'] = str(self.search_static_first) - defaults['extra_compile_args'] = '' - defaults['extra_link_args'] = '' + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} self.cp = ConfigParser(defaults) self.files = [] self.files.extend(get_standard_file('.numpy-site.cfg')) diff --git a/numpy/distutils/tests/test_system_info.py 
b/numpy/distutils/tests/test_system_info.py index f9d45319e35b..58ad05a593bd 100644 --- a/numpy/distutils/tests/test_system_info.py +++ b/numpy/distutils/tests/test_system_info.py @@ -64,15 +64,14 @@ def __init__(self, ): self.__class__.info = {} self.local_prefixes = [] - defaults = {} - defaults['library_dirs'] = '' - defaults['include_dirs'] = '' - defaults['runtime_library_dirs'] = '' - defaults['rpath'] = '' - defaults['src_dirs'] = '' - defaults['search_static_first'] = "0" - defaults['extra_compile_args'] = '' - defaults['extra_link_args'] = '' + defaults = {'library_dirs': '', + 'include_dirs': '', + 'runtime_library_dirs': '', + 'rpath': '', + 'src_dirs': '', + 'search_static_first': "0", + 'extra_compile_args': '', + 'extra_link_args': ''} self.cp = ConfigParser(defaults) # We have to parse the config files afterwards # to have a consistent temporary filepath diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 6e5293cc8599..5270cabb5be5 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -519,8 +519,7 @@ def sign2map(a, var): if k[:4] == 'out=': out_a = k[4:] break - ret = {'varname': a, 'outvarname': out_a} - ret['ctype'] = getctype(var) + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} intent_flags = [] for f, s in isintent_dict.items(): if f(var): @@ -823,8 +822,7 @@ def cb_routsign2map(rout, um): def common_sign2map(a, var): # obsolute - ret = {'varname': a} - ret['ctype'] = getctype(var) + ret = {'varname': a, 'ctype': getctype(var)} if isstringarray(var): ret['ctype'] = 'char' if ret['ctype'] in c2capi_map: diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 66a1b356c1c7..a0f2c5497586 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -265,8 +265,7 @@ def header_data_from_array_1_0(array): This has the appropriate entries for writing its string representation to the header of the file. """ - d = {} - d['shape'] = array.shape + d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: From 01308287f48dce9d92e613a59d0bcafa1aed8234 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 7 Nov 2015 12:20:43 +0100 Subject: [PATCH 169/496] MAINT: remove _build_utils/waf.py. This was used by Bento, and support for Bento was removed in gh-6268. 
--- numpy/_build_utils/waf.py | 528 -------------------------------------- 1 file changed, 528 deletions(-) delete mode 100644 numpy/_build_utils/waf.py diff --git a/numpy/_build_utils/waf.py b/numpy/_build_utils/waf.py deleted file mode 100644 index f1b6990bbabc..000000000000 --- a/numpy/_build_utils/waf.py +++ /dev/null @@ -1,528 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import os -import re - -import waflib.Configure -import waflib.Tools.c_config -from waflib import Logs, Utils - -from .common \ - import \ - LONG_DOUBLE_REPRESENTATION_SRC, pyod, \ - long_double_representation - -DEFKEYS = waflib.Tools.c_config.DEFKEYS -DEFINE_COMMENTS = "define_commentz" - -def to_header(dct): - if 'header_name' in dct: - dct = Utils.to_list(dct['header_name']) - return ''.join(['#include <%s>\n' % x for x in dct]) - return '' - -# Make the given string safe to be used as a CPP macro -def sanitize_string(s): - key_up = s.upper() - return re.sub('[^A-Z0-9_]', '_', key_up) - -def validate_arguments(self, kw): - if not 'env' in kw: - kw['env'] = self.env.derive() - if not "compile_mode" in kw: - kw["compile_mode"] = "c" - if not 'compile_filename' in kw: - kw['compile_filename'] = 'test.c' + \ - ((kw['compile_mode'] == 'cxx') and 'pp' or '') - if not 'features' in kw: - kw['features'] = [kw['compile_mode']] - if not 'execute' in kw: - kw['execute'] = False - if not 'okmsg' in kw: - kw['okmsg'] = 'yes' - if not 'errmsg' in kw: - kw['errmsg'] = 'no !' - - if 'define_name' in kw: - comment = kw.get('define_comment', None) - self.undefine_with_comment(kw['define_name'], comment) - -def try_compile(self, kw): - self.start_msg(kw["msg"]) - ret = None - try: - ret = self.run_c_code(**kw) - except self.errors.ConfigurationError as e: - self.end_msg(kw['errmsg'], 'YELLOW') - if Logs.verbose > 1: - raise - else: - self.fatal('The configuration failed') - else: - kw['success'] = ret - self.end_msg(self.ret_msg(kw['okmsg'], kw)) - -@waflib.Configure.conf -def check_header(self, header_name, **kw): - code = """ -%s - -int main() -{ -} -""" % to_header({"header_name": header_name}) - - kw["code"] = code - kw["define_comment"] = "/* Define to 1 if you have the <%s> header file. */" % header_name - kw["define_name"] = "HAVE_%s" % sanitize_string(header_name) - if not "features" in kw: - kw["features"] = ["c"] - kw["msg"] = "Checking for header %r" % header_name - - validate_arguments(self, kw) - try_compile(self, kw) - ret = kw["success"] - if ret == 0: - kw["define_value"] = 1 - else: - kw["define_value"] = 0 - - self.post_check(**kw) - if not kw.get('execute', False): - return ret == 0 - return ret - -@waflib.Configure.conf -def check_declaration(self, symbol, **kw): - code = r""" -int main() -{ -#ifndef %s - (void) %s; -#endif - ; - return 0; -} -""" % (symbol, symbol) - - kw["code"] = to_header(kw) + code - kw["msg"] = "Checking for macro %r" % symbol - kw["errmsg"] = "not found" - kw["okmsg"] = "yes" - - validate_arguments(self, kw) - try_compile(self, kw) - ret = kw["success"] - - kw["define_name"] = "HAVE_DECL_%s" % sanitize_string(symbol) - kw["define_comment"] = "/* Set to 1 if %s is defined. 
*/" % symbol - self.post_check(**kw) - if not kw.get('execute', False): - return ret == 0 - return ret - -@waflib.Configure.conf -def check_type(self, type_name, **kw): - code = r""" -int main() { - if ((%(type_name)s *) 0) - return 0; - if (sizeof (%(type_name)s)) - return 0; -} -""" % {"type_name": type_name} - - kw["code"] = to_header(kw) + code - kw["msg"] = "Checking for type %r" % type_name - kw["errmsg"] = "not found" - kw["okmsg"] = "yes" - - validate_arguments(self, kw) - try_compile(self, kw) - ret = kw["success"] - if ret == 0: - kw["define_value"] = 1 - else: - kw["define_value"] = 0 - - kw["define_name"] = "HAVE_%s" % sanitize_string(type_name) - kw["define_comment"] = "/* Define to 1 if the system has the type `%s'. */" % type_name - self.post_check(**kw) - if not kw.get('execute', False): - return ret == 0 - return ret - -def do_binary_search(conf, type_name, kw): - code = """\ -typedef %(type)s waf_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; -} -""" % {"type": type_name} - kw["code"] = to_header(kw) + code - - try: - conf.run_c_code(**kw) - except conf.errors.ConfigurationError as e: - conf.end_msg("failed !") - if waflib.Logs.verbose > 1: - raise - else: - conf.fatal("The configuration failed !") - - body = r""" -typedef %(type)s waf_check_sizeof_type; -int main () -{ - static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; -} -""" - # The principle is simple: we first find low and high bounds - # of size for the type, where low/high are looked up on a log - # scale. Then, we do a binary search to find the exact size - # between low and high - low = 0 - mid = 0 - while True: - try: - kw["code"] = to_header(kw) + body % {"type": type_name, "size": mid} - validate_arguments(conf, kw) - conf.run_c_code(**kw) - break - except conf.errors.ConfigurationError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - ret = None - # Binary search: - while low != high: - mid = (high - low) / 2 + low - try: - kw["code"] = to_header(kw) + body % {"type": type_name, "size": mid} - validate_arguments(conf, kw) - ret = conf.run_c_code(**kw) - high = mid - except conf.errors.ConfigurationError: - low = mid + 1 - - return low - -@waflib.Configure.conf -def check_type_size(conf, type_name, expected_sizes=None, **kw): - kw["define_name"] = "SIZEOF_%s" % sanitize_string(type_name) - kw["define_comment"] = "/* The size of `%s', as computed by sizeof. 
*/" % type_name - kw["msg"] = "Checking sizeof(%s)" % type_name - - validate_arguments(conf, kw) - conf.start_msg(kw["msg"]) - - if expected_sizes is not None: - try: - val = int(expected_sizes) - except TypeError: - values = expected_sizes - else: - values = [val] - - size = None - for value in values: - code = """\ - typedef %(type)s waf_check_sizeof_type; - int main () - { - static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) == %(size)d)]; - test_array [0] = 0 - - ; - return 0; - } - """ % {"type": type_name, "size": value} - kw["code"] = to_header(kw) + code - try: - conf.run_c_code(**kw) - size = value - break - except conf.errors.ConfigurationError: - pass - if size is None: - size = do_binary_search(conf, type_name, kw) - else: - size = do_binary_search(conf, type_name, kw) - - kw["define_value"] = size - kw["success"] = 0 - conf.end_msg(size) - conf.post_check(**kw) - return size - -@waflib.Configure.conf -def check_functions_at_once(self, funcs, **kw): - header = ['#ifdef __cplusplus', 'extern "C" {', '#endif'] - for f in funcs: - header.append("\tchar %s();" % f) - # Handle MSVC intrinsics: force MS compiler to make a function - # call. Useful to test for some functions when built with - # optimization on, to avoid build error because the intrinsic - # and our 'fake' test declaration do not match. - header.append("#ifdef _MSC_VER") - header.append("#pragma function(%s)" % f) - header.append("#endif") - header.append('#ifdef __cplusplus') - header.append('};') - header.append('#endif') - funcs_decl = "\n".join(header) - - tmp = [] - for f in funcs: - tmp.append("\t%s();" % f) - tmp = "\n".join(tmp) - - code = r""" -%(include)s -%(funcs_decl)s - -int main (void) -{ - %(tmp)s - return 0; -} -""" % {"tmp": tmp, "include": to_header(kw), "funcs_decl": funcs_decl} - kw["code"] = code - if not "features" in kw: - kw["features"] = ["c", "cprogram"] - - msg = ", ".join(funcs) - if len(msg) > 30: - _funcs = list(funcs) - msg = [] - while len(", ".join(msg)) < 30 and _funcs: - msg.append(_funcs.pop(0)) - msg = ", ".join(msg) + ",..." - if "lib" in kw: - kw["msg"] = "Checking for functions %s in library %r" % (msg, kw["lib"]) - else: - kw["msg"] = "Checking for functions %s" % msg - - validate_arguments(self, kw) - try_compile(self, kw) - ret = kw["success"] - - # We set the config.h define here because we need to define several of them - # in one shot - if ret == 0: - for f in funcs: - self.define_with_comment("HAVE_%s" % sanitize_string(f), 1, - "/* Define to 1 if you have the `%s' function. 
*/" % f) - - self.post_check(**kw) - if not kw.get('execute', False): - return ret == 0 - return ret - -@waflib.Configure.conf -def check_inline(conf, **kw): - validate_arguments(conf, kw) - - code = """ -#ifndef __cplusplus -static %(inline)s int static_func (void) -{ - return 0; -} -%(inline)s int nostatic_func (void) -{ - return 0; -} -#endif""" - - conf.start_msg("Checking for inline support") - inline = None - for k in ['inline', '__inline__', '__inline']: - try: - kw["code"] = code % {"inline": k} - ret = conf.run_c_code(**kw) - inline = k - break - except conf.errors.ConfigurationError: - pass - - if inline is None: - conf.end_msg("failed", 'YELLOW') - if Logs.verbose > 1: - raise - else: - conf.fatal('The configuration failed') - else: - kw['success'] = ret - conf.end_msg(inline) - return inline - -@waflib.Configure.conf -def check_ldouble_representation(conf, **kw): - msg = { - 'INTEL_EXTENDED_12_BYTES_LE': "Intel extended, little endian", - 'INTEL_EXTENDED_16_BYTES_LE': "Intel extended, little endian", - 'IEEE_QUAD_BE': "IEEE Quad precision, big endian", - 'IEEE_QUAD_LE': "IEEE Quad precision, little endian", - 'IEEE_DOUBLE_LE': "IEEE Double precision, little endian", - 'IEEE_DOUBLE_BE': "IEEE Double precision, big endian" - } - - code = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} - validate_arguments(conf, kw) - - conf.start_msg("Checking for long double representation... ") - try: - kw["code"] = code - ret = conf.run_c_code(**kw) - except conf.errors.ConfigurationError as e: - conf.end_msg(kw['errmsg'], 'YELLOW') - if Logs.verbose > 1: - raise - else: - conf.fatal('The configuration failed') - else: - task_gen = conf.test_bld.groups[0][0] - obj_filename = task_gen.tasks[0].outputs[0].abspath() - tp = long_double_representation(pyod(obj_filename)) - kw['success'] = ret - conf.end_msg(msg[tp]) - kw["define_name"] = "HAVE_LDOUBLE_%s" % tp - kw["define_comment"] = "/* Define for arch-specific long double representation */" - ret = kw["success"] - - conf.post_check(**kw) - if not kw.get('execute', False): - return ret == 0 - return ret - -@waflib.Configure.conf -def post_check(self, *k, **kw): - "set the variables after a test was run successfully" - - is_success = False - if kw['execute']: - if kw['success'] is not None: - if kw.get('define_ret', False): - is_success = kw['success'] - else: - is_success = (kw['success'] == 0) - else: - is_success = (kw['success'] == 0) - - def define_or_stuff(): - nm = kw['define_name'] - cmt = kw.get('define_comment', None) - value = kw.get("define_value", is_success) - if kw['execute'] and kw.get('define_ret', None) and isinstance(is_success, str): - self.define_with_comment(kw['define_name'], value, cmt, quote=kw.get('quote', 1)) - else: - self.define_cond(kw['define_name'], value, cmt) - - if 'define_name' in kw: - define_or_stuff() - - if is_success and 'uselib_store' in kw: - from waflib.Tools import ccroot - - # TODO see get_uselib_vars from ccroot.py - _vars = set([]) - for x in kw['features']: - if x in ccroot.USELIB_VARS: - _vars |= ccroot.USELIB_VARS[x] - - for k in _vars: - lk = k.lower() - if k == 'INCLUDES': lk = 'includes' - if k == 'DEFKEYS': lk = 'defines' - if lk in kw: - val = kw[lk] - # remove trailing slash - if isinstance(val, str): - val = val.rstrip(os.path.sep) - self.env.append_unique(k + '_' + kw['uselib_store'], val) - return is_success - -@waflib.Configure.conf -def define_with_comment(conf, define, value, comment=None, quote=True): - if comment is None: - return conf.define(define, value, quote) - - assert 
define and isinstance(define, str) - - comment_tbl = conf.env[DEFINE_COMMENTS] or {} - comment_tbl[define] = comment - conf.env[DEFINE_COMMENTS] = comment_tbl - - return conf.define(define, value, quote) - -@waflib.Configure.conf -def undefine_with_comment(conf, define, comment=None): - if comment is None: - return conf.undefine(define) - - comment_tbl = conf.env[DEFINE_COMMENTS] or {} - comment_tbl[define] = comment - conf.env[DEFINE_COMMENTS] = comment_tbl - - conf.undefine(define) - -@waflib.Configure.conf -def get_comment(self, key): - assert key and isinstance(key, str) - - if key in self.env[DEFINE_COMMENTS]: - return self.env[DEFINE_COMMENTS][key] - return None - -@waflib.Configure.conf -def define_cond(self, name, value, comment): - """Conditionally define a name. - Formally equivalent to: if value: define(name, 1) else: undefine(name)""" - if value: - self.define_with_comment(name, value, comment) - else: - self.undefine(name) - -@waflib.Configure.conf -def get_config_header(self, defines=True, headers=False, define_prefix=None): - """ - Create the contents of a ``config.h`` file from the defines and includes - set in conf.env.define_key / conf.env.include_key. No include guards are added. - - :param defines: write the defines values - :type defines: bool - :param headers: write the headers - :type headers: bool - :return: the contents of a ``config.h`` file - :rtype: string - """ - tpl = self.env["CONFIG_HEADER_TEMPLATE"] or "%(content)s" - - lst = [] - if headers: - for x in self.env[INCKEYS]: - lst.append('#include <%s>' % x) - - if defines: - for x in self.env[DEFKEYS]: - cmt = self.get_comment(x) - if cmt is not None: - lst.append(cmt) - if self.is_defined(x): - val = self.get_define(x) - lst.append('#define %s %s\n' % (x, val)) - else: - lst.append('/* #undef %s */\n' % x) - return tpl % {"content": "\n".join(lst)} From 1e17e2d7d176eb97157e3f613425ef746df56ed1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 6 Nov 2015 17:46:42 -0700 Subject: [PATCH 170/496] STY: Minor style fixups. Fix some long lines and indentation in numpy/ma/core.py and numpy/ma/extras.py --- numpy/ma/core.py | 6 ++++-- numpy/ma/extras.py | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 7d9acbd1ca85..ffc8bb4d2f7a 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -32,7 +32,9 @@ from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue from numpy import array as narray from numpy.lib.function_base import angle -from numpy.compat import getargspec, formatargspec, long, basestring, unicode, bytes, sixu +from numpy.compat import ( + getargspec, formatargspec, long, basestring, unicode, bytes, sixu + ) from numpy import expand_dims as n_expand_dims if sys.version_info[0] >= 3: @@ -5884,7 +5886,7 @@ def filled(self, fill_value=None): -------- MaskedArray.filled - """ + """ return asarray(self).filled(fill_value)[()] def tolist(self): diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index ae4e0cee568e..c4473b498090 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1073,7 +1073,8 @@ def dot(a, b, strict=False): fill_value = 999999) """ - #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension + # !!!: Works only with 2D arrays. 
There should be a way to get it to run + # with higher dimension if strict and (a.ndim == 2) and (b.ndim == 2): a = mask_rows(a) b = mask_cols(b) From cf9f1907b99d06291ab16ad4d2105a871f56f7d9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Nov 2015 12:50:41 -0700 Subject: [PATCH 171/496] BUG, MAINT: Refactor ma.dot function and the corresponding method. The basic implementation of ma.dot is moved from the method to the function and the function itself is moved from extras.py to core.py on account of import complications. The mask_rowcols function from extras is also moved to core.py as it is needed by dot. For backwards compatibility, both functions are still exported in extras.__all__ and can be imported from that module. They are not included in part of core.__all__. An out parameter is also added to ma.dot. This PR also closes #6611. --- numpy/ma/core.py | 237 +++++++++++++++++++++++++++++++++++++++++---- numpy/ma/extras.py | 146 +--------------------------- 2 files changed, 221 insertions(+), 162 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index ffc8bb4d2f7a..87082d139d50 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -37,6 +37,7 @@ ) from numpy import expand_dims as n_expand_dims + if sys.version_info[0] >= 3: import pickle else: @@ -4653,24 +4654,44 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return D.astype(dtype).filled(0).sum(axis=None, out=out) trace.__doc__ = ndarray.trace.__doc__ - def dot(self, other, out=None): - am = ~getmaskarray(self) - bm = ~getmaskarray(other) - if out is None: - d = np.dot(filled(self, 0), filled(other, 0)) - m = ~np.dot(am, bm) - if d.ndim == 0: - d = np.asarray(d) - r = d.view(get_masked_subclass(self, other)) - r.__setmask__(m) - return r - d = self.filled(0).dot(other.filled(0), out._data) - if out.mask.shape != d.shape: - out._mask = np.empty(d.shape, MaskType) - np.dot(am, bm, out._mask) - np.logical_not(out._mask, out._mask) - return out - dot.__doc__ = ndarray.dot.__doc__ + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) def sum(self, axis=None, dtype=None, out=None): """ @@ -7023,6 +7044,186 @@ def round_(a, decimals=0, out=None): round = round_ +# Needed by dot, so move here from extras.py. It will still be exported +# from extras.py for compatibility. 
+def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked). The result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=np.int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array(data = + [[0 0 0] + [0 -- 0] + [0 0 0]], + mask = + [[False False False] + [False True False] + [False False False]], + fill_value=999999) + >>> ma.mask_rowcols(a) + masked_array(data = + [[0 -- 0] + [-- -- --] + [0 -- 0]], + mask = + [[False True False] + [ True True True] + [False True False]], + fill_value=999999) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. Note that `strict` and `out` are in different position + than in the method version. In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + .. note:: + Works only with 2-D arrays at the moment. + + + Parameters + ---------- + a, b : masked_array_like + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. 
Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array(data = + [[21 26] + [45 64]], + mask = + [[False False] + [False False]], + fill_value = 999999) + >>> np.ma.dot(a, b, strict=True) + masked_array(data = + [[-- --] + [-- 64]], + mask = + [[ True True] + [ True False]], + fill_value = 999999) + + """ + # !!!: Works only with 2D arrays. There should be a way to get it to run + # with higher dimension + if strict and (a.ndim == 2) and (b.ndim == 2): + a = mask_rowcols(a, 0) + b = mask_rowcols(b, 1) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if d.ndim == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + def inner(a, b): """ Returns the inner product of a and b for arrays of floating point types. diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index c4473b498090..e1d228e73482 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -29,7 +29,8 @@ from .core import ( MaskedArray, MAError, add, array, asarray, concatenate, filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, + mask_rowcols ) import numpy as np @@ -846,96 +847,6 @@ def compress_cols(a): raise NotImplementedError("compress_cols works for 2D arrays only.") return compress_rowcols(a, 1) -def mask_rowcols(a, axis=None): - """ - Mask rows and/or columns of a 2D array that contain masked values. - - Mask whole rows and/or columns of a 2D array that contain - masked values. The masking behavior is selected using the - `axis` parameter. - - - If `axis` is None, rows *and* columns are masked. - - If `axis` is 0, only rows are masked. - - If `axis` is 1 or -1, only columns are masked. - - Parameters - ---------- - a : array_like, MaskedArray - The array to mask. If not a MaskedArray instance (or if no array - elements are masked). The result is a MaskedArray with `mask` set - to `nomask` (False). Must be a 2D array. - axis : int, optional - Axis along which to perform the operation. If None, applies to a - flattened version of the array. - - Returns - ------- - a : MaskedArray - A modified version of the input array, masked depending on the value - of the `axis` parameter. - - Raises - ------ - NotImplementedError - If input array `a` is not 2D. - - See Also - -------- - mask_rows : Mask rows of a 2D array that contain masked values. - mask_cols : Mask cols of a 2D array that contain masked values. - masked_where : Mask where a condition is met. - - Notes - ----- - The input array's mask is modified by this function. 
- - Examples - -------- - >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) - >>> a[1, 1] = 1 - >>> a - array([[0, 0, 0], - [0, 1, 0], - [0, 0, 0]]) - >>> a = ma.masked_equal(a, 1) - >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) - >>> ma.mask_rowcols(a) - masked_array(data = - [[0 -- 0] - [-- -- --] - [0 -- 0]], - mask = - [[False True False] - [ True True True] - [False True False]], - fill_value=999999) - - """ - a = array(a, subok=False) - if a.ndim != 2: - raise NotImplementedError("mask_rowcols works for 2D arrays only.") - m = getmask(a) - # Nothing is masked: return a - if m is nomask or not m.any(): - return a - maskedval = m.nonzero() - a._mask = a._mask.copy() - if not axis: - a[np.unique(maskedval[0])] = masked - if axis in [None, 1, -1]: - a[:, np.unique(maskedval[1])] = masked - return a - def mask_rows(a, axis=None): """ Mask rows of a 2D array that contain masked values. @@ -1027,59 +938,6 @@ def mask_cols(a, axis=None): return mask_rowcols(a, 1) -def dot(a, b, strict=False): - """ - Return the dot product of two arrays. - - .. note:: - Works only with 2-D arrays at the moment. - - This function is the equivalent of `numpy.dot` that takes masked values - into account, see `numpy.dot` for details. - - Parameters - ---------- - a, b : ndarray - Inputs arrays. - strict : bool, optional - Whether masked data are propagated (True) or set to 0 (False) for the - computation. Default is False. - Propagating the mask means that if a masked value appears in a row or - column, the whole row or column is considered masked. - - See Also - -------- - numpy.dot : Equivalent function for ndarrays. - - Examples - -------- - >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) - >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) - >>> np.ma.dot(a, b) - masked_array(data = - [[21 26] - [45 64]], - mask = - [[False False] - [False False]], - fill_value = 999999) - >>> np.ma.dot(a, b, strict=True) - masked_array(data = - [[-- --] - [-- 64]], - mask = - [[ True True] - [ True False]], - fill_value = 999999) - - """ - # !!!: Works only with 2D arrays. There should be a way to get it to run - # with higher dimension - if strict and (a.ndim == 2) and (b.ndim == 2): - a = mask_rows(a) - b = mask_cols(b) - return a.dot(b) - #####-------------------------------------------------------------------------- #---- --- arraysetops --- #####-------------------------------------------------------------------------- From 3e82108f701b0ce6cbb9e16f5d7fd4c3cb27a97c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Nov 2015 12:58:06 -0700 Subject: [PATCH 172/496] TST: Add tests for ma.dot. Test that ma.dot always returns a masked array. Test basic that the new out parameter in ma.dot works. 
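For reference, a minimal sketch of the behavior these tests exercise (illustrative only, not part of the patch):

    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = ma.array(np.eye(3))
    >>> # ma.dot now always returns a masked array, even for ndarray inputs (gh-6611)
    >>> isinstance(ma.dot(a, a), ma.MaskedArray)
    True
    >>> # the new out argument is filled in place and returned
    >>> out = ma.array(np.zeros((3, 3)))
    >>> ma.dot(a, a, out=out) is out
    True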
--- numpy/ma/tests/test_core.py | 4 +-- numpy/ma/tests/test_extras.py | 54 +++++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 70c1ee12c01c..61fd77bda7ad 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -3192,7 +3192,7 @@ def test_dot(self): assert_almost_equal(r.filled(0), fX.dot(fX)) assert_(r.mask[1,3]) r1 = empty_like(r) - mX.dot(mX, r1) + mX.dot(mX, out=r1) assert_almost_equal(r, r1) mYY = mXX.swapaxes(-1, -2) @@ -3200,7 +3200,7 @@ def test_dot(self): r = mXX.dot(mYY) assert_almost_equal(r.filled(0), fXX.dot(fYY)) r1 = empty_like(r) - mXX.dot(mYY, r1) + mXX.dot(mYY, out=r1) assert_almost_equal(r, r1) def test_dot_shape_mismatch(self): diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index c41c629fc475..6138d0573967 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -538,26 +538,26 @@ def test_dot(self): m = [1, 0, 0, 0, 0, 0] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) + c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [1, 0]]) - c = dot(b, a, True) + c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) - c = dot(a, b, False) + c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, False) + c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 1] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) - c = dot(a, b, True) + c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 1], [1, 1]]) - c = dot(b, a, True) + c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) - c = dot(a, b, False) + c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) assert_equal(c, dot(a, b)) - c = dot(b, a, False) + c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 0] @@ -570,37 +570,53 @@ def test_dot(self): # a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) + c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [0, 0]]) - c = dot(a, b, False) + c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) + c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) - c = dot(b, a, False) + c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) + c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 0], [1, 1]]) c = dot(a, b) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) + c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) - c = dot(b, a, False) + c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) - c = dot(a, b, True) + c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 0], [1, 1]]) - c = dot(a, b, False) + c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b, a, True) + c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 
0, 1]])
-        c = dot(b, a, False)
+        c = dot(b, a, strict=False)
         assert_equal(c, np.dot(b.filled(0), a.filled(0)))
 
+    def test_dot_returns_maskedarray(self):
+        # See gh-6611
+        a = np.eye(3)
+        b = array(a)
+        assert_(type(dot(a, a)) is MaskedArray)
+        assert_(type(dot(a, b)) is MaskedArray)
+        assert_(type(dot(b, a)) is MaskedArray)
+        assert_(type(dot(b, b)) is MaskedArray)
+
+    def test_dot_out(self):
+        a = array(np.eye(3))
+        out = array(np.zeros((3, 3)))
+        res = dot(a, a, out=out)
+        assert_(res is out)
+        assert_equal(a, res)
+
 
 class TestApplyAlongAxis(TestCase):
     # Tests 2D functions

From 1e8e955719a58f4180526e16c3efeab9aac68747 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 10 Nov 2015 21:53:52 -0700
Subject: [PATCH 173/496] DOC: Sync 1.10.2 release notes with
 maintenance/1.10.x.

[ci skip]
---
 doc/release/1.10.2-notes.rst | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index 4a62be2daeca..e38f523b7635 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -17,23 +17,27 @@ using PyArray_ISFORTRAN to check for Fortran contiguity instead of
 PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using
 the updated numpy.i
-
 Issues Fixed
 ============
 
 * gh-6462 Median of empty array produces IndexError.
 * gh-6467 Performance regression for record array access.
+* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
 * gh-6491 Error in broadcasting stride_tricks array.
 * gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
 * gh-6497 Failure of reduce operation on recarrays.
 * gh-6498 Mention change in default casting rule in 1.10 release notes.
 * gh-6530 The partition function errors out on empty input.
 * gh-6563 Intent(out) broken in recent versions of f2py.
+* gh-6569 Cannot run tests after 'python setup.py build_ext -i'
+* gh-6572 Error in broadcasting stride_tricks array component.
 * gh-6575 BUG: Split produces empty arrays with wrong number of dimensions
 * gh-6590 Fortran Array problem in numpy 1.10.
 * gh-6602 Random __all__ missing choice and dirichlet.
+* gh-6611 ma.dot no longer always returns a masked array in 1.10.
 * gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i
-* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
+* gh-6636 Memory leak in nested dtypes in numpy.recarray
+* gh-6641 Subsetting recarray by fields yields a structured array.
 
 Merged PRs
 ==========
 
 The following PRs in master have been backported to 1.10.2
 
 * gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__.
 * gh-6621 BUG: Fix swig make_fortran function.
 * gh-6628 BUG: Make allclose return python bool.
+* gh-6642 BUG: Fix memleak in _convert_from_dict.
+* gh-6643 ENH: make recarray.getitem return a recarray.
+* gh-6653 BUG: Fix ma dot to always return masked array.
 
 Initial support for mingwpy was reverted as it was causing problems for
 non-windows builds.

From 59fbef0510c9a24d6f07ce80bb92fc57718ae95f Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 11 Nov 2015 15:52:17 -0700
Subject: [PATCH 174/496] BUG: ma.make_mask should always return nomask for
 nomask argument.

When the mask argument was nomask, commit 8da9c71 changed the behavior
to depend on shrink=True as well, resulting in array(False) being
returned when shrink=True, which in turn led to bugs because nomask is
a singleton and is detected by `mask is nomask`. That detection fails
when nomask is replaced by array(False).
Closes #6667. --- numpy/ma/core.py | 9 +++++---- numpy/ma/tests/test_core.py | 11 +++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 87082d139d50..b9f7da092659 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -1495,9 +1495,10 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): shrink : bool, optional Whether to shrink `m` to ``nomask`` if all its values are False. dtype : dtype, optional - Data-type of the output mask. By default, the output mask has - a dtype of MaskType (bool). If the dtype is flexible, each field - has a boolean dtype. + Data-type of the output mask. By default, the output mask has a + dtype of MaskType (bool). If the dtype is flexible, each field has + a boolean dtype. This is ignored when `m` is ``nomask``, in which + case ``nomask`` is always returned. Returns ------- @@ -1547,7 +1548,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): dtype=[('man', '|b1'), ('mouse', '|b1')]) """ - if m is nomask and shrink: + if m is nomask: return nomask elif isinstance(m, ndarray): # We won't return after this point to make sure we can shrink the mask diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 61fd77bda7ad..e5fdfddb144f 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -11,8 +11,10 @@ import warnings import pickle import operator +import itertools from functools import reduce + import numpy as np import numpy.ma.core import numpy.core.fromnumeric as fromnumeric @@ -3816,6 +3818,15 @@ def test_make_mask(self): assert_equal(test.dtype, bdtype) assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, np.float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): # Initialize mtype = [('a', np.bool), ('b', np.bool)] From 628d55c08f753ea8714d29a453587f901e5bcb80 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 11 Nov 2015 16:26:43 -0700 Subject: [PATCH 175/496] DOC: Update 1.10.2-notes. Document fix to ma.make_mask. --- doc/release/1.10.2-notes.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index e38f523b7635..70c9398bae94 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -38,6 +38,7 @@ Issues Fixed * gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i * gh-6636 Memory leak in nested dtypes in numpy.recarray * gh-6641 Subsetting recarray by fields yields a structured array. +* gh-6667 ma.make_mask handles ma.nomask input incorrectly. Merged PRs ========== @@ -78,6 +79,7 @@ The following PRs in master have been backported to 1.10.2 * gh-6642 BUG: Fix memleak in _convert_from_dict. * gh-6643 ENH: make recarray.getitem return a recarray. * gh-6653 BUG: Fix ma dot to always return masked array. +* gh-6668 BUG: ma.make_mask should always return nomask for nomask argument. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. 
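As a quick illustration of the ma.make_mask fix documented in the notes above (a sketch of the corrected behavior, not part of any patch):

    >>> import numpy.ma as ma
    >>> # nomask input now always yields the nomask singleton, so identity
    >>> # checks like `m is ma.nomask` keep working regardless of shrink
    >>> ma.make_mask(ma.nomask, shrink=True) is ma.nomask
    True
    >>> ma.make_mask(ma.nomask, shrink=False) is ma.nomask
    True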
From 296b0b78fd381116b0e26c14ed5caaf1c07575a8 Mon Sep 17 00:00:00 2001 From: floatingpointstack Date: Thu, 12 Nov 2015 16:45:41 +0100 Subject: [PATCH 176/496] MAINT: Typo in arrays.indexing.rst Issue#6670 --- doc/source/reference/arrays.indexing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst index 228e9a8d44ad..50b2492d2e61 100644 --- a/doc/source/reference/arrays.indexing.rst +++ b/doc/source/reference/arrays.indexing.rst @@ -193,7 +193,7 @@ basic slicing that returns a :term:`view`). fundamentally different than ``x[(1,2,3)]``. The latter is equivalent to ``x[1,2,3]`` which will trigger basic selection while the former will trigger advanced indexing. Be sure to understand - why this is occurs. + why this occurs. Also recognize that ``x[[1,2,3]]`` will trigger advanced indexing, whereas ``x[[1,2,slice(None)]]`` will trigger basic slicing. From 1392e2417150bdea473e3b29867c685c09b25447 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 12 Nov 2015 17:20:17 +0000 Subject: [PATCH 177/496] DOC: document that assert_raises can be used as a context manager --- numpy/testing/utils.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index c6d863f9498a..099b75bdf43d 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -1102,6 +1102,18 @@ def assert_raises(*args,**kwargs): deemed to have suffered an error, exactly as for an unexpected exception. + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + """ __tracebackhide__ = True # Hide traceback for py.test nose = import_nose() From 8efc87ec599c0b3eac4e63bea6eda9023d8ed96d Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Thu, 12 Nov 2015 20:15:37 +0200 Subject: [PATCH 178/496] ENH: reimplement may_share_memory in C to improve its performance --- numpy/add_newdocs.py | 39 +++++++++++++++ numpy/core/function_base.py | 45 +---------------- numpy/core/numeric.py | 4 +- numpy/core/src/multiarray/multiarraymodule.c | 52 ++++++++++++++++---- 4 files changed, 85 insertions(+), 55 deletions(-) diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index b00e229c3e4f..c140360891e5 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -3826,6 +3826,45 @@ def luf(lamdaexpr, *args, **kwargs): """) +add_newdoc('numpy.core.multiarray', 'may_share_memory', + """ + may_share_memory(a, b, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. 
+ + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """) + + add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', """ arr.newbyteorder(new_order='S') diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 05fea557a5ea..c82c9bb6b571 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -1,6 +1,6 @@ from __future__ import division, absolute_import, print_function -__all__ = ['logspace', 'linspace', 'may_share_memory'] +__all__ = ['logspace', 'linspace'] from . import numeric as _nx from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError @@ -201,46 +201,3 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None): if dtype is None: return _nx.power(base, y) return _nx.power(base, y).astype(dtype) - - -def may_share_memory(a, b, max_work=None): - """Determine if two arrays can share memory - - A return of True does not necessarily mean that the two arrays - share any element. It just means that they *might*. - - Only the memory bounds of a and b are checked by default. - - Parameters - ---------- - a, b : ndarray - Input arrays - max_work : int, optional - Effort to spend on solving the overlap problem. See - `shares_memory` for details. Default for ``may_share_memory`` - is to do a bounds check. - - Returns - ------- - out : bool - - See Also - -------- - shares_memory - - Examples - -------- - >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) - False - >>> x = np.zeros([3, 4]) - >>> np.may_share_memory(x[:,0], x[:,1]) - True - - """ - if max_work is None: - max_work = MAY_SHARE_BOUNDS - try: - return shares_memory(a, b, max_work=max_work) - except (TooHardError, OverflowError): - # Unable to determine, assume yes - return True diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 2ece2ce8dc04..3b442ea7822c 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -41,7 +41,8 @@ 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul', - 'shares_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'TooHardError', + 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', + 'TooHardError', ] if sys.version_info[0] < 3: @@ -384,6 +385,7 @@ def extend_all(module): fromfile = multiarray.fromfile frombuffer = multiarray.frombuffer shares_memory = multiarray.shares_memory +may_share_memory = multiarray.may_share_memory if sys.version_info[0] < 3: newbuffer = multiarray.newbuffer getbuffer = multiarray.getbuffer diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 10c22ae5a5e1..486fdbc9bb46 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -3989,7 +3989,8 @@ test_interrupt(PyObject *NPY_UNUSED(self), PyObject *args) static PyObject * -array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_work, + int raise_exceptions) { PyArrayObject * self = NULL; PyArrayObject * other = NULL; @@ -3998,9 +3999,11 @@ array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwd mem_overlap_t result; 
static PyObject *too_hard_cls = NULL; - Py_ssize_t max_work = NPY_MAY_SHARE_EXACT; + Py_ssize_t max_work; NPY_BEGIN_THREADS_DEF; + max_work = default_max_work; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&|O", kwlist, PyArray_Converter, &self, PyArray_Converter, &other, @@ -4043,17 +4046,29 @@ array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwd Py_RETURN_TRUE; } else if (result == MEM_OVERLAP_OVERFLOW) { - PyErr_SetString(PyExc_OverflowError, - "Integer overflow in computing overlap"); - return NULL; + if (raise_exceptions) { + PyErr_SetString(PyExc_OverflowError, + "Integer overflow in computing overlap"); + return NULL; + } + else { + /* Don't know, so say yes */ + Py_RETURN_TRUE; + } } else if (result == MEM_OVERLAP_TOO_HARD) { - npy_cache_import("numpy.core._internal", "TooHardError", - &too_hard_cls); - if (too_hard_cls) { - PyErr_SetString(too_hard_cls, "Exceeded max_work"); + if (raise_exceptions) { + npy_cache_import("numpy.core._internal", "TooHardError", + &too_hard_cls); + if (too_hard_cls) { + PyErr_SetString(too_hard_cls, "Exceeded max_work"); + } + return NULL; + } + else { + /* Don't know, so say yes */ + Py_RETURN_TRUE; } - return NULL; } else { /* Doesn't happen usually */ @@ -4069,6 +4084,20 @@ array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwd } +static PyObject * +array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +{ + return array_shares_memory_impl(args, kwds, NPY_MAY_SHARE_EXACT, 1); +} + + +static PyObject * +array_may_share_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +{ + return array_shares_memory_impl(args, kwds, NPY_MAY_SHARE_BOUNDS, 0); +} + + static struct PyMethodDef array_module_methods[] = { {"_get_ndarray_c_version", (PyCFunction)array__get_ndarray_c_version, @@ -4178,6 +4207,9 @@ static struct PyMethodDef array_module_methods[] = { {"shares_memory", (PyCFunction)array_shares_memory, METH_VARARGS | METH_KEYWORDS, NULL}, + {"may_share_memory", + (PyCFunction)array_may_share_memory, + METH_VARARGS | METH_KEYWORDS, NULL}, /* Datetime-related functions */ {"datetime_data", (PyCFunction)array_datetime_data, From f2be3a2f822b3f5cf4b706cc2e28f0c169e6d995 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Thu, 12 Nov 2015 20:20:44 +0200 Subject: [PATCH 179/496] BUG: don't use PyArray_Converter in may_share_memory The converter function has NPY_ARRAY_CARRAY enabled, which can cause false negatives for non-ndarray inputs. 
Fixes gh-5604 --- numpy/core/src/multiarray/multiarraymodule.c | 32 +++++++++++++++++--- numpy/core/tests/test_mem_overlap.py | 28 +++++++++++++++++ 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 486fdbc9bb46..b9d79029eb2f 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -3992,6 +3992,8 @@ static PyObject * array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_work, int raise_exceptions) { + PyObject * self_obj = NULL; + PyObject * other_obj = NULL; PyArrayObject * self = NULL; PyArrayObject * other = NULL; PyObject *max_work_obj = NULL; @@ -4004,13 +4006,35 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ max_work = default_max_work; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&|O", kwlist, - PyArray_Converter, &self, - PyArray_Converter, &other, - &max_work_obj)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O", kwlist, + &self_obj, &other_obj, &max_work_obj)) { return NULL; } + if (PyArray_Check(self_obj)) { + self = (PyArrayObject*)self_obj; + Py_INCREF(self); + } + else { + /* Use FromAny to enable checking overlap for objects exposing array + interfaces etc. */ + self = (PyArrayObject*)PyArray_FromAny(self_obj, NULL, 0, 0, 0, NULL); + if (self == NULL) { + goto fail; + } + } + + if (PyArray_Check(other_obj)) { + other = (PyArrayObject*)other_obj; + Py_INCREF(other); + } + else { + other = (PyArrayObject*)PyArray_FromAny(other_obj, NULL, 0, 0, 0, NULL); + if (other == NULL) { + goto fail; + } + } + if (max_work_obj == NULL || max_work_obj == Py_None) { /* noop */ } diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index 728cc675d8bb..8d39fa4c0a36 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -482,5 +482,33 @@ def test_internal_overlap_fuzz(): no_overlap += 1 +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray(object): + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2(object): + def __init__(self, data): + self.data = data + + def __array__(self): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + if __name__ == "__main__": run_module_suite() From 35c2d9c9bb597be696005e325742fb8ae3e8f117 Mon Sep 17 00:00:00 2001 From: Griffin Hosseinzadeh Date: Fri, 13 Nov 2015 02:17:54 +0000 Subject: [PATCH 180/496] add clarification of weights to documentation --- numpy/lib/polynomial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index de93763000b8..2f677438ba9c 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -427,7 +427,8 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (M,), optional - weights to apply to the y-coordinates of the sample points. + Weights to apply to the y-coordinates of the sample points. 
For + gaussian uncertainties, use 1/sigma (not 1/sigma**2). cov : bool, optional Return the estimate and the covariance matrix of the estimate If full is True, then cov is not returned. From eadc135a5f2ab577748188770af66feafe87859d Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sat, 14 Nov 2015 14:28:25 +0200 Subject: [PATCH 181/496] BUG: testing: fix a bug in assert_string_equal --- numpy/testing/tests/test_utils.py | 19 ++++++++++++++++++- numpy/testing/utils.py | 11 ++++++----- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index a31fce4afbc3..13aeffe02877 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -9,7 +9,8 @@ assert_array_almost_equal, build_err_msg, raises, assert_raises, assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, - clear_and_catch_warnings, run_module_suite + clear_and_catch_warnings, run_module_suite, + assert_string_equal ) import unittest @@ -715,6 +716,22 @@ def test_nan(self): lambda: assert_array_max_ulp(nan, nzero, maxulp=maxulp)) +class TestStringEqual(unittest.TestCase): + def test_simple(self): + assert_string_equal("hello", "hello") + assert_string_equal("hello\nmultiline", "hello\nmultiline") + + try: + assert_string_equal("foo\nbar", "hello\nbar") + except AssertionError as exc: + assert_equal(str(exc), "Differences in strings:\n- foo\n+ hello") + else: + raise AssertionError("exception not raised") + + self.assertRaises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def assert_warn_len_equal(mod, n_in_context): mod_warns = mod.__warningregistry__ # Python 3.4 appears to clear any pre-existing warnings of the same type, diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 099b75bdf43d..8a282ff3c4f5 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -1018,11 +1018,12 @@ def assert_string_equal(actual, desired): if not d2.startswith('+ '): raise AssertionError(repr(d2)) l.append(d2) - d3 = diff.pop(0) - if d3.startswith('? '): - l.append(d3) - else: - diff.insert(0, d3) + if diff: + d3 = diff.pop(0) + if d3.startswith('? 
'): + l.append(d3) + else: + diff.insert(0, d3) if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): continue diff_list.extend(l) From 70c5052a9f07025c236033cf629506bb38eb6d97 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sun, 15 Nov 2015 07:48:20 +0000 Subject: [PATCH 182/496] ENH: testing: add SkipTest and KnownFailureException * use SkipTest in numpy tests instead of importing it from nose * add a KnownFailureException as an alias for KnownFailureTest (the former is preferred, but the latter is kept for backcompat) * rename the KnownFailure nose plugin into KnownFailurePlugin, and keep the old name for backcompat --- doc/release/1.11.0-notes.rst | 4 ++++ numpy/core/tests/test_multiarray.py | 3 +-- numpy/core/tests/test_print.py | 8 ++++---- numpy/f2py/tests/test_array_from_pyobj.py | 6 ++---- numpy/f2py/tests/util.py | 9 ++++----- numpy/lib/tests/test__datasource.py | 8 +++----- numpy/lib/tests/test_format.py | 3 +-- numpy/linalg/tests/test_linalg.py | 3 +-- numpy/testing/decorators.py | 19 ++++++++++--------- numpy/testing/noseclasses.py | 19 +++++++++---------- numpy/testing/nosetester.py | 8 ++++---- numpy/testing/tests/test_decorators.py | 20 ++++++++++---------- numpy/testing/utils.py | 21 +++++++++++++++++++-- 13 files changed, 72 insertions(+), 59 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index f8d3d4dbfe31..580c0c952b74 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -71,6 +71,10 @@ via ``python runtests.py --bench``. For more details, see ``benchmarks/README.rs arrays have memory overlap is added. ``np.may_share_memory`` also now has an option to spend more effort to reduce false positives. +* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed in the +``numpy.testing`` namespace. Raise them in a test function to mark the test to +be skipped or mark it as a known failure, respectively. + Improvements ============ diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 15dd9302cc8d..693847273ff5 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -17,7 +17,6 @@ import numpy as np -from nose import SkipTest from numpy.compat import asbytes, getexception, strchar, unicode, sixu from test_print import in_foreign_locale from numpy.core.multiarray_tests import ( @@ -29,7 +28,7 @@ TestCase, run_module_suite, assert_, assert_raises, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, - assert_array_less, runstring, dec + assert_array_less, runstring, dec, SkipTest ) # Need to test an object that does not fully implement math interface diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py index f595cbe44bfd..6234b641ea8c 100644 --- a/numpy/core/tests/test_print.py +++ b/numpy/core/tests/test_print.py @@ -6,7 +6,7 @@ import numpy as np from numpy.testing import ( - run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal, SkipTest ) @@ -207,7 +207,7 @@ def test_scalar_format(): def in_foreign_locale(func): """ Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.' 
- If not possible, raise nose.SkipTest + If not possible, raise SkipTest """ if sys.platform == 'win32': @@ -225,8 +225,8 @@ def wrapper(*args, **kwargs): except locale.Error: pass else: - raise nose.SkipTest("Skipping locale test, because " - "French locale not found") + raise SkipTest("Skipping locale test, because " + "French locale not found") return func(*args, **kwargs) finally: locale.setlocale(locale.LC_NUMERIC, locale=curloc) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 9551c099ed3c..48bb7c0f4d93 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -5,13 +5,11 @@ import sys import copy -import nose - from numpy import ( array, alltrue, ndarray, zeros, dtype, intp, clongdouble ) from numpy.testing import ( - run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal, SkipTest ) from numpy.core.multiarray import typeinfo import util @@ -28,7 +26,7 @@ def setup(): # Check compiler availability first if not util.has_c_compiler(): - raise nose.SkipTest("No C compiler available") + raise SkipTest("No C compiler available") if wrap is None: config_code = """ diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 5b4e072e7ff9..8d06d96800ae 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -17,10 +17,9 @@ import re import random -import nose - from numpy.compat import asbytes, asstr import numpy.f2py +from numpy.testing import SkipTest try: from hashlib import md5 @@ -334,7 +333,7 @@ def setUp(self): # Check compiler availability first if not has_c_compiler(): - raise nose.SkipTest("No C compiler available") + raise SkipTest("No C compiler available") codes = [] if self.sources: @@ -350,9 +349,9 @@ def setUp(self): elif fn.endswith('.f90'): needs_f90 = True if needs_f77 and not has_f77_compiler(): - raise nose.SkipTest("No Fortran 77 compiler available") + raise SkipTest("No Fortran 77 compiler available") if needs_f90 and not has_f90_compiler(): - raise nose.SkipTest("No Fortran 90 compiler available") + raise SkipTest("No Fortran 90 compiler available") # Build the module if self.code is not None: diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 090f71f670c9..f4bece352b76 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -7,7 +7,7 @@ from numpy.compat import asbytes from numpy.testing import ( - run_module_suite, TestCase, assert_ + run_module_suite, TestCase, assert_, SkipTest ) import numpy.lib._datasource as datasource @@ -137,8 +137,7 @@ def test_ValidGzipFile(self): import gzip except ImportError: # We don't have the gzip capabilities to test. - import nose - raise nose.SkipTest + raise SkipTest # Test datasource's internal file_opener for Gzip files. filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') fp = gzip.open(filepath, 'w') @@ -154,8 +153,7 @@ def test_ValidBz2File(self): import bz2 except ImportError: # We don't have the bz2 capabilities to test. - import nose - raise nose.SkipTest + raise SkipTest # Test datasource's internal file_opener for BZip2 files. 
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') fp = bz2.BZ2File(filepath, 'w') diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 4f8a651489da..1bf65fa61d4f 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -287,7 +287,7 @@ from numpy.compat import asbytes, asbytes_nested, sixu from numpy.testing import ( run_module_suite, assert_, assert_array_equal, assert_raises, raises, - dec + dec, SkipTest ) from numpy.lib import format @@ -812,7 +812,6 @@ def test_bad_header(): def test_large_file_support(): - from nose import SkipTest if (sys.platform == 'win32' or sys.platform == 'cygwin'): raise SkipTest("Unknown if Windows has sparse filesystems") # try creating a large sparse file diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 7c577d86fed2..afa098f12250 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -17,7 +17,7 @@ from numpy.testing import ( assert_, assert_equal, assert_raises, assert_array_equal, assert_almost_equal, assert_allclose, run_module_suite, - dec + dec, SkipTest ) @@ -1215,7 +1215,6 @@ def test_xerbla_override(): # Check that our xerbla has been successfully linked in. If it is not, # the default xerbla routine is called, which prints a message to stdout # and may, or may not, abort the process depending on the LAPACK package. - from nose import SkipTest XERBLA_OK = 255 diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py index 56962b93c322..df3d297ff2fe 100644 --- a/numpy/testing/decorators.py +++ b/numpy/testing/decorators.py @@ -18,6 +18,7 @@ import warnings import collections +from .utils import SkipTest def slow(t): """ @@ -141,14 +142,14 @@ def get_msg(func,msg=None): def skipper_func(*args, **kwargs): """Skipper for normal test functions.""" if skip_val(): - raise nose.SkipTest(get_msg(f, msg)) + raise SkipTest(get_msg(f, msg)) else: return f(*args, **kwargs) def skipper_gen(*args, **kwargs): """Skipper for test generators.""" if skip_val(): - raise nose.SkipTest(get_msg(f, msg)) + raise SkipTest(get_msg(f, msg)) else: for x in f(*args, **kwargs): yield x @@ -166,7 +167,7 @@ def skipper_gen(*args, **kwargs): def knownfailureif(fail_condition, msg=None): """ - Make function raise KnownFailureTest exception if given condition is true. + Make function raise KnownFailureException exception if given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly @@ -178,15 +179,15 @@ def knownfailureif(fail_condition, msg=None): Flag to determine whether to mark the decorated test as a known failure (if True) or not (if False). msg : str, optional - Message to give on raising a KnownFailureTest exception. + Message to give on raising a KnownFailureException exception. Default is None. Returns ------- decorator : function - Decorator, which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. + Decorator, which, when applied to a function, causes + KnownFailureException to be raised when `fail_condition` is True, + and the function to be called normally otherwise. Notes ----- @@ -207,11 +208,11 @@ def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. 
import nose - from .noseclasses import KnownFailureTest + from .noseclasses import KnownFailureException def knownfailer(*args, **kwargs): if fail_val(): - raise KnownFailureTest(msg) + raise KnownFailureException(msg) else: return f(*args, **kwargs) return nose.tools.make_decorator(f)(knownfailer) diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py index e6cc10179125..197e20bacb50 100644 --- a/numpy/testing/noseclasses.py +++ b/numpy/testing/noseclasses.py @@ -8,6 +8,7 @@ import os import doctest +import inspect import nose from nose.plugins import doctests as npd @@ -16,7 +17,8 @@ from nose.util import src import numpy from .nosetester import get_package_name -import inspect +from .utils import KnownFailureException, KnownFailureTest + # Some of the classes in this module begin with 'Numpy' to clearly distinguish # them from the plethora of very similar names from nose/unittest/doctest @@ -298,19 +300,14 @@ def configure(self, options, config): if p.name != self.to_unplug] -class KnownFailureTest(Exception): - '''Raise this exception to mark a test as a known failing test.''' - pass - - -class KnownFailure(ErrorClassPlugin): +class KnownFailurePlugin(ErrorClassPlugin): '''Plugin that installs a KNOWNFAIL error class for the - KnownFailureClass exception. When KnownFailureTest is raised, + KnownFailureClass exception. When KnownFailure is raised, the exception will be logged in the knownfail attribute of the result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the exception will not be counted as an error or failure.''' enabled = True - knownfail = ErrorClass(KnownFailureTest, + knownfail = ErrorClass(KnownFailureException, label='KNOWNFAIL', isfailure=False) @@ -318,7 +315,7 @@ def options(self, parser, env=os.environ): env_opt = 'NOSE_WITHOUT_KNOWNFAIL' parser.add_option('--no-knownfail', action='store_true', dest='noKnownFail', default=env.get(env_opt, False), - help='Disable special handling of KnownFailureTest ' + help='Disable special handling of KnownFailure ' 'exceptions') def configure(self, options, conf): @@ -329,6 +326,8 @@ def configure(self, options, conf): if disable: self.enabled = False +KnownFailure = KnownFailurePlugin # backwards compat + # Class allows us to save the results of the tests in runTests - see runTests # method docstring for details diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index c9c6d10f02b6..551e630ec2cd 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -121,8 +121,8 @@ def run_module_suite(file_to_run=None, argv=None): argv = argv + [file_to_run] nose = import_nose() - from .noseclasses import KnownFailure - nose.run(argv=argv, addplugins=[KnownFailure()]) + from .noseclasses import KnownFailurePlugin + nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) class NoseTester(object): @@ -301,8 +301,8 @@ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, '--cover-tests', '--cover-erase'] # construct list of plugins import nose.plugins.builtin - from .noseclasses import KnownFailure, Unplugger - plugins = [KnownFailure()] + from .noseclasses import KnownFailurePlugin, Unplugger + plugins = [KnownFailurePlugin()] plugins += [p() for p in nose.plugins.builtin.plugins] # add doctesting if required doctest_argv = '--with-doctest' in argv diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py index f8a5be672c07..7dbb5a8286e9 100644 --- a/numpy/testing/tests/test_decorators.py +++ b/numpy/testing/tests/test_decorators.py @@ 
-1,7 +1,7 @@ from __future__ import division, absolute_import, print_function -from numpy.testing import dec, assert_, assert_raises, run_module_suite -from numpy.testing.noseclasses import KnownFailureTest +from numpy.testing import (dec, assert_, assert_raises, run_module_suite, + SkipTest, KnownFailureException) import nose def test_slow(): @@ -40,7 +40,7 @@ def f1(x): f1('a') except DidntSkipException: raise Exception('Failed to skip') - except nose.SkipTest: + except SkipTest: pass @dec.skipif(False) @@ -51,7 +51,7 @@ def f2(x): f2('a') except DidntSkipException: pass - except nose.SkipTest: + except SkipTest: raise Exception('Skipped when not expected to') @@ -68,7 +68,7 @@ def f1(x): f1('a') except DidntSkipException: raise Exception('Failed to skip') - except nose.SkipTest: + except SkipTest: pass @dec.skipif(skip_tester) @@ -80,7 +80,7 @@ def f2(x): f2('a') except DidntSkipException: pass - except nose.SkipTest: + except SkipTest: raise Exception('Skipped when not expected to') @@ -93,7 +93,7 @@ def g1(x): try: for j in g1(10): pass - except KnownFailureTest: + except KnownFailureException: pass else: raise Exception('Failed to mark as known failure') @@ -107,7 +107,7 @@ def g2(x): try: for j in g2(10): pass - except KnownFailureTest: + except KnownFailureException: raise Exception('Marked incorretly as known failure') except DidntSkipException: pass @@ -126,7 +126,7 @@ def g1(x): skip_flag = 'skip me!' for j in g1(10): pass - except KnownFailureTest: + except KnownFailureException: pass else: raise Exception('Failed to mark as known failure') @@ -141,7 +141,7 @@ def g2(x): skip_flag = 'do not skip' for j in g2(10): pass - except KnownFailureTest: + except KnownFailureException: raise Exception('Marked incorretly as known failure') except DidntSkipException: pass diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 8a282ff3c4f5..00f7ce4d1976 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -13,6 +13,7 @@ import shutil import contextlib from tempfile import mkdtemp + from .nosetester import import_nose from numpy.core import float32, empty, arange, array_repr, ndarray @@ -28,11 +29,27 @@ 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', - 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings'] + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException'] -verbose = 0 +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + +KnownFailureTest = KnownFailureException # backwards compat + +# nose.SkipTest is unittest.case.SkipTest +# import it into the namespace, so that it's available as np.testing.SkipTest +try: + from unittest.case import SkipTest +except ImportError: + # on py2.6 unittest.case is not available. Ask nose for a replacement. + SkipTest = import_nose().SkipTest + + +verbose = 0 def assert_(val, msg=''): """ From 904da7c202384c8a2a6ec88cece378f70e2dd956 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Wed, 11 Nov 2015 19:34:23 +0100 Subject: [PATCH 183/496] ENH: use prefetching for summation It seems the small blocksizes (128) messes up the hardware prefetcher which would usually be able to work fine on this iteration pattern. Fix this by using software prefetching. Improves performance for large sums by 15%-30%. Tested on core2duo, xeon E5-4620, i5-3470 and AMD phenom II X4. 
Prefers __builtin_prefetch as that, unlike SSE2 _mm_prefetch, also works on capable non-x86 cpus.
---
 numpy/core/include/numpy/npy_common.h | 15 +++++++++++++++
 numpy/core/setup_common.py            |  3 +++
 numpy/core/src/umath/loops.c.src      |  4 ++++
 3 files changed, 22 insertions(+)

diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index eff5dd339c50..47ef94c9283c 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -61,6 +61,21 @@
 #define NPY_UNLIKELY(x) (x)
 #endif

+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef HAVE__MM_PREFETCH
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+                                              (loc == 1 ? _MM_HINT_T2 : \
+                                               (loc == 2 ? _MM_HINT_T1 : \
+                                                (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw, loc)
+#endif
+#endif
+
 #if defined(_MSC_VER)
         #define NPY_INLINE __inline
 #elif defined(__GNUC__)
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 68efd179185c..d93e475e3e54 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -125,7 +125,10 @@ def check_api_version(apiversion, codegen_dir):
                        ("__builtin_expect", '5, 0'),
                        ("__builtin_mul_overflow", '5, 5, (int*)5'),
                        ("_mm_load_ps", '(float*)0', "xmmintrin.h"),  # SSE
+                       ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
+                        "xmmintrin.h"),  # SSE
                        ("_mm_load_pd", '(double*)0', "emmintrin.h"),  # SSE2
+                       ("__builtin_prefetch", "(float*)0, 0, 3"),
                        ]

 # function attributes
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 854c1e17a078..aff6180c7150 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1444,6 +1444,8 @@ pairwise_sum_@TYPE@(@dtype@ *a, npy_uintp n, npy_intp stride)
         r[7] = @trf@(a[7 * stride]);

         for (i = 8; i < n - (n % 8); i += 8) {
+            /* small blocksizes seem to mess with hardware prefetch */
+            NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3);
             r[0] += @trf@(a[(i + 0) * stride]);
             r[1] += @trf@(a[(i + 1) * stride]);
             r[2] += @trf@(a[(i + 2) * stride]);
@@ -2190,6 +2192,8 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, @ftype@ * a, npy_uintp n,
         r[7] = a[6 * stride + 1];

         for (i = 8; i < n - (n % 8); i += 8) {
+            /* small blocksizes seem to mess with hardware prefetch */
+            NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3);
             r[0] += a[(i + 0) * stride];
             r[1] += a[(i + 0) * stride + 1];
             r[2] += a[(i + 2) * stride];
From 46d2e8356760e7549d0c80da9fe232177924183c Mon Sep 17 00:00:00 2001
From: lzkelley
Date: Sun, 15 Nov 2015 17:25:45 -0500
Subject: [PATCH 184/496] BUG, MAINT: check that histogram range parameters are
 finite, add tests to ensure this.

Improved some error-types.
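For illustration, this is the behaviour the new checks enforce (a sketch distilled from the tests added below, assuming the patch is applied):

```python
import numpy as np

vals = np.linspace(0.0, 1.0, num=100)
np.histogram(vals, range=[0.25, 0.75])     # normal finite range: fine
np.histogram(vals, range=[np.nan, 0.75])   # raises ValueError
np.histogram(vals, range=[0.25, np.inf])   # raises ValueError
```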
--- numpy/lib/function_base.py | 13 ++++++++++--- numpy/lib/tests/test_function_base.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index fef69dff3207..9261dba22293 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -336,8 +336,12 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, if (range is not None): mn, mx = range if (mn > mx): - raise AttributeError( + raise ValueError( 'max must be larger than min in range parameter.') + if not np.all(np.isfinite([mn, mx])): + raise ValueError( + 'range parameter must be finite.') + if isinstance(bins, basestring): bins = _hist_optim_numbins_estimator(a, bins) @@ -422,7 +426,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, else: bins = asarray(bins) if (np.diff(bins) < 0).any(): - raise AttributeError( + raise ValueError( 'bins must increase monotonically.') # Initialize empty histogram @@ -533,7 +537,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): try: M = len(bins) if M != D: - raise AttributeError( + raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: @@ -551,6 +555,9 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: + if not np.all(np.isfinite(range)): + raise ValueError( + 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index cc53c2b8ebaa..88c932692936 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1267,6 +1267,13 @@ def test_empty(self): assert_array_equal(a, np.array([0])) assert_array_equal(b, np.array([0, 1])) + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25,0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) + class TestHistogramOptimBinNums(TestCase): """ @@ -1489,6 +1496,16 @@ def test_rightmost_binedge(self): assert_(hist[0] == 0.0) assert_(hist[1] == 0.0) + def test_finite_range(self): + vals = np.random.random((100,3)) + histogramdd(vals, range=[[0.0,1.0],[0.25,0.75],[0.25,0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0,1.0],[0.25,0.75],[0.25,np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0,1.0],[np.nan,0.75],[0.25,0.5]]) + + + class TestUnique(TestCase): From e67156e6e5149513cca722cf3f633135738d1df6 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Tue, 17 Nov 2015 19:23:33 +0100 Subject: [PATCH 185/496] BUG: fix removing tempdirs created during build Old code used the thread local storage wrong and also only deleted the directories created for the last parallel build section as the exit handler only knows about one of the directories. Fix by storing all created tempdirs to delete at exit. 
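In outline, the new scheme is the following (a simplified sketch of the approach with illustrative names, not the exact distutils code):

```python
import atexit
import shutil
import tempfile
import threading

_tdata = threading.local()   # one tempdir per thread
_tmpdirs = []                # every tempdir ever created

def _cleanup():
    # runs once at interpreter exit and removes all of them
    for d in _tmpdirs:
        shutil.rmtree(d, ignore_errors=True)

atexit.register(_cleanup)

def get_tempdir():
    # reuse the thread's tempdir, but remember it globally for cleanup
    if not hasattr(_tdata, 'tempdir'):
        _tdata.tempdir = tempfile.mkdtemp()
        _tmpdirs.append(_tdata.tempdir)
    return _tdata.tempdir
```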
--- numpy/distutils/misc_util.py | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 75d864c5a7bc..345e60f26e47 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -18,6 +18,20 @@ except ImportError: from dummy_threading import local as tlocal +# stores temporary directory of each thread to only create one per thread +_tdata = tlocal() + +# store all created temporary directories so they can be deleted on exit +_tmpdirs = [] +def clean_up_temporary_directory(): + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + try: set except NameError: @@ -283,26 +297,13 @@ def gpaths(paths, local_path='', include_non_existing=True): paths = (paths,) return _fix_paths(paths, local_path, include_non_existing) - -def clean_up_temporary_directory(): - tdata = tlocal() - _temporary_directory = getattr(tdata, 'tempdir', None) - if not _temporary_directory: - return - try: - shutil.rmtree(_temporary_directory) - except OSError: - pass - _temporary_directory = None - def make_temp_file(suffix='', prefix='', text=True): - tdata = tlocal() - if not hasattr(tdata, 'tempdir'): - tdata.tempdir = tempfile.mkdtemp() - atexit.register(clean_up_temporary_directory) + if not hasattr(_tdata, 'tempdir'): + _tdata.tempdir = tempfile.mkdtemp() + _tmpdirs.append(_tdata.tempdir) fid, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, - dir=tdata.tempdir, + dir=_tdata.tempdir, text=text) fo = os.fdopen(fid, 'w') return fo, name From 0b19e6828135f601630d9e715e9ed3ff3ba489c9 Mon Sep 17 00:00:00 2001 From: Cong Ma Date: Wed, 18 Nov 2015 00:09:08 +0100 Subject: [PATCH 186/496] MAINT: fix spurious semicolon in macro definition of PyArray_FROM_OT There is a spurious semicolon (;) character at the end of the macro definition of PyArray_FROM_OT, in the header file ndarrayobject.h. This prevents the macro from being used like a function, e.g. one can't write like if ( !(arr = PyArray_FROM_OT( ... )) ) ... ... After removing the semicolon, the macro can be used like a C function. --- numpy/core/include/numpy/ndarrayobject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h index fbaaeacea04b..c97a3a797a68 100644 --- a/numpy/core/include/numpy/ndarrayobject.h +++ b/numpy/core/include/numpy/ndarrayobject.h @@ -96,7 +96,7 @@ extern "C" CONFUSE_EMACS NULL) #define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ - PyArray_DescrFromType(type), 0, 0, 0, NULL); + PyArray_DescrFromType(type), 0, 0, 0, NULL) #define PyArray_FROM_OTF(m, type, flags) \ PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ From 8d00317744444ed5b5fc4c454c9c0040c8999f1d Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Tue, 17 Nov 2015 18:05:45 -0800 Subject: [PATCH 187/496] TST: test np.rint bug for large integers Test for https://github.com/numpy/numpy/issues/6685 Add test to remind packagers that they may need to fix or workaround this bug on some systems. 
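The invariant being tested is, in short (the value below is exactly representable as a double, as the test itself checks):

```python
import numpy as np

val = 4607998452777363968
assert val == int(float(val))  # the round trip through float is lossless
assert val == np.rint(val)     # so rint must return the value unchanged
```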
--- numpy/core/tests/test_umath.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 541ad974b367..2ba988b87880 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1926,5 +1926,15 @@ def test_complex_nan_comparisons(): assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + if __name__ == "__main__": run_module_suite() From 4d2d360fd75301d4a3cb9914872f90fbef689667 Mon Sep 17 00:00:00 2001 From: Iceman9 Date: Wed, 11 Nov 2015 00:19:45 +0100 Subject: [PATCH 188/496] BLD: Enabled building with MSVC 14.0 Reallocated free functions in mem_overlap.c Cosmetics. Final indent. Added tests if pointer==NULL Fixed indent Fixed position of goto label. Fixed ISO C90 violation. Made simpler checks and removed redundant lines. --- numpy/core/src/multiarray/mapping.c | 2 +- numpy/core/src/private/mem_overlap.c | 25 +++++++++++++++++++------ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 44de1cbf266e..7d0bfa822333 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1293,7 +1293,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) PyArray_NDIM(arr), PyArray_SHAPE(arr), PyArray_STRIDES(arr), - PyArray_DATA(arr) + offset, + ((char *)PyArray_DATA(arr)) + offset, PyArray_FLAGS(arr), (PyObject *)arr); if (*view == NULL) { diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/private/mem_overlap.c index 3cab83497c55..b2b80b4e6c57 100644 --- a/numpy/core/src/private/mem_overlap.c +++ b/numpy/core/src/private/mem_overlap.c @@ -479,6 +479,7 @@ NPY_VISIBILITY_HIDDEN mem_overlap_t solve_diophantine(unsigned int n, diophantine_term_t *E, npy_int64 b, Py_ssize_t max_work, int require_ub_nontrivial, npy_int64 *x) { + mem_overlap_t res; unsigned int j; for (j = 0; j < n; ++j) { @@ -535,15 +536,27 @@ solve_diophantine(unsigned int n, diophantine_term_t *E, npy_int64 b, return MEM_OVERLAP_NO; } else { - diophantine_term_t Ep[n]; - npy_int64 Epsilon[n], Gamma[n]; Py_ssize_t count = 0; + diophantine_term_t *Ep = NULL; + npy_int64 *Epsilon = NULL, *Gamma = NULL; - if (diophantine_precompute(n, E, Ep, Gamma, Epsilon)) { - return MEM_OVERLAP_OVERFLOW; + Ep = malloc(n * sizeof(diophantine_term_t)); + Epsilon = malloc(n * sizeof(npy_int64)); + Gamma = malloc(n * sizeof(npy_int64)); + if (Ep == NULL || Epsilon == NULL || Gamma == NULL) { + res = MEM_OVERLAP_ERROR; + } + else if (diophantine_precompute(n, E, Ep, Gamma, Epsilon)) { + res = MEM_OVERLAP_OVERFLOW; + } + else { + res = diophantine_dfs(n, n-1, E, Ep, Gamma, Epsilon, b, max_work, + require_ub_nontrivial, x, &count); } - return diophantine_dfs(n, n-1, E, Ep, Gamma, Epsilon, b, max_work, - require_ub_nontrivial, x, &count); + free(Ep); + free(Gamma); + free(Epsilon); + return res; } } From 6fd06c7a2e6ef37604a99159cc8d1517aec95652 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Thu, 19 Nov 2015 12:31:11 -0500 Subject: [PATCH 189/496] BUG: fix pointer arithmetic in _get_field_view Should have used PyArray_BYTES, not PyArray_DATA Fixes #6701 --- 
numpy/core/src/multiarray/mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 7d0bfa822333..b701c82215d3 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -1293,7 +1293,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) PyArray_NDIM(arr), PyArray_SHAPE(arr), PyArray_STRIDES(arr), - ((char *)PyArray_DATA(arr)) + offset, + PyArray_BYTES(arr) + offset, PyArray_FLAGS(arr), (PyObject *)arr); if (*view == NULL) { From 2a47ffc477fc72e2bc59341e1debac2096d10499 Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Thu, 19 Nov 2015 13:17:10 -0500 Subject: [PATCH 190/496] MAINT: fix casting warnings in mapping.c --- numpy/core/src/multiarray/mapping.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index b701c82215d3..6c56d77bb3ec 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -169,7 +169,8 @@ prepare_index(PyArrayObject *self, PyObject *index, int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; - npy_intp i, n; + int i; + npy_intp n; npy_bool make_tuple = 0; PyObject *obj = NULL; @@ -348,14 +349,15 @@ prepare_index(PyArrayObject *self, PyObject *index, #else if (PyLong_CheckExact(obj) || !PyArray_Check(obj)) { #endif - i = PyArray_PyIntAsIntp(obj); - if ((i == -1) && PyErr_Occurred()) { + npy_intp ind = PyArray_PyIntAsIntp(obj); + + if ((ind == -1) && PyErr_Occurred()) { PyErr_Clear(); } else { index_type |= HAS_INTEGER; indices[curr_idx].object = NULL; - indices[curr_idx].value = i; + indices[curr_idx].value = ind; indices[curr_idx].type = HAS_INTEGER; used_ndim += 1; new_ndim += 0; @@ -527,15 +529,16 @@ prepare_index(PyArrayObject *self, PyObject *index, * sure that array-likes or odder arrays are always * handled right. */ - i = PyArray_PyIntAsIntp((PyObject *)arr); + npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr); + Py_DECREF(arr); - if ((i == -1) && PyErr_Occurred()) { + if ((ind == -1) && PyErr_Occurred()) { goto failed_building_indices; } else { index_type |= (HAS_INTEGER | HAS_SCALAR_ARRAY); indices[curr_idx].object = NULL; - indices[curr_idx].value = i; + indices[curr_idx].value = ind; indices[curr_idx].type = HAS_INTEGER; used_ndim += 1; new_ndim += 0; @@ -2445,8 +2448,8 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices, /* advance curr_dim for non-fancy indices */ else if (indices[i].type == HAS_ELLIPSIS) { - curr_dim += indices[i].value; - result_dim += indices[i].value; + curr_dim += (int)indices[i].value; + result_dim += (int)indices[i].value; } else if (indices[i].type != HAS_NEWAXIS){ curr_dim += 1; @@ -2891,7 +2894,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, stride = extra_op_dtype->elsize; for (i=PyArray_NDIM(subspace) - 1; i >= 0; i--) { strides[mit->nd_fancy + strideperm[i].perm] = stride; - stride *= PyArray_DIM(subspace, strideperm[i].perm); + stride *= PyArray_DIM(subspace, (int)strideperm[i].perm); } /* From 07b11e8676f43e9fe2f1dc1cb5c777728ada0f04 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 24 Nov 2015 11:33:14 -0700 Subject: [PATCH 191/496] BUG: Include relevant files from numpy/linalg/lapack_lite in sdist. After 1e436a5 *.h and *.c files from numpy/linalg/lapack_lite were no longer included in source distributions. 
Fix this by adding them to MANIFEST.in. Closes #6694. --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 56d40efbf1fd..3695dfe57cfa 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -11,6 +11,7 @@ include numpy/random/mtrand/generate_mtrand_c.py recursive-include numpy/random/mtrand *.pyx *.pxd # Add build support that should go in sdist, but not go in bdist/be installed recursive-include numpy/_build_utils * +recursive-include numpy/linalg/lapack_lite *.c *.h # Add sdist files whose use depends on local configuration. include numpy/core/src/multiarray/cblasfuncs.c include numpy/core/src/multiarray/python_xerbla.c From e131ba4464a2d2d6df9cb6f95b147514a14e0597 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Fri, 11 Sep 2015 18:18:09 +0200 Subject: [PATCH 192/496] MAINT: enable Werror=vla in travis avoids issues with stone age compilers like MSVC --- tools/travis-test.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index b888a7eb034f..818d0cf41704 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -10,6 +10,8 @@ if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then export LD_PRELOAD=/usr/lib/libeatmydata/libeatmydata.so fi +# make some warnings fatal, mostly to match windows compilers +werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull" setup_base() { @@ -27,16 +29,14 @@ if [ -z "$USE_DEBUG" ]; then $PIP install . else sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")" - # windows compilers have this requirement - CFLAGS="$sysflags -Werror=declaration-after-statement -Werror=nonnull -Wlogical-op" $PIP install . 2>&1 | tee log + CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>"; # accept a mysterious memset warning that shows with -flto test $(grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>" -c) -lt 2; fi else sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")" - # windows compilers have this requirement - CFLAGS="$sysflags -Werror=declaration-after-statement -Werror=nonnull" $PYTHON setup.py build_ext --inplace + CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace fi } From 4e276acb5b5081ac9b5f54f1d0a60bf3473572b5 Mon Sep 17 00:00:00 2001 From: Gerrit Holl Date: Wed, 25 Nov 2015 16:11:23 +0000 Subject: [PATCH 193/496] BUG: Fix for #6719 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit numpy/random/mtrand/mtrand.pyx contains a line where cython fails to compile, complaining “Pythonic division not allowed without gil”. By running this code segment under cdivision(True), this problem is avoided. 
--- numpy/random/mtrand/mtrand.pyx | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index f8ae8d71be19..080591e5ee53 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -127,6 +127,7 @@ cdef extern from "initarray.h":
 # Initialize numpy
 import_array()

+cimport cython
 import numpy as np
 import operator
 import warnings
@@ -4484,7 +4485,7 @@ cdef class RandomState:
         mnarr = <ndarray>multin
         mnix = <long*>PyArray_DATA(mnarr)
         sz = PyArray_SIZE(mnarr)
-        with self.lock, nogil:
+        with self.lock, nogil, cython.cdivision(True):
             i = 0
             while i < sz:
                 Sum = 1.0
From 9705c743ab340b13ab6e1ea7ff1dfb3e76299c4a Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 25 Nov 2015 11:02:13 -0700
Subject: [PATCH 194/496] MAINT: Localize variables only used with relaxed
 stride checking.

The variables in question lead to unused variable warnings when not compiling with NPY_RELAXED_STRIDE_CHECKING=1, resulting in failing travis tests.
---
 numpy/core/src/multiarray/buffer.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 7f7607e1f6a4..5fa3ba95b7ec 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -629,8 +629,6 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
 {
     PyArrayObject *self;
     _buffer_info_t *info = NULL;
-    int i;
-    Py_ssize_t sd;

     self = (PyArrayObject*)obj;
@@ -715,15 +713,19 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
      * regenerate strides from shape. */
     if (PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS) &&
-            !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) {
-        sd = view->itemsize;
+        !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) {
+        Py_ssize_t sd = view->itemsize;
+        int i;
+
         for (i = view->ndim-1; i >= 0; --i) {
             view->strides[i] = sd;
             sd *= view->shape[i];
         }
     }
     else if (PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)) {
-        sd = view->itemsize;
+        Py_ssize_t sd = view->itemsize;
+        int i;
+
         for (i = 0; i < view->ndim; ++i) {
             view->strides[i] = sd;
             sd *= view->shape[i];
         }
From 6711d17a529c79b71397b48167b009f8dbf41985 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 25 Nov 2015 13:10:05 -0700
Subject: [PATCH 195/496] TST: Fix travis-ci test for numpy wheels.

---
 tools/travis-test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index b888a7eb034f..8d2e200a69cc 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -100,7 +100,7 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
     . venv-for-wheel/bin/activate
     # Move out of source directory to avoid finding local numpy
     pushd dist
-    $PIP install --pre --upgrade --find-links . numpy
+    $PIP install --pre --no-index --upgrade --find-links=. numpy
     $PIP install nose
     popd
     run_test
From a083c9e358d831260b100f81953b0935313c689b Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 24 Nov 2015 13:05:01 -0700
Subject: [PATCH 196/496] BUG: Readd fallback CBLAS detection on linux.

Fallback CBLAS detection was removed in gh-6183 because it led to problems on windows when mingw was used with python compiled with msvc but msvc was not installed. As a result of that fix, CBLAS detection failed for some Linux installations. The solution here is to add back the fallback detection but make it specific to non-windows platforms.

Closes #6675.
--- numpy/distutils/system_info.py | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index a0c6f44f79a8..7ea8b8c62b90 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1678,9 +1678,37 @@ def calc_info(self):
         info = self.check_libs(lib_dirs, blas_libs, [])
         if info is None:
             return
-        info['language'] = 'f77'  # XXX: is it generally true?
+        if platform.system() != 'Windows' and self.has_cblas():
+            # The check for windows is needed because has_cblas uses the
+            # same compiler that was used to compile Python and msvc is
+            # often not installed when mingw is being used. This rough
+            # treatment is not desirable, but windows is tricky.
+            info['language'] = 'c'
+            info['define_macros'] = [('HAVE_CBLAS', None)]
+        else:
+            info['language'] = 'f77'  # XXX: is it generally true?
         self.set_info(**info)

+    def has_cblas(self):
+        # primitive cblas check by looking for the header
+        res = False
+        c = distutils.ccompiler.new_compiler()
+        tmpdir = tempfile.mkdtemp()
+        s = """#include <cblas.h>"""
+        src = os.path.join(tmpdir, 'source.c')
+        try:
+            with open(src, 'wt') as f:
+                f.write(s)
+            try:
+                c.compile([src], output_dir=tmpdir,
+                          include_dirs=self.get_include_dirs())
+                res = True
+            except distutils.ccompiler.CompileError:
+                res = False
+        finally:
+            shutil.rmtree(tmpdir)
+        return res
+
 class openblas_info(blas_info):
     section = 'openblas'
From 531f2ad34d006585281b9416fa30992dd37cf2af Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Thu, 26 Nov 2015 15:04:37 +0100
Subject: [PATCH 197/496] ENH: Avoid memory peak when creating a MaskedArray
 with mask=True/False (#6732).

When the `mask` parameter is set to True or False, directly create a boolean `ndarray` instead of going inside `np.resize`, which was causing a memory peak of ~15 times the size of the mask.
---
 numpy/ma/core.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index b9f7da092659..75c7c001e9df 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2756,13 +2756,19 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
                 _data._sharedmask = True
         else:
             # Case 2. : With a mask in input.
-            # Read the mask with the current mdtype
-            try:
-                mask = np.array(mask, copy=copy, dtype=mdtype)
-            # Or assume it's a sequence of bool/int
-            except TypeError:
-                mask = np.array([tuple([m] * len(mdtype)) for m in mask],
-                                dtype=mdtype)
+            # If mask is boolean, create an array of True or False
+            if mask is True:
+                mask = np.ones(_data.shape, dtype=mdtype)
+            elif mask is False:
+                mask = np.zeros(_data.shape, dtype=mdtype)
+            else:
+                # Read the mask with the current mdtype
+                try:
+                    mask = np.array(mask, copy=copy, dtype=mdtype)
+                # Or assume it's a sequence of bool/int
+                except TypeError:
+                    mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+                                    dtype=mdtype)
             # Make sure the mask and the data have the same shape
             if mask.shape != _data.shape:
                 (nd, nm) = (_data.size, mask.size)
@@ -4690,7 +4696,7 @@ def dot(self, b, out=None, strict=False):
         See Also
         --------
         numpy.ma.dot : equivalent function
-        
+
         """
         return dot(self, b, out=out, strict=strict)
From 593345a75fdd3de103e3e50969fbca2ed752f08d Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Fri, 27 Nov 2015 23:12:32 +0100
Subject: [PATCH 198/496] ENH: Avoid memory peak and useless computations when
 printing a MaskedArray.

Ref #3544.
When printing a `MaskedArray`, the whole array is converted to the object dtype, whereas only a few values are printed to screen. So the approach here is to cut the array and keep only a subset that is used for the string conversion. This way the output should not change.
---
 numpy/ma/core.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index b9f7da092659..3c1f8210d484 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3710,8 +3710,19 @@ def __str__(self):
             # convert to object array to make filled work
             names = self.dtype.names
             if names is None:
-                res = self._data.astype("O")
-                res.view(ndarray)[m] = f
+                data = self._data
+                mask = m
+                nval = 50
+                # For big arrays, to avoid a costly conversion to the
+                # object dtype, extract the corners before the conversion.
+                for axis in range(self.ndim):
+                    if data.shape[axis] > 2 * nval:
+                        arr = np.split(data, (nval, -nval), axis=axis)
+                        data = np.concatenate((arr[0], arr[2]), axis=axis)
+                        arr = np.split(mask, (nval, -nval), axis=axis)
+                        mask = np.concatenate((arr[0], arr[2]), axis=axis)
+                res = data.astype("O")
+                res.view(ndarray)[mask] = f
             else:
                 rdtype = _recursive_make_descr(self.dtype, "O")
                 res = self._data.astype(rdtype)
@@ -4690,7 +4701,7 @@ def dot(self, b, out=None, strict=False):
         See Also
         --------
         numpy.ma.dot : equivalent function
-        
+
         """
         return dot(self, b, out=out, strict=strict)
From e9ef83d72b8d5ca6828b16fc8cdf435905fe9bb0 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Thu, 26 Nov 2015 14:49:26 -0700
Subject: [PATCH 199/496] DEP: Deprecate changing shape of non-C-contiguous
 array via descr.

This deprecates assignment of a new descriptor to the dtype attribute of a non-C-contiguous array if it results in changing the shape. This effectively bars viewing a multidimensional Fortran array using a dtype that changes the element size along the first axis.

The reason for the deprecation is that, when relaxed strides checking is enabled, arrays that are both C and Fortran contiguous are always treated as C contiguous, which breaks some code that depended on the two being mutually exclusive for arrays of dimension > 1. The intent of this deprecation is to prepare the way to always enable relaxed stride checking.

Example
-------
```
In [1]: import warnings

In [2]: warnings.simplefilter('always')

In [3]: a = ones((2, 1), order='F').view(complex)
/home/charris/.local/bin/ipython:1: DeprecationWarning: Changing the shape of non-C contiguous array by descriptor assignment is deprecated.
To maintain the Fortran contiguity of a multidimensional Fortran array, use 'a.T.view(...).T' instead
```
---
 numpy/core/src/multiarray/getset.c    | 20 +++++++++++++++++---
 numpy/core/tests/test_deprecations.py | 16 +++++++++++++++-
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 549ea333ae5e..c2a88e3b94a3 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -488,11 +488,25 @@ array_descr_set(PyArrayObject *self, PyObject *arg)

     if ((newtype->elsize != PyArray_DESCR(self)->elsize) &&
-            (PyArray_NDIM(self) == 0 || !PyArray_ISONESEGMENT(self) ||
-             PyDataType_HASSUBARRAY(newtype))) {
+            (PyArray_NDIM(self) == 0 ||
+             !PyArray_ISONESEGMENT(self) ||
+             PyDataType_HASSUBARRAY(newtype))) {
         goto fail;
     }
-    if (PyArray_ISCONTIGUOUS(self)) {
+
+    /* Deprecate not C contiguous and a dimension changes */
+    if (newtype->elsize != PyArray_DESCR(self)->elsize &&
+            !PyArray_IS_C_CONTIGUOUS(self)) {
+        /* 11/27/2015 1.11.0 */
+        if (DEPRECATE("Changing the shape of non-C contiguous array by\n"
+                      "descriptor assignment is deprecated. To maintain\n"
+                      "the Fortran contiguity of a multidimensional Fortran\n"
+                      "array, use 'a.T.view(...).T' instead") < 0) {
+            return -1;
+        }
+    }
+
+    if (PyArray_IS_C_CONTIGUOUS(self)) {
         i = PyArray_NDIM(self) - 1;
     }
     else {
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e2542195fe4a..8f7e55d9125c 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -375,7 +375,7 @@ def test_simple(self):
                       arr.__getitem__, (slice(None), index))


-class TestFullDefaultDtype:
+class TestFullDefaultDtype(object):
     """np.full defaults to float when dtype is not set.  In the future, it will
     use the fill value's dtype.
     """
@@ -386,5 +386,19 @@ def test_full_default_dtype(self):
         assert_no_warnings(np.full, 1, 1, float)


+class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
+    """View of non-C-contiguous arrays deprecated in 1.11.0.
+
+    The deprecation will not be raised for arrays that are both C and F
+    contiguous, as C contiguous is dominant. There are more such arrays
+    with relaxed stride checking than without so the deprecation is not
+    as visible with relaxed stride checking in force.
+    """
+
+    def test_fortran_contiguous(self):
+        self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
+        self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
+
+
 if __name__ == "__main__":
     run_module_suite()
From d07e20ea2718c2d460a203f6775aef6cea8ba520 Mon Sep 17 00:00:00 2001
From: Gerrit Holl
Date: Thu, 26 Nov 2015 12:01:15 +0000
Subject: [PATCH 200/496] BUG/TST: Fix for #6729

Fix representation of a structured masked array with dimension zero. The effect of representing a masked array with dimension zero is now similar to representing an mvoid. This commit fixes #6729.
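For example (the expected string is the control value in the test added below):

```python
import numpy as np

t = np.ma.masked_array(data=(0, [[0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0]], 0.0),
                       mask=(False, [[True, False, True],
                                     [False, False, True]], False),
                       dtype="int, (2,3)float, float")
print(t)   # (0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)
```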
--- numpy/ma/core.py            |  2 +-
 numpy/ma/tests/test_core.py | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index b9f7da092659..b7ee4a7974b8 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3695,7 +3695,7 @@ def __str__(self):
         if m is nomask:
             res = self._data
         else:
-            if m.shape == ():
+            if m.shape == () and m.itemsize==len(m.dtype):
                 if m.dtype.names:
                     m = m.view((bool, len(m.dtype)))
                     if m.any():
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index e5fdfddb144f..4e6a20ad9c55 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -625,6 +625,18 @@ def test_fancy_printoptions(self):
         control = "[(--, (2, --)) (4, (--, 6.0))]"
         assert_equal(str(test), control)

+        # Test 0-d array with multi-dimensional dtype
+        t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
+                                         [0.0, 0.0, 0.0]],
+                                     0.0),
+                             mask = (False, [[True, False, True],
+                                             [False, False, True]],
+                                     False),
+                             dtype = "int, (2,3)float, float")
+        control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
+        assert_equal(str(t_2d0), control)
+
+
     def test_flatten_structured_array(self):
         # Test flatten_structured_array on arrays
         # On ndarray
From 70d8cf55339eca151ad0896526f2e0815dba1489 Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Tue, 1 Dec 2015 22:07:27 +0100
Subject: [PATCH 201/496] Test that the mask dtype is MaskType before using
 np.zeros/ones

---
 numpy/ma/core.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 75c7c001e9df..807b28c5bd98 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2757,9 +2757,9 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
         else:
             # Case 2. : With a mask in input.
             # If mask is boolean, create an array of True or False
-            if mask is True:
+            if mask is True and mdtype == MaskType:
                 mask = np.ones(_data.shape, dtype=mdtype)
-            elif mask is False:
+            elif mask is False and mdtype == MaskType:
                 mask = np.zeros(_data.shape, dtype=mdtype)
             else:
                 # Read the mask with the current mdtype
From 511dab48438dcc9470b5632e206eeef74f5ad6bc Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Tue, 1 Dec 2015 22:08:31 +0100
Subject: [PATCH 202/496] Add some tests for mask creation with mask=True or
 False.

---
 numpy/ma/tests/test_core.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index e5fdfddb144f..cab5abb335ac 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -191,6 +191,15 @@ def test_creation_maskcreation(self):
         dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
         fail_if_equal(dma_3.mask, dma_1.mask)

+        x = array([1, 2, 3], mask=True)
+        assert_equal(x._mask, [True, True, True])
+        x = array([1, 2, 3], mask=False)
+        assert_equal(x._mask, [False, False, False])
+        y = array([1, 2, 3], mask=x._mask, copy=False)
+        assert_(np.may_share_memory(x.mask, y.mask))
+        y = array([1, 2, 3], mask=x._mask, copy=True)
+        assert_(not np.may_share_memory(x.mask, y.mask))
+
     def test_creation_with_list_of_maskedarrays(self):
         # Tests creaating a masked array from alist of masked arrays.
         x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
From b5c456e84dc87521a476ff51e3a2ab55f8c5c29f Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Tue, 1 Dec 2015 23:35:31 +0100
Subject: [PATCH 203/496] Allow changing the maximum width with a class
 variable.
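A rough usage sketch (hypothetical session; `_print_width` is the class attribute introduced below, and the array needs a real mask for the trimming path to run):

```python
import numpy as np

np.ma.MaskedArray._print_width = 10  # keep 5 leading/trailing items per axis
x = np.ma.arange(100000)
x[0] = np.ma.masked                  # give the array a real mask
str(x)                               # only the corners get converted to objects
```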
--- numpy/ma/core.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 3c1f8210d484..5ef76a66e101 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -2684,6 +2684,8 @@ class MaskedArray(ndarray):
     _defaultmask = nomask
     _defaulthardmask = False
     _baseclass = ndarray
+    # Maximum number of elements per axis used when printing an array.
+    _print_width = 100

     def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
                 subok=True, ndmin=0, fill_value=None,
@@ -3712,14 +3714,14 @@ def __str__(self):
             if names is None:
                 data = self._data
                 mask = m
-                nval = 50
                 # For big arrays, to avoid a costly conversion to the
                 # object dtype, extract the corners before the conversion.
                 for axis in range(self.ndim):
-                    if data.shape[axis] > 2 * nval:
-                        arr = np.split(data, (nval, -nval), axis=axis)
+                    if data.shape[axis] > self._print_width:
+                        ind = np.int(self._print_width / 2)
+                        arr = np.split(data, (ind, -ind), axis=axis)
                         data = np.concatenate((arr[0], arr[2]), axis=axis)
-                        arr = np.split(mask, (nval, -nval), axis=axis)
+                        arr = np.split(mask, (ind, -ind), axis=axis)
                         mask = np.concatenate((arr[0], arr[2]), axis=axis)
                 res = data.astype("O")
From d0e9d98b2aa126bb2654c4c5966a4034c4bb99fc Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Wed, 2 Dec 2015 00:01:40 +0100
Subject: [PATCH 204/496] Use integer division to avoid casting to int.

---
 numpy/ma/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 5ef76a66e101..d412968c3080 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3718,7 +3718,7 @@ def __str__(self):
                 # object dtype, extract the corners before the conversion.
                 for axis in range(self.ndim):
                     if data.shape[axis] > self._print_width:
-                        ind = np.int(self._print_width / 2)
+                        ind = self._print_width // 2
                         arr = np.split(data, (ind, -ind), axis=axis)
                         data = np.concatenate((arr[0], arr[2]), axis=axis)
                         arr = np.split(mask, (ind, -ind), axis=axis)
From f752d84206bece604bbb8cb1c78b1f3a6af468f8 Mon Sep 17 00:00:00 2001
From: Simon Conseil
Date: Wed, 2 Dec 2015 10:19:08 +0100
Subject: [PATCH 205/496] DOC: Add changelog for #6734 and #6748.

---
 doc/release/1.11.0-notes.rst | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index 580c0c952b74..e60062c81824 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -36,7 +36,7 @@ DeprecationWarning to error
 * Non-integers used as index values raise TypeError, e.g., in reshape, take,
   and specifying reduce axis.

-FutureWarning to changed behavior 
+FutureWarning to changed behavior
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 * In ``np.lib.split`` an empty array in the result always had dimension
@@ -89,6 +89,13 @@ The function now internally calls the generic ``npy_amergesort``
 when the type does not implement a merge-sort kind of ``argsort``
 method.

+Memory and speed improvements for masked arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
+``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and avoids
+a big memory peak. Another optimization was done to avoid a memory peak and
+useless computations when printing a masked array.
+ Changes ======= From 927e8809cb566722b1bd0b15dca9f030f3cd29b9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Dec 2015 21:08:24 +0100 Subject: [PATCH 206/496] BUG: resizing empty array with complex dtype failed This is because the dtype was passed into the new array as a char, and many dtypes do not have a valid char representation. Closes gh-6740 --- numpy/core/fromnumeric.py | 2 +- numpy/core/tests/test_numeric.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 0fc572cb6c81..197513294d01 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1134,7 +1134,7 @@ def resize(a, new_shape): a = ravel(a) Na = len(a) if not Na: - return mu.zeros(new_shape, a.dtype.char) + return mu.zeros(new_shape, a.dtype) total_size = um.multiply.reduce(new_shape) n_copies = int(total_size / Na) extra = total_size % Na diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index f5c22392a414..43dad42f1f7c 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -30,7 +30,15 @@ def test_copies(self): def test_zeroresize(self): A = np.array([[1, 2], [3, 4]]) Ar = np.resize(A, (0,)) - assert_equal(Ar, np.array([])) + assert_array_equal(Ar, np.array([])) + assert_equal(A.dtype, Ar.dtype) + + def test_reshape_from_zero(self): + # See also gh-6740 + A = np.zeros(0, dtype=[('a', np.float32, 1)]) + Ar = np.resize(A, (2, 1)) + assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) + assert_equal(A.dtype, Ar.dtype) class TestNonarrayArgs(TestCase): From c8a09822c707f320d8c8ac242a8628de690a5899 Mon Sep 17 00:00:00 2001 From: Gerrit Holl Date: Thu, 3 Dec 2015 16:34:52 +0000 Subject: [PATCH 207/496] BUG/TST: Fix #6760 by correctly describing mask on nested subdtypes Fix #6760. In ma.core._recursive_make_descr, consider the case where a subdtype does itself have named fields. This ensures the correct mask for an array like `ma.zeros(2, dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))`. 
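With the fix, the mask dtype mirrors the nesting of the data dtype; this is exactly what the new test asserts:

```python
import numpy as np

Z = np.ma.zeros(2, np.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))]))
Z.mask.dtype
# dtype([('A', [('f0', '?', (2, 2)), ('f1', '?', (2, 2))], (2, 2))])
```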
--- numpy/ma/core.py            | 2 +-
 numpy/ma/tests/test_core.py | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 9cc1a12727c0..ebc335a85098 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -1248,7 +1248,7 @@ def _recursive_make_descr(datatype, newtype=bool_):
     # Is this some kind of composite a la (np.float,2)
     elif datatype.subdtype:
         mdescr = list(datatype.subdtype)
-        mdescr[0] = newtype
+        mdescr[0] = _recursive_make_descr(datatype.subdtype[0], newtype)
         return tuple(mdescr)
     else:
         return newtype
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 2fdd00484907..369138471380 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -608,6 +608,13 @@ def test_filled_w_nested_dtype(self):
         control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
         assert_equal(test, control)

+        # test if mask gets set correctly (see #6760)
+        Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
+        assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
+                                          ('f1', 'i1', (2, 2))], (2, 2))]))
+        assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
+                                          ('f1', '?', (2, 2))], (2, 2))]))
+
     def test_filled_w_f_order(self):
         # Test filled w/ F-contiguous array
         a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
From 53facf327c7b2949a5f31c308fd66b0b1f24b615 Mon Sep 17 00:00:00 2001
From: Gerrit Holl
Date: Wed, 2 Dec 2015 11:55:36 +0000
Subject: [PATCH 208/496] BUG/TST: Fix for #6724, make numpy.ma.mvoid
 consistent with numpy.void

Make indexing on numpy.ma.mvoid consistent with indexing on numpy.void. Changes behaviour in rare cases (see below). Fixes #6724.

Sometimes, indexing ma.mvoid results in a non-scalar mask. For example, dimension increases if indexing with a multi-dimensional field. Previously, this led to a ValueError (truth value ambiguous). With this commit, indexing now returns an ma.masked_array so that there is no loss of information.

Note that there is a precedent for returning from void to array. Z = zeros((2,), dtype="(2,)i2,(2,)i2"), then Z[0] is a void, but Z[0][0] and Z[0]["f1"] are array. This commit therefore implements behaviour such that numpy.ma.mvoid is consistent with numpy.void.

Also adds a related test.

The behaviour changes in cases where for a masked array `X`, X.dtype["A"] is multidimensional but size 1, such as in the example below. Any case where X.dtype["A"] is multidimensional but with size>1 would previously fail.

Old behaviour:

In [15]: X = ma.masked_array(data=[([0],)], mask=[([False],)], dtype=[("A", "(1,1)i2", (1,1))])

In [16]: X[0]["A"]
Out[16]: array([[[[0]]]], dtype=int16)

In [17]: X = ma.masked_array(data=[([0],)], mask=[([True],)], dtype=[("A", "(1,1)i2", (1,1))])

In [18]: X[0]["A"]
Out[18]: masked

New behaviour:

In [1]: X = ma.masked_array(data=[([0],)], mask=[([False],)], dtype=[("A", "(1,1)i2", (1,1))])

In [2]: X[0]["A"]
Out[2]:
masked_array(data = [[[[0]]]],
             mask = [[[[False]]]],
       fill_value = [[[[16959]]]])

In [3]: X = ma.masked_array(data=[([0],)], mask=[([True],)], dtype=[("A", "(1,1)i2", (1,1))])

In [4]: X[0]["A"]
Out[4]:
masked_array(data = [[[[--]]]],
             mask = [[[[ True]]]],
       fill_value = [[[[16959]]]])

The new behaviour is more consistent with indexing the data themselves:

In [7]: X.data[0]["A"]
Out[7]: array([[[[0]]]], dtype=int16)

In theory, this change in behaviour can break code, but I would consider it very unlikely.
--- numpy/ma/core.py            | 12 ++++++++++++
 numpy/ma/tests/test_core.py |  8 ++++++++
 2 files changed, 20 insertions(+)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 9cc1a12727c0..cb480deff40f 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -5869,6 +5869,18 @@ def __getitem__(self, indx):

         """
         m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #                     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+            return masked_array(
+                data=self._data[indx], mask=m[indx],
+                fill_value=self._fill_value[indx],
+                hard_mask=self._hardmask)
         if m is not nomask and m[indx]:
             return masked
         return self._data[indx]
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 2fdd00484907..a0b71854457c 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -712,6 +712,14 @@ def test_mvoid_getitem(self):
         self.assertTrue(f['a'] is masked)
         assert_equal(f[1], 4)

+        # exotic dtype
+        A = masked_array(data=[([0,1],)],
+                         mask=[([True, False],)],
+                         dtype=[("A", ">i2", (2,))])
+        assert_equal(A[0]["A"], A["A"][0])
+        assert_equal(A[0]["A"], masked_array(data=[0, 1],
+                     mask=[True, False], dtype=">i2"))
+
     def test_mvoid_iter(self):
         # Test iteration on __getitem__
         ndtype = [('a', int), ('b', int)]
From 16c6a361e741685feb67c29c36631829ce3b9559 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Thu, 3 Dec 2015 13:00:55 -0700
Subject: [PATCH 209/496] DOC: Document fortran order view deprecation in 1.11
 release notes.

---
 doc/release/1.11.0-notes.rst | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index 580c0c952b74..7c2ed2133296 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -18,6 +18,8 @@ Dropped Support
 Future Changes
 ==============

+* Relaxed stride checking will become the default.
+
 Compatibility notes
 ===================
@@ -36,7 +38,7 @@ DeprecationWarning to error
 * Non-integers used as index values raise TypeError, e.g., in reshape, take,
   and specifying reduce axis.

-FutureWarning to changed behavior 
+FutureWarning to changed behavior
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 * In ``np.lib.split`` an empty array in the result always had dimension
@@ -96,3 +98,16 @@ Changes
 Deprecations
 ============

+Views of arrays in Fortran order
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The f_contiguous flag was used to signal that views with a dtype that
+changed the element size would change the first index. This was always a
+bit problematical for arrays that were both f_contiguous and c_contiguous
+because c_contiguous took precedence. Relaxed stride checking results in
+more such dual contiguous arrays and breaks some existing code as a result.
+Note that this also affects changing the dtype by assigning to the dtype
+attribute of an array. The aim of this deprecation is to restrict views to
+c_contiguous arrays at some future time. A workaround that is backward
+compatible is to use `a.T.view(...).T` instead. A parameter will also be
+added to the view method to explicitly ask for Fortran order views, but
+that will not be backward compatible.
From 2fb84baca65c23006612be4bb2b92b1738a3aba2 Mon Sep 17 00:00:00 2001
From: Allan Haldane
Date: Wed, 2 Dec 2015 14:54:06 -0500
Subject: [PATCH 210/496] BUG: link cblas library if cblas is detected

---
 numpy/distutils/system_info.py | 49 ++++++++++++++++++++++++++--------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 7ea8b8c62b90..94436243ee58 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1678,33 +1678,60 @@ def calc_info(self):
         info = self.check_libs(lib_dirs, blas_libs, [])
         if info is None:
             return
-        if platform.system() != 'Windows' and self.has_cblas():
+        if platform.system() == 'Windows':
             # The check for windows is needed because has_cblas uses the
             # same compiler that was used to compile Python and msvc is
             # often not installed when mingw is being used. This rough
             # treatment is not desirable, but windows is tricky.
-            info['language'] = 'c'
-            info['define_macros'] = [('HAVE_CBLAS', None)]
-        else:
             info['language'] = 'f77'  # XXX: is it generally true?
+        else:
+            lib = self.has_cblas(info)
+            if lib is not None:
+                info['language'] = 'c'
+                info['libraries'] = [lib]
+                info['define_macros'] = [('HAVE_CBLAS', None)]
         self.set_info(**info)

-    def has_cblas(self):
-        # primitive cblas check by looking for the header
+    def has_cblas(self, info):
+        # primitive cblas check by looking for the header and trying to link
+        # cblas or blas
         res = False
         c = distutils.ccompiler.new_compiler()
         tmpdir = tempfile.mkdtemp()
-        s = """#include <cblas.h>"""
+        s = """#include <cblas.h>
+        int main(int argc, const char *argv[])
+        {
+            double a[4] = {1,2,3,4};
+            double b[4] = {5,6,7,8};
+            return cblas_ddot(4, a, 1, b, 1) > 10;
+        }"""
         src = os.path.join(tmpdir, 'source.c')
         try:
             with open(src, 'wt') as f:
                 f.write(s)
+
             try:
-                c.compile([src], output_dir=tmpdir,
-                          include_dirs=self.get_include_dirs())
-                res = True
+                # check we can compile (find headers)
+                obj = c.compile([src], output_dir=tmpdir,
+                                include_dirs=self.get_include_dirs())
+
+                # check we can link (find library)
+                # some systems have separate cblas and blas libs. First
+                # check for cblas lib, and if not present check for blas lib.
+ try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=["cblas"], + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + res = "cblas" + except distutils.ccompiler.LinkError: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=["blas"], + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + res = "blas" except distutils.ccompiler.CompileError: - res = False + res = None finally: shutil.rmtree(tmpdir) return res From e3243e2110842132ec3ebfe1d9f7857ec58ffd34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Hees?= Date: Wed, 2 Dec 2015 16:48:36 +0100 Subject: [PATCH 211/496] TST: test f2py, fallback on f2py2.7 etc., fixes #6718 --- numpy/tests/test_scripts.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index c7bb125b31b9..552383d7796b 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -12,6 +12,7 @@ from numpy.compat.py3k import basestring, asbytes from nose.tools import assert_equal from numpy.testing.decorators import skipif +from numpy.testing import assert_ skipif_inplace = skipif(isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))) @@ -63,7 +64,18 @@ def test_f2py(): if sys.platform == 'win32': f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable) code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v']) + assert_equal(stdout.strip(), asbytes('2')) else: - f2py_cmd = 'f2py' + basename(sys.executable)[6:] - code, stdout, stderr = run_command([f2py_cmd, '-v']) - assert_equal(stdout.strip(), asbytes('2')) + # unclear what f2py cmd was installed as, check plain (f2py) and + # current python version specific one (f2py3.4) + f2py_cmds = ['f2py', 'f2py' + basename(sys.executable)[6:]] + success = False + for f2py_cmd in f2py_cmds: + try: + code, stdout, stderr = run_command([f2py_cmd, '-v']) + assert_equal(stdout.strip(), asbytes('2')) + success = True + break + except FileNotFoundError: + pass + assert_(success, "wasn't able to find f2py or %s on commandline" % f2py_cmds[1]) From 8e9c91ca962b1cb76c392983740300691a37c3cd Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 6 Dec 2015 01:08:43 +0200 Subject: [PATCH 212/496] BENCH: allow benchmark suite to run on Python 3 --- benchmarks/benchmarks/bench_app.py | 2 ++ benchmarks/benchmarks/common.py | 12 +++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py index 0e2aca64b00f..ccf6e4c4af85 100644 --- a/benchmarks/benchmarks/bench_app.py +++ b/benchmarks/benchmarks/bench_app.py @@ -4,6 +4,8 @@ import numpy as np +from six.moves import xrange + class LaplaceInplace(Benchmark): params = ['inplace', 'normal'] diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index c99b0afb8e9e..e98396bed877 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, print_function + import numpy import random @@ -26,7 +28,7 @@ # values which will be used to construct our sample data matrices # replicate 10 times to speed up initial imports of this helper # and generate some redundancy -values = [random.uniform(0, 100) for x in range(nx*ny/10)]*10 +values = [random.uniform(0, 100) for x in range(nx*ny//10)]*10 squares = {t: numpy.array(values, dtype=getattr(numpy, t)).reshape((nx, ny)) @@ -34,16 +36,16 @@ 
# adjust complex ones to have non-degenerated imagery part -- use # original data transposed for that -for t, v in squares.iteritems(): +for t, v in squares.items(): if t.startswith('complex'): v += v.T*1j # smaller squares -squares_ = {t: s[:nxs, :nys] for t, s in squares.iteritems()} +squares_ = {t: s[:nxs, :nys] for t, s in squares.items()} # vectors -vectors = {t: s[0] for t, s in squares.iteritems()} +vectors = {t: s[0] for t, s in squares.items()} -indexes = range(nx) +indexes = list(range(nx)) # so we do not have all items indexes.pop(5) indexes.pop(95) From 4743f3b4454a736e2bbd8b6f116f7efaef13c406 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 5 Dec 2015 19:16:00 -0700 Subject: [PATCH 213/496] MAINT: Include from __future__ boilerplate in some files missing it. Some newer *.py files are missing the `from __future__` boilerplate that helps assure Python2 and Python3 compatibility. --- numpy/_build_utils/apple_accelerate.py | 2 ++ numpy/compat/tests/test_compat.py | 2 ++ numpy/core/tests/test_scalarinherit.py | 1 + numpy/distutils/msvc9compiler.py | 2 ++ numpy/distutils/msvccompiler.py | 2 ++ numpy/f2py/__main__.py | 2 ++ numpy/lib/tests/test_packbits.py | 3 ++- numpy/linalg/tests/test_deprecations.py | 2 ++ pavement.py | 2 +- runtests.py | 1 + tools/win32build/build-cpucaps.py | 2 ++ 11 files changed, 19 insertions(+), 2 deletions(-) diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py index d7351f4c52d4..2d5bbab5ea00 100644 --- a/numpy/_build_utils/apple_accelerate.py +++ b/numpy/_build_utils/apple_accelerate.py @@ -1,3 +1,5 @@ +from __future__ import division, absolute_import, print_function + import os import sys import re diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py index 9822ab374201..1ac24401a719 100644 --- a/numpy/compat/tests/test_compat.py +++ b/numpy/compat/tests/test_compat.py @@ -1,3 +1,5 @@ +from __future__ import division, absolute_import, print_function + from os.path import join from numpy.compat import isfileobj diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py index a2ca3e458aae..d8fd0acc3aaf 100644 --- a/numpy/core/tests/test_scalarinherit.py +++ b/numpy/core/tests/test_scalarinherit.py @@ -2,6 +2,7 @@ """ Test printing of scalar types. 
""" +from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import TestCase, run_module_suite diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py index 636165bd52a2..c53f45531c60 100644 --- a/numpy/distutils/msvc9compiler.py +++ b/numpy/distutils/msvc9compiler.py @@ -1,3 +1,5 @@ +from __future__ import division, absolute_import, print_function + import os import distutils.msvc9compiler from distutils.msvc9compiler import * diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py index 4c3658d5c8fc..78a386d5dff0 100644 --- a/numpy/distutils/msvccompiler.py +++ b/numpy/distutils/msvccompiler.py @@ -1,3 +1,5 @@ +from __future__ import division, absolute_import, print_function + import os import distutils.msvccompiler from distutils.msvccompiler import * diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py index 8f6d25619b85..cb8f261c1b9e 100644 --- a/numpy/f2py/__main__.py +++ b/numpy/f2py/__main__.py @@ -1,4 +1,6 @@ # See http://cens.ioc.ee/projects/f2py2e/ +from __future__ import division, print_function + import os import sys for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]: diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py index 186e8960db62..0de084ef9a47 100644 --- a/numpy/lib/tests/test_packbits.py +++ b/numpy/lib/tests/test_packbits.py @@ -1,5 +1,6 @@ -import numpy as np +from __future__ import division, absolute_import, print_function +import numpy as np from numpy.testing import assert_array_equal, assert_equal, assert_raises diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index 13d244199733..9b6fe343f5fc 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,6 +1,8 @@ """Test deprecation and future warnings. """ +from __future__ import division, absolute_import, print_function + import numpy as np from numpy.testing import assert_warns, run_module_suite diff --git a/pavement.py b/pavement.py index f4b1b2b1672e..ef6c6af52574 100644 --- a/pavement.py +++ b/pavement.py @@ -54,7 +54,7 @@ - fix bdist_mpkg: we build the same source twice -> how to make sure we use the same underlying python for egg install in venv and for bdist_mpkg """ -from __future__ import division, absolute_import, print_function +from __future__ import division, print_function # What need to be installed to build everything on mac os x: # - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH diff --git a/runtests.py b/runtests.py index 9376ae55f59b..957cbef100bc 100755 --- a/runtests.py +++ b/runtests.py @@ -24,6 +24,7 @@ $ python runtests.py --lcov-html """ +from __future__ import division, print_function # # This is a generic test runner script for projects using Numpy's test diff --git a/tools/win32build/build-cpucaps.py b/tools/win32build/build-cpucaps.py index d6a9dabc26b0..0c0a32dc5ec3 100644 --- a/tools/win32build/build-cpucaps.py +++ b/tools/win32build/build-cpucaps.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function + import os import subprocess # build cpucaps.dll From 7ffa81f04ba046d5413ab7c3f22c44fd545fcd5a Mon Sep 17 00:00:00 2001 From: Allan Haldane Date: Sun, 6 Dec 2015 14:10:48 -0500 Subject: [PATCH 214/496] BUG: metadata is not copied to base_dtype The (somewhat obsolete) metadata attribute of the data_dtype should be carried over in dtype specifications of the form (base_dtype, data_dtype). 
Fixes #6771

Incidentally fixes a reference leak in `dtype(('i4,i4', 'i4,i4'))`
---
 numpy/core/src/multiarray/descriptor.c | 8 ++++++++
 numpy/core/tests/test_dtype.py         | 4 ++++
 2 files changed, 12 insertions(+)

diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 83cd64bdca79..03a4654a0f7c 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -806,11 +806,19 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
     }
     new->elsize = conv->elsize;
     if (PyDataType_HASFIELDS(conv)) {
+        Py_XDECREF(new->fields);
         new->fields = conv->fields;
         Py_XINCREF(new->fields);
+
+        Py_XDECREF(new->names);
         new->names = conv->names;
         Py_XINCREF(new->names);
     }
+    if (conv->metadata != NULL) {
+        Py_XDECREF(new->metadata);
+        new->metadata = conv->metadata;
+        Py_XINCREF(new->metadata);
+    }
     new->flags = conv->flags;
     Py_DECREF(conv);
     *errflag = 0;
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 29f2ee7bdd6a..6d898eaa1f62 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -408,6 +408,10 @@ def test_nested_metadata(self):
         d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
         self.assertEqual(d['a'].metadata, {'datum': 1})

+    def test_base_metadata_copied(self):
+        d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
+        assert_equal(d.metadata, {'datum': 1})
+
 class TestString(TestCase):
     def test_complex_dtype_str(self):
         dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),

From 090e85e7bd77e67c8b6bac63fc4b22331eac1950 Mon Sep 17 00:00:00 2001
From: Gerrit Holl
Date: Wed, 25 Nov 2015 20:35:22 +0000
Subject: [PATCH 215/496] BUG/TST: Fix for #6723 including test: force
 fill_value.ndim==0

Fix issue #6723. Given an exotic masked structured array, where one of
the fields has a multidimensional dtype, make sure that, when accessing
this field, the fill_value still makes sense. As it stands prior to this
commit, the fill_value will end up being multidimensional, possibly with
a shape incompatible with the parent array, which leads to broadcasting
errors in methods such as .filled(). This commit uses the first element
of this multidimensional fill value as the new fill value. When more
than one unique value exists in fill_value, a warning is issued.

Also add a test to verify that fill_value.ndim remains 0 after indexing.
---
 numpy/ma/core.py            | 20 ++++++++++++++++++++
 numpy/ma/tests/test_core.py |  4 ++++
 2 files changed, 24 insertions(+)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 25e542cd6587..0a83284ca243 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3129,6 +3129,26 @@ def __getitem__(self, indx):
         if isinstance(indx, basestring):
             if self._fill_value is not None:
                 dout._fill_value = self._fill_value[indx]
+
+                # If we're indexing a multidimensional field in a
+                # structured array (such as dtype("(2,)i2,(2,)i1")),
+                # dimensionality goes up (M[field].ndim == M.ndim +
+                # len(M.dtype[field].shape)). That's fine for
+                # M[field] but problematic for M[field].fill_value
+                # which should have shape () to avoid breaking several
+                # methods. There is no great way out, so set to
+                # first element. See issue #6723.
+                if dout._fill_value.ndim > 0:
+                    if not (dout._fill_value ==
+                            dout._fill_value.flat[0]).all():
+                        warnings.warn(
+                            "Upon accessing multidimensional field "
+                            "{indx:s}, need to keep dimensionality "
+                            "of fill_value at 0. 
Discarding " + "heterogeneous fill_value and setting " + "all to {fv!s}.".format(indx=indx, + fv=dout._fill_value[0])) + dout._fill_value = dout._fill_value.flat[0] dout._isfield = True # Update the mask if needed if _mask is not nomask: diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index cecdedf269f5..8d7d9a47d504 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1674,6 +1674,10 @@ def test_fillvalue_exotic_dtype(self): assert_equal(test, control) control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) + # but when indexing, fill value should become scalar not tuple + # See issue #6723 + M = masked_array(control) + assert_equal(M["f1"].fill_value.ndim, 0) def test_fillvalue_datetime_timedelta(self): # Test default fillvalue for datetime64 and timedelta64 types. From 7b137ab0d52d95d5846f903a64207f5d6c0df17e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 7 Dec 2015 13:22:52 -0700 Subject: [PATCH 216/496] BUG: Quick and dirty fix for interp. The original had incorrect comparisons involving <=, <, and also failed when the number of data points was 2. This fixes the use of the comparisons and uses linear search for fewer than 5 data points. The whole routine needs a simplified rewrite, but this takes care of the bug. Closes #6468. --- numpy/core/src/multiarray/compiled_base.c | 37 +++++++++++---------- numpy/lib/tests/test_function_base.py | 40 ++++++++++++++++++++--- 2 files changed, 55 insertions(+), 22 deletions(-) diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 8ffeedac2f37..b9db3bb8fe74 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -529,14 +529,15 @@ binary_search_with_guess(const npy_double key, const npy_double *arr, } /* - * It would seem that for the following code to work, 'len' should - * at least be 4. But because of the way 'guess' is normalized, it - * will always be set to 1 if len <= 4. Given that, and that keys - * outside of the 'arr' bounds have already been handled, and the - * order in which comparisons happen below, it should become obvious - * that it will work with any array of at least 2 items. + * If len <= 4 use linear search. + * From above we know key >= arr[0] when we start. 
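+     * For example, with arr = {0., 2., 4., 6.} and key = 5., the loop
+     * below exits at i == 3 (key < arr[3]) and returns 2, the index of
+     * the interval [arr[2], arr[3]) that contains key.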
*/ - assert (len >= 2); + if (len <= 4) { + npy_intp i; + + for (i = 1; i < len && key >= arr[i]; ++i); + return i - 1; + } if (guess > len - 3) { guess = len - 3; @@ -546,36 +547,36 @@ binary_search_with_guess(const npy_double key, const npy_double *arr, } /* check most likely values: guess - 1, guess, guess + 1 */ - if (key <= arr[guess]) { - if (key <= arr[guess - 1]) { + if (key < arr[guess]) { + if (key < arr[guess - 1]) { imax = guess - 1; /* last attempt to restrict search to items in cache */ if (guess > LIKELY_IN_CACHE_SIZE && - key > arr[guess - LIKELY_IN_CACHE_SIZE]) { + key >= arr[guess - LIKELY_IN_CACHE_SIZE]) { imin = guess - LIKELY_IN_CACHE_SIZE; } } else { - /* key > arr[guess - 1] */ + /* key >= arr[guess - 1] */ return guess - 1; } } else { - /* key > arr[guess] */ - if (key <= arr[guess + 1]) { + /* key >= arr[guess] */ + if (key < arr[guess + 1]) { return guess; } else { - /* key > arr[guess + 1] */ - if (key <= arr[guess + 2]) { + /* key >= arr[guess + 1] */ + if (key < arr[guess + 2]) { return guess + 1; } else { - /* key > arr[guess + 2] */ + /* key >= arr[guess + 2] */ imin = guess + 2; /* last attempt to restrict search to items in cache */ if (guess < len - LIKELY_IN_CACHE_SIZE - 1 && - key <= arr[guess + LIKELY_IN_CACHE_SIZE]) { + key < arr[guess + LIKELY_IN_CACHE_SIZE]) { imax = guess + LIKELY_IN_CACHE_SIZE; } } @@ -673,7 +674,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) } } - /* binary_search_with_guess needs at least a 2 item long array */ + /* binary_search_with_guess needs at least a 3 item long array */ if (lenxp == 1) { const npy_double xp_val = dx[0]; const npy_double fp_val = dy[0]; diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 88c932692936..a5ac78e33719 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1974,10 +1974,42 @@ def test_basic(self): assert_almost_equal(np.interp(x0, x, y), x0) def test_right_left_behavior(self): - assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1]) - assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0]) - assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0]) + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. 
+        for size in range(1, 10):
+            xp = np.arange(size, dtype=np.double)
+            yp = np.ones(size, dtype=np.double)
+            incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
+            decpts = incpts[::-1]
+
+            incres = interp(incpts, xp, yp)
+            decres = interp(decpts, xp, yp)
+            inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+            dectgt = inctgt[::-1]
+            assert_equal(incres, inctgt)
+            assert_equal(decres, dectgt)
+
+            incres = interp(incpts, xp, yp, left=0)
+            decres = interp(decpts, xp, yp, left=0)
+            inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+            dectgt = inctgt[::-1]
+            assert_equal(incres, inctgt)
+            assert_equal(decres, dectgt)
+
+            incres = interp(incpts, xp, yp, right=2)
+            decres = interp(decpts, xp, yp, right=2)
+            inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+            dectgt = inctgt[::-1]
+            assert_equal(incres, inctgt)
+            assert_equal(decres, dectgt)
+
+            incres = interp(incpts, xp, yp, left=0, right=2)
+            decres = interp(decpts, xp, yp, left=0, right=2)
+            inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+            dectgt = inctgt[::-1]
+            assert_equal(incres, inctgt)
+            assert_equal(decres, dectgt)

     def test_scalar_interpolation_point(self):
         x = np.linspace(0, 1, 5)

From 8a2808ad368b66feacd8f3e1259e57e3a9fd1587 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Fri, 13 Nov 2015 12:09:40 -0700
Subject: [PATCH 217/496] DOC: update the 1.10.2 release notes.

Hopefully the final update before 1.10.2 is released.

[ci skip]
---
 doc/release/1.10.2-notes.rst | 53 +++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 4 deletions(-)

diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index 70c9398bae94..c2e305a40191 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -10,16 +10,36 @@ Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.

 Compatibility notes
 ===================

-fix swig bug in ``numpy.i``
+Relaxed stride checking is no longer the default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+There were back compatibility problems involving views changing the dtype of
+multidimensional Fortran arrays that need to be dealt with over a longer
+timeframe.
+
+Fix swig bug in ``numpy.i``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
 using PyArray_ISFORTRAN to check for Fortran contiguity instead of
 PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
 updated numpy.i

+Deprecate views changing dimensions in fortran order
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This deprecates assignment of a new descriptor to the dtype attribute of
+a non-C-contiguous array if it results in changing the shape. This
+effectively bars viewing a multidimensional Fortran array using a dtype
+that changes the element size along the first axis.
+
+The reason for the deprecation is that, when relaxed stride checking is
+enabled, arrays that are both C and Fortran contiguous are always treated
+as C contiguous, which breaks some code that depended on the two being
+mutually exclusive for non-scalar arrays of ndim > 1. This deprecation
+prepares the way to always enable relaxed stride checking.
+
+
 Issues Fixed
 ============
-
+* gh-6019 Masked array repr fails for structured array with multi-dimensional column.
 * gh-6462 Median of empty array produces IndexError.
 * gh-6467 Performance regression for record array access.
 * gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
 * gh-6491 Error in broadcasting stride_tricks array.
 * gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
 * gh-6497 Failure of reduce operation on recarrays.
* gh-6498 Mention change in default casting rule in 1.10 release notes. * gh-6530 The partition function errors out on empty input. +* gh-6532 numpy.inner return wrong inaccurate value sometimes. * gh-6563 Intent(out) broken in recent versions of f2py. * gh-6569 Cannot run tests after 'python setup.py build_ext -i' * gh-6572 Error in broadcasting stride_tricks array component. @@ -39,6 +60,10 @@ Issues Fixed * gh-6636 Memory leak in nested dtypes in numpy.recarray * gh-6641 Subsetting recarray by fields yields a structured array. * gh-6667 ma.make_mask handles ma.nomask input incorrectly. +* gh-6675 Optimized blas detection broken in master and 1.10. +* gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex)) +* gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv. +* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil. Merged PRs ========== @@ -46,6 +71,7 @@ Merged PRs The following PRs in master have been backported to 1.10.2 * gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest. +* gh-6094 BUG: Fixed a bug with string representation of masked structured arrays. * gh-6208 MAINT: Speedup field access by removing unneeded safety checks. * gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure. * gh-6470 BUG: Fix AttributeError in numpy distutils. @@ -80,6 +106,17 @@ The following PRs in master have been backported to 1.10.2 * gh-6643 ENH: make recarray.getitem return a recarray. * gh-6653 BUG: Fix ma dot to always return masked array. * gh-6668 BUG: ma.make_mask should always return nomask for nomask argument. +* gh-6686 BUG: Fix a bug in assert_string_equal. +* gh-6695 BUG: Fix removing tempdirs created during build. +* gh-6697 MAINT: Fix spurious semicolon in macro definition of PyArray_FROM_OT. +* gh-6698 TST: test np.rint bug for large integers. +* gh-6717 BUG: Readd fallback CBLAS detection on linux. +* gh-6721 BUG: Fix for #6719. +* gh-6726 BUG: Fix bugs exposed by relaxed stride rollback. +* gh-6757 BUG: link cblas library if cblas is detected. +* gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718. +* gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr. + Initial support for mingwpy was reverted as it was causing problems for non-windows builds. @@ -87,11 +124,19 @@ non-windows builds. * gh-6536 BUG: Revert gh-5614 to fix non-windows build problems A fix for np.lib.split was reverted because it resulted in "fixing" -behavior will be present in the Numpy 1.11 and was already present in -Numpy 1.9. See the discussion of the issue at gh-6575 for clarification. +behavior that will be present in the Numpy 1.11 and that was already +present in Numpy 1.9. See the discussion of the issue at gh-6575 for +clarification. * gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays. +Relaxed stride checking was reverted. There were back compatibility +problems involving views changing the dtype of multidimensional Fortran +arrays that need to be dealt with over a longer timeframe. + +* gh-6735 MAINT: Make no relaxed stride checking the default for 1.10. + + Notes ===== A bug in the Numpy 1.10.1 release resulted in exceptions being raised for From 6ddc496dfafa717ccb88941c111cd6a6e6d64d98 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 6 Dec 2015 12:06:07 -0700 Subject: [PATCH 218/496] DOC: Update the 1.10.0 release notes. Document that relaxed stride checking is no longer the default in 1.10.2. 
The 1.10.0 release notes may be read more often than the following notes, so this will help folks to notice the change. [ci skip] --- doc/release/1.10.0-notes.rst | 6 ++++++ doc/release/1.10.2-notes.rst | 13 +++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst index e753707d47e0..35e967f44457 100644 --- a/doc/release/1.10.0-notes.rst +++ b/doc/release/1.10.0-notes.rst @@ -78,6 +78,12 @@ relaxed stride checking ~~~~~~~~~~~~~~~~~~~~~~~ NPY_RELAXED_STRIDE_CHECKING is now true by default. +UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was +changed to false for back compatibility reasons. More time is needed before +it can be made the default. As part of the roadmap a deprecation of +dimension changing views of f_contiguous not c_contiguous arrays was also +added. + Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using axis != 0 has raised a DeprecationWarning since NumPy 1.7, it now diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index c2e305a40191..a597a817c834 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -39,9 +39,11 @@ way to always enable relaxed stride checking. Issues Fixed ============ + * gh-6019 Masked array repr fails for structured array with multi-dimensional column. * gh-6462 Median of empty array produces IndexError. * gh-6467 Performance regression for record array access. +* gh-6468 numpy.interp uses 'left' value even when x[0]==xp[0]. * gh-6475 np.allclose returns a memmap when one of its arguments is a memmap. * gh-6491 Error in broadcasting stride_tricks array. * gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran. @@ -64,11 +66,15 @@ Issues Fixed * gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex)) * gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv. * gh-6719 Error compiling Cython file: Pythonic division not allowed without gil. +* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1 +* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing. + Merged PRs ========== -The following PRs in master have been backported to 1.10.2 +The following PRs have been merged into 1.10.2. When the PR is a backport, +the PR number for the original PR against master is listed. * gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest. * gh-6094 BUG: Fixed a bug with string representation of masked structured arrays. @@ -116,7 +122,10 @@ The following PRs in master have been backported to 1.10.2 * gh-6757 BUG: link cblas library if cblas is detected. * gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718. * gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr. - +* gh-6775 MAINT: Include from __future__ boilerplate in some files missing it. +* gh-6780 BUG: metadata is not copied to base_dtype. +* gh-6783 BUG: Fix travis ci testing for new google infrastructure. +* gh-6785 BUG: Quick and dirty fix for interp. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. From 2620f2306a24f934986a3357e2f275cb3a158a1e Mon Sep 17 00:00:00 2001 From: "Nathaniel J. 
Smith" Date: Mon, 7 Dec 2015 23:25:56 -0800 Subject: [PATCH 219/496] [doc] Fix title of governance section in docs --- doc/source/dev/governance/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/governance/index.rst b/doc/source/dev/governance/index.rst index 9a611a2febac..3919e5e66a5e 100644 --- a/doc/source/dev/governance/index.rst +++ b/doc/source/dev/governance/index.rst @@ -1,5 +1,5 @@ ##################### -Contributing to Numpy +NumPy governance ##################### .. toctree:: From dafefb2c3721a1fb43469c8068b57bf8f5b22aa6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 8 Dec 2015 16:58:00 -0700 Subject: [PATCH 220/496] DOC: Update future changes in the 1.11.0 release notes. Support for Python 2.6, 3.2, and 3.3 will be dropped in Numpy 1.12.0. [ci skip] --- doc/release/1.11.0-notes.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index fac868ca3103..a6c04e95a036 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -18,7 +18,8 @@ Dropped Support Future Changes ============== -* Relaxed stride checking will become the default. +* Relaxed stride checking will become the default in 1.12.0. +* Support for Python 2.6, 3.2, and 3.3 will be dropped in 1.12.0. Compatibility notes From 9d11602007923310d996ebed636281afe940c783 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Tue, 8 Dec 2015 19:10:38 -0800 Subject: [PATCH 221/496] [TST] fix test_dtype_error to actually test what it's supposed to Discovered while cleaning up uses of the silly aliases like 'np.object'. --- numpy/lib/tests/test_nanfunctions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index f418504c28b8..7a7b37b98c8d 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -395,12 +395,12 @@ class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin): def test_dtype_error(self): for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object]: - assert_raises(TypeError, f, _ndat, axis=1, dtype=np.int) + for dtype in [np.bool_, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) def test_out_dtype_error(self): for f in self.nanfuncs: - for dtype in [np.bool_, np.int_, np.object]: + for dtype in [np.bool_, np.int_, np.object_]: out = np.empty(_ndat.shape[0], dtype=dtype) assert_raises(TypeError, f, _ndat, axis=1, out=out) From 27abfb4923a3ff0433bb7f5dd08a42603641b519 Mon Sep 17 00:00:00 2001 From: Olivier Grisel Date: Thu, 3 Dec 2015 16:29:22 +0100 Subject: [PATCH 222/496] ENH deploy dev wheels to rackspace --- .travis.yml | 12 ++++++++++-- tools/travis-test.sh | 8 +++++--- tools/travis-upload-wheel.sh | 11 +++++++++++ 3 files changed, 26 insertions(+), 5 deletions(-) create mode 100755 tools/travis-upload-wheel.sh diff --git a/.travis.yml b/.travis.yml index 2447360f5884..314fd18d5298 100644 --- a/.travis.yml +++ b/.travis.yml @@ -48,8 +48,13 @@ matrix: - python3-nose - python: 2.7 env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1 - - python: 2.7 - env: USE_WHEEL=1 + - python: 3.5 + env: + - USE_WHEEL=1 + - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy + # The following is generated with the command: + # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY + - secure: 
"IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9TrXrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPowiFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAMahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" - python: 2.7 env: PYTHONOPTIMIZE=2 before_install: @@ -73,3 +78,6 @@ before_install: script: - ./tools/travis-test.sh + +after_success: + - ./tools/travis-upload-wheel.sh diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 795915d0bfe3..af151f434f4a 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -94,14 +94,16 @@ export PIP if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then # Build wheel $PIP install wheel + # ensure that the pip / setuptools versions deployed inside the venv are recent enough + $PIP install -U virtualenv $PYTHON setup.py bdist_wheel # Make another virtualenv to install into - virtualenv --python=python venv-for-wheel + virtualenv --python=`which $PYTHON` venv-for-wheel . venv-for-wheel/bin/activate # Move out of source directory to avoid finding local numpy pushd dist - $PIP install --pre --no-index --upgrade --find-links=. numpy - $PIP install nose + pip install --pre --no-index --upgrade --find-links=. numpy + pip install nose popd run_test elif [ "$USE_CHROOT" != "1" ]; then diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh new file mode 100755 index 000000000000..60b9aa7cb740 --- /dev/null +++ b/tools/travis-upload-wheel.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -ex + +export CLOUD_CONTAINER_NAME=travis-dev-wheels + +if [[ ( $USE_WHEEL == 1 ) && \ + ( "$TRAVIS_BRANCH" == "master" ) && \ + ( "$TRAVIS_PULL_REQUEST" == "false" ) ]]; then + pip install wheelhouse_uploader + python -m wheelhouse_uploader upload --local-folder $TRAVIS_BUILD_DIR/dist/ $CLOUD_CONTAINER_NAME +fi From bb959e1857c3ba2ad98ab87f13fdcc6b43740ffb Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 10 Dec 2015 19:41:33 -0700 Subject: [PATCH 223/496] MAINT: Replace assert with assert_(...) in some tests. 
--- numpy/core/tests/test_datetime.py | 4 ++-- numpy/core/tests/test_defchararray.py | 10 ++++---- numpy/core/tests/test_deprecations.py | 2 +- numpy/core/tests/test_mem_overlap.py | 9 ++++---- numpy/core/tests/test_memmap.py | 12 +++++----- numpy/core/tests/test_multiarray.py | 30 ++++++++++++------------ numpy/core/tests/test_numeric.py | 4 ++-- numpy/core/tests/test_scalarinherit.py | 12 +++++----- numpy/core/tests/test_shape_base.py | 4 ++-- numpy/core/tests/test_ufunc.py | 6 ++--- numpy/lib/tests/test_io.py | 6 ++--- numpy/linalg/tests/test_linalg.py | 4 ++-- numpy/ma/tests/test_core.py | 32 +++++++++++++------------- 13 files changed, 68 insertions(+), 67 deletions(-) diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 5fa28186736c..563aa48fb49c 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -571,9 +571,9 @@ def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) - assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) - assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): # datetime datetime computes the metadata gcd diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py index 9ef316481732..e828b879f31e 100644 --- a/numpy/core/tests/test_defchararray.py +++ b/numpy/core/tests/test_defchararray.py @@ -680,15 +680,15 @@ def test_slice(self): dtype='S4').view(np.chararray) sl1 = arr[:] assert_array_equal(sl1, arr) - assert sl1.base is arr - assert sl1.base.base is arr.base + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) sl2 = arr[:, :] assert_array_equal(sl2, arr) - assert sl2.base is arr - assert sl2.base.base is arr.base + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) - assert arr[0, 0] == asbytes('abc') + assert_(arr[0, 0] == asbytes('abc')) def test_empty_indexing(): diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 8f7e55d9125c..518b367f07eb 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -277,7 +277,7 @@ def test_array_richcompare_legacy_weirdness(self): with warnings.catch_warnings() as l: warnings.filterwarnings("always") assert_raises(TypeError, f, arg1, arg2) - assert not l + assert_(not l) else: # py2 assert_warns(DeprecationWarning, f, arg1, arg2) diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index 8d39fa4c0a36..a8b29ecd1b39 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -79,7 +79,8 @@ def _check_assignment(srcidx, dstidx): cpy[dstidx] = arr[srcidx] arr[dstidx] = arr[srcidx] - assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx) + assert_(np.all(arr == cpy), + 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) def test_overlapping_assignments(): @@ -129,7 +130,7 @@ def test_diophantine_fuzz(): if X is None: # Check the simplified decision problem agrees X_simplified = solve_diophantine(A, U, b, simplify=1) - assert X_simplified is None, (A, U, b, X_simplified) + assert_(X_simplified is None, 
(A, U, b, X_simplified)) # Check no solution exists (provided the problem is # small enough so that brute force checking doesn't @@ -149,7 +150,7 @@ def test_diophantine_fuzz(): else: # Check the simplified decision problem agrees X_simplified = solve_diophantine(A, U, b, simplify=1) - assert X_simplified is not None, (A, U, b, X_simplified) + assert_(X_simplified is not None, (A, U, b, X_simplified)) # Check validity assert_(sum(a*x for a, x in zip(A, X)) == b) @@ -391,7 +392,7 @@ def random_slice(n, step): s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) a = x[s1].transpose(t1) - assert not internal_overlap(a) + assert_(not internal_overlap(a)) cases += 1 diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py index 1585586caf9a..e41758c51033 100644 --- a/numpy/core/tests/test_memmap.py +++ b/numpy/core/tests/test_memmap.py @@ -103,28 +103,28 @@ def test_arithmetic_drops_references(self): shape=self.shape) tmp = (fp + 10) if isinstance(tmp, memmap): - assert tmp._mmap is not fp._mmap + assert_(tmp._mmap is not fp._mmap) def test_indexing_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) tmp = fp[[(1, 2), (2, 3)]] if isinstance(tmp, memmap): - assert tmp._mmap is not fp._mmap + assert_(tmp._mmap is not fp._mmap) def test_slicing_keeps_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) - assert fp[:2, :2]._mmap is fp._mmap + assert_(fp[:2, :2]._mmap is fp._mmap) def test_view(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) new1 = fp.view() new2 = new1.view() - assert(new1.base is fp) - assert(new2.base is fp) + assert_(new1.base is fp) + assert_(new2.base is fp) new_array = asarray(fp) - assert(new_array.base is fp) + assert_(new_array.base is fp) if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 693847273ff5..59360795411f 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3588,8 +3588,8 @@ def test_contiguous(self): self.a.flat[12] = 100.0 except ValueError: testpassed = True - assert testpassed - assert self.a.flat[12] == 12.0 + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) def test_discontiguous(self): testpassed = False @@ -3597,8 +3597,8 @@ def test_discontiguous(self): self.b.flat[4] = 100.0 except ValueError: testpassed = True - assert testpassed - assert self.b.flat[4] == 12.0 + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) def test___array__(self): c = self.a.flat.__array__() @@ -3606,16 +3606,16 @@ def test___array__(self): e = self.a0.flat.__array__() f = self.b0.flat.__array__() - assert c.flags.writeable is False - assert d.flags.writeable is False - assert e.flags.writeable is True - assert f.flags.writeable is True + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is True) - assert c.flags.updateifcopy is False - assert d.flags.updateifcopy is False - assert e.flags.updateifcopy is False - assert f.flags.updateifcopy is True - assert f.base is self.b0 + assert_(c.flags.updateifcopy is False) + assert_(d.flags.updateifcopy is False) + assert_(e.flags.updateifcopy is False) + assert_(f.flags.updateifcopy is True) + assert_(f.base is self.b0) class TestResize(TestCase): def test_basic(self): @@ -5440,14 +5440,14 @@ def test_relaxed_strides(self): if np.ones((10, 1), order="C").flags.f_contiguous: 
c.strides = (-1, 80, 8) - assert memoryview(c).strides == (800, 80, 8) + assert_(memoryview(c).strides == (800, 80, 8)) # Writing C-contiguous data to a BytesIO buffer should work fd = io.BytesIO() fd.write(c.data) fortran = c.T - assert memoryview(fortran).strides == (8, 80, 800) + assert_(memoryview(fortran).strides == (8, 80, 800)) arr = np.ones((1, 10)) if arr.flags.f_contiguous: diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 43dad42f1f7c..b7e146b5a203 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -328,8 +328,8 @@ def test_errobj(self): def log_err(*args): self.called += 1 extobj_err = args - assert (len(extobj_err) == 2) - assert ("divide" in extobj_err[0]) + assert_(len(extobj_err) == 2) + assert_("divide" in extobj_err[0]) with np.errstate(divide='ignore'): np.seterrobj([20000, 3, log_err]) diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py index d8fd0acc3aaf..e8cf7fde0056 100644 --- a/numpy/core/tests/test_scalarinherit.py +++ b/numpy/core/tests/test_scalarinherit.py @@ -5,7 +5,7 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite +from numpy.testing import TestCase, run_module_suite, assert_ class A(object): @@ -26,17 +26,17 @@ class C0(B0): class TestInherit(TestCase): def test_init(self): x = B(1.0) - assert str(x) == '1.0' + assert_(str(x) == '1.0') y = C(2.0) - assert str(y) == '2.0' + assert_(str(y) == '2.0') z = D(3.0) - assert str(z) == '3.0' + assert_(str(z) == '3.0') def test_init2(self): x = B0(1.0) - assert str(x) == '1.0' + assert_(str(x) == '1.0') y = C0(2.0) - assert str(y) == '2.0' + assert_(str(y) == '2.0') if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index cba083875cc3..0d163c1dc9bf 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -295,8 +295,8 @@ def test_stack(): for axis, expected_shape in zip(axes, expected_shapes): assert_equal(np.stack(arrays, axis).shape, expected_shape) # empty arrays - assert stack([[], [], []]).shape == (3, 0) - assert stack([[], [], []], axis=1).shape == (0, 3) + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], axis=1).shape == (0, 3)) # edge cases assert_raises_regex(ValueError, 'need at least one array', stack, []) assert_raises_regex(ValueError, 'must have the same shape', diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 934d91e7caf6..eb09853861c8 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -37,17 +37,17 @@ def test_sig_dtype(self): class TestUfunc(TestCase): def test_pickle(self): import pickle - assert pickle.loads(pickle.dumps(np.sin)) is np.sin + assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin) # Check that ufunc not defined in the top level numpy namespace such as # numpy.core.test_rational.test_add can also be pickled - assert pickle.loads(pickle.dumps(test_add)) is test_add + assert_(pickle.loads(pickle.dumps(test_add)) is test_add) def test_pickle_withstring(self): import pickle astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n" "(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") - assert pickle.loads(astring) is np.cos + assert_(pickle.loads(astring) is np.cos) def test_reduceat_shifting_sum(self): L = 6 diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py 
index af904e96a404..bffc5c63e066 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1815,9 +1815,9 @@ def test_auto_dtype_largeint(self): assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - assert test.dtype['f0'] == np.float - assert test.dtype['f1'] == np.int64 - assert test.dtype['f2'] == np.integer + assert_(test.dtype['f0'] == np.float) + assert_(test.dtype['f1'] == np.int64) + assert_(test.dtype['f2'] == np.integer) assert_allclose(test['f0'], 73786976294838206464.) assert_equal(test['f1'], 17179869184) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index afa098f12250..fc139be1923c 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -61,7 +61,7 @@ def get_rtol(dtype): class LinalgCase(object): def __init__(self, name, a, b, exception_cls=None): - assert isinstance(name, str) + assert_(isinstance(name, str)) self.name = name self.a = a self.b = b @@ -267,7 +267,7 @@ def _stride_comb_iter(x): xi = xi[slices] xi[...] = x xi = xi.view(x.__class__) - assert np.all(xi == x) + assert_(np.all(xi == x)) yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) # generate also zero strides if possible diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index cecdedf269f5..36b3c5ad0fa6 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -757,46 +757,46 @@ def test_mvoid_multidim_print(self): mask = [([False, True, False],)], fill_value = ([999999, 999999, 999999],), dtype = [('a', ' Date: Sun, 1 Nov 2015 09:27:35 +0100 Subject: [PATCH 224/496] DOC: fix method signatures in "array subclasses" * Change ".. function::" -> ".. method::" * Remove "self" argument * Change "self" to "obj" in __array_finalize__ --- doc/source/reference/arrays.classes.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 5716f45621d3..b82f7d33c114 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -39,9 +39,9 @@ Special attributes and methods Numpy provides several hooks that classes can customize: -.. function:: class.__numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs) +.. method:: class.__numpy_ufunc__(ufunc, method, i, inputs, **kwargs) - .. versionadded:: 1.10 + .. versionadded:: 1.11 Any class (ndarray subclass or not) can define this method to override behavior of Numpy's ufuncs. This works quite similarly to @@ -109,7 +109,7 @@ Numpy provides several hooks that classes can customize: your_obj)`` always calls only your ``__numpy_ufunc__``, as expected. -.. function:: class.__array_finalize__(self) +.. method:: class.__array_finalize__(obj) This method is called whenever the system internally allocates a new array from *obj*, where *obj* is a subclass (subtype) of the @@ -118,7 +118,7 @@ Numpy provides several hooks that classes can customize: to update meta-information from the "parent." Subclasses inherit a default implementation of this method that does nothing. -.. function:: class.__array_prepare__(array, context=None) +.. method:: class.__array_prepare__(array, context=None) At the beginning of every :ref:`ufunc `, this method is called on the input object with the highest array @@ -130,7 +130,7 @@ Numpy provides several hooks that classes can customize: the subclass and update metadata before returning the array to the ufunc for computation. -.. 
function:: class.__array_wrap__(array, context=None) +.. method:: class.__array_wrap__(array, context=None) At the end of every :ref:`ufunc `, this method is called on the input object with the highest array priority, or @@ -149,7 +149,7 @@ Numpy provides several hooks that classes can customize: possibility for the Python type of the returned object. Subclasses inherit a default value of 0.0 for this attribute. -.. function:: class.__array__([dtype]) +.. method:: class.__array__([dtype]) If a class (ndarray subclass or not) having the :func:`__array__` method is used as the output object of an :ref:`ufunc From 192bff5ea1b37359c45e4e54c4b14b402e368663 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 11 Dec 2015 09:51:16 -0700 Subject: [PATCH 225/496] TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems. The test currently uses an ` Date: Fri, 11 Dec 2015 13:11:17 -0800 Subject: [PATCH 226/496] BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint --- numpy/core/src/npymath/npy_math.c.src | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src index 7f62810d5b7b..32fa41788e0b 100644 --- a/numpy/core/src/npymath/npy_math.c.src +++ b/numpy/core/src/npymath/npy_math.c.src @@ -260,6 +260,9 @@ double npy_atanh(double x) #endif #ifndef HAVE_RINT +#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64) +#pragma optimize("", off) +#endif double npy_rint(double x) { double y, r; @@ -280,6 +283,9 @@ double npy_rint(double x) } return y; } +#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64) +#pragma optimize("", on) +#endif #endif #ifndef HAVE_TRUNC From bcc4334edaed9c72b1f81a07ff920a44e1a6ff13 Mon Sep 17 00:00:00 2001 From: Christoph Gohlke Date: Fri, 11 Dec 2015 13:25:38 -0800 Subject: [PATCH 227/496] TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows --- numpy/ma/tests/test_core.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 53cdef97811d..e0d9f072cad2 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -764,25 +764,25 @@ def test_mvoid_multidim_print(self): t_2d = masked_array(data = [([[1, 2], [3,4]],)], mask = [([[False, True], [True, False]],)], - dtype = [('a', ' Date: Fri, 11 Dec 2015 16:47:43 -0500 Subject: [PATCH 228/496] Updated typos in histogram bin estimator equations In all cases, it's either ...*n^(-1/3) or .../n^(1/3), not both. The actual functions are implemented correctly. --- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 9261dba22293..3298789eeb6e 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -268,14 +268,14 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, large datasets respectively. Switchover point is usually x.size~1000. 'FD' (Freedman Diaconis Estimator) - .. math:: h = 2 \\frac{IQR}{n^{-1/3}} + .. math:: h = 2 \\frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' - .. math:: h = \\frac{3.5\\sigma}{n^{-1/3}} + .. 
math:: h = \\frac{3.5\\sigma}{n^{1/3}} The binwidth is proportional to the standard deviation (sd) of the data and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good From 7747c3a88cb0cad5687093d1345efcb2743fc1d5 Mon Sep 17 00:00:00 2001 From: Boxiang Sun Date: Thu, 10 Dec 2015 18:48:48 +0800 Subject: [PATCH 229/496] BUG: Fix thinko in assert_deprecated() assert_deprecated() was recently reworked for stylistic changes (in 0aa32608 "STY: Minor style cleanups in tests and C code.") but made a thinko - `lst` is already a list of warnings, so we don't need to put that into [] braces when preparing assertion text. If we do the reporting breaks: In [1]: msg = "4 warnings found but 3 expected." In [2]: lst = ['CategoryA', 'CategoryB', 'CategoryC'] In [3]: n.join([msg] + [lst]) TypeError Traceback (most recent call last) ----> 1 n.join([msg] + [lst]) TypeError: sequence item 1: expected string, list found Fix it. Cc: Charles Harris --- numpy/core/tests/test_deprecations.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 8f7e55d9125c..bfbe27e1b450 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -89,7 +89,7 @@ def assert_deprecated(self, function, num=1, ignore_others=False, if num is not None and num_found != num: msg = "%i warnings found but %i expected." % (len(self.log), num) lst = [w.category for w in self.log] - raise AssertionError("\n".join([msg] + [lst])) + raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): warnings.filterwarnings("error", message=self.message, @@ -400,5 +400,18 @@ def test_fortran_contiguous(self): self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) +class TestTestDeprecated(object): + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setUp() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning) + + test_case_instance.assert_deprecated(foo) + if __name__ == "__main__": run_module_suite() From f2fd27266f49fe6cbe8a85a03d7d43216cf7c8f7 Mon Sep 17 00:00:00 2001 From: "Francis T. 
O'Donovan" Date: Sat, 12 Dec 2015 02:07:06 -0500 Subject: [PATCH 230/496] MAINT: Resolve import naming collision and optimize imports --- numpy/distutils/ccompiler.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index ad235ed1997b..2f2d63b599c1 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -1,23 +1,22 @@ from __future__ import division, absolute_import, print_function -import re import os +import re import sys import types from copy import copy - -from distutils.ccompiler import * from distutils import ccompiler +from distutils.ccompiler import * from distutils.errors import DistutilsExecError, DistutilsModuleError, \ DistutilsPlatformError from distutils.sysconfig import customize_compiler from distutils.version import LooseVersion from numpy.distutils import log +from numpy.distutils.compat import get_exception from numpy.distutils.exec_command import exec_command from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ quote_args, get_num_build_jobs -from numpy.distutils.compat import get_exception def replace_method(klass, method_name, func): @@ -634,7 +633,6 @@ def gen_preprocess_options (macros, include_dirs): # that removing this fix causes f2py problems on Windows XP (see ticket #723). # Specifically, on WinXP when gfortran is installed in a directory path, which # contains spaces, then f2py is unable to find it. -import re import string _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") From 7546bf894d12b4a5b085713526a359f7224d7e27 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 11 Dec 2015 11:14:20 -0700 Subject: [PATCH 231/496] DOC: Update 1.10.2 release notes with fixes for windows i386. --- doc/release/1.10.2-notes.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst index a597a817c834..02e75647479f 100644 --- a/doc/release/1.10.2-notes.rst +++ b/doc/release/1.10.2-notes.rst @@ -68,6 +68,7 @@ Issues Fixed * gh-6719 Error compiling Cython file: Pythonic division not allowed without gil. * gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1 * gh-6781 The travis-ci script in maintenance/1.10.x needs fixing. +* gh-6807 Windows testing errors for 1.10.2 Merged PRs @@ -126,6 +127,9 @@ the PR number for the original PR against master is listed. * gh-6780 BUG: metadata is not copied to base_dtype. * gh-6783 BUG: Fix travis ci testing for new google infrastructure. * gh-6785 BUG: Quick and dirty fix for interp. +* gh-6813 TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems. +* gh-6817 BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint. +* gh-6819 TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows. Initial support for mingwpy was reverted as it was causing problems for non-windows builds. From f0d6d470c13405f9643f8bde50da74170b66c5c0 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Mon, 14 Dec 2015 01:01:51 +0200 Subject: [PATCH 232/496] CI: run benchmark suite in travis-CI This should ensure the suite stays in working condition, not to produce reliable timing information. 
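For context, the benchmarks follow the usual asv layout -- a class with
an optional `setup` method and `time_*` methods that asv collects and
times. A minimal illustrative sketch of the assumed shape (not part of
this change):

    import numpy as np

    class MySuite(object):
        def setup(self):
            # runs before each timed method
            self.a = np.ones((200, 200))

        def time_add_reduce(self):
            np.add.reduce(self.a, axis=0)

Running `asv dev` executes each benchmark once in the current
environment, which is enough to surface import errors and exceptions
without producing stable timings.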
--- .travis.yml | 5 ++++- tools/travis-test.sh | 11 +++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 314fd18d5298..f14b9c912c68 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,7 +56,9 @@ matrix: # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9TrXrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPowiFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAMahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" - python: 2.7 - env: PYTHONOPTIMIZE=2 + env: + - PYTHONOPTIMIZE=2 + - USE_ASV=1 before_install: - uname -a - free -m @@ -74,6 +76,7 @@ before_install: # pip install coverage # Speed up install by not compiling Cython - pip install --install-option="--no-cython-compile" Cython + - if [ -n "$USE_ASV" ]; then pip install asv; fi - popd script: diff --git a/tools/travis-test.sh b/tools/travis-test.sh index af151f434f4a..3591c36dfa38 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -75,6 +75,17 @@ run_test() $PYTHON ../tools/test-installed-numpy.py # --mode=full # - coverage run --source=$INSTALLDIR --rcfile=../.coveragerc $(which $PYTHON) ../tools/test-installed-numpy.py # - coverage report --rcfile=../.coveragerc --show-missing + + if [ -n "$USE_ASV" ]; then + pushd ../benchmarks + $PYTHON `which asv` machine --machine travis + $PYTHON `which asv` dev 2>&1| tee asv-output.log + if grep -q Traceback asv-output.log; then + echo "Some benchmarks have errors!" + exit 1 + fi + popd + fi } # travis venv tests override python From ff92db22b195c05aace5c277527078b6055c3f78 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 13 Dec 2015 23:08:07 +0200 Subject: [PATCH 233/496] BENCH: speed up benchmark suite import time; bump bench_ufunc timeout upward The input data generation in benchmarks/common.py takes ~ 1s, and it is not used by most benchmarks. Generate it lazily instead, making sure the generation is done in the setup() routines. 
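The mechanism is the `memoize` helper added to `common.py` below, which
caches the result of a zero-argument generator the first time it is
called; a simplified sketch of the pattern (array sizes here are
illustrative only):

    import numpy

    def memoize(func):
        # cache the result of a zero-argument function after first call
        result = []
        def wrapper():
            if not result:
                result.append(func())
            return result[0]
        return wrapper

    @memoize
    def get_values():
        # the expensive generation, now paid only on first use
        rnd = numpy.random.RandomState(1)
        return numpy.tile(rnd.uniform(0, 100, size=1000), 10)

Importing the benchmark modules no longer pays the generation cost;
benchmarks that need the data call the accessor from their `setup()`.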
--- benchmarks/benchmarks/bench_indexing.py | 8 +- benchmarks/benchmarks/bench_io.py | 7 +- benchmarks/benchmarks/bench_linalg.py | 14 +-- benchmarks/benchmarks/bench_reduce.py | 11 ++- benchmarks/benchmarks/bench_ufunc.py | 6 +- benchmarks/benchmarks/common.py | 112 +++++++++++++++++------- 6 files changed, 107 insertions(+), 51 deletions(-) diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index d6dc4edf0039..3e5a2ee60255 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -1,6 +1,6 @@ from __future__ import absolute_import, division, print_function -from .common import Benchmark, squares_, indexes_, indexes_rand_ +from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_ import sys import six @@ -17,10 +17,10 @@ class Indexing(Benchmark): def setup(self, indexes, sel, op): sel = sel.replace('I', indexes) - ns = {'squares_': squares_, + ns = {'squares_': get_squares_(), 'np': np, - 'indexes_': indexes_, - 'indexes_rand_': indexes_rand_} + 'indexes_': get_indexes_(), + 'indexes_rand_': get_indexes_rand_()} if sys.version_info[0] >= 3: code = "def run():\n for a in squares_.values(): a[%s]%s" diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index 45cdf95ee126..782d4ab30f1b 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -1,6 +1,6 @@ from __future__ import absolute_import, division, print_function -from .common import Benchmark, squares +from .common import Benchmark, get_squares import numpy as np @@ -57,5 +57,8 @@ def time_copyto_8_dense(self): class Savez(Benchmark): + def setup(self): + self.squares = get_squares() + def time_vb_savez_squares(self): - np.savez('tmp.npz', squares) + np.savez('tmp.npz', self.squares) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index c844cc79e2ea..a323609b7594 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -1,6 +1,6 @@ from __future__ import absolute_import, division, print_function -from .common import Benchmark, squares_, indexes_rand +from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1 import numpy as np @@ -36,7 +36,7 @@ def time_tensordot_a_b_axes_1_0_0_1(self): class Linalg(Benchmark): params = [['svd', 'pinv', 'det', 'norm'], - list(squares_.keys())] + TYPES1] param_names = ['op', 'type'] def setup(self, op, typename): @@ -46,10 +46,10 @@ def setup(self, op, typename): if op == 'cholesky': # we need a positive definite - self.a = np.dot(squares_[typename], - squares_[typename].T) + self.a = np.dot(get_squares_()[typename], + get_squares_()[typename].T) else: - self.a = squares_[typename] + self.a = get_squares_()[typename] # check that dtype is supported at all try: @@ -63,8 +63,8 @@ def time_op(self, op, typename): class Lstsq(Benchmark): def setup(self): - self.a = squares_['float64'] - self.b = indexes_rand[:100].astype(np.float64) + self.a = get_squares_()['float64'] + self.b = get_indexes_rand()[:100].astype(np.float64) def time_numpy_linalg_lstsq_a__b_float64(self): np.linalg.lstsq(self.a, self.b) diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py index a810e828e348..70402352884e 100644 --- a/benchmarks/benchmarks/bench_reduce.py +++ b/benchmarks/benchmarks/bench_reduce.py @@ -1,16 +1,19 @@ from __future__ import absolute_import, division, print_function -from .common import Benchmark, TYPES1, squares 
+from .common import Benchmark, TYPES1, get_squares import numpy as np class AddReduce(Benchmark): + def setup(self): + self.squares = get_squares().values() + def time_axis_0(self): - [np.add.reduce(a, axis=0) for a in squares.values()] + [np.add.reduce(a, axis=0) for a in self.squares] def time_axis_1(self): - [np.add.reduce(a, axis=1) for a in squares.values()] + [np.add.reduce(a, axis=1) for a in self.squares] class AddReduceSeparate(Benchmark): @@ -18,7 +21,7 @@ class AddReduceSeparate(Benchmark): param_names = ['axis', 'type'] def setup(self, axis, typename): - self.a = squares[typename] + self.a = get_squares()[typename] def time_reduce(self, axis, typename): np.add.reduce(self.a, axis=axis) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 7946ccf65fb3..8f821ce080d8 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -1,6 +1,6 @@ from __future__ import absolute_import, division, print_function -from .common import Benchmark, squares_ +from .common import Benchmark, get_squares_ import numpy as np @@ -39,7 +39,7 @@ def time_broadcast(self): class UFunc(Benchmark): params = [ufuncs] param_names = ['ufunc'] - timeout = 2 + timeout = 10 def setup(self, ufuncname): np.seterr(all='ignore') @@ -48,7 +48,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError() self.args = [] - for t, a in squares_.items(): + for t, a in get_squares_().items(): arg = (a,) * self.f.nin try: self.f(*arg) diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index e98396bed877..066d4b130633 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -25,40 +25,90 @@ 'complex256', ] + +def memoize(func): + result = [] + def wrapper(): + if not result: + result.append(func()) + return result[0] + return wrapper + + # values which will be used to construct our sample data matrices # replicate 10 times to speed up initial imports of this helper # and generate some redundancy -values = [random.uniform(0, 100) for x in range(nx*ny//10)]*10 - -squares = {t: numpy.array(values, - dtype=getattr(numpy, t)).reshape((nx, ny)) - for t in TYPES1} - -# adjust complex ones to have non-degenerated imagery part -- use -# original data transposed for that -for t, v in squares.items(): - if t.startswith('complex'): - v += v.T*1j - -# smaller squares -squares_ = {t: s[:nxs, :nys] for t, s in squares.items()} -# vectors -vectors = {t: s[0] for t, s in squares.items()} - -indexes = list(range(nx)) -# so we do not have all items -indexes.pop(5) -indexes.pop(95) - -indexes_rand = indexes[:] # copy -random.shuffle(indexes_rand) # in-place shuffle - -# only now make them arrays -indexes = numpy.array(indexes) -indexes_rand = numpy.array(indexes_rand) -# smaller versions -indexes_ = indexes[indexes < nxs] -indexes_rand_ = indexes_rand[indexes_rand < nxs] + +@memoize +def get_values(): + rnd = numpy.random.RandomState(1) + values = numpy.tile(rnd.uniform(0, 100, size=nx*ny//10), 10) + return values + + +@memoize +def get_squares(): + values = get_values() + squares = {t: numpy.array(values, + dtype=getattr(numpy, t)).reshape((nx, ny)) + for t in TYPES1} + + # adjust complex ones to have non-degenerated imagery part -- use + # original data transposed for that + for t, v in squares.items(): + if t.startswith('complex'): + v += v.T*1j + return squares + + +@memoize +def get_squares_(): + # smaller squares + squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()} + 
return squares_ + + +@memoize +def get_vectors(): + # vectors + vectors = {t: s[0] for t, s in get_squares().items()} + return vectors + + +@memoize +def get_indexes(): + indexes = list(range(nx)) + # so we do not have all items + indexes.pop(5) + indexes.pop(95) + + indexes = numpy.array(indexes) + return indexes + + +@memoize +def get_indexes_rand(): + rnd = random.Random(1) + + indexes_rand = get_indexes().tolist() # copy + rnd.shuffle(indexes_rand) # in-place shuffle + indexes_rand = numpy.array(indexes_rand) + return indexes_rand + + +@memoize +def get_indexes_(): + # smaller versions + indexes = get_indexes() + indexes_ = indexes[indexes < nxs] + return indexes_ + + +@memoize +def get_indexes_rand_(): + indexes_rand = get_indexes_rand() + indexes_rand_ = indexes_rand[indexes_rand < nxs] + return indexes_rand_ class Benchmark(object): From a1f4197a02d8c1bfa6a2130c0fb1e21da4138981 Mon Sep 17 00:00:00 2001 From: gopalmeena Date: Tue, 15 Dec 2015 01:16:25 +0530 Subject: [PATCH 234/496] Made changes in numpy/numpy/random/mtrand/matrand.pyx --- numpy/random/mtrand/mtrand.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 080591e5ee53..d6ba58bb2b0d 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1280,7 +1280,7 @@ cdef class RandomState: Random values in a given shape. - Create an array of the given shape and propagate it with + Create an array of the given shape and populate it with random samples from a uniform distribution over ``[0, 1)``. From 9ec7b11a8e458c810acd958a5072686458f84f57 Mon Sep 17 00:00:00 2001 From: John Bjorn Nelson Date: Tue, 15 Dec 2015 08:04:09 -0500 Subject: [PATCH 235/496] Correct reference to Johnk's algorithm --- numpy/random/mtrand/distributions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c index 39004178da06..7c44088a75df 100644 --- a/numpy/random/mtrand/distributions.c +++ b/numpy/random/mtrand/distributions.c @@ -188,7 +188,7 @@ double rk_beta(rk_state *state, double a, double b) if ((a <= 1.0) && (b <= 1.0)) { double U, V, X, Y; - /* Use Jonk's algorithm */ + /* Use Johnk's algorithm */ while (1) { From 293e930ad4be772ec07f2777527f1d211257c3b5 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 15 Dec 2015 14:48:00 -0700 Subject: [PATCH 236/496] TST: Clean up travis-test and make it work with current travis ci. Travis ci is migrating to GCI and the 32 bit tests broke in the process. This cleans up the tools/travis-test script, fixes it for current travis, and changes the 32 bit test to use python 2.7 in order to turn up errors involving python long integers. In preparation for dropping Python 3.2 and 3.3, the USE_DEBUG test is run in the travis ci trusty beta so that python3 defaults to 3.4. 
--- .travis.yml | 10 ++-- tools/travis-test.sh | 131 ++++++++++++++++++++++++++----------------- 2 files changed, 85 insertions(+), 56 deletions(-) diff --git a/.travis.yml b/.travis.yml index f14b9c912c68..589d7a9e67dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,16 +29,18 @@ python: - 3.5 matrix: include: - - python: 3.3 - env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=3.4 + - python: 2.7 + env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=2.7 sudo: true + dist: trusty addons: apt: packages: - - *common_packages - debootstrap - - python: 3.2 + - python: 3.4 env: USE_DEBUG=1 + sudo: true + dist: trusty addons: apt: packages: diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 3591c36dfa38..939594d8ce27 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -1,4 +1,5 @@ #!/bin/bash + set -ex # Travis legacy boxes give you 1.5 CPUs, container-based boxes give you 2 CPUs @@ -10,54 +11,94 @@ if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then export LD_PRELOAD=/usr/lib/libeatmydata/libeatmydata.so fi +# travis venv tests override python +PYTHON=${PYTHON:-python} +PIP=${PIP:-pip} + +# explicit python version needed here +if [ -n "$USE_DEBUG" ]; then + PYTHON="python3-dbg" +fi + +if [ -n "$PYTHON_OO" ]; then + PYTHON="${PYTHON} -OO" +fi + # make some warnings fatal, mostly to match windows compilers werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull" setup_base() { # We used to use 'setup.py install' here, but that has the terrible - # behaviour that if a copy of the package is already installed in - # the install location, then the new copy just gets dropped on top - # of it. Travis typically has a stable numpy release pre-installed, - # and if we don't remove it, then we can accidentally end up - # e.g. running old test modules that were in the stable release but - # have been removed from master. (See gh-2765, gh-2768.) Using 'pip - # install' also has the advantage that it tests that numpy is 'pip - # install' compatible, see e.g. gh-2766... -if [ -z "$USE_DEBUG" ]; then - if [ -z "$IN_CHROOT" ]; then - $PIP install . + # behaviour that if a copy of the package is already installed in the + # install location, then the new copy just gets dropped on top of it. + # Travis typically has a stable numpy release pre-installed, and if we + # don't remove it, then we can accidentally end up e.g. running old + # test modules that were in the stable release but have been removed + # from master. (See gh-2765, gh-2768.) Using 'pip install' also has + # the advantage that it tests that numpy is 'pip install' compatible, + # see e.g. gh-2766... + if [ -z "$USE_DEBUG" ]; then + if [ -z "$IN_CHROOT" ]; then + $PIP install . + else + sysflags="$($PYTHON -c "from distutils import sysconfig; \ + print (sysconfig.get_config_var('CFLAGS'))")" + CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log + grep -v "_configtest" log \ + | grep -vE "ld returned 1|no previously-included files matching" \ + | grep -E "warning\>" \ + | tee warnings + # Check for an acceptable number of warnings. Some warnings are out of + # our control, so adjust the number as needed. + [[ $(wc -l < warnings) -lt 1 ]] + fi else - sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")" - CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 
2>&1 | tee log - grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>"; - # accept a mysterious memset warning that shows with -flto - test $(grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>" -c) -lt 2; + sysflags="$($PYTHON -c "from distutils import sysconfig; \ + print (sysconfig.get_config_var('CFLAGS'))")" + CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace fi -else - sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")" - CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace -fi } setup_chroot() { # this can all be replaced with: # apt-get install libpython2.7-dev:i386 - # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" linux32 python setup.py build + # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" \ + # linux32 python setup.py build # when travis updates to ubuntu 14.04 + # + # Numpy may not distinquish between 64 and 32 bit atlas in the + # configuration stage. DIR=$1 set -u - sudo debootstrap --variant=buildd --include=fakeroot,build-essential --arch=$ARCH --foreign $DIST $DIR + sudo debootstrap --variant=buildd --include=fakeroot,build-essential \ + --arch=$ARCH --foreign $DIST $DIR sudo chroot $DIR ./debootstrap/debootstrap --second-stage + + # put the numpy repo in the chroot directory sudo rsync -a $TRAVIS_BUILD_DIR $DIR/ - echo deb http://archive.ubuntu.com/ubuntu/ $DIST main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list - echo deb http://archive.ubuntu.com/ubuntu/ $DIST-updates main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list - echo deb http://security.ubuntu.com/ubuntu $DIST-security main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list + + # set up repos in the chroot directory for installing packages + echo deb http://archive.ubuntu.com/ubuntu/ \ + $DIST main restricted universe multiverse \ + | sudo tee -a $DIR/etc/apt/sources.list + echo deb http://archive.ubuntu.com/ubuntu/ \ + $DIST-updates main restricted universe multiverse \ + | sudo tee -a $DIR/etc/apt/sources.list + echo deb http://security.ubuntu.com/ubuntu \ + $DIST-security main restricted universe multiverse \ + | sudo tee -a $DIR/etc/apt/sources.list + + # install needed packages sudo chroot $DIR bash -c "apt-get update" - sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes eatmydata" - echo /usr/lib/libeatmydata/libeatmydata.so | sudo tee -a $DIR/etc/ld.so.preload - sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes libatlas-dev libatlas-base-dev gfortran python3-dev python3-nose python3-pip cython3 cython" + sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes \ + eatmydata libatlas-dev libatlas-base-dev gfortran \ + python-dev python-nose python-pip cython" + + # faster operation with preloaded eatmydata + echo /usr/lib/libeatmydata/libeatmydata.so | \ + sudo tee -a $DIR/etc/ld.so.preload } run_test() @@ -70,42 +111,29 @@ run_test() # of numpy in the source directory. 
mkdir -p empty cd empty - INSTALLDIR=$($PYTHON -c "import os; import numpy; print(os.path.dirname(numpy.__file__))") + INSTALLDIR=$($PYTHON -c \ + "import os; import numpy; print(os.path.dirname(numpy.__file__))") export PYTHONWARNINGS=default - $PYTHON ../tools/test-installed-numpy.py # --mode=full - # - coverage run --source=$INSTALLDIR --rcfile=../.coveragerc $(which $PYTHON) ../tools/test-installed-numpy.py - # - coverage report --rcfile=../.coveragerc --show-missing - + $PYTHON ../tools/test-installed-numpy.py if [ -n "$USE_ASV" ]; then pushd ../benchmarks $PYTHON `which asv` machine --machine travis $PYTHON `which asv` dev 2>&1| tee asv-output.log if grep -q Traceback asv-output.log; then - echo "Some benchmarks have errors!" - exit 1 + echo "Some benchmarks have errors!" + exit 1 fi popd fi } -# travis venv tests override python -PYTHON=${PYTHON:-python} -PIP=${PIP:-pip} - -if [ -n "$USE_DEBUG" ]; then - PYTHON=python3-dbg -fi - -if [ -n "$PYTHON_OO" ]; then - PYTHON="$PYTHON -OO" -fi - export PYTHON export PIP if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then # Build wheel $PIP install wheel - # ensure that the pip / setuptools versions deployed inside the venv are recent enough + # ensure that the pip / setuptools versions deployed inside + # the venv are recent enough $PIP install -U virtualenv $PYTHON setup.py bdist_wheel # Make another virtualenv to install into @@ -117,15 +145,14 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then pip install nose popd run_test -elif [ "$USE_CHROOT" != "1" ]; then - setup_base - run_test elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then DIR=/chroot setup_chroot $DIR # run again in chroot with this time testing - sudo linux32 chroot $DIR bash -c "cd numpy && PYTHON=python3 PIP=pip3 IN_CHROOT=1 $0 test" + sudo linux32 chroot $DIR bash -c \ + "cd numpy && PYTHON=python PIP=pip IN_CHROOT=1 $0 test" else + setup_base run_test fi From 8f87f431e275c243d17857ba7a027102dd81cfe8 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 16 Dec 2015 12:54:23 -0700 Subject: [PATCH 237/496] STY: Style fixes for .travis.yml and travis-upload-wheel.sh * shell script style fixes inspired by google shell style guide https://google.github.io/styleguide/shell.xml * .travis.yml longline breaking tested with http://yaml-online-parser.appspot.com/ --- .travis.yml | 5 ++++- tools/travis-upload-wheel.sh | 12 +++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 589d7a9e67dc..e0887a82a3f3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,7 +56,10 @@ matrix: - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy # The following is generated with the command: # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY - - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9TrXrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPowiFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAMahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" + - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\ + XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\ + iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\ + ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" - python: 2.7 env: - PYTHONOPTIMIZE=2 diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh index 60b9aa7cb740..06a8f3ebac54 100755 --- a/tools/travis-upload-wheel.sh +++ b/tools/travis-upload-wheel.sh @@ -1,11 +1,13 @@ #!/bin/bash +# set -ex export CLOUD_CONTAINER_NAME=travis-dev-wheels -if [[ ( $USE_WHEEL == 1 ) && \ - ( "$TRAVIS_BRANCH" == "master" ) && \ - ( "$TRAVIS_PULL_REQUEST" == "false" ) ]]; then - pip install 
wheelhouse_uploader - python -m wheelhouse_uploader upload --local-folder $TRAVIS_BUILD_DIR/dist/ $CLOUD_CONTAINER_NAME +if [[ ( ${USE_WHEEL} == 1 ) \ + && ( "${TRAVIS_BRANCH}" == "master" ) \ + && ( "${TRAVIS_PULL_REQUEST}" == "false" ) ]]; then + pip install wheelhouse_uploader + python -m wheelhouse_uploader upload --local-folder \ + ${TRAVIS_BUILD_DIR}/dist/ ${CLOUD_CONTAINER_NAME} fi From 084952500f388caed570b0961194a5b76bd8b6be Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 13 Dec 2015 18:11:14 -0800 Subject: [PATCH 238/496] MAINT: minor spelling and grammar corrections --- doc/release/1.11.0-notes.rst | 2 +- numpy/core/tests/test_deprecations.py | 20 ++++++++++---------- numpy/random/tests/test_random.py | 6 +++--- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index fac868ca3103..47b11fd5768c 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -110,7 +110,7 @@ Views of arrays in Fortran order The f_contiguous flag was used to signal that views as a dtypes that changed the element size would change the first index. This was always a bit problematical for arrays that were both f_contiguous and c_contiguous -because c_contiguous took precendence. Relaxed stride checking results in +because c_contiguous took precedence. Relaxed stride checking results in more such dual contiguous arrays and breaks some existing code as a result. Note that this also affects changing the dtype by assigning to the dtype attribute of an array. The aim of this deprecation is to restrict views to diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 372af9c0b790..f6dc3d842219 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -163,8 +163,8 @@ def test(self): class TestComparisonDeprecations(_DeprecationTestCase): - """This tests the deprecation, for non-elementwise comparison logic. - This used to mean that when an error occured during element-wise comparison + """This tests the deprecation, for non-element-wise comparison logic. + This used to mean that when an error occurred during element-wise comparison (i.e. broadcasting) NotImplemented was returned, but also in the comparison itself, False was given instead of the error. @@ -192,13 +192,13 @@ def test_string(self): b = np.array(['a', 'b', 'c']) assert_raises(ValueError, lambda x, y: x == y, a, b) - # The empty list is not cast to string, this is only to document + # The empty list is not cast to string, as this is only to document # that fact (it likely should be changed). This means that the # following works (and returns False) due to dtype mismatch: a == [] def test_none_comparison(self): - # Test comparison of None, which should result in elementwise + # Test comparison of None, which should result in element-wise # comparison in the future. [1, 2] == None should be [False, False]. with warnings.catch_warnings(): warnings.filterwarnings('always', '', FutureWarning) @@ -211,7 +211,7 @@ def test_none_comparison(self): assert_raises(FutureWarning, operator.ne, np.arange(3), None) def test_scalar_none_comparison(self): - # Scalars should still just return false and not give a warnings. + # Scalars should still just return False and not give a warnings. # The comparisons are flagged by pep8, ignore that. 
with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', FutureWarning) @@ -226,9 +226,9 @@ def test_scalar_none_comparison(self): assert_(np.datetime64('NaT') != None) assert_(len(w) == 0) - # For documentaiton purpose, this is why the datetime is dubious. + # For documentation purposes, this is why the datetime is dubious. # At the time of deprecation this was no behaviour change, but - # it has to be considered when the deprecations is done. + # it has to be considered when the deprecations are done. assert_(np.equal(np.datetime64('NaT'), None)) def test_void_dtype_equality_failures(self): @@ -338,8 +338,8 @@ def test_bool_error(self): class TestAlterdotRestoredotDeprecations(_DeprecationTestCase): """The alterdot/restoredot functions are deprecated. - These functions no longer do anything in numpy 1.10, so should not be - used. + These functions no longer do anything in numpy 1.10, so + they should not be used. """ @@ -350,7 +350,7 @@ def test_alterdot_restoredot_deprecation(self): class TestBooleanIndexShapeMismatchDeprecation(): """Tests deprecation for boolean indexing where the boolean array - does not match the input array along the given diemsions. + does not match the input array along the given dimensions. """ message = r"boolean index did not match indexed array" diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index ab7f90d82c54..193844030efd 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -26,12 +26,12 @@ def test_array(self): assert_equal(s.randint(1000), 265) def test_invalid_scalar(self): - # seed must be a unsigned 32 bit integers + # seed must be an unsigned 32 bit integer assert_raises(TypeError, np.random.RandomState, -0.5) assert_raises(ValueError, np.random.RandomState, -1) def test_invalid_array(self): - # seed must be a unsigned 32 bit integers + # seed must be an unsigned 32 bit integer assert_raises(TypeError, np.random.RandomState, [-0.5]) assert_raises(ValueError, np.random.RandomState, [-1]) assert_raises(ValueError, np.random.RandomState, [4294967296]) @@ -129,7 +129,7 @@ def test_negative_binomial(self): self.prng.negative_binomial(0.5, 0.5) class TestRandomDist(TestCase): - # Make sure the random distrobution return the correct value for a + # Make sure the random distribution returns the correct value for a # given seed def setUp(self): From a9aae5f30324a84e8caaaec91ab358e8e96b9a7b Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Wed, 16 Dec 2015 19:36:52 +0000 Subject: [PATCH 239/496] DOC: change uses of `rank` for `dimension` We used to use ``rank`` to mean the number of axes in an array, but no more. Change these uses of rank to refer to dimensions. Closes gh-6839 --- doc/source/user/numpy-for-matlab-users.rst | 52 +++++++++++----------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index d94233a2ed2f..9edb588ea008 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -76,20 +76,20 @@ are only a handful of key differences between the two. - For ``matrix``, **'``*``\ ' means matrix multiplication**, and the ``multiply()`` function is used for element-wise multiplication. -- Handling of vectors (rank-1 arrays) +- Handling of vectors (one-dimensional arrays) - - For ``array``, the **vector shapes 1xN, Nx1, and N are all - different things**. 
Operations like ``A[:,1]`` return a rank-1 - array of shape N, not a rank-2 of shape Nx1. Transpose on a rank-1 - ``array`` does nothing. - - For ``matrix``, **rank-1 arrays are always upconverted to 1xN or - Nx1 matrices** (row or column vectors). ``A[:,1]`` returns a - rank-2 matrix of shape Nx1. + - For ``array``, the **vector shapes 1xN, Nx1, and N are all different + things**. Operations like ``A[:,1]`` return a one-dimensional array of + shape N, not a two-dimensional array of shape Nx1. Transpose on a + one-dimensional ``array`` does nothing. + - For ``matrix``, **one-dimensional arrays are always upconverted to 1xN + or Nx1 matrices** (row or column vectors). ``A[:,1]`` returns a + two-dimensional matrix of shape Nx1. -- Handling of higher-rank arrays (rank > 2) +- Handling of higher-dimensional arrays (ndim > 2) - - ``array`` objects **can have rank > 2**. - - ``matrix`` objects **always have exactly rank 2**. + - ``array`` objects **can have number of dimensions > 2**; + - ``matrix`` objects **always have exactly two dimensions**. - Convenience attributes @@ -110,17 +110,17 @@ There are pros and cons to using both: - ``array`` - - ``:)`` You can treat rank-1 arrays as *either* row or column + - ``:)`` You can treat one-dimensional arrays as *either* row or column vectors. ``dot(A,v)`` treats ``v`` as a column vector, while - ``dot(v,A)`` treats ``v`` as a row vector. This can save you - having to type a lot of transposes. + ``dot(v,A)`` treats ``v`` as a row vector. This can save you having to + type a lot of transposes. - ``<:(`` Having to use the ``dot()`` function for matrix-multiply is messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``. - ``:)`` Element-wise multiplication is easy: ``A*B``. - ``:)`` ``array`` is the "default" NumPy type, so it gets the most testing, and is the type most likely to be returned by 3rd party code that uses NumPy. - - ``:)`` Is quite at home handling data of any rank. + - ``:)`` Is quite at home handling data of any number of dimensions. - ``:)`` Closer in semantics to tensor algebra, if you are familiar with that. - ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are @@ -129,9 +129,9 @@ There are pros and cons to using both: - ``matrix`` - ``:\\`` Behavior is more like that of MATLAB® matrices. - - ``<:(`` Maximum of rank-2. To hold rank-3 data you need ``array`` or - perhaps a Python list of ``matrix``. - - ``<:(`` Minimum of rank-2. You cannot have vectors. They must be + - ``<:(`` Maximum of two-dimensional. To hold three-dimensional data you + need ``array`` or perhaps a Python list of ``matrix``. + - ``<:(`` Minimum of two-dimensional. You cannot have vectors. They must be cast as single-column or single-row matrices. - ``<:(`` Since ``array`` is the default in NumPy, some functions may return an ``array`` even if you give them a ``matrix`` as an @@ -201,7 +201,7 @@ commands in Python: import scipy.linalg Also assume below that if the Notes talk about "matrix" that the -arguments are rank 2 entities. +arguments are two-dimensional entities. General Purpose Equivalents --------------------------- @@ -252,7 +252,7 @@ Linear Algebra Equivalents * - ``ndims(a)`` - ``ndim(a)`` or ``a.ndim`` - - get the number of dimensions of ``a`` (tensor rank) + - get the number of dimensions of an array * - ``numel(a)`` - ``size(a)`` or ``a.size`` @@ -264,7 +264,9 @@ Linear Algebra Equivalents * - ``size(a,n)`` - ``a.shape[n-1]`` - - get the number of elements of the n-th dimension of array ``a``. 
(Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING `) + - get the number of elements of the n-th dimension of array ``a``. (Note + that MATLAB® uses 1 based indexing while Python uses 0 based indexing, + See note :ref:`INDEXING `) * - ``[ 1 2 3; 4 5 6 ]`` - ``array([[1.,2.,3.], [4.,5.,6.]])`` @@ -399,15 +401,15 @@ Linear Algebra Equivalents * - ``zeros(3,4)`` - ``zeros((3,4))`` - - 3x4 rank-2 array full of 64-bit floating point zeros + - 3x4 two-dimensional array full of 64-bit floating point zeros * - ``zeros(3,4,5)`` - ``zeros((3,4,5))`` - - 3x4x5 rank-3 array full of 64-bit floating point zeros + - 3x4x5 three-dimensional array full of 64-bit floating point zeros * - ``ones(3,4)`` - ``ones((3,4))`` - - 3x4 rank-2 array full of 64-bit floating point ones + - 3x4 two-dimensional array full of 64-bit floating point ones * - ``eye(3)`` - ``eye(3)`` @@ -503,7 +505,7 @@ Linear Algebra Equivalents * - ``rank(a)`` - ``linalg.matrix_rank(a)`` - - rank of a matrix ``a`` + - matrix rank of a 2D array / matrix ``a`` * - ``a\b`` - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise From 4ddc2a5850ac3d8a0514a6330ef7350022ee262b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 16 Dec 2015 16:59:52 -0700 Subject: [PATCH 240/496] BUG: Fix use of python 3 only FileNotFoundError in test_f2py. Also rewrite error messages so that they read more like warnings than errors. --- numpy/tests/test_scripts.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 552383d7796b..74efd2650381 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -64,11 +64,12 @@ def test_f2py(): if sys.platform == 'win32': f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable) code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v']) - assert_equal(stdout.strip(), asbytes('2')) + success = stdout.strip() == asbytes('2') + assert_(success, "Warning: f2py not found in path") else: # unclear what f2py cmd was installed as, check plain (f2py) and # current python version specific one (f2py3.4) - f2py_cmds = ['f2py', 'f2py' + basename(sys.executable)[6:]] + f2py_cmds = ('f2py', 'f2py' + basename(sys.executable)[6:]) success = False for f2py_cmd in f2py_cmds: try: @@ -76,6 +77,6 @@ def test_f2py(): assert_equal(stdout.strip(), asbytes('2')) success = True break - except FileNotFoundError: + except OSError: pass - assert_(success, "wasn't able to find f2py or %s on commandline" % f2py_cmds[1]) + assert_(success, "Warning: neither %s nor %s found in path" % f2py_cmds) From 18b01010077b034556cdb73b751544a06f48dcdc Mon Sep 17 00:00:00 2001 From: Eugene Krokhalev Date: Thu, 17 Dec 2015 19:55:05 +0300 Subject: [PATCH 241/496] BLD: use setuptools for bdist_egg distributions Not only bdist_wheel needs setuptools --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 36889cc42761..80ddd8ac386c 100755 --- a/setup.py +++ b/setup.py @@ -237,9 +237,9 @@ def setup_package(): FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: - if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' or + if (len(sys.argv) >= 2 and sys.argv[1] in ('bdist_wheel', 'bdist_egg') or sys.version_info[0] < 3 and sys.platform == "win32"): - # bdist_wheel and the MS python2.7 VS sdk needs setuptools + # bdist_wheel, bdist_egg and the MS python2.7 VS sdk needs setuptools # the latter can also 
be triggered by (see python issue23246)
            # SET DISTUTILS_USE_SDK=1
            # SET MSSdk=1

From 1350b46714ac8f6f04646ae637b84ef23c2ac917 Mon Sep 17 00:00:00 2001
From: Julian Taylor
Date: Mon, 16 Nov 2015 23:09:57 +0100
Subject: [PATCH 242/496] ENH: use linux fallocate to reserve diskspace in
 array.tofile

fallocate allows the filesystem to make smarter decisions about space
allocation and gives a fast failure path for insufficient space.
This is very important for filesystems that suffer a lot from
fragmentation like btrfs.
Restricted to linux only as that is the only system I know the behavior
of. Other systems might also have this system call but we don't want to
accidentally trigger explicit zeroing behavior as e.g. posix_fallocate
would when there is no support for a real fallocate.
---
 numpy/core/setup_common.py          |  2 +-
 numpy/core/src/multiarray/convert.c | 41 +++++++++++++++++++++++++++++
 numpy/core/tests/test_multiarray.py | 13 +++++++
 3 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index d93e475e3e54..e0cb3f6305df 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -104,7 +104,7 @@ def check_api_version(apiversion, codegen_dir):
 OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
                      "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
                      "copysign", "nextafter", "ftello", "fseeko",
-                     "strtoll", "strtoull", "cbrt", "strtold_l",]
+                     "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate"]
 
 
 OPTIONAL_HEADERS = [
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 7cb27581a42e..805adec8fd71 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -2,6 +2,8 @@
 #include <Python.h>
 #include "structmember.h"
 
+#include <errno.h>
+
 #define NPY_NO_DEPRECATED_API NPY_API_VERSION
 #define _MULTIARRAYMODULE
 #include "numpy/arrayobject.h"
@@ -19,6 +21,42 @@
 #include "convert.h"
 
+int fallocate(int fd, int mode, off_t offset, off_t len);
+
+/*
+ * allocate nbytes of diskspace for file fp
+ * this allows the filesystem to make smarter allocation decisions and gives a
+ * fast exit on not enough free space
+ * returns -1 and raises exception on no space, ignores all other errors
+ */
+static int npy_fallocate(npy_intp nbytes, FILE * fp)
+{
+    /*
+     * unknown behavior on non-linux so don't try it
+     * we don't want explicit zeroing to happen
+     */
+#if defined(HAVE_FALLOCATE) && defined(__linux__)
+    int r;
+    /* small files not worth the system call */
+    if (nbytes < 16 * 1024 * 1024) {
+        return 0;
+    }
+    /* btrfs can take a while to allocate making release worthwhile */
+    NPY_BEGIN_ALLOW_THREADS;
+    r = fallocate(fileno(fp), 0, npy_ftell(fp), nbytes);
+    NPY_END_ALLOW_THREADS;
+    /*
+     * early exit on no space, other errors will also get found during fwrite
+     */
+    if (r == -1 && errno == ENOSPC) {
+        PyErr_Format(PyExc_IOError, "Not enough free space to write "
+                     "%"NPY_INTP_FMT" bytes", nbytes);
+        return -1;
+    }
+#endif
+    return 0;
+}
+
 /*
  * Converts a subarray of 'self' into lists, with starting data pointer
 * 'dataptr' and from dimension 'startdim' to the last dimension of 'self'.
@@ -92,6 +130,9 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) "cannot write object arrays to a file in binary mode"); return -1; } + if (npy_fallocate(PyArray_NBYTES(self), fp) != 0) { + return -1; + } if (PyArray_ISCONTIGUOUS(self)) { size = PyArray_SIZE(self); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 59360795411f..d03c5f54727d 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -9,6 +9,7 @@ import io import itertools import ctypes +import os if sys.version_info[0] >= 3: import builtins else: @@ -3377,6 +3378,18 @@ def fail(*args, **kwargs): y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) + def test_largish_file(self): + # check the fallocate path on files > 16MB + d = np.zeros(4 * 1024 ** 2) + d.tofile(self.filename) + assert_equal(os.path.getsize(self.filename), d.nbytes) + assert_array_equal(d, np.fromfile(self.filename)); + # check offset + with open(self.filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(self.filename), d.nbytes * 2) + def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, From 68355274ed556a7d6867ffb00d86fbcc2cfc1f15 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Sat, 5 Dec 2015 16:12:49 +0100 Subject: [PATCH 243/496] DOC: add fallocate use to release notes --- doc/release/1.11.0-notes.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index e7b9c57e2059..7790ac58f61d 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -82,12 +82,12 @@ Improvements ============ *np.gradient* now supports an ``axis`` argument -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``axis`` parameter was added to *np.gradient* for consistency. It allows to specify over which axes the gradient is calculated. *np.lexsort* now supports arrays with object data-type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function now internally calls the generic ``npy_amergesort`` when the type does not implement a merge-sort kind of ``argsort`` method. @@ -99,6 +99,11 @@ Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. +*ndarray.tofile* now uses fallocate on linux +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The function now uses the fallocate system call to reserve sufficient +diskspace on filesystems that support it. + Changes ======= From 0757b34754aa7543576737d07ecdd9a14b7a23ca Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 17 Dec 2015 13:29:49 -0700 Subject: [PATCH 244/496] STY: Minor C style fixups for convert.c. 
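The effect of the fallocate changes above is easiest to see from the
Python side. A minimal sketch (the file path is illustrative; the 16 MB
threshold and the Linux-only fast-failure behavior come from the
npy_fallocate helper added in the patch above):

    import numpy as np

    # 32 MB of float64 zeros -- large enough to cross the 16 MB threshold,
    # so tofile() first reserves the space via fallocate on Linux and
    # raises IOError right away if the filesystem has too little room,
    # instead of failing partway through the fwrite loop.
    d = np.zeros(4 * 1024 ** 2)
    d.tofile('/tmp/demo.bin')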
--- numpy/core/src/multiarray/convert.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c index 805adec8fd71..5499160be51d 100644 --- a/numpy/core/src/multiarray/convert.c +++ b/numpy/core/src/multiarray/convert.c @@ -21,7 +21,8 @@ #include "convert.h" -int fallocate(int fd, int mode, off_t offset, off_t len); +int +fallocate(int fd, int mode, off_t offset, off_t len); /* * allocate nbytes of diskspace for file fp @@ -29,7 +30,8 @@ int fallocate(int fd, int mode, off_t offset, off_t len); * fast exit on not enough free space * returns -1 and raises exception on no space, ignores all other errors */ -static int npy_fallocate(npy_intp nbytes, FILE * fp) +static int +npy_fallocate(npy_intp nbytes, FILE * fp) { /* * unknown behavior on non-linux so don't try it From efc2e68c153cdfa12bf87b89f6f5ed0d2073cc51 Mon Sep 17 00:00:00 2001 From: Alex Rogozhnikov Date: Tue, 17 Nov 2015 22:24:04 +0300 Subject: [PATCH 245/496] BUG,ENH: Add extension parameter to f2py.compile and fix verbose. 1. Verbose parameter was ignored earlier. 2. Allowed .f90 extensions for tempfiles --- numpy/f2py/__init__.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index 17a575927183..8a2aeab4ba3c 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -19,16 +19,30 @@ def compile(source, modulename='untitled', extra_args='', - verbose=1, - source_fn=None + verbose=True, + source_fn=None, + extension='.f' ): ''' Build extension module from processing source with f2py. - Read the source of this function for more information. + + Parameters + ---------- + source : str + Fortran source of module / subroutine to compile + modulename : str, optional + the name of compiled python module + extra_args: str, optional + additional parameters passed to f2py + verbose: bool, optional + print f2py output to screen + extension: {'.f', '.f90'}, optional + filename extension influences the fortran compiler behavior + ''' from numpy.distutils.exec_command import exec_command import tempfile if source_fn is None: - f = tempfile.NamedTemporaryFile(suffix='.f') + f = tempfile.NamedTemporaryFile(suffix=extension) else: f = open(source_fn, 'w') @@ -36,13 +50,15 @@ def compile(source, f.write(source) f.flush() - args = ' -c -m %s %s %s' % (modulename, f.name, extra_args) - c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' % \ - (sys.executable, args) - s, o = exec_command(c) + args = ' -c -m {} {} {}'.format(modulename, f.name, extra_args) + c = '{} -c "import numpy.f2py as f2py2e;f2py2e.main()" {}' + c = c.format(sys.executable, args) + status, output = exec_command(c) + if verbose: + print(output) finally: f.close() - return s + return status from numpy.testing import Tester test = Tester().test From d8967ceefe7afca7a260df3f744699d1512f6fd6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 17 Dec 2015 14:39:29 -0700 Subject: [PATCH 246/496] DOC: Some documentation fixups. Add '.. versionadded:: 1.11.0' to the new `extension` parameter in f2py.compile and document it in the 1.11.0 release notes. 
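Taken together, the two f2py patches above can be exercised as in the
following sketch (the Fortran source and module name are illustrative,
and a working Fortran compiler on the path is assumed):

    import numpy.f2py

    # Free-form source, hence extension='.f90'; under the old fixed-form
    # default ('.f') this snippet would fail to compile.
    source = '''
    subroutine double_it(x, n)
        integer, intent(in) :: n
        real(8), intent(inout) :: x(n)
        x = 2 * x
    end subroutine double_it
    '''

    # verbose=True now actually echoes the f2py output (it was previously
    # ignored); the return value is the build exit status, 0 on success.
    status = numpy.f2py.compile(source, modulename='demo',
                                verbose=True, extension='.f90')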
--- doc/release/1.11.0-notes.rst | 31 ++++++++++++++++++++----------- numpy/f2py/__init__.py | 2 ++ 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 7790ac58f61d..b3ddae604f27 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -39,6 +39,7 @@ DeprecationWarning to error * Non-integers used as index values raise TypeError, e.g., in reshape, take, and specifying reduce axis. + FutureWarning to changed behavior ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -62,21 +63,29 @@ mention it here for completeness. New Features ============ -* `np.histogram` now provides plugin estimators for automatically estimating the optimal -number of bins. Passing one of ['auto', 'fd', 'scott', 'rice', 'sturges'] -as the argument to 'bins' results in the corresponding estimator being used. +* `np.histogram` now provides plugin estimators for automatically + estimating the optimal number of bins. Passing one of ['auto', 'fd', + 'scott', 'rice', 'sturges'] as the argument to 'bins' results in the + corresponding estimator being used. -* A benchmark suite using `Airspeed Velocity `__ -has been added, converting the previous vbench-based one. You can run the suite locally -via ``python runtests.py --bench``. For more details, see ``benchmarks/README.rst``. +* A benchmark suite using `Airspeed Velocity + `__ has been added, converting the + previous vbench-based one. You can run the suite locally via ``python + runtests.py --bench``. For more details, see ``benchmarks/README.rst``. * A new function ``np.shares_memory`` that can check exactly whether two -arrays have memory overlap is added. ``np.may_share_memory`` also now -has an option to spend more effort to reduce false positives. + arrays have memory overlap is added. ``np.may_share_memory`` also now has + an option to spend more effort to reduce false positives. + +* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed + in the ``numpy.testing`` namespace. Raise them in a test function to mark + the test to be skipped or mark it as a known failure, respectively. + +* ``f2py.compile`` has a new ``extension`` keyword parameter that allows the + fortran extension to be specified for generated temp files. For instance, + the files can be specifies to be ``*.f90``. The ``verbose`` argument is + also activated, it was previously ignored. -* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed in the -``numpy.testing`` namespace. Raise them in a test function to mark the test to -be skipped or mark it as a known failure, respectively. Improvements ============ diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index 8a2aeab4ba3c..ef92114edd63 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -38,6 +38,8 @@ def compile(source, extension: {'.f', '.f90'}, optional filename extension influences the fortran compiler behavior + .. versionadded:: 1.11.0 + ''' from numpy.distutils.exec_command import exec_command import tempfile From a6c69b0d2934523c6d37880c64bd012df8324e5b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 17 Dec 2015 17:03:08 -0700 Subject: [PATCH 247/496] STY: Break some long lines in numpy-for-matlab-users.rst. 
--- doc/source/user/numpy-for-matlab-users.rst | 115 +++++++++++++++------ 1 file changed, 81 insertions(+), 34 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 9edb588ea008..c3179b18267d 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -7,12 +7,12 @@ Numpy for Matlab users Introduction ============ -MATLAB® and NumPy/SciPy have a lot in common. But -there are many differences. NumPy and SciPy were created to do numerical -and scientific computing in the most natural way with Python, not to be -MATLAB® clones. This page is intended to be a place to collect wisdom -about the differences, mostly for the purpose of helping proficient -MATLAB® users become proficient NumPy and SciPy users. +MATLAB® and NumPy/SciPy have a lot in common. But there are many +differences. NumPy and SciPy were created to do numerical and scientific +computing in the most natural way with Python, not to be MATLAB® clones. +This page is intended to be a place to collect wisdom about the +differences, mostly for the purpose of helping proficient MATLAB® users +become proficient NumPy and SciPy users. .. raw:: html @@ -25,18 +25,39 @@ Some Key Differences .. list-table:: - * - In MATLAB®, the basic data type is a multidimensional array of double precision floating point numbers. Most expressions take such arrays and return such arrays. Operations on the 2-D instances of these arrays are designed to act more or less like matrix operations in linear algebra. - - In NumPy the basic type is a multidimensional ``array``. Operations on these arrays in all dimensionalities including 2D are elementwise operations. However, there is a special ``matrix`` type for doing linear algebra, which is just a subclass of the ``array`` class. Operations on matrix-class arrays are linear algebra operations. - - * - MATLAB® uses 1 (one) based indexing. The initial element of a sequence is found using a(1). + * - In MATLAB®, the basic data type is a multidimensional array of + double precision floating point numbers. Most expressions take such + arrays and return such arrays. Operations on the 2-D instances of + these arrays are designed to act more or less like matrix operations + in linear algebra. + - In NumPy the basic type is a multidimensional ``array``. Operations + on these arrays in all dimensionalities including 2D are elementwise + operations. However, there is a special ``matrix`` type for doing + linear algebra, which is just a subclass of the ``array`` class. + Operations on matrix-class arrays are linear algebra operations. + + * - MATLAB® uses 1 (one) based indexing. The initial element of a + sequence is found using a(1). :ref:`See note INDEXING ` - - Python uses 0 (zero) based indexing. The initial element of a sequence is found using a[0]. - - * - MATLAB®'s scripting language was created for doing linear algebra. The syntax for basic matrix operations is nice and clean, but the API for adding GUIs and making full-fledged applications is more or less an afterthought. - - NumPy is based on Python, which was designed from the outset to be an excellent general-purpose programming language. While Matlab's syntax for some array manipulations is more compact than NumPy's, NumPy (by virtue of being an add-on to Python) can do many things that Matlab just cannot, for instance subclassing the main array type to do both array and matrix math cleanly. 
- - * - In MATLAB®, arrays have pass-by-value semantics, with a lazy copy-on-write scheme to prevent actually creating copies until they are actually needed. Slice operations copy parts of the array. - - In NumPy arrays have pass-by-reference semantics. Slice operations are views into an array. + - Python uses 0 (zero) based indexing. The initial element of a + sequence is found using a[0]. + + * - MATLAB®'s scripting language was created for doing linear algebra. + The syntax for basic matrix operations is nice and clean, but the API + for adding GUIs and making full-fledged applications is more or less + an afterthought. + - NumPy is based on Python, which was designed from the outset to be + an excellent general-purpose programming language. While Matlab's + syntax for some array manipulations is more compact than + NumPy's, NumPy (by virtue of being an add-on to Python) can do many + things that Matlab just cannot, for instance subclassing the main + array type to do both array and matrix math cleanly. + + * - In MATLAB®, arrays have pass-by-value semantics, with a lazy + copy-on-write scheme to prevent actually creating copies until they + are actually needed. Slice operations copy parts of the array. + - In NumPy arrays have pass-by-reference semantics. Slice operations + are views into an array. 'array' or 'matrix'? Which should I use? @@ -212,30 +233,41 @@ General Purpose Equivalents * - **MATLAB** - **numpy** - **Notes** + * - ``help func`` - ``info(func)`` or ``help(func)`` or ``func?`` (in Ipython) - get help on the function *func* + * - ``which func`` - `see note HELP `__ - find out where *func* is defined + * - ``type func`` - ``source(func)`` or ``func??`` (in Ipython) - print source for *func* (if not a native function) + * - ``a && b`` - ``a and b`` - - short-circuiting logical AND operator (Python native operator); scalar arguments only + - short-circuiting logical AND operator (Python native operator); + scalar arguments only + * - ``a || b`` - ``a or b`` - - short-circuiting logical OR operator (Python native operator); scalar arguments only + - short-circuiting logical OR operator (Python native operator); + scalar arguments only + * - ``1*i``, ``1*j``, ``1i``, ``1j`` - ``1j`` - complex numbers + * - ``eps`` - ``np.spacing(1)`` - - Distance between 1 and the nearest floating point number + - Distance between 1 and the nearest floating point number. + * - ``ode45`` - ``scipy.integrate.ode(f).set_integrator('dopri5')`` - integrate an ODE with Runge-Kutta 4,5 + * - ``ode15s`` - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=5)`` - integrate an ODE with BDF method @@ -299,15 +331,18 @@ Linear Algebra Equivalents * - ``a(1:3,5:9)`` - ``a[0:3][:,4:9]`` - - rows one to three and columns five to nine of ``a``. This gives read-only access. + - rows one to three and columns five to nine of ``a``. This gives + read-only access. * - ``a([2,4,5],[1,3])`` - ``a[ix_([1,3,4],[0,2])]`` - - rows 2,4 and 5 and columns 1 and 3. This allows the matrix to be modified, and doesn't require a regular slice. + - rows 2,4 and 5 and columns 1 and 3. This allows the matrix to be + modified, and doesn't require a regular slice. 
* - ``a(3:2:21,:)`` - ``a[ 2:21:2,:]`` - - every other row of ``a``, starting with the third and going to the twenty-first + - every other row of ``a``, starting with the third and going to the + twenty-first * - ``a(1:2:end,:)`` - ``a[ ::2,:]`` @@ -347,8 +382,8 @@ Linear Algebra Equivalents * - ``(a>0.5)`` - ``(a>0.5)`` - - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is - an array of 0s and 1s. The NumPy result is an array of the boolean + - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is an + array of 0s and 1s. The NumPy result is an array of the boolean values ``False`` and ``True``. * - ``find(a>0.5)`` @@ -389,11 +424,13 @@ Linear Algebra Equivalents * - ``1:10`` - ``arange(1.,11.)`` or ``r_[1.:11.]`` or ``r_[1:10:10j]`` - - create an increasing vector (see note :ref:`RANGES `) + - create an increasing vector (see note :ref:`RANGES + `) * - ``0:9`` - ``arange(10.)`` or ``r_[:10.]`` or ``r_[:9:10j]`` - - create an increasing vector (see note :ref:`RANGES `) + - create an increasing vector (see note :ref:`RANGES + `) * - ``[1:10]'`` - ``arange(1.,11.)[:, newaxis]`` @@ -421,7 +458,8 @@ Linear Algebra Equivalents * - ``diag(a,0)`` - ``diag(a,0)`` - - square diagonal matrix whose nonzero values are the elements of ``a`` + - square diagonal matrix whose nonzero values are the elements of + ``a`` * - ``rand(3,4)`` - ``random.rand(3,4)`` @@ -452,7 +490,8 @@ Linear Algebra Equivalents - create m by n copies of ``a`` * - ``[a b]`` - - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or ``column_stack((a,b))`` or ``c_[a,b]`` + - ``concatenate((a,b),1)`` or ``hstack((a,b))`` or + ``column_stack((a,b))`` or ``c_[a,b]`` - concatenate columns of ``a`` and ``b`` * - ``[a; b]`` @@ -473,7 +512,8 @@ Linear Algebra Equivalents * - ``max(a,b)`` - ``maximum(a, b)`` - - compares ``a`` and ``b`` element-wise, and returns the maximum value from each pair + - compares ``a`` and ``b`` element-wise, and returns the maximum value + from each pair * - ``norm(v)`` - ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)`` @@ -481,11 +521,13 @@ Linear Algebra Equivalents * - ``a & b`` - ``logical_and(a,b)`` - - element-by-element AND operator (Numpy ufunc) :ref:`See note LOGICOPS ` + - element-by-element AND operator (Numpy ufunc) :ref:`See note + LOGICOPS ` * - ``a | b`` - ``logical_or(a,b)`` - - element-by-element OR operator (Numpy ufunc) :ref:`See note LOGICOPS ` + - element-by-element OR operator (Numpy ufunc) :ref:`See note LOGICOPS + ` * - ``bitand(a,b)`` - ``a & b`` @@ -508,7 +550,8 @@ Linear Algebra Equivalents - matrix rank of a 2D array / matrix ``a`` * - ``a\b`` - - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise + - ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` + otherwise - solution of a x = b for x * - ``b/a`` @@ -521,7 +564,9 @@ Linear Algebra Equivalents * - ``chol(a)`` - ``linalg.cholesky(a).T`` - - cholesky factorization of a matrix (``chol(a)`` in matlab returns an upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower triangular matrix) + - cholesky factorization of a matrix (``chol(a)`` in matlab returns an + upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower + triangular matrix) * - ``[V,D]=eig(a)`` - ``D,V = linalg.eig(a)`` @@ -624,6 +669,7 @@ Numpy's & and \| operators are: inputs. Matlab treats any non-zero value as 1 and returns the logical AND. For example (3 & 4) in Numpy is 0, while in Matlab both 3 and 4 are considered logical true and (3 & 4) returns 1. 
+ - Precedence: Numpy's & operator is higher precedence than logical operators like < and >; Matlab's is the reverse. @@ -659,6 +705,7 @@ NumPy, or rather Python, has similar facilities. - To modify your Python search path to include the locations of your own modules, define the ``PYTHONPATH`` environment variable. + - To have a particular script file executed when the interactive Python interpreter is started, define the ``PYTHONSTARTUP`` environment variable to contain the name of your startup script. From 088e20e272389395fb3fd24fed144ed19bae8cdb Mon Sep 17 00:00:00 2001 From: gfyoung Date: Fri, 11 Dec 2015 04:24:16 +0000 Subject: [PATCH 248/496] DEP: Stricter arg checking for array ordering The bug traces to the PyArray_OrderConverter method in conversion_utils.c, where no errors are thrown if the ORDER parameter passed in is not of the string data-type or has a string value of length greater than one. This commit causes a DeprecationWarning to be raised, which will later be turned into a TypeError or another type of error in a future release. Closes gh-6598. --- doc/release/1.11.0-notes.rst | 8 ++++++ numpy/add_newdocs.py | 12 +++++--- numpy/core/src/multiarray/conversion_utils.c | 21 ++++++++++++++ numpy/core/tests/test_deprecations.py | 30 ++++++++++++++++++++ numpy/lib/function_base.py | 10 ++++--- numpy/matrixlib/defmatrix.py | 14 +++++---- 6 files changed, 81 insertions(+), 14 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b3ddae604f27..7c078eed972f 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -133,3 +133,11 @@ c_contiguous arrays at some future time. A work around that is backward compatible is to use `a.T.view(...).T` instead. A parameter will also be added to the view method to explicitly ask for Fortran order views, but that will not be backward compatible. + +Invalid arguments for array ordering +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +It is currently possible to pass in arguments for the ```order``` +parameter in methods like ```array.flatten``` or ```array.ravel``` +that were not one of the following: 'C', 'F', 'A', 'K' (note that +all of these possible values are unicode- and case-insensitive). +Such behaviour will not be allowed in future releases. diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index c140360891e5..01ef24a5bf16 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -3567,10 +3567,14 @@ def luf(lamdaexpr, *args, **kwargs): Parameters ---------- - order : {'C', 'F', 'A'}, optional - Whether to flatten in row-major (C-style) or - column-major (Fortran-style) order or preserve the - C/Fortran ordering from `a`. The default is 'C'. + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. 
Returns ------- diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index 88064c1d6e2e..d7a61787591c 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -540,6 +540,15 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) return ret; } else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) { + /* 2015-12-14, 1.11 */ + int ret = DEPRECATE("Non-string object detected for " + "the array ordering. Please pass " + "in 'C', 'F', 'A', or 'K' instead"); + + if (ret < 0) { + return -1; + } + if (PyObject_IsTrue(object)) { *val = NPY_FORTRANORDER; } @@ -553,6 +562,18 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) } else { str = PyBytes_AS_STRING(object); + if (strlen(str) != 1) { + /* 2015-12-14, 1.11 */ + int ret = DEPRECATE("Non length-one string passed " + "in for the array ordering. " + "Please pass in 'C', 'F', 'A', " + "or 'K' instead"); + + if (ret < 0) { + return -1; + } + } + if (str[0] == 'C' || str[0] == 'c') { *val = NPY_CORDER; } diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index f6dc3d842219..65ddc1e77387 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -400,6 +400,36 @@ def test_fortran_contiguous(self): self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) +class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase): + """Invalid arguments to the ORDER parameter in array.flatten() should not be + allowed and should raise an error. However, in the interests of not breaking + code that may inadvertently pass invalid arguments to this parameter, a + DeprecationWarning will be issued instead for the time being to give developers + time to refactor relevant code. + """ + + def test_flatten_array_non_string_arg(self): + x = np.zeros((3, 5)) + self.message = ("Non-string object detected for " + "the array ordering. Please pass " + "in 'C', 'F', 'A', or 'K' instead") + self.assert_deprecated(x.flatten, args=(np.pi,)) + + def test_flatten_array_invalid_string_arg(self): + # Tests that a DeprecationWarning is raised + # when a string of length greater than one + # starting with "C", "F", "A", or "K" (case- + # and unicode-insensitive) is passed in for + # the ORDER parameter. Otherwise, a TypeError + # will be raised! + + x = np.zeros((3, 5)) + self.message = ("Non length-one string passed " + "in for the array ordering. 
Please " + "pass in 'C', 'F', 'A', or 'K' instead") + self.assert_deprecated(x.flatten, args=("FACK",)) + + class TestTestDeprecated(object): def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3298789eeb6e..541ad079cae4 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3966,6 +3966,7 @@ def delete(arr, obj, axis=None): arr = asarray(arr) ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() @@ -4003,7 +4004,7 @@ def delete(arr, obj, axis=None): stop = xr[0] + 1 newshape[axis] -= numtodel - new = empty(newshape, arr.dtype, arr.flags.fnc) + new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass @@ -4054,7 +4055,7 @@ def delete(arr, obj, axis=None): if (obj < 0): obj += N newshape[axis] -= 1 - new = empty(newshape, arr.dtype, arr.flags.fnc) + new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) @@ -4197,6 +4198,7 @@ def insert(arr, obj, values, axis=None): arr = asarray(arr) ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() @@ -4265,7 +4267,7 @@ def insert(arr, obj, values, axis=None): values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew - new = empty(newshape, arr.dtype, arr.flags.fnc) + new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) @@ -4298,7 +4300,7 @@ def insert(arr, obj, values, axis=None): old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False - new = empty(newshape, arr.dtype, arr.flags.fnc) + new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index ffd4578ba142..170db87c8d51 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -277,9 +277,9 @@ def __new__(subtype, data, dtype=None, copy=True): elif ndim == 1: shape = (1, shape[0]) - order = False + order = 'C' if (ndim == 2) and arr.flags.fortran: - order = True + order = 'F' if not (order or arr.flags.contiguous): arr = arr.copy() @@ -519,10 +519,12 @@ def flatten(self, order='C'): Parameters ---------- - order : {'C', 'F', 'A'}, optional - Whether to flatten in C (row-major), Fortran (column-major) order, - or preserve the C/Fortran ordering from `m`. - The default is 'C'. + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. Returns ------- From 4626b59a490bb5f35830a7b6bb74853a9c14aa63 Mon Sep 17 00:00:00 2001 From: Nathaniel Hellabyte Date: Thu, 19 Dec 2013 17:19:52 -0700 Subject: [PATCH 249/496] DOC: Update docstrings of np.sum and np.prod. * Improved language of sum doc for axis subheading. Added clarification of important axis values--{0,1}->iterative {row,column} sum. * Improved language of product axis subheading. 
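The axis semantics that these docstring updates spell out can be
summarized with a small, self-contained example (expected results shown
in comments):

    import numpy as np

    a = np.array([[1, 2, 3],
                  [4, 5, 6]])

    np.sum(a)            # 21 -- axis=None sums every element
    np.sum(a, axis=0)    # array([5, 7, 9])  -- collapses rows: column sums
    np.sum(a, axis=1)    # array([ 6, 15])   -- collapses columns: row sums
    np.prod(a, axis=0)   # array([ 4, 10, 18])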
--- numpy/core/fromnumeric.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 197513294d01..c95e00a41100 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1740,9 +1740,11 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False): Elements to sum. axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. - The default (`axis` = `None`) is perform a sum over all - the dimensions of the input array. `axis` may be negative, in + The default (`axis` = `None`) will sum all of the elements + of the input array. `axis` may be negative, in which case it counts from the last to the first axis. + `axis` = 0 or 1 will sum over the elements of the + columns or rows of the input array. .. versionadded:: 1.7.0 @@ -2393,8 +2395,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False): Input data. axis : None or int or tuple of ints, optional Axis or axes along which a product is performed. - The default (`axis` = `None`) is perform a product over all - the dimensions of the input array. `axis` may be negative, in + The default (`axis` = `None`) will calculate the product + of all the elements in the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 From 5a5329d5d03d3e4ee5dc201ce0f64f9d3f821e78 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 18 Dec 2015 11:06:41 -0700 Subject: [PATCH 250/496] DOC: Clarify docstrings of np.sum and np.prod. --- numpy/core/fromnumeric.py | 75 +++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index c95e00a41100..91ce077b8007 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1739,33 +1739,30 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False): a : array_like Elements to sum. axis : None or int or tuple of ints, optional - Axis or axes along which a sum is performed. - The default (`axis` = `None`) will sum all of the elements - of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - `axis` = 0 or 1 will sum over the elements of the - columns or rows of the input array. + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 - If this is a tuple of ints, a sum is performed on multiple - axes, instead of a single axis or all the axes as before. + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. dtype : dtype, optional - The type of the returned array and of the accumulator in which - the elements are summed. By default, the dtype of `a` is used. - An exception is when `a` has an integer type with less precision - than the default platform integer. In that case, the default - platform integer is used instead. + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. 
out : ndarray, optional - Array into which the output is placed. By default, a new array is - created. If `out` is given, it must be of the appropriate shape - (the shape of `a` with `axis` removed, i.e., - ``numpy.delete(a.shape, axis)``). Its type is preserved. See - `doc.ufuncs` (Section "Output arguments") for more details. + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. Returns ------- @@ -2394,29 +2391,31 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False): a : array_like Input data. axis : None or int or tuple of ints, optional - Axis or axes along which a product is performed. - The default (`axis` = `None`) will calculate the product - of all the elements in the input array. `axis` may be negative, in - which case it counts from the last to the first axis. + Axis or axes along which a product is performed. The default, + axis=None, will calculate the product of all the elements in the + input array. If axis is negative it counts from the last to the + first axis. .. versionadded:: 1.7.0 - If this is a tuple of ints, a product is performed on multiple - axes, instead of a single axis or all the axes as before. - dtype : data-type, optional - The data-type of the returned array, as well as of the accumulator - in which the elements are multiplied. By default, if `a` is of - integer type, `dtype` is the default platform integer. (Note: if - the type of `a` is unsigned, then so is `dtype`.) Otherwise, - the dtype is the same as that of `a`. + If axis is a tuple of ints, a product is performed on all of the + axes specified in the tuple instead of a single axis or all the + axes as before. + dtype : dtype, optional + The type of the returned array, as well as of the accumulator in + which the elements are multiplied. The dtype of `a` is used by + default unless `a` has an integer dtype of less precision than the + default platform integer. In that case, if `a` is signed then the + platform integer is used while if `a` is unsigned then an unsigned + integer of the same precision as the platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have - the same shape as the expected output, but the type of the - output values will be cast if necessary. + the same shape as the expected output, but the type of the output + values will be cast if necessary. keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. Returns ------- From 42ba464096da39bc267fd8cdfbcc4089c90f6686 Mon Sep 17 00:00:00 2001 From: jason king Date: Mon, 5 Oct 2015 17:05:20 +1100 Subject: [PATCH 251/496] DOC, MAINT: Fix the numpy.ma.cov signature and documentation. 
The rowvar and bias parameters are booleans, not integers.
---
 numpy/lib/function_base.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 541ad079cae4..8335b4fdb9a3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2097,7 +2097,8 @@ def _vectorize_call(self, func, args):
         return _res
 
 
-def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
+def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
+        aweights=None):
     """
     Estimate a covariance matrix, given data and weights.
 
@@ -2118,14 +2119,14 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
     y : array_like, optional
         An additional set of variables and observations. `y` has the same
         form as that of `m`.
-    rowvar : int, optional
-        If `rowvar` is non-zero (default), then each row represents a
+    rowvar : bool, optional
+        If `rowvar` is True (default), then each row represents a
         variable, with observations in the columns. Otherwise, the
         relationship is transposed: each column represents a variable,
         while the rows contain observations.
-    bias : int, optional
-        Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
-        number of observations given (unbiased estimate). If `bias` is 1, then
+    bias : bool, optional
+        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
+        number of observations given (unbiased estimate). If `bias` is True, then
         normalization is by ``N``. These values can be overridden by using the
         keyword ``ddof`` in numpy versions >= 1.5.
     ddof : int, optional

From abb80f86435c368451729011efbde3544ca59baf Mon Sep 17 00:00:00 2001
From: Evgeni Burovski
Date: Mon, 2 Nov 2015 12:38:52 +0000
Subject: [PATCH 252/496] DOC: typo in the docstring of random.multinomial

Discuss a loaded die with six sides. Also add the text about handling
of input probabilities, as written by Robert Kern in gh-6612.

[ci skip]
---
 numpy/random/mtrand/mtrand.pyx | 17 +++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index d6ba58bb2b0d..a2cee28b8b59 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -4461,8 +4461,21 @@ cdef class RandomState:
 
         A loaded dice is more likely to land on number 6:
 
-        >>> np.random.multinomial(100, [1/7.]*5)
-        array([13, 16, 13, 16, 42])
+        >>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
+        array([11, 16, 14, 17, 16, 26])
+
+        The probability inputs should already be normalized. The value of the
+        last entry is always ignored and assumed to take up any leftover
+        probability mass.
To sample a biased coin which has twice as much + weight on one side than the other should *not* be sampled like so: + + >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG + array([100, 0]) + + but rather, like so: + + >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT + array([38, 62]) """ cdef npy_intp d From 7fa53390da958cc985bfaeb1620990ddd2255ce8 Mon Sep 17 00:00:00 2001 From: Phaiax Date: Wed, 9 Dec 2015 13:08:54 +0100 Subject: [PATCH 253/496] Fix #6798 --- numpy/fft/fftpack.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 4ad4f680261f..398eec45e5ad 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -204,9 +204,11 @@ def ifft(a, n=None, axis=-1, norm=None): The input should be ordered in the same way as is returned by `fft`, i.e., ``a[0]`` should contain the zero frequency term, - ``a[1:n/2+1]`` should contain the positive-frequency terms, and + ``a[1:n/2]`` should contain the positive-frequency terms, and ``a[n/2+1:]`` should contain the negative-frequency terms, in order of - decreasingly negative frequency. See `numpy.fft` for details. + decreasingly negative frequency. For an even number of input points, + ``A[n/2]`` represents both positive and negative Nyquist frequency. + See `numpy.fft` for details. Parameters ---------- From 44293bb2834f2a4495dacee4ba112a3bfeef5b0c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 18 Dec 2015 13:10:34 -0700 Subject: [PATCH 254/496] DOC: Clarify documentation for np.fft.ifft. The relationship between frequency and position in the input array is clarified. --- numpy/fft/fftpack.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 398eec45e5ad..c3bb732b2615 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -203,12 +203,16 @@ def ifft(a, n=None, axis=-1, norm=None): see `numpy.fft`. The input should be ordered in the same way as is returned by `fft`, - i.e., ``a[0]`` should contain the zero frequency term, - ``a[1:n/2]`` should contain the positive-frequency terms, and - ``a[n/2+1:]`` should contain the negative-frequency terms, in order of - decreasingly negative frequency. For an even number of input points, - ``A[n/2]`` represents both positive and negative Nyquist frequency. - See `numpy.fft` for details. + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. Parameters ---------- @@ -265,9 +269,9 @@ def ifft(a, n=None, axis=-1, norm=None): >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - [, ] + ... >>> plt.legend(('real', 'imaginary')) - + ... >>> plt.show() """ From 722b406f9291116c75207101165cefa58a83ed4d Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 18 Dec 2015 12:19:30 -0700 Subject: [PATCH 255/496] DOC: Update example in np.random.multinomial. Clarify that probabilities should be normalized. 
[ci skip] --- numpy/random/mtrand/mtrand.pyx | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index a2cee28b8b59..e12c7669d13c 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -4459,24 +4459,25 @@ cdef class RandomState: For the first run, we threw 3 times 1, 4 times 2, etc. For the second, we threw 2 times 1, 4 times 2, etc. - A loaded dice is more likely to land on number 6: + A loaded die is more likely to land on number 6: >>> np.random.multinomial(100, [1/7.]*5 + [2/7.]) array([11, 16, 14, 17, 16, 26]) - The probability inputs should already be normalized. The value of the - last entry is always ignored and assumed to take up any leftover - probability mass. To sample a biased coin which has twice as much - weight on one side than the other should *not* be sampled like so: - - >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG - array([100, 0]) - - but rather, like so: + The probability inputs should be normalized. As an implementation + detail, the value of the last entry is ignored and assumed to take + up any leftover probability mass, but this should not be relied on. + A biased coin which has twice as much weight on one side as on the + other should be sampled like so: >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT array([38, 62]) + not like: + + >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG + array([100, 0]) + """ cdef npy_intp d cdef ndarray parr "arrayObject_parr", mnarr "arrayObject_mnarr" From 73ba0080600ab81361df2e83b1d6db2a33ae8bcd Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Fri, 18 Dec 2015 21:48:56 -0700 Subject: [PATCH 256/496] TST: Add initial appveyor configuration with no optimized BLAS. --- appveyor.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 appveyor.yml diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000000..026db34ea75c --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,28 @@ +skip_tags: true +clone_depth: 1 + +os: Visual Studio 2015 + +environment: + PYTHON_ARCH: "x86_64" + matrix: + - PY_MAJOR_VER: 2 + - PY_MAJOR_VER: 3 + +matrix: + #fast_finish: true + allow_failures: + - PY_MAJOR_VER: 2 + - PY_MAJOR_VER: 3 + +build_script: + - ps: Start-FileDownload "https://repo.continuum.io/miniconda/Miniconda$env:PY_MAJOR_VER-latest-Windows-$env:PYTHON_ARCH.exe" C:\Miniconda.exe; echo "Finished downloading miniconda" + - cmd: C:\Miniconda.exe /S /D=C:\Py + - SET PATH=C:\Py;C:\Py\Scripts;C:\Py\Library\bin;%PATH% + - conda config --set always_yes yes + - conda update conda + - conda install cython nose + - pip install . -vvv + +test_script: + - python runtests.py -v -n From 443184b12513ce2a8adcc2a81c143bc4bc697219 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Dec 2015 15:01:55 -0700 Subject: [PATCH 257/496] ENH: Add context manager `temppath` to manage a temporary file. Context manager intended for use when the same temporary file needs to be opened and closed more than once. The context manager creates the file, closes it, and returns the path to the file. On exit from the context block the file is removed. The file should be closed before exiting the context as an error will be raised on windows if not. Also fix up the `tempdir` context manager to deal with exceptions. Tests are added for both `temppath` and `tempdir`. 
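A minimal usage sketch of the new context manager (editorial example, not
part of the patch; it assumes `temppath` is imported from `numpy.testing`,
as the tests below do):

    import numpy as np
    from numpy.testing import temppath

    with temppath(suffix='.npy') as path:
        with open(path, 'wb') as f:    # first open/close cycle
            np.save(f, np.arange(3))
        with open(path, 'rb') as f:    # reopening by name works, Windows included
            data = np.load(f)
    # the underlying file has been removed on exit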
--- numpy/testing/tests/test_utils.py | 33 ++++++++++++++++++++++++++++++- numpy/testing/utils.py | 31 +++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 13aeffe02877..b558270b3b14 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -2,6 +2,7 @@ import warnings import sys +import os import numpy as np from numpy.testing import ( @@ -10,7 +11,7 @@ assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, clear_and_catch_warnings, run_module_suite, - assert_string_equal + assert_string_equal, assert_, tempdir, temppath, ) import unittest @@ -780,6 +781,36 @@ def test_clear_and_catch_warnings(): assert_warn_len_equal(my_mod, 2) +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + try: + with tempdir() as tdir: + raise ValueError() + except ValueError: + pass + assert_(not os.path.isdir(tdir)) + + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w') as f: + pass + assert_(not os.path.isfile(fpath)) + + try: + with temppath() as fpath: + raise ValueError() + except ValueError: + pass + assert_(not os.path.isfile(fpath)) + + class my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 00f7ce4d1976..e85e2f95fc58 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -12,7 +12,7 @@ from functools import partial import shutil import contextlib -from tempfile import mkdtemp +from tempfile import mkdtemp, mkstemp from .nosetester import import_nose from numpy.core import float32, empty, arange, array_repr, ndarray @@ -30,7 +30,7 @@ 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', - 'SkipTest', 'KnownFailureException'] + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir'] class KnownFailureException(Exception): @@ -1810,8 +1810,31 @@ def tempdir(*args, **kwargs): """ tmpdir = mkdtemp(*args, **kwargs) - yield tmpdir - shutil.rmtree(tmpdir) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) class clear_and_catch_warnings(warnings.catch_warnings): From 8bc592fabf4a2b0bc76db996b1523330ba095be3 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sat, 19 Dec 2015 16:49:35 -0800 Subject: [PATCH 258/496] DOC: Use print only as function when print_function is imported from __future__ Closes gh-6863. 
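For context, an editorial example of the doctest style this commit enforces:
once a module does `from __future__ import print_function`, Python 2 rejects
statement-style prints, so doctests must call print() as a function to run
under both Python 2 and Python 3:

    from __future__ import print_function

    x = [1, 2, 3]
    # print x   # SyntaxError once print_function is in effect
    print(x)    # works on Python 2 and Python 3 alike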
--- numpy/add_newdocs.py | 18 +++---- numpy/core/arrayprint.py | 8 +-- numpy/core/fromnumeric.py | 10 ++-- numpy/core/numeric.py | 8 +-- numpy/core/numerictypes.py | 2 +- numpy/core/records.py | 8 +-- numpy/core/shape_base.py | 2 +- numpy/distutils/npy_pkg_config.py | 2 +- numpy/distutils/system_info.py | 2 +- numpy/doc/glossary.py | 6 +-- numpy/doc/misc.py | 2 +- numpy/doc/subclassing.py | 36 ++++++------- numpy/f2py/auxfuncs.py | 2 +- numpy/lib/arrayterator.py | 4 +- numpy/lib/financial.py | 4 +- numpy/lib/function_base.py | 8 +-- numpy/lib/index_tricks.py | 4 +- numpy/lib/polynomial.py | 16 +++--- numpy/lib/tests/test_format.py | 2 +- numpy/lib/twodim_base.py | 2 +- numpy/lib/type_check.py | 2 +- numpy/linalg/lapack_lite/clapack_scrub.py | 4 +- numpy/linalg/linalg.py | 2 +- numpy/ma/core.py | 62 +++++++++++------------ numpy/ma/extras.py | 14 ++--- numpy/ma/tests/test_old_ma.py | 6 +-- numpy/matrixlib/defmatrix.py | 2 +- numpy/testing/decorators.py | 2 +- numpy/testing/noseclasses.py | 22 ++++---- numpy/testing/utils.py | 2 +- tools/swig/test/testFortran.py | 2 +- tools/win32build/misc/x86analysis.py | 6 +-- 32 files changed, 136 insertions(+), 136 deletions(-) diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 01ef24a5bf16..7eef07c4a2f0 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -49,7 +49,7 @@ >>> type(fl) >>> for item in fl: - ... print item + ... print(item) ... 0 1 @@ -1548,7 +1548,7 @@ def luf(lamdaexpr, *args, **kwargs): >>> a = [1,5,1,4,3,4,4] # First column >>> b = [9,4,0,4,0,2,1] # Second column >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> print ind + >>> print(ind) [2 0 4 6 5 3 1] >>> [(a[i],b[i]) for i in ind] @@ -4773,7 +4773,7 @@ def luf(lamdaexpr, *args, **kwargs): >>> y = x.view(dtype=np.int16, type=np.matrix) >>> y matrix([[513]], dtype=int16) - >>> print type(y) + >>> print(type(y)) Creating a view on a structured array so it can be used in calculations @@ -4789,7 +4789,7 @@ def luf(lamdaexpr, *args, **kwargs): Making changes to the view changes the underlying array >>> xv[0,1] = 20 - >>> print x + >>> print(x) [(1, 20) (3, 4)] Using a view to convert an array to a recarray: @@ -4915,7 +4915,7 @@ def luf(lamdaexpr, *args, **kwargs): [10000, 0, None] >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) + ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> old_bufsize = np.setbufsize(20000) >>> old_err = np.seterr(divide='raise') @@ -4979,7 +4979,7 @@ def luf(lamdaexpr, *args, **kwargs): [10000, 0, None] >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) + ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> new_errobj = [20000, 12, err_handler] >>> np.seterrobj(new_errobj) @@ -5064,7 +5064,7 @@ def luf(lamdaexpr, *args, **kwargs): >>> inds array([1, 4, 3, 2]) >>> for n in range(x.size): - ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]] + ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) ... 
0.0 <= 0.2 < 1.0 4.0 <= 6.4 < 10.0 @@ -5473,7 +5473,7 @@ def luf(lamdaexpr, *args, **kwargs): 1 >>> np.power.identity 1 - >>> print np.exp.identity + >>> print(np.exp.identity) None """)) @@ -6181,7 +6181,7 @@ def luf(lamdaexpr, *args, **kwargs): Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> print dt.fields + >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} """)) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index a28b5a89e7b7..fefcb649393d 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -114,13 +114,13 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, Floating point precision can be set: >>> np.set_printoptions(precision=4) - >>> print np.array([1.123456789]) + >>> print(np.array([1.123456789])) [ 1.1235] Long arrays can be summarised: >>> np.set_printoptions(threshold=5) - >>> print np.arange(10) + >>> print(np.arange(10)) [0 1 2 ..., 7 8 9] Small results can be suppressed: @@ -420,8 +420,8 @@ def array2string(a, max_line_width=None, precision=None, Examples -------- >>> x = np.array([1e-16,1,2,3]) - >>> print np.array2string(x, precision=2, separator=',', - ... suppress_small=True) + >>> print(np.array2string(x, precision=2, separator=',', + ... suppress_small=True)) [ 0., 1., 2., 3.] >>> x = np.arange(3.) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 197513294d01..7d2078adf63d 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1434,20 +1434,20 @@ def ravel(a, order='C'): It is equivalent to ``reshape(-1, order=order)``. >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> print np.ravel(x) + >>> print(np.ravel(x)) [1 2 3 4 5 6] - >>> print x.reshape(-1) + >>> print(x.reshape(-1)) [1 2 3 4 5 6] - >>> print np.ravel(x, order='F') + >>> print(np.ravel(x, order='F')) [1 4 2 5 3 6] When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - >>> print np.ravel(x.T) + >>> print(np.ravel(x.T)) [1 4 2 5 3 6] - >>> print np.ravel(x.T, order='A') + >>> print(np.ravel(x.T, order='A')) [1 2 3 4 5 6] When ``order`` is 'K', it will preserve orderings that are neither 'C' diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 3b442ea7822c..4f3d418e61d8 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1808,7 +1808,7 @@ def set_string_function(f, repr=True): >>> a = np.arange(10) >>> a HA! - What are you going to do now? - >>> print a + >>> print(a) [0 1 2 3 4 5 6 7 8 9] We can reset the function to the default: @@ -2710,7 +2710,7 @@ def seterrcall(func): Callback upon error: >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) + ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> saved_handler = np.seterrcall(err_handler) @@ -2729,7 +2729,7 @@ def seterrcall(func): >>> class Log(object): ... def write(self, msg): - ... print "LOG: %s" % msg + ... print("LOG: %s" % msg) ... >>> log = Log() @@ -2787,7 +2787,7 @@ def geterrcall(): >>> oldsettings = np.seterr(all='call') >>> def err_handler(type, flag): - ... print "Floating point error (%s), with flag %s" % (type, flag) + ... 
print("Floating point error (%s), with flag %s" % (type, flag)) >>> oldhandler = np.seterrcall(err_handler) >>> np.array([1, 2, 3]) / 0.0 Floating point error (divide by zero), with flag 1 diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 7dc6e0bd8fb9..1b6551e6c896 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -822,7 +822,7 @@ def sctype2char(sctype): Examples -------- >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]: - ... print np.sctype2char(sctype) + ... print(np.sctype2char(sctype)) l d D diff --git a/numpy/core/records.py b/numpy/core/records.py index b0775538478b..ca6070cf76a4 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -567,7 +567,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, >>> x2=np.array(['a','dd','xyz','12']) >>> x3=np.array([1.1,2,3,4]) >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') - >>> print r[1] + >>> print(r[1]) (2, 'dd', 2.0) >>> x1[1]=34 >>> r.a @@ -643,7 +643,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], ... names='col1,col2,col3') - >>> print r[0] + >>> print(r[0]) (456, 'dbe', 1.2) >>> r.col1 array([456, 2]) @@ -651,7 +651,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, array(['dbe', 'de'], dtype='|S3') >>> import pickle - >>> print pickle.loads(pickle.dumps(r)) + >>> print(pickle.loads(pickle.dumps(r))) [(456, 'dbe', 1.2) (2, 'de', 1.3)] """ @@ -736,7 +736,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> fd.seek(0) >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, ... byteorder='<') - >>> print r[5] + >>> print(r[5]) (0.5, 10, 'abcde') >>> r.shape (10,) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 0dd2e164abb1..599b48d82b50 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -150,7 +150,7 @@ def atleast_3d(*arys): True >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print arr, arr.shape + ... print(arr, arr.shape) ... [[[1] [2]]] (1, 2, 1) diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py index 1c801fd9c069..fe64709ca215 100644 --- a/numpy/distutils/npy_pkg_config.py +++ b/numpy/distutils/npy_pkg_config.py @@ -366,7 +366,7 @@ def read_config(pkgname, dirs=None): >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') >>> type(npymath_info) - >>> print npymath_info + >>> print(npymath_info) Name: npymath Description: Portable, core math library implementing C99 standard Requires: diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 94436243ee58..bf2762523b14 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1947,7 +1947,7 @@ def calc_info(self): ## hex(vstr2hex(module.__version__))), ## ) ## except Exception as msg: -## print msg +## print(msg) dict_append(info, define_macros=macros) include_dirs = self.get_include_dirs() inc_dir = None diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py index 9dacd1cb5226..4a323849139d 100644 --- a/numpy/doc/glossary.py +++ b/numpy/doc/glossary.py @@ -109,7 +109,7 @@ >>> def log(f): ... def new_logging_func(*args, **kwargs): - ... print "Logging call with parameters:", args, kwargs + ... print("Logging call with parameters:", args, kwargs) ... return f(*args, **kwargs) ... ... 
return new_logging_func @@ -185,7 +185,7 @@ It is often used in combintion with ``enumerate``:: >>> keys = ['a','b','c'] >>> for n, k in enumerate(keys): - ... print "Key %d: %s" % (n, k) + ... print("Key %d: %s" % (n, k)) ... Key 0: a Key 1: b @@ -315,7 +315,7 @@ ... color = 'blue' ... ... def paint(self): - ... print "Painting the city %s!" % self.color + ... print("Painting the city %s!" % self.color) ... >>> p = Paintbrush() >>> p.color = 'red' diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py index 1709ad66da7a..e30caf0cb555 100644 --- a/numpy/doc/misc.py +++ b/numpy/doc/misc.py @@ -86,7 +86,7 @@ >>> np.sqrt(np.array([-1.])) FloatingPointError: invalid value encountered in sqrt >>> def errorhandler(errstr, errflag): - ... print "saw stupid error!" + ... print("saw stupid error!") >>> np.seterrcall(errorhandler) >>> j = np.seterr(all='call') diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py index a62fc2d6de92..85327feab321 100644 --- a/numpy/doc/subclassing.py +++ b/numpy/doc/subclassing.py @@ -123,13 +123,13 @@ class C(object): def __new__(cls, *args): - print 'Cls in __new__:', cls - print 'Args in __new__:', args + print('Cls in __new__:', cls) + print('Args in __new__:', args) return object.__new__(cls, *args) def __init__(self, *args): - print 'type(self) in __init__:', type(self) - print 'Args in __init__:', args + print('type(self) in __init__:', type(self)) + print('Args in __init__:', args) meaning that we get: @@ -159,13 +159,13 @@ def __init__(self, *args): class D(C): def __new__(cls, *args): - print 'D cls is:', cls - print 'D args in __new__:', args + print('D cls is:', cls) + print('D args in __new__:', args) return C.__new__(C, *args) def __init__(self, *args): # we never get here - print 'In D __init__' + print('In D __init__') meaning that: @@ -242,18 +242,18 @@ class other than the class in which it is defined, the ``__init__`` class C(np.ndarray): def __new__(cls, *args, **kwargs): - print 'In __new__ with class %s' % cls + print('In __new__ with class %s' % cls) return np.ndarray.__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ # method for your subclass - print 'In __init__ with class %s' % self.__class__ + print('In __init__ with class %s' % self.__class__) def __array_finalize__(self, obj): - print 'In array_finalize:' - print ' self type is %s' % type(self) - print ' obj type is %s' % type(obj) + print('In array_finalize:') + print(' self type is %s' % type(self)) + print(' obj type is %s' % type(obj)) Now: @@ -441,16 +441,16 @@ def __new__(cls, input_array, info=None): return obj def __array_finalize__(self, obj): - print 'In __array_finalize__:' - print ' self is %s' % repr(self) - print ' obj is %s' % repr(obj) + print('In __array_finalize__:') + print(' self is %s' % repr(self)) + print(' obj is %s' % repr(obj)) if obj is None: return self.info = getattr(obj, 'info', None) def __array_wrap__(self, out_arr, context=None): - print 'In __array_wrap__:' - print ' self is %s' % repr(self) - print ' arr is %s' % repr(out_arr) + print('In __array_wrap__:') + print(' self is %s' % repr(self)) + print(' arr is %s' % repr(out_arr)) # then just call the parent return np.ndarray.__array_wrap__(self, out_arr, context) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index b64aaa50d7ea..289102e5abd9 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -673,7 +673,7 @@ def getcallprotoargument(rout, cb_map={}): proto_args = ','.join(arg_types + 
arg_types2) if not proto_args: proto_args = 'void' - # print proto_args + # print(proto_args) return proto_args diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py index 80b369bd5cf3..fb52ada86cee 100644 --- a/numpy/lib/arrayterator.py +++ b/numpy/lib/arrayterator.py @@ -80,7 +80,7 @@ class Arrayterator(object): >>> for subarr in a_itor: ... if not subarr.all(): - ... print subarr, subarr.shape + ... print(subarr, subarr.shape) ... [[[[0 1]]]] (1, 1, 1, 2) @@ -158,7 +158,7 @@ def flat(self): >>> for subarr in a_itor.flat: ... if not subarr: - ... print subarr, type(subarr) + ... print(subarr, type(subarr)) ... 0 diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index a7e4e60b6495..c42424da17ed 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -247,7 +247,7 @@ def nper(rate, pmt, pv, fv=0, when='end'): If you only had $150/month to pay towards the loan, how long would it take to pay-off a loan of $8,000 at 7% annual interest? - >>> print round(np.nper(0.07/12, -150, 8000), 5) + >>> print(round(np.nper(0.07/12, -150, 8000), 5)) 64.07335 So, over 64 months would be required to pay off the loan. @@ -347,7 +347,7 @@ def ipmt(rate, per, nper, pv, fv=0.0, when='end'): >>> for payment in per: ... index = payment - 1 ... principal = principal + ppmt[index] - ... print fmt.format(payment, ppmt[index], ipmt[index], principal) + ... print(fmt.format(payment, ppmt[index], ipmt[index], principal)) 1 -200.58 -17.17 2299.42 2 -201.96 -15.79 2097.46 3 -203.35 -14.40 1894.11 diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 8335b4fdb9a3..c69185c1cb09 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -833,7 +833,7 @@ class ndarray is returned. >>> try: ... np.asarray_chkfinite(a) ... except ValueError: - ... print 'ValueError' + ... print('ValueError') ... ValueError @@ -2200,13 +2200,13 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) - >>> print np.cov(X) + >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] - >>> print np.cov(x, y) + >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] - >>> print np.cov(x) + >>> print(np.cov(x)) 11.71 """ diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 8bcc3fb5389b..a0875a25fd57 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -491,7 +491,7 @@ class ndenumerate(object): -------- >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): - ... print index, x + ... print(index, x) (0, 0) 1 (0, 1) 2 (1, 0) 3 @@ -542,7 +542,7 @@ class ndindex(object): Examples -------- >>> for index in np.ndindex(3, 2, 1): - ... print index + ... 
print(index) (0, 0, 0) (0, 1, 0) (1, 0, 0) diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 2f677438ba9c..a5d3f5f5f4b8 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -715,12 +715,12 @@ def polyadd(a1, a2): >>> p1 = np.poly1d([1, 2]) >>> p2 = np.poly1d([9, 5, 4]) - >>> print p1 + >>> print(p1) 1 x + 2 - >>> print p2 + >>> print(p2) 2 9 x + 5 x + 4 - >>> print np.polyadd(p1, p2) + >>> print(np.polyadd(p1, p2)) 2 9 x + 6 x + 6 @@ -826,13 +826,13 @@ def polymul(a1, a2): >>> p1 = np.poly1d([1, 2, 3]) >>> p2 = np.poly1d([9, 5, 1]) - >>> print p1 + >>> print(p1) 2 1 x + 2 x + 3 - >>> print p2 + >>> print(p2) 2 9 x + 5 x + 1 - >>> print np.polymul(p1, p2) + >>> print(np.polymul(p1, p2)) 4 3 2 9 x + 23 x + 38 x + 17 x + 3 @@ -966,7 +966,7 @@ class poly1d(object): Construct the polynomial :math:`x^2 + 2x + 3`: >>> p = np.poly1d([1, 2, 3]) - >>> print np.poly1d(p) + >>> print(np.poly1d(p)) 2 1 x + 2 x + 3 @@ -1022,7 +1022,7 @@ class poly1d(object): using the `variable` parameter: >>> p = np.poly1d([1,2,3], variable='z') - >>> print p + >>> print(p) 2 1 z + 2 z + 3 diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 1bf65fa61d4f..a091ef5b3fc9 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -112,7 +112,7 @@ >>> for arr in basic_arrays + record_arrays: ... f = BytesIO() ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it - ... print repr(f.getvalue()) + ... print(repr(f.getvalue())) ... "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 464ffd914488..b2f350bb74b9 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -664,7 +664,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): Or we fill the histogram H with a determined bin content: >>> H = np.ones((4, 4)).cumsum().reshape(4, 4) - >>> print H[::-1] # This shows the bin content in the order as plotted + >>> print(H[::-1]) # This shows the bin content in the order as plotted [[ 13. 14. 15. 16.] [ 9. 10. 11. 12.] [ 5. 6. 7. 8.] diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 2fe4e7d2318f..1313adff7180 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -501,7 +501,7 @@ def typename(char): >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: - ... print typechar, ' : ', np.typename(typechar) + ... print(typechar, ' : ', np.typename(typechar)) ... S1 : character ? 
: bool diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 4a517d531c84..9dfee0a84901 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -14,9 +14,9 @@ def __init__(self, info, name=''): def begin(self, state_name): # if self.state_name == '': -# print '' +# print('') # else: -# print self.state_name +# print(self.state_name) Scanner.begin(self, state_name) def sep_seq(sequence, sep): diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 2e969727b93c..9dc879d31045 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1853,7 +1853,7 @@ def lstsq(a, b, rcond=-1): [ 3., 1.]]) >>> m, c = np.linalg.lstsq(A, y)[0] - >>> print m, c + >>> print(m, c) 1.0 -0.95 Plot the data along with the fitted line: diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 25e542cd6587..de716a6699aa 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2147,12 +2147,12 @@ def masked_object(x, value, copy=True, shrink=True): >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') - >>> print eat + >>> print(eat) [-- ham] >>> # plain ol` ham is boring >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) >>> eat = ma.masked_object(fresh_food, 'green_eggs') - >>> print eat + >>> print(eat) [cheese ham pineapple] Note that `mask` is set to ``nomask`` if possible. @@ -2548,7 +2548,7 @@ class MaskedIterator(object): >>> type(fl) >>> for item in fl: - ... print item + ... print(item) ... 0 1 @@ -3064,11 +3064,11 @@ def astype(self, newtype): Examples -------- >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1.0 -- 3.1] [-- 5.0 --] [7.0 -- 9.0]] - >>> print x.astype(int32) + >>> print(x.astype(int32)) [[1 -- 3] [-- 5 --] [7 -- 9]] @@ -3656,7 +3656,7 @@ def compress(self, condition, axis=None, out=None): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] @@ -4261,11 +4261,11 @@ def ravel(self, order='C'): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] - >>> print x.ravel() + >>> print(x.ravel()) [1 -- 3 -- 5 -- 7 -- 9] """ @@ -4317,11 +4317,11 @@ def reshape(self, *s, **kwargs): Examples -------- >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) - >>> print x + >>> print(x) [[-- 2] [3 --]] >>> x = x.reshape((4,1)) - >>> print x + >>> print(x) [[--] [2] [3] @@ -4382,18 +4382,18 @@ def put(self, indices, values, mode='raise'): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] >>> x.put([0,4,8],[10,20,30]) - >>> print x + >>> print(x) [[10 -- 3] [-- 20 --] [7 -- 30]] >>> x.put(4,999) - >>> print x + >>> print(x) [[10 -- 3] [-- 999 --] [7 -- 30]] @@ -4745,17 +4745,17 @@ def sum(self, axis=None, dtype=None, out=None): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] - >>> print x.sum() + >>> print(x.sum()) 25 - >>> print x.sum(axis=1) + >>> print(x.sum(axis=1)) [4 5 16] - >>> print x.sum(axis=0) + >>> print(x.sum(axis=0)) [8 5 12] - >>> print type(x.sum(axis=0, dtype=np.int64)[0]) + >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) """ @@ -4823,7 +4823,7 @@ def cumsum(self, 
axis=None, dtype=None, out=None): Examples -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) - >>> print marr.cumsum() + >>> print(marr.cumsum()) [0 1 3 -- -- -- 9 16 24 33] """ @@ -5223,12 +5223,12 @@ def argmin(self, axis=None, fill_value=None, out=None): -------- >>> x = np.ma.array(arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) - >>> print x + >>> print(x) [[-- --] [2 3]] - >>> print x.argmin(axis=0, fill_value=-1) + >>> print(x.argmin(axis=0, fill_value=-1)) [0 0] - >>> print x.argmin(axis=0, fill_value=9) + >>> print(x.argmin(axis=0, fill_value=9)) [1 1] """ @@ -5324,19 +5324,19 @@ def sort(self, axis=-1, kind='quicksort', order=None, >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() - >>> print a + >>> print(a) [1 3 5 -- --] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) - >>> print a + >>> print(a) [-- -- 1 3 5] >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) - >>> print a + >>> print(a) [1 -- -- 3 5] """ @@ -5452,7 +5452,7 @@ def mini(self, axis=None): Examples -------- >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) - >>> print x + >>> print(x) [[0 --] [2 3] [4 --]] @@ -5462,7 +5462,7 @@ def mini(self, axis=None): masked_array(data = [0 3], mask = [False False], fill_value = 999999) - >>> print x.mini(axis=1) + >>> print(x.mini(axis=1)) [0 2 4] """ @@ -5741,11 +5741,11 @@ def toflex(self): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print x + >>> print(x) [[1 -- 3] [-- 5 --] [7 -- 9]] - >>> print x.toflex() + >>> print(x.toflex()) [[(1, False) (2, True) (3, False)] [(4, True) (5, False) (6, True)] [(7, False) (8, True) (9, False)]] @@ -6914,14 +6914,14 @@ def where(condition, x=_NoValue, y=_NoValue): >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) - >>> print x + >>> print(x) [[0.0 -- 2.0] [-- 4.0 --] [6.0 -- 8.0]] >>> np.ma.where(x > 5) # return the indices where x > 5 (array([2, 2]), array([0, 2])) - >>> print np.ma.where(x > 5, x, -3.1416) + >>> print(np.ma.where(x > 5, x, -3.1416)) [[-3.1416 -- -3.1416] [-- -3.1416 --] [6.0 -- 8.0]] diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index e1d228e73482..9855b4e76481 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -439,7 +439,7 @@ def apply_over_axes(func, a, axes): >>> a = ma.arange(24).reshape(2,3,4) >>> a[:,0,1] = ma.masked >>> a[:,1,:] = ma.masked - >>> print a + >>> print(a) [[[0 -- 2 3] [-- -- -- --] [8 9 10 11]] @@ -447,14 +447,14 @@ def apply_over_axes(func, a, axes): [[12 -- 14 15] [-- -- -- --] [20 21 22 23]]] - >>> print ma.apply_over_axes(ma.sum, a, [0,2]) + >>> print(ma.apply_over_axes(ma.sum, a, [0,2])) [[[46] [--] [124]]] Tuple axis arguments to ufuncs are equivalent: - >>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1))) [[[46] [--] [124]]] @@ -502,13 +502,13 @@ def average(a, axis=None, weights=None, returned=False): 1.25 >>> x = np.ma.arange(6.).reshape(3, 2) - >>> print x + >>> print(x) [[ 0. 1.] [ 2. 3.] [ 4. 5.]] >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], ... 
returned=True) - >>> print avg + >>> print(avg) [2.66666666667 3.66666666667] """ @@ -1476,7 +1476,7 @@ def flatnotmasked_edges(a): array([3, 8]) >>> a[:] = np.ma.masked - >>> print flatnotmasked_edges(ma) + >>> print(flatnotmasked_edges(ma)) None """ @@ -1578,7 +1578,7 @@ def flatnotmasked_contiguous(a): >>> np.ma.flatnotmasked_contiguous(a) [slice(3, 5, None), slice(6, 9, None)] >>> a[:] = np.ma.masked - >>> print np.ma.flatnotmasked_edges(a) + >>> print(np.ma.flatnotmasked_edges(a)) None """ diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index a32f358c09c8..d9a93efd4bea 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -831,13 +831,13 @@ def eqmask(m1, m2): # t = testta(n, f) # t1 = testtb(n, f) # t2 = testtc(n, f) -# print f.test_name -# print """\ +# print(f.test_name) +# print("""\ #n = %7d #numpy time (ms) %6.1f #MA maskless ratio %6.1f #MA masked ratio %6.1f -#""" % (n, t*1000.0, t1/t, t2/t) +#""" % (n, t*1000.0, t1/t, t2/t)) #def testta(n, f): # x=np.arange(n) + 1.0 diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 170db87c8d51..134f4d20341f 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -233,7 +233,7 @@ class matrix(N.ndarray): Examples -------- >>> a = np.matrix('1 2; 3 4') - >>> print a + >>> print(a) [[1 2] [3 4]] diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py index df3d297ff2fe..6cde298e1cab 100644 --- a/numpy/testing/decorators.py +++ b/numpy/testing/decorators.py @@ -48,7 +48,7 @@ def slow(t): @dec.slow def test_big(self): - print 'Big, slow test' + print('Big, slow test') """ diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py index 197e20bacb50..4a8e9b0d69e9 100644 --- a/numpy/testing/noseclasses.py +++ b/numpy/testing/noseclasses.py @@ -34,33 +34,33 @@ def _from_module(self, module, object): module. """ if module is None: - #print '_fm C1' # dbg + #print('_fm C1') # dbg return True elif inspect.isfunction(object): - #print '_fm C2' # dbg + #print('_fm C2') # dbg return module.__dict__ is object.__globals__ elif inspect.isbuiltin(object): - #print '_fm C2-1' # dbg + #print('_fm C2-1') # dbg return module.__name__ == object.__module__ elif inspect.isclass(object): - #print '_fm C3' # dbg + #print('_fm C3') # dbg return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea - #print '_fm C3-1' # dbg + #print('_fm C3-1') # dbg return module.__name__ == object.__self__.__class__.__module__ elif inspect.getmodule(object) is not None: - #print '_fm C4' # dbg - #print 'C4 mod',module,'obj',object # dbg + #print('_fm C4') # dbg + #print('C4 mod',module,'obj',object) # dbg return module is inspect.getmodule(object) elif hasattr(object, '__module__'): - #print '_fm C5' # dbg + #print('_fm C5') # dbg return module.__name__ == object.__module__ elif isinstance(object, property): - #print '_fm C6' # dbg + #print('_fm C6') # dbg return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function") @@ -95,10 +95,10 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): # Look for tests in a class's contained objects. 
if isclass(obj) and self._recurse: - #print 'RECURSE into class:',obj # dbg + #print('RECURSE into class:',obj) # dbg for valname, val in obj.__dict__.items(): #valname1 = '%s.%s' % (name, valname) # dbg - #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg + #print('N',name,'VN:',valname,'val:',str(val)[:77]) # dbg # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 00f7ce4d1976..49d249339a77 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -1293,7 +1293,7 @@ def measure(code_str,times=1,label=None): -------- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', ... times=times) - >>> print "Time for a single execution : ", etime / times, "s" + >>> print("Time for a single execution : ", etime / times, "s") Time for a single execution : 0.005 s """ diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index be4134d4eb7c..c77d728e96cc 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -29,7 +29,7 @@ def __init__(self, methodName="runTests"): # commenting it out. --WFS # def testSecondElementContiguous(self): # "Test Fortran matrix initialized from reshaped default array" - # print >>sys.stderr, self.typeStr, "... ", + # print(self.typeStr, "... ", end="", file=sys.stderr) # second = Fortran.__dict__[self.typeStr + "SecondElement"] # matrix = np.arange(9).reshape(3, 3).astype(self.typeCode) # self.assertEquals(second(matrix), 3) diff --git a/tools/win32build/misc/x86analysis.py b/tools/win32build/misc/x86analysis.py index 39b7cca795f5..0b558640628c 100644 --- a/tools/win32build/misc/x86analysis.py +++ b/tools/win32build/misc/x86analysis.py @@ -148,9 +148,9 @@ def analyse(filename): sse3 = has_sse3(inst) #mmx = has_mmx(inst) #ppro = has_ppro(inst) - #print sse - #print sse2 - #print sse3 + #print(sse) + #print(sse2) + #print(sse3) print("SSE3 inst %d" % cntset(sse3)) print("SSE2 inst %d" % cntset(sse2)) print("SSE inst %d" % cntset(sse)) From 0574f62bf58eada5860fe0620151aede85a8dae4 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 20 Dec 2015 01:08:29 -0800 Subject: [PATCH 259/496] MAINT: Remove commented out code blocks --- numpy/distutils/system_info.py | 30 ------------ numpy/f2py/auxfuncs.py | 4 -- numpy/linalg/lapack_lite/clapack_scrub.py | 4 -- numpy/ma/tests/test_old_ma.py | 58 ----------------------- numpy/testing/noseclasses.py | 12 ----- tools/swig/test/testFortran.py | 10 ---- tools/win32build/misc/x86analysis.py | 9 ---- 7 files changed, 127 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index bf2762523b14..dde18dfa5619 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -677,11 +677,6 @@ def library_extensions(self): exts.append('.dll.a') if sys.platform == 'darwin': exts.append('.dylib') - # Debian and Ubuntu added a g3f suffix to shared library to deal with - # g77 -> gfortran ABI transition - # XXX: disabled, it hides more problem than it solves. 
- #if sys.platform[:5] == 'linux': - # exts.append('.so.3gf') return exts def check_libs(self, lib_dirs, libs, opt_libs=[]): @@ -995,13 +990,10 @@ def __init__(self): l = 'mkl' # use shared library if cpu.is_Itanium(): plt = '64' - #l = 'mkl_ipf' elif cpu.is_Xeon(): plt = 'intel64' - #l = 'mkl_intel64' else: plt = '32' - #l = 'mkl_ia32' if l not in self._lib_mkl: self._lib_mkl.insert(0, l) system_info.__init__( @@ -1243,8 +1235,6 @@ def calc_info(self): class atlas_3_10_threads_info(atlas_3_10_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['tatlas'] - #if sys.platfcorm[:7] == 'freebsd': - ## I don't think freebsd supports 3.10 at this time - 2014 _lib_atlas = _lib_names _lib_lapack = _lib_names @@ -1535,7 +1525,6 @@ def calc_info(self): ('HAVE_CBLAS', None)]) return - #atlas_info = {} ## uncomment for testing need_lapack = 0 need_blas = 0 info = {} @@ -1567,7 +1556,6 @@ def calc_info(self): if need_blas: blas_info = get_info('blas') - #blas_info = {} ## uncomment for testing if blas_info: dict_append(info, **blas_info) else: @@ -1941,13 +1929,6 @@ def calc_info(self): '"\\"%s\\""' % (vrs)), (self.modulename.upper(), None)] break -## try: -## macros.append( -## (self.modulename.upper()+'_VERSION_HEX', -## hex(vstr2hex(module.__version__))), -## ) -## except Exception as msg: -## print(msg) dict_append(info, define_macros=macros) include_dirs = self.get_include_dirs() inc_dir = None @@ -2322,17 +2303,6 @@ def calc_info(self): self.set_info(**info) return -## def vstr2hex(version): -## bits = [] -## n = [24,16,8,4,0] -## r = 0 -## for s in version.split('.'): -## r |= int(s) << n[0] -## del n[0] -## return r - -#-------------------------------------------------------------------- - def combine_paths(*args, **kws): """ Return a list of existing paths composed by all combinations of diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 289102e5abd9..d27b95947230 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -430,9 +430,6 @@ def isintent_nothide(var): def isintent_c(var): return 'c' in var.get('intent', []) -# def isintent_f(var): -# return not isintent_c(var) - def isintent_cache(var): return 'cache' in var.get('intent', []) @@ -673,7 +670,6 @@ def getcallprotoargument(rout, cb_map={}): proto_args = ','.join(arg_types + arg_types2) if not proto_args: proto_args = 'void' - # print(proto_args) return proto_args diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 9dfee0a84901..f3d29aa4ed82 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -13,10 +13,6 @@ def __init__(self, info, name=''): Scanner.__init__(self, self.lexicon, info, name) def begin(self, state_name): -# if self.state_name == '': -# print('') -# else: -# print(self.state_name) Scanner.begin(self, state_name) def sep_seq(sequence, sep): diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index d9a93efd4bea..6ce29cc030a2 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -522,11 +522,6 @@ def test_testMasked(self): self.assertTrue(str(masked) == '--') self.assertTrue(xx[1] is masked) self.assertEqual(filled(xx[1], 0), 0) - # don't know why these should raise an exception... 
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) def test_testAverage1(self): # Test of average. @@ -681,9 +676,7 @@ def test_testUfuncRegression(self): 'arccosh', 'arctanh', 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', 'floor', 'ceil', - # 'sometrue', 'alltrue', 'logical_not', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'floor_divide', @@ -754,7 +747,6 @@ def setUp(self): self.d = (x, X, XX, m, mx, mX, mXX) - #------------------------------------------------------ def test_trace(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXdiag = mX.diagonal() @@ -825,55 +817,5 @@ def eqmask(m1, m2): return m1 is nomask return (m1 == m2).all() -#def timingTest(): -# for f in [testf, testinplace]: -# for n in [1000,10000,50000]: -# t = testta(n, f) -# t1 = testtb(n, f) -# t2 = testtc(n, f) -# print(f.test_name) -# print("""\ -#n = %7d -#numpy time (ms) %6.1f -#MA maskless ratio %6.1f -#MA masked ratio %6.1f -#""" % (n, t*1000.0, t1/t, t2/t)) - -#def testta(n, f): -# x=np.arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtb(n, f): -# x=arange(n) + 1.0 -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testtc(n, f): -# x=arange(n) + 1.0 -# x[0] = masked -# tn0 = time.time() -# z = f(x) -# return time.time() - tn0 - -#def testf(x): -# for i in range(25): -# y = x **2 + 2.0 * x - 1.0 -# w = x **2 + 1.0 -# z = (y / w) ** 2 -# return z -#testf.test_name = 'Simple arithmetic' - -#def testinplace(x): -# for i in range(25): -# y = x**2 -# y += 2.0*x -# y -= 1.0 -# y /= x -# return y -#testinplace.test_name = 'Inplace operations' - if __name__ == "__main__": run_module_suite() diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py index 4a8e9b0d69e9..ee9d1b4dfec9 100644 --- a/numpy/testing/noseclasses.py +++ b/numpy/testing/noseclasses.py @@ -34,33 +34,24 @@ def _from_module(self, module, object): module. """ if module is None: - #print('_fm C1') # dbg return True elif inspect.isfunction(object): - #print('_fm C2') # dbg return module.__dict__ is object.__globals__ elif inspect.isbuiltin(object): - #print('_fm C2-1') # dbg return module.__name__ == object.__module__ elif inspect.isclass(object): - #print('_fm C3') # dbg return module.__name__ == object.__module__ elif inspect.ismethod(object): # This one may be a bug in cython that fails to correctly set the # __module__ attribute of methods, but since the same error is easy # to make by extension code writers, having this safety in place # isn't such a bad idea - #print('_fm C3-1') # dbg return module.__name__ == object.__self__.__class__.__module__ elif inspect.getmodule(object) is not None: - #print('_fm C4') # dbg - #print('C4 mod',module,'obj',object) # dbg return module is inspect.getmodule(object) elif hasattr(object, '__module__'): - #print('_fm C5') # dbg return module.__name__ == object.__module__ elif isinstance(object, property): - #print('_fm C6') # dbg return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function") @@ -95,10 +86,7 @@ def _find(self, tests, obj, name, module, source_lines, globs, seen): # Look for tests in a class's contained objects. 
if isclass(obj) and self._recurse: - #print('RECURSE into class:',obj) # dbg for valname, val in obj.__dict__.items(): - #valname1 = '%s.%s' % (name, valname) # dbg - #print('N',name,'VN:',valname,'val:',str(val)[:77]) # dbg # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index c77d728e96cc..b7783be90969 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -24,16 +24,6 @@ def __init__(self, methodName="runTests"): self.typeStr = "double" self.typeCode = "d" - # This test used to work before the update to avoid deprecated code. Now it - # doesn't work. As best I can tell, it never should have worked, so I am - # commenting it out. --WFS - # def testSecondElementContiguous(self): - # "Test Fortran matrix initialized from reshaped default array" - # print(self.typeStr, "... ", end="", file=sys.stderr) - # second = Fortran.__dict__[self.typeStr + "SecondElement"] - # matrix = np.arange(9).reshape(3, 3).astype(self.typeCode) - # self.assertEquals(second(matrix), 3) - # Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap def testSecondElementFortran(self): "Test Fortran matrix initialized from reshaped NumPy fortranarray" diff --git a/tools/win32build/misc/x86analysis.py b/tools/win32build/misc/x86analysis.py index 0b558640628c..870e2c98035d 100644 --- a/tools/win32build/misc/x86analysis.py +++ b/tools/win32build/misc/x86analysis.py @@ -132,8 +132,6 @@ def cntset(seq): return cnt def main(): - #parser = optparse.OptionParser() - #parser.add_option("-f", "--filename args = sys.argv[1:] filename = args[0] analyse(filename) @@ -146,11 +144,6 @@ def analyse(filename): sse = has_sse(inst) sse2 = has_sse2(inst) sse3 = has_sse3(inst) - #mmx = has_mmx(inst) - #ppro = has_ppro(inst) - #print(sse) - #print(sse2) - #print(sse3) print("SSE3 inst %d" % cntset(sse3)) print("SSE2 inst %d" % cntset(sse2)) print("SSE inst %d" % cntset(sse)) @@ -158,5 +151,3 @@ def analyse(filename): if __name__ == '__main__': main() - #filename = "/usr/lib/sse2/libatlas.a" - ##filename = "/usr/lib/sse2/libcblas.a" From c4156cfbe9c22ab99473346b7757d2b54b46baa3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Dec 2015 15:36:58 -0700 Subject: [PATCH 260/496] MAINT: Use temppath in test_not_closing_opened_fid. The test is in numpy/lib/tests/test_io.py. This commit is intended as a demonstration of using temppath. 
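A sketch of the contrast being demonstrated (editorial example with a made-up
payload; the real test body follows in the diff):

    import os
    from tempfile import mkstemp
    from numpy.testing import temppath

    # Before: manual bookkeeping around mkstemp.
    fd, tmp = mkstemp(suffix='.npz')
    os.close(fd)
    try:
        with open(tmp, 'wb') as fp:
            fp.write(b'payload')
    finally:
        os.remove(tmp)

    # After: temppath owns creation and cleanup.
    with temppath(suffix='.npz') as tmp:
        with open(tmp, 'wb') as fp:
            fp.write(b'payload')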
--- numpy/lib/tests/test_io.py | 33 +++++++++++-------------------- numpy/testing/tests/test_utils.py | 8 ++++++-- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index bffc5c63e066..45ee0a477889 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -19,7 +19,7 @@ from numpy.testing import ( TestCase, run_module_suite, assert_warns, assert_, assert_raises_regex, assert_raises, assert_allclose, - assert_array_equal, + assert_array_equal,temppath ) from numpy.testing.utils import tempdir @@ -259,26 +259,17 @@ def writer(error_list): def test_not_closing_opened_fid(self): # Test that issue #2178 is fixed: # verify could seek on 'loaded' file - - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - try: - fp = open(tmp, 'wb') - np.savez(fp, data='LOVELY LOAD') - fp.close() - - fp = open(tmp, 'rb', 10000) - fp.seek(0) - assert_(not fp.closed) - np.load(fp)['data'] - # fp must not get closed by .load - assert_(not fp.closed) - fp.seek(0) - assert_(not fp.closed) - - finally: - fp.close() - os.remove(tmp) + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + assert_(not fp.closed) + np.load(fp)['data'] + # fp must not get closed by .load + assert_(not fp.closed) + fp.seek(0) + assert_(not fp.closed) def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index b558270b3b14..23bd491bc1e8 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -788,11 +788,13 @@ def test_tempdir(): pass assert_(not os.path.isdir(tdir)) + raised = False try: with tempdir() as tdir: raise ValueError() except ValueError: - pass + raised = True + assert_(raised) assert_(not os.path.isdir(tdir)) @@ -803,11 +805,13 @@ def test_temppath(): pass assert_(not os.path.isfile(fpath)) + raised = False try: with temppath() as fpath: raise ValueError() except ValueError: - pass + raised = True + assert_(raised) assert_(not os.path.isfile(fpath)) From 68bcaa13904f6e9efaef9ec3ae27797f648ee80c Mon Sep 17 00:00:00 2001 From: Ian Henriksen Date: Tue, 22 Dec 2015 13:22:18 -0700 Subject: [PATCH 261/496] TST: Add a 32 bit Python 3 build to the appveyor build matrix. --- appveyor.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 026db34ea75c..68c3f279e72c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -4,10 +4,13 @@ clone_depth: 1 os: Visual Studio 2015 environment: - PYTHON_ARCH: "x86_64" matrix: - PY_MAJOR_VER: 2 + PYTHON_ARCH: "x86_64" - PY_MAJOR_VER: 3 + PYTHON_ARCH: "x86_64" + - PY_MAJOR_VER: 3 + PYTHON_ARCH: "x86" matrix: #fast_finish: true From 33140978603936fe2c93555e6badce55a841d432 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 21 Dec 2015 19:12:32 -0800 Subject: [PATCH 262/496] DOC: Fix poly_val description for 'x' input Closes gh-6849. --- numpy/lib/polynomial.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index a5d3f5f5f4b8..189e59154dc1 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -61,7 +61,7 @@ def poly(seq_of_zeros): See Also -------- - polyval : Evaluate a polynomial at a point. + polyval : Compute polynomial values. roots : Return the roots of a polynomial. polyfit : Least squares polynomial fit. 
poly1d : A one-dimensional polynomial class.
 
@@ -182,7 +182,7 @@ def roots(p):
     --------
     poly : Find the coefficients of a polynomial with a given sequence
            of roots.
-    polyval : Evaluate a polynomial at a point.
+    polyval : Compute polynomial values.
     polyfit : Least squares polynomial fit.
     poly1d : A one-dimensional polynomial class.
 
@@ -466,7 +466,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
 
     See Also
     --------
-    polyval : Computes polynomial values.
+    polyval : Compute polynomial values.
     linalg.lstsq : Computes a least-squares fit.
     scipy.interpolate.UnivariateSpline : Computes spline fits.
 
@@ -631,7 +631,7 @@ def polyval(p, x):
         to zero) from highest degree to the constant term, or an
         instance of poly1d.
     x : array_like or poly1d object
-        A number, a 1D array of numbers, or an instance of poly1d, "at"
+        A number, an array of numbers, or an instance of poly1d, at
         which to evaluate `p`.
 
     Returns

From f1e4ad4bc086dd8cd234054e6c28ea5dc8fb7725 Mon Sep 17 00:00:00 2001
From: Ryosuke Okuta
Date: Wed, 23 Dec 2015 23:59:15 +0900
Subject: [PATCH 263/496] Fix version number in the document

This PR fixes the version number of the format specification.
---
 doc/neps/npy-format.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/neps/npy-format.rst b/doc/neps/npy-format.rst
index bf88c3feea9e..3f12e1bf1e3a 100644
--- a/doc/neps/npy-format.rst
+++ b/doc/neps/npy-format.rst
@@ -199,7 +199,7 @@ bytes of the array. Consumers can figure out the number of bytes by
 multiplying the number of elements given by the shape (noting that
 shape=() means there is 1 element) by dtype.itemsize.
 
-Format Specification: Version 1.0
+Format Specification: Version 2.0
 ---------------------------------
 
 The version 1.0 format only allowed the array header to have a

From e5d61d6f946ef75b822d937b801a02f64736e53f Mon Sep 17 00:00:00 2001
From: Daniel
Date: Wed, 23 Dec 2015 18:45:38 +0100
Subject: [PATCH 264/496] typo corrected.

---
 numpy/doc/structured_arrays.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index fe17c133ebae..1135c1395cc7 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -27,7 +27,7 @@
 Conveniently, one can access any field of the array by indexing using the
 string that names that field. ::
 
- >>> y = x['foo']
+ >>> y = x['bar']
  >>> y
  array([ 2.,  3.], dtype=float32)
  >>> y[:] = 2*y

From 29a33301d7c90fb22e8ee6533195b0a7c203de91 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 23 Dec 2015 11:25:27 -0700
Subject: [PATCH 265/496] BUG: Fix tempfile failures on windows.

Temporary files on Windows cannot be held open by two file handles at
once.
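
For illustration, a sketch of the failure mode (not part of this
patch): with NamedTemporaryFile the file cannot be reopened by name on
Windows while the first handle is still open, so the tests below are
rewritten to write, close, and only then reopen their temporary files.

```
import tempfile

f = tempfile.NamedTemporaryFile(mode='wt')
f.write('1.0\n')
f.flush()
try:
    # a second handle: works on POSIX, fails on Windows
    with open(f.name) as g:
        print(g.read())
except (IOError, OSError):
    print('cannot reopen an open temporary file on this platform')
finally:
    f.close()    # closing also deletes the temporary file
```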
--- numpy/core/tests/test_longdouble.py | 75 +++++++++++------------------ 1 file changed, 28 insertions(+), 47 deletions(-) diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py index fcc79ecbc0ad..1c561a48f50e 100644 --- a/numpy/core/tests/test_longdouble.py +++ b/numpy/core/tests/test_longdouble.py @@ -1,12 +1,11 @@ from __future__ import division, absolute_import, print_function import locale -from tempfile import NamedTemporaryFile import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, dec, assert_raises, - assert_array_equal, TestCase + assert_array_equal, TestCase, temppath, ) from numpy.compat import sixu from test_print import in_foreign_locale @@ -109,66 +108,48 @@ def test_fromstring_missing(): class FileBased(TestCase): - def setUp(self): - self.o = 1 + np.finfo(np.longdouble).eps - self.f = NamedTemporaryFile(mode="wt") - def tearDown(self): - self.f.close() - del self.f + ldbl = 1 + np.finfo(np.longdouble).eps + tgt = np.array([ldbl]*5) + out = ''.join([repr(t) + '\n' for t in tgt]) def test_fromfile_bogus(self): - self.f.write("1. 2. 3. flop 4.\n") - self.f.flush() - F = open(self.f.name, "rt") - try: - assert_equal(np.fromfile(F, dtype=float, sep=" "), - np.array([1., 2., 3.])) - finally: - F.close() + with temppath() as path: + with open(path, 'wt') as f: + f.write("1. 2. 3. flop 4.\n") + res = np.fromfile(path, dtype=float, sep=" ") + assert_equal(res, np.array([1., 2., 3.])) @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l") def test_fromfile(self): - for i in range(5): - self.f.write(repr(self.o) + "\n") - self.f.flush() - a = np.array([self.o]*5) - F = open(self.f.name, "rt") - b = np.fromfile(F, - dtype=np.longdouble, - sep="\n") - F.close() - F = open(self.f.name, "rt") - s = F.read() - F.close() - assert_equal(b, a, err_msg="decoded %s as %s" % (repr(s), repr(b))) + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l") def test_genfromtxt(self): - for i in range(5): - self.f.write(repr(self.o) + "\n") - self.f.flush() - a = np.array([self.o]*5) - assert_equal(np.genfromtxt(self.f.name, dtype=np.longdouble), a) + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l") def test_loadtxt(self): - for i in range(5): - self.f.write(repr(self.o) + "\n") - self.f.flush() - a = np.array([self.o]*5) - assert_equal(np.loadtxt(self.f.name, dtype=np.longdouble), a) + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) @dec.knownfailureif(string_to_longdouble_inaccurate, "Need strtold_l") def test_tofile_roundtrip(self): - a = np.array([self.o]*3) - a.tofile(self.f.name, sep=" ") - F = open(self.f.name, "rt") - try: - assert_equal(np.fromfile(F, dtype=np.longdouble, sep=" "), - a) - finally: - F.close() + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) @in_foreign_locale From f9976c82d33431693b1a1af30549cdb1d7bee4ad Mon Sep 17 00:00:00 2001 From: Vincent Legoll Date: Sat, 26 Dec 2015 10:13:01 +0100 Subject: [PATCH 266/496] Fix carriage return inside commented 
python code

This looks more conventional
---
 doc/source/reference/arrays.ndarray.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 14bac443dc1f..0f5fb92d750d 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -45,8 +45,8 @@ objects implementing the :class:`buffer` or :ref:`array
 
 The array can be indexed using Python container-like syntax:
 
-  >>> x[1,2] # i.e., the element of x in the *second* row, *third*
-  column, namely, 6.
+  >>> # The element of x in the *second* row, *third* column, namely, 6.
+  >>> x[1, 2]
 
 For example :ref:`slicing ` can produce views of the array:
 
From 1301507a4a7679a2aaa80b82f4b99af9b84bfa85 Mon Sep 17 00:00:00 2001
From: anatoly techtonik
Date: Sat, 26 Dec 2015 13:50:41 +0300
Subject: [PATCH 267/496] Update and rename README.txt to README.md

Add Travis build status to the main GitHub page; prettify the markup.
---
 README.txt => README.md | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
 rename README.txt => README.md (52%)

diff --git a/README.txt b/README.md
similarity index 52%
rename from README.txt
rename to README.md
index a20163a30af7..e63d2718e184 100644
--- a/README.txt
+++ b/README.md
@@ -1,22 +1,24 @@
+[![Travis](https://img.shields.io/travis/numpy/numpy.svg)](https://travis-ci.org/numpy/numpy)
+
 NumPy is the fundamental package needed for scientific computing with Python.
 This package contains:
 
-  * a powerful N-dimensional array object
-  * sophisticated (broadcasting) functions
-  * tools for integrating C/C++ and Fortran code
-  * useful linear algebra, Fourier transform, and random number capabilities.
+ * a powerful N-dimensional array object
+ * sophisticated (broadcasting) functions
+ * tools for integrating C/C++ and Fortran code
+ * useful linear algebra, Fourier transform, and random number capabilities.
 
 It derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by numarray and can be used to replace numarray.
 
 More information can be found at the website:
 
-http://www.numpy.org
+* http://www.numpy.org
 
 After installation, tests can be run with:
 
-python -c 'import numpy; numpy.test()'
+    python -c 'import numpy; numpy.test()'
 
 The most current development version is always available from our git repository:
 
-http://github.com/numpy/numpy
+* http://github.com/numpy/numpy

From 555787a5b6a0ec4e27ce05a2c96d97b2aa48cef7 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Thu, 24 Dec 2015 21:04:13 -0700
Subject: [PATCH 268/496] MAINT: Simplify some tests using temppath context
 manager.

This replaces code of the pattern

```
fd, name = tempfile.mkstemp(...)
os.close(fd)
try:
    do stuff with name
finally:
    os.remove(name)
```

with

```
with temppath() as name:
    do stuff with name
```

A few more complicated cases are also handled. There remains some
particularly gnarly code that could probably be refactored to use
temppath, but that is a more demanding project.
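
For reference, a helper with these semantics fits in a few lines. This
is a minimal sketch assuming mkstemp-based creation; the actual
numpy.testing implementation may differ in details.

```
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def temppath(*args, **kwargs):
    # create the file, then close our descriptor so that the caller
    # can reopen the path by name (required on Windows)
    fd, path = tempfile.mkstemp(*args, **kwargs)
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)    # clean up even if the body raised
```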
--- numpy/distutils/tests/test_npy_pkg_config.py | 54 +++++------- numpy/f2py/tests/util.py | 32 +++----- numpy/lib/tests/test_io.py | 86 +++++++------------- numpy/ma/tests/test_mrecords.py | 16 ++-- 4 files changed, 70 insertions(+), 118 deletions(-) diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py index 9a72842704c7..bdef47167b99 100644 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ b/numpy/distutils/tests/test_npy_pkg_config.py @@ -1,10 +1,9 @@ from __future__ import division, absolute_import, print_function import os -from tempfile import mkstemp from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import TestCase, run_module_suite +from numpy.testing import TestCase, run_module_suite, temppath simple = """\ [meta] @@ -39,41 +38,30 @@ class TestLibraryInfo(TestCase): def test_simple(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple.encode('ascii')) - finally: - os.close(fd) - + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] out = read_config(pkg) - self.assertTrue(out.cflags() == simple_d['cflags']) - self.assertTrue(out.libs() == simple_d['libflags']) - self.assertTrue(out.name == simple_d['name']) - self.assertTrue(out.version == simple_d['version']) - finally: - os.remove(filename) - def test_simple_variable(self): - fd, filename = mkstemp('foo.ini') - try: - pkg = os.path.splitext(filename)[0] - try: - os.write(fd, simple_variable.encode('ascii')) - finally: - os.close(fd) + self.assertTrue(out.cflags() == simple_d['cflags']) + self.assertTrue(out.libs() == simple_d['libflags']) + self.assertTrue(out.name == simple_d['name']) + self.assertTrue(out.version == simple_d['version']) + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] out = read_config(pkg) - self.assertTrue(out.cflags() == simple_variable_d['cflags']) - self.assertTrue(out.libs() == simple_variable_d['libflags']) - self.assertTrue(out.name == simple_variable_d['name']) - self.assertTrue(out.version == simple_variable_d['version']) - - out.vars['prefix'] = '/Users/david' - self.assertTrue(out.cflags() == '-I/Users/david/include') - finally: - os.remove(filename) + + self.assertTrue(out.cflags() == simple_variable_d['cflags']) + self.assertTrue(out.libs() == simple_variable_d['libflags']) + self.assertTrue(out.name == simple_variable_d['name']) + self.assertTrue(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + self.assertTrue(out.cflags() == '-I/Users/david/include') class TestParseFlags(TestCase): def test_simple_cflags(self): diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 8d06d96800ae..0c9e91568a66 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -19,7 +19,7 @@ from numpy.compat import asbytes, asstr import numpy.f2py -from numpy.testing import SkipTest +from numpy.testing import SkipTest, temppath try: from hashlib import md5 @@ -159,16 +159,11 @@ def build_code(source_code, options=[], skip=[], only=[], suffix=None, """ if suffix is None: suffix = '.f' - - fd, tmp_fn = tempfile.mkstemp(suffix=suffix) - os.write(fd, asbytes(source_code)) - os.close(fd) - - try: - return build_module([tmp_fn], options=options, skip=skip, only=only, + with temppath(suffix=suffix) as path: + with open(path, 'w') as f: + 
f.write(source_code) + return build_module([path], options=options, skip=skip, only=only, module_name=module_name) - finally: - os.unlink(tmp_fn) # # Check if compilers are available at all... @@ -209,22 +204,19 @@ def configuration(parent_name='',top_path=None): """ code = code % dict(syspath=repr(sys.path)) - fd, script = tempfile.mkstemp(suffix='.py') - os.write(fd, asbytes(code)) - os.close(fd) + with temppath(suffix='.py') as script: + with open(script, 'w') as f: + f.write(code) - try: cmd = [sys.executable, script, 'config'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() - m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out) - if m: - _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), - bool(int(m.group(3)))) - finally: - os.unlink(script) + m = re.search(asbytes(r'COMPILERS:(\d+),(\d+),(\d+)'), out) + if m: + _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), + bool(int(m.group(3)))) # Finished return _compiler_status diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 45ee0a477889..32e0c32ded03 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -4,7 +4,7 @@ import gzip import os import threading -from tempfile import mkstemp, NamedTemporaryFile +from tempfile import NamedTemporaryFile import time import warnings import gc @@ -194,8 +194,7 @@ def roundtrip(self, *args, **kwargs): def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) - with tempdir(prefix="numpy_test_big_arrays_") as tmpdir: - tmp = os.path.join(tmpdir, "file.npz") + with temppath(prefix="numpy_test_big_arrays_") as tmp: np.savez(tmp, a=a) del a npfile = np.load(tmp) @@ -234,16 +233,12 @@ def test_savez_filename_clashes(self): # and savez functions in multithreaded environment def writer(error_list): - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - try: + with temppath(suffix='.npz') as tmp: arr = np.random.randn(500, 500) try: np.savez(tmp, arr=arr) except OSError as err: error_list.append(err) - finally: - os.remove(tmp) errors = [] threads = [threading.Thread(target=writer, args=(errors,)) @@ -277,13 +272,8 @@ def test_closing_fid(self): # e.g. Debian sid of 2012 Jul 05 but was reported to # trigger the failure on Ubuntu 10.04: # http://projects.scipy.org/numpy/ticket/1517#comment:2 - fd, tmp = mkstemp(suffix='.npz') - os.close(fd) - - try: - fp = open(tmp, 'wb') - np.savez(fp, data='LOVELY LOAD') - fp.close() + with temppath(suffix='.npz') as tmp: + np.savez(tmp, data='LOVELY LOAD') # We need to check if the garbage collector can properly close # numpy npz file returned by np.load when their reference count # goes to zero. Python 3 running in debug mode raises a @@ -299,16 +289,14 @@ def test_closing_fid(self): except Exception as e: msg = "Failed to load data from a file: %s" % e raise AssertionError(msg) - finally: - os.remove(tmp) def test_closing_zipfile_after_load(self): - # Check that zipfile owns file and can close it. - # This needs to pass a file name to load for the - # test. - with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir: - fd, tmp = mkstemp(suffix='.npz', dir=tmpdir) - os.close(fd) + # Check that zipfile owns file and can close it. This needs to + # pass a file name to load for the test. On windows failure will + # cause a second error will be raised when the attempt to remove + # the open file is made. 
+ prefix = 'numpy_test_closing_zipfile_after_load_' + with temppath(suffix='.npz', prefix=prefix) as tmp: np.savez(tmp, lab='place holder') data = np.load(tmp) fp = data.zip.fp @@ -416,15 +404,11 @@ def test_header_footer(self): asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) def test_file_roundtrip(self): - f, name = mkstemp() - os.close(f) - try: + with temppath() as name: a = np.array([(1, 2), (3, 4)]) np.savetxt(name, a) b = np.loadtxt(name) assert_array_equal(a, b) - finally: - os.unlink(name) def test_complex_arrays(self): ncols = 2 @@ -739,15 +723,11 @@ def test_from_complex(self): assert_equal(res, tgt) def test_universal_newline(self): - f, name = mkstemp() - os.write(f, b'1 21\r3 42\r') - os.close(f) - - try: + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') data = np.loadtxt(name) - assert_array_equal(data, [[1, 21], [3, 42]]) - finally: - os.unlink(name) + assert_array_equal(data, [[1, 21], [3, 42]]) def test_empty_field_after_tab(self): c = TextIO() @@ -1760,8 +1740,9 @@ def test_max_rows(self): assert_equal(test, control) def test_gft_using_filename(self): - # Test that we can load data from a filename as well as a file object - wanted = np.arange(6).reshape((2, 3)) + # Test that we can load data from a filename as well as a file + # object + tgt = np.arange(6).reshape((2, 3)) if sys.version_info[0] >= 3: # python 3k is known to fail for '\r' linesep = ('\n', '\r\n') @@ -1770,15 +1751,11 @@ def test_gft_using_filename(self): for sep in linesep: data = '0 1 2' + sep + '3 4 5' - f, name = mkstemp() - # We can't use NamedTemporaryFile on windows, because we cannot - # reopen the file. - try: - os.write(f, asbytes(data)) - assert_array_equal(np.genfromtxt(name), wanted) - finally: - os.close(f) - os.unlink(name) + with temppath() as name: + with open(name, 'w') as f: + f.write(data) + res = np.genfromtxt(name) + assert_array_equal(res, tgt) def test_gft_using_generator(self): # gft doesn't work with unicode. @@ -1838,16 +1815,15 @@ def test_gzip_loadtxt(): g = gzip.GzipFile(fileobj=s, mode='w') g.write(b'1 2 3\n') g.close() + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() - f, name = mkstemp(suffix='.gz') - try: - os.write(f, s.read()) - s.close() - assert_array_equal(np.loadtxt(name), [1, 2, 3]) - finally: - os.close(f) - os.unlink(name) + assert_array_equal(res, [1, 2, 3]) def test_gzip_loadtxt_from_string(): diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 84b68ba0f3e6..574c652710cc 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -15,7 +15,7 @@ from numpy import recarray from numpy.compat import asbytes, asbytes_nested from numpy.ma import masked, nomask -from numpy.testing import TestCase, run_module_suite +from numpy.testing import TestCase, run_module_suite, temppath from numpy.core.records import ( fromrecords as recfromrecords, fromarrays as recfromarrays ) @@ -476,7 +476,7 @@ def test_fromrecords_wmask(self): def test_fromtextfile(self): # Tests reading from a text file. 
-        fcontent = asbytes(
+        fcontent = (
 """#
 'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
 'strings',1,1.0,'mixed column',,1
@@ -484,14 +484,10 @@ def test_fromtextfile(self):
 'strings',3,3.0E5,3,,1
 'strings',4,-1e-10,,,1
 """)
-        import os
-        import tempfile
-        (tmp_fd, tmp_fl) = tempfile.mkstemp()
-        os.write(tmp_fd, fcontent)
-        os.close(tmp_fd)
-        mrectxt = fromtextfile(tmp_fl, delimitor=',', varnames='ABCDEFG')
-        os.remove(tmp_fl)
-
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(fcontent)
+            mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
         self.assertTrue(isinstance(mrectxt, MaskedRecords))
         assert_equal(mrectxt.F, [1, 1, 1, 1])
         assert_equal(mrectxt.E._mask, [1, 1, 1, 1])

From e89d9bb2e16c1746ce234b81aff4731277b31ddd Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Sat, 26 Dec 2015 15:58:07 -0700
Subject: [PATCH 269/496] BUG: ignore exceptions in
 numpy/tests/test_scripts.py/test_f2py

The test was checking whether the f2py script was installed as either
of two names, but was only catching OSError, so the second check was
skipped if the first failed for another reason. This caused the
runtests.py script to fail, as it does not install the script as f2py
but rather with the python version appended.
---
 numpy/tests/test_scripts.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 74efd2650381..94587e80795d 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -14,7 +14,7 @@
 from numpy.testing.decorators import skipif
 from numpy.testing import assert_
 
-skipif_inplace = skipif(isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')))
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
 
 def run_command(cmd, check_code=True):
     """ Run command sequence `cmd` returning exit code, stdout, stderr
@@ -58,7 +58,7 @@ def run_command(cmd, check_code=True):
     return proc.returncode, stdout, stderr
 
 
-@skipif_inplace
+@skipif(is_inplace)
 def test_f2py():
     # test that we can run f2py script
     if sys.platform == 'win32':
@@ -77,6 +77,7 @@ def test_f2py():
             assert_equal(stdout.strip(), asbytes('2'))
             success = True
             break
-        except OSError:
+        except:
             pass
-    assert_(success, "Warning: neither %s nor %s found in path" % f2py_cmds)
+    msg = "Warning: neither %s nor %s found in path" % f2py_cmds
+    assert_(success, msg)

From 7cebe883b612efe2a3bf51b83917ea6cdb8f679f Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 28 Dec 2015 12:46:45 -0500
Subject: [PATCH 270/496] FIX: Fix MKL for Linux

---
 numpy/distutils/fcompiler/intel.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 2dd08e744a07..ae8bfbed74b9 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -56,7 +56,7 @@ def get_flags(self):
         return ['-fPIC']
 
     def get_flags_opt(self):
-        return ['-xhost -openmp -fp-model strict']
+        return ['-xhost -openmp -fp-model strict -O1']
 
     def get_flags_arch(self):
         return []
@@ -120,7 +120,7 @@ def get_flags(self):
         return ['-fPIC']
 
     def get_flags_opt(self):
-        return ['-openmp -fp-model strict']
+        return ['-openmp -fp-model strict -O1']
 
     def get_flags_arch(self):
         return ['-xSSE4.2']

From 66bd83dd9eb11d6127d222a5cfa79b4ac640fadf Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 28 Dec 2015 13:38:30 -0500
Subject: [PATCH 271/496] DOC: Comment all O1s [ci skip]

---
 numpy/distutils/fcompiler/intel.py | 4 ++--
 1 file changed, 2
insertions(+), 2 deletions(-) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index ae8bfbed74b9..c4f15a073a2b 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -55,7 +55,7 @@ def get_flags_free(self): def get_flags(self): return ['-fPIC'] - def get_flags_opt(self): + def get_flags_opt(self): # Scipy test failures with -O2 return ['-xhost -openmp -fp-model strict -O1'] def get_flags_arch(self): @@ -119,7 +119,7 @@ class IntelEM64TFCompiler(IntelFCompiler): def get_flags(self): return ['-fPIC'] - def get_flags_opt(self): + def get_flags_opt(self): # Scipy test failures with -O2 return ['-openmp -fp-model strict -O1'] def get_flags_arch(self): From d7a68afd3dddb65bd8edad0d484e1333daafa631 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Wed, 23 Dec 2015 21:45:38 -0800 Subject: [PATCH 272/496] ENH: Allow random_integers to include the maximum np.iinfo('l').max Redistributes the code between the randint and random_integers methods so that we can generate integers up to and including np.iinfo('l').max with random_integers, which previously would have caused an OverflowError. --- numpy/random/mtrand/mtrand.pyx | 66 ++++++++++++++++--------------- numpy/random/tests/test_random.py | 11 ++++++ 2 files changed, 46 insertions(+), 31 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index e12c7669d13c..5120857d04b2 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -936,37 +936,14 @@ cdef class RandomState: [3, 2, 2, 0]]) """ - cdef long lo, hi, rv - cdef unsigned long diff - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i + if high is not None and low >= high: + raise ValueError("low >= high") if high is None: - lo = 0 - hi = low - else: - lo = low - hi = high - - if lo >= hi : - raise ValueError("low >= high") + high = low + low = 0 - diff = hi - lo - 1UL - if size is None: - with self.lock: - rv = lo + rk_interval(diff, self. internal_state) - return rv - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = PyArray_DATA(array) - with self.lock, nogil: - for i from 0 <= i < length: - rv = lo + rk_interval(diff, self. internal_state) - array_data[i] = rv - return array + return self.random_integers(low, high - 1, size) def bytes(self, npy_intp length): """ @@ -1449,10 +1426,37 @@ cdef class RandomState: >>> plt.show() """ + if high is not None and low > high: + raise ValueError("low > high") + + cdef long lo, hi, rv + cdef unsigned long diff + cdef long *array_data + cdef ndarray array "arrayObject" + cdef npy_intp length + cdef npy_intp i + if high is None: - high = low - low = 1 - return self.randint(low, high+1, size) + lo = 1 + hi = low + else: + lo = low + hi = high + + diff = hi - lo + if size is None: + with self.lock: + rv = lo + rk_interval(diff, self. internal_state) + return rv + else: + array = np.empty(size, int) + length = PyArray_SIZE(array) + array_data = PyArray_DATA(array) + with self.lock, nogil: + for i from 0 <= i < length: + rv = lo + rk_interval(diff, self. 
internal_state) + array_data[i] = rv + return array # Complicated, continuous distributions: def standard_normal(self, size=None): diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 193844030efd..0ce341eadb6b 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -167,6 +167,17 @@ def test_random_integers(self): [-48, -66]]) np.testing.assert_array_equal(actual, desired) + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attemping + # to generate this integer. + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + desired = np.iinfo('l').max + np.testing.assert_equal(actual, desired) + def test_random_sample(self): np.random.seed(self.seed) actual = np.random.random_sample((3, 2)) From 4f7a60538eb031092ab84816afa56b51ee84f8bd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 23 Dec 2015 13:53:49 -0700 Subject: [PATCH 273/496] TST: Only use 32 bit Python 2.7 to test numpy on appveyor. This avoids a test error on appveyor that might be fixed if Visual C++ Compiler for Python 2.7 is used. Numpy itself builds and runs on appveyor with both 32 and 64 bit python 2.7, so this is only a test issue. Another option might be to disable the test, but it may be useful to some on other platforms. Closes #6882. --- appveyor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 68c3f279e72c..59389462d2b6 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,7 +6,7 @@ os: Visual Studio 2015 environment: matrix: - PY_MAJOR_VER: 2 - PYTHON_ARCH: "x86_64" + PYTHON_ARCH: "x86" - PY_MAJOR_VER: 3 PYTHON_ARCH: "x86_64" - PY_MAJOR_VER: 3 From 237ab4398ac880be30fc262e7bf6163e9baff921 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Tue, 29 Dec 2015 17:52:41 -0800 Subject: [PATCH 274/496] [FIX] fix NoseTester's raise_warning default Our test-runner's raise_warning mode traditionally has varied depending on whether we have a development or release version of numpy: for development versions we raise on warnings, and for release versions we don't. This is all very sensible... *if* you're running numpy's test suite. But our test-runner is also used by other packages like scipy, and it doesn't make sense for scipy's raise_warning mode to vary depending on whether *numpy* is a development or release version. (It should vary depending on whether the scipy-under-test is a development or release version.) So this commit moves the numpy-version-dependent raise_warning logic out of the generic NoseTester class and into numpy-specific code. (See scipy/scipy#5609 for more discussion.) --- numpy/__init__.py | 8 ++++++-- numpy/testing/nosetester.py | 25 +++++++++++-------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index d4ef54d8390e..5fda535f2a46 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -185,8 +185,12 @@ def pkgload(*packages, **options): pkgload.__doc__ = PackageLoader.__call__.__doc__ from .testing import Tester - test = Tester().test - bench = Tester().bench + if ".dev0" in __version__: + test = Tester(raise_warnings="develop").test + bench = Tester(raise_warnings="develop").bench + else: + test = Tester(raise_warnings="release").test + bench = Tester(raise_warnings="release").bench from . 
import core from .core import * diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 551e630ec2cd..e65416224d26 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -158,15 +158,7 @@ class NoseTester(object): - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` - "release" : equals ``()``, don't raise on any warnings. - See Notes for more details. - - Notes - ----- - The default for `raise_warnings` is - ``(DeprecationWarning, RuntimeWarning)`` for development versions of NumPy, - and ``()`` for released versions. The purpose of this switching behavior - is to catch as many warnings as possible during development, but not give - problems for packaging of released versions. + Default is "release". """ # Stuff to exclude from tests. These are from numpy.distutils @@ -176,11 +168,16 @@ class NoseTester(object): 'pyrex_ext', 'swig_ext'] - def __init__(self, package=None, raise_warnings=None): - if raise_warnings is None and ( - not hasattr(np, '__version__') or '.dev0' in np.__version__): - raise_warnings = "develop" - elif raise_warnings is None: + def __init__(self, package=None, raise_warnings="release"): + # Back-compat: 'None' used to mean either "release" or "develop" + # depending on whether this was a release or develop version of + # numpy. Those semantics were fine for testing numpy, but not so + # helpful for downstream projects like scipy that use + # numpy.testing. (They want to set this based on whether *they* are a + # release or develop version, not whether numpy is.) So we continue to + # accept 'None' for back-compat, but it's now just an alias for the + # default "release". + if raise_warnings is None: raise_warnings = "release" package_name = None From eebb304a0c91c9f52bc883a352b2520e3ca7c88e Mon Sep 17 00:00:00 2001 From: gfyoung Date: Tue, 29 Dec 2015 23:51:41 -0800 Subject: [PATCH 275/496] MAINT: Cleaned up unused variables and spelling mistakes in np.random modules --- numpy/random/mtrand/mtrand.pyx | 5 ----- numpy/random/tests/test_random.py | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 5120857d04b2..655b708b32fc 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -232,7 +232,6 @@ cdef object cont2_array(rk_state *state, rk_cont2 func, object size, cdef double *oa_data cdef double *ob_data cdef ndarray array "arrayObject" - cdef npy_intp length cdef npy_intp i cdef broadcast multi @@ -288,7 +287,6 @@ cdef object cont3_array(rk_state *state, rk_cont3 func, object size, cdef double *ob_data cdef double *oc_data cdef ndarray array "arrayObject" - cdef npy_intp length cdef npy_intp i cdef broadcast multi @@ -358,7 +356,6 @@ cdef object discnp_array(rk_state *state, rk_discnp func, object size, ndarray on, ndarray op, object lock): cdef long *array_data cdef ndarray array "arrayObject" - cdef npy_intp length cdef npy_intp i cdef double *op_data cdef long *on_data @@ -412,7 +409,6 @@ cdef object discdd_array(rk_state *state, rk_discdd func, object size, ndarray on, ndarray op, object lock): cdef long *array_data cdef ndarray array "arrayObject" - cdef npy_intp length cdef npy_intp i cdef double *op_data cdef double *on_data @@ -469,7 +465,6 @@ cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, cdef long *om_data cdef long *oN_data cdef ndarray array "arrayObject" - cdef npy_intp length cdef npy_intp i cdef broadcast multi diff --git a/numpy/random/tests/test_random.py 
b/numpy/random/tests/test_random.py index 0ce341eadb6b..c3aa43f0eafa 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -171,7 +171,7 @@ def test_random_integers_max_int(self): # Tests whether random_integers can generate the # maximum allowed Python int that can be converted # into a C long. Previous implementations of this - # method have thrown an OverflowError when attemping + # method have thrown an OverflowError when attempting # to generate this integer. actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) From a61ddd3812cc95f9c9e6eeac7f8bcfb92130f978 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Wed, 30 Dec 2015 03:01:03 -0800 Subject: [PATCH 276/496] [TST] Refactor new raise_warnings logic for subpackage test suites --- numpy/__init__.py | 10 ++++------ numpy/core/__init__.py | 6 +++--- numpy/distutils/__init__.py | 6 +++--- numpy/f2py/__init__.py | 6 +++--- numpy/fft/__init__.py | 6 +++--- numpy/lib/__init__.py | 6 +++--- numpy/linalg/__init__.py | 6 +++--- numpy/ma/__init__.py | 6 +++--- numpy/matrixlib/__init__.py | 6 +++--- numpy/polynomial/__init__.py | 6 +++--- numpy/random/__init__.py | 6 +++--- numpy/testing/__init__.py | 2 +- numpy/testing/nosetester.py | 17 +++++++++++++++-- 13 files changed, 50 insertions(+), 39 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 5fda535f2a46..0fcd5097d2d1 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -184,13 +184,11 @@ def pkgload(*packages, **options): pkgload.__doc__ = PackageLoader.__call__.__doc__ + # We don't actually use this ourselves anymore, but I'm not 100% sure that + # no-one else in the world is using it (though I hope not) from .testing import Tester - if ".dev0" in __version__: - test = Tester(raise_warnings="develop").test - bench = Tester(raise_warnings="develop").bench - else: - test = Tester(raise_warnings="release").test - bench = Tester(raise_warnings="release").bench + test = testing.nosetester._numpy_tester().test + bench = testing.nosetester._numpy_tester().bench from . 
import core from .core import * diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 16dcbe0b15ec..e8719ca75fda 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -55,9 +55,9 @@ __all__ += shape_base.__all__ -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench # Make it possible so that ufuncs can be pickled # Here are the loading and unloading functions diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py index 9297185ef66e..766439d92546 100644 --- a/numpy/distutils/__init__.py +++ b/numpy/distutils/__init__.py @@ -18,6 +18,6 @@ _INSTALLED = False if _INSTALLED: - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench + from numpy.testing.nosetester import _numpy_tester + test = _numpy_tester().test + bench = _numpy_tester().bench diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index ef92114edd63..50566ccc237a 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -62,6 +62,6 @@ def compile(source, f.close() return status -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 96809a94f847..a1f9e90e0ae0 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -6,6 +6,6 @@ from .fftpack import * from .helper import * -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 0606dfbbd4a3..1d65db55e18e 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -41,6 +41,6 @@ __all__ += financial.__all__ __all__ += nanfunctions.__all__ -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index bc2a1ff6ce9f..69445f541db7 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -50,6 +50,6 @@ from .linalg import * -from numpy.testing import Tester -test = Tester().test -bench = Tester().test +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py index 05b641dff69e..af3468b01c58 100644 --- a/numpy/ma/__init__.py +++ b/numpy/ma/__init__.py @@ -51,6 +51,6 @@ __all__ += core.__all__ __all__ += extras.__all__ -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py index d20696154ab2..b2b76837a854 100644 --- a/numpy/matrixlib/__init__.py +++ b/numpy/matrixlib/__init__.py @@ -7,6 +7,6 @@ __all__ = defmatrix.__all__ -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 1200d1c8dd5a..82c350e9b2a1 100644 --- 
a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -22,6 +22,6 @@ from .hermite_e import HermiteE from .laguerre import Laguerre -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 388267c97532..6c7d3140fec3 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -117,6 +117,6 @@ def __RandomState_ctor(): """ return RandomState(seed=0) -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench +from numpy.testing.nosetester import _numpy_tester +test = _numpy_tester().test +bench = _numpy_tester().bench diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py index dcc02ad571b1..625fdecdc95a 100644 --- a/numpy/testing/__init__.py +++ b/numpy/testing/__init__.py @@ -12,4 +12,4 @@ from . import decorators as dec from .nosetester import run_module_suite, NoseTester as Tester from .utils import * -test = Tester().test +test = nosetester._numpy_tester().test diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index e65416224d26..6cf7defab1d4 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -159,6 +159,12 @@ class NoseTester(object): - "release" : equals ``()``, don't raise on any warnings. Default is "release". + depth : int, optional + If `package` is None, then this can be used to initialize from the + module of the caller of (the caller of (...)) the code that + initializes `NoseTester`. Default of 0 means the module of the + immediate caller; higher values are useful for utility routines that + want to initialize `NoseTester` objects on behalf of other code. """ # Stuff to exclude from tests. These are from numpy.distutils @@ -168,7 +174,7 @@ class NoseTester(object): 'pyrex_ext', 'swig_ext'] - def __init__(self, package=None, raise_warnings="release"): + def __init__(self, package=None, raise_warnings="release", depth=0): # Back-compat: 'None' used to mean either "release" or "develop" # depending on whether this was a release or develop version of # numpy. 
Those semantics were fine for testing numpy, but not so @@ -182,7 +188,7 @@ def __init__(self, package=None, raise_warnings="release"): package_name = None if package is None: - f = sys._getframe(1) + f = sys._getframe(1 + depth) package_path = f.f_locals.get('__file__', None) if package_path is None: raise AssertionError @@ -511,3 +517,10 @@ def bench(self, label='fast', verbose=1, extra_argv=None): add_plugins = [Unplugger('doctest')] return nose.run(argv=argv, addplugins=add_plugins) + +def _numpy_tester(): + if hasattr(np, "__version__") and ".dev0" in np.__version__: + mode = "develop" + else: + mode = "release" + return NoseTester(raise_warnings=mode, depth=1) From fdadc1a6d7d2dd9e1dbd4261116d0f40b0911679 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Wed, 30 Dec 2015 20:47:35 -0800 Subject: [PATCH 277/496] MAINT: Cleanup and spelling fixups in ma.core tests --- numpy/ma/tests/test_core.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e0d9f072cad2..020bf1e62783 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -201,7 +201,7 @@ def test_creation_maskcreation(self): assert_(not np.may_share_memory(x.mask, y.mask)) def test_creation_with_list_of_maskedarrays(self): - # Tests creaating a masked array from alist of masked arrays. + # Tests creating a masked array from a list of masked arrays. x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) @@ -237,11 +237,6 @@ def test_maskedelement(self): self.assertTrue(str(masked) == '--') self.assertTrue(x[1] is masked) assert_equal(filled(x[1], 0), 0) - # don't know why these should raise an exception... 
- #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) - #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) - #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) - #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) def test_set_element_as_object(self): # Tests setting elements with object @@ -360,10 +355,8 @@ def test_copy(self): x1 = np.arange(5) y1 = array(x1, mask=m) - #self.assertTrue( y1._data is x1) assert_equal(y1._data.__array_interface__, x1.__array_interface__) self.assertTrue(allequal(x1, y1.data)) - #self.assertTrue( y1.mask is m) assert_equal(y1._mask.__array_interface__, m.__array_interface__) y1a = array(y1) @@ -373,12 +366,10 @@ def test_copy(self): y2 = array(x1, mask=m) self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) - #self.assertTrue( y2.mask is m) self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) self.assertTrue(y2[2] is masked) y2[2] = 9 self.assertTrue(y2[2] is not masked) - #self.assertTrue( y2.mask is not m) self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) self.assertTrue(allequal(y2.mask, 0)) @@ -1364,7 +1355,6 @@ def test_hardmask(self): xs[[1, 4]] = [10, 40] assert_equal(xh._data, [0, 10, 2, 3, 4]) assert_equal(xs._data, [0, 10, 2, 3, 40]) - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) assert_equal(xs.mask, [0, 0, 0, 1, 0]) self.assertTrue(xh._hardmask) self.assertTrue(not xs._hardmask) @@ -1372,7 +1362,6 @@ def test_hardmask(self): xs[1:4] = [10, 20, 30] assert_equal(xh._data, [0, 10, 20, 3, 4]) assert_equal(xs._data, [0, 10, 20, 30, 40]) - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) assert_equal(xs.mask, nomask) xh[0] = masked xs[0] = masked @@ -1416,7 +1405,6 @@ def test_hardmask_again(self): m = make_mask(n) xh = array(d, mask=m, hard_mask=True) xh[4:5] = 999 - #assert_equal(xh.mask.ctypes._data, m.ctypes._data) xh[0:1] = 999 assert_equal(xh._data, [999, 1, 2, 3, 4]) @@ -1835,9 +1823,7 @@ def test_testUfuncRegression(self): 'arccosh', 'arctanh', 'absolute', 'fabs', 'negative', - # 'nonzero', 'around', 'floor', 'ceil', - # 'sometrue', 'alltrue', 'logical_not', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'floor_divide', @@ -2060,15 +2046,12 @@ def test_inplace_division_misc(self): assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) xm = xm.copy() xm /= ym assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) - #assert_equal(xm._data, - # [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) def test_datafriendly_add(self): # Test keeping data w/ (inplace) addition @@ -2497,7 +2480,7 @@ def test_allclose(self): self.assertTrue(not allclose(a, b)) b[0] = np.inf self.assertTrue(allclose(a, b)) - # Test all close w/ masked + # Test allclose w/ masked a = masked_array(a) a[-1] = masked self.assertTrue(allclose(a, b, masked_equal=True)) @@ -2700,7 +2683,6 @@ def test_put(self): self.assertTrue(x[3] is masked) self.assertTrue(x[4] is masked) x[[1, 4]] = [10, 40] - #self.assertTrue(x.mask is not m) self.assertTrue(x[3] is masked) self.assertTrue(x[4] is not masked) assert_equal(x, [0, 10, 2, -1, 40]) @@ -3875,10 +3857,6 @@ def test_mask_or(self): # Using False as input test = mask_or(mask, False) assert_equal(test, mask) - # Using True as input. 
Won't work, but keep it for the kicks - # test = mask_or(mask, True) - # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) - # assert_equal(test, control) # Using another array w / the same dtype other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) test = mask_or(mask, other) From a547d0e40c2b1b14b4448da51018bfccd4d55edb Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 31 Dec 2015 10:06:52 +0100 Subject: [PATCH 278/496] DOC: fix broken link in user guide. Closes gh-6906. [ci skip] --- doc/source/user/building.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index c5f8fea1fa40..8acb2fa3b319 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -60,8 +60,8 @@ The NumPy build system uses ``distutils`` and ``numpy.distutils``. ``setuptools`` is only used when building via ``pip`` or with ``python setupegg.py``. Using ``virtualenv`` should work as expected. -*Note: for build instructions to do development work on NumPy itself, see -:ref:`development-environment`*. +*Note: for build instructions to do development work on NumPy itself, see* +:ref:`development-environment`. .. _parallel-builds: From a496e11c52d4dc595cfa3110fe2e0debff566ebb Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 31 Dec 2015 13:28:06 +0100 Subject: [PATCH 279/496] DOC: add note not to run tests from repo root in devguide. Closes gh-6907. [ci skip] --- doc/source/dev/development_environment.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index b09728e03bc4..0fb5a666d976 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -137,6 +137,9 @@ run the test suite with Python 3.4, use:: For more extensive info on running and writing tests, see https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt . +*Note: do not run the tests from the root directory of your numpy git repo, +that will result in strange test errors.* + Rebuilding & cleaning the workspace ----------------------------------- From f68cadd7b1c4f5c4d4d94acce63f86d279925a7a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 31 Dec 2015 13:57:13 +0100 Subject: [PATCH 280/496] DOC: update min nose version in import error message, and add note to README Addresses comment in gh-4074. --- README.md | 2 +- numpy/testing/nosetester.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e63d2718e184..6031279b044b 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ More information can be found at the website: * http://www.numpy.org -After installation, tests can be run with: +After installation, tests can be run (if ``nose`` is installed) with: python -c 'import numpy; numpy.test()' diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 6cf7defab1d4..42113676a6d0 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -57,7 +57,7 @@ def import_nose(): """ Import nose only when needed. """ fine_nose = True - minimum_nose_version = (0, 10, 0) + minimum_nose_version = (1, 0, 0) try: import nose except ImportError: From 6a871df50947c4ebda79a966fba09b5336e1e061 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 31 Dec 2015 19:02:03 -0700 Subject: [PATCH 281/496] BUG: Fix test_f2py so it runs correctly in runtests.py. 
The loop checking for command line versions can terminate early, as the
errors are not always of OSError type. In particular, runtests.py may
only store the command with the python version appended, and the check
for that is then not executed, leading to a test failure.
---
 numpy/tests/test_scripts.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 74efd2650381..434f17a4a2c5 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -77,6 +77,6 @@ def test_f2py():
             assert_equal(stdout.strip(), asbytes('2'))
             success = True
             break
-        except OSError:
+        except:
             pass
     assert_(success, "Warning: neither %s nor %s found in path" % f2py_cmds)

From 46f19615d4726244bc13d36ad5be31539f3a13e9 Mon Sep 17 00:00:00 2001
From: Tapasweni Pathak
Date: Fri, 1 Jan 2016 20:09:37 +0530
Subject: [PATCH 282/496] Updated copyright to 2016

---
 LICENSE.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/LICENSE.txt b/LICENSE.txt
index b4139af86816..9014534ab434 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2005-2015, NumPy Developers.
+Copyright (c) 2005-2016, NumPy Developers.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

From 6a61be6b67a747d3228ffc8882a03c86a378ea10 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Wed, 30 Dec 2015 10:19:31 -0700
Subject: [PATCH 283/496] ENH: Add dtype argument to random.randint.

Random ndarrays of the following types can now be generated:

* np.bool,
* np.int8, np.uint8,
* np.int16, np.uint16,
* np.int32, np.uint32,
* np.int64, np.uint64,
* np.int_ (long), np.intp

The specification is by precision rather than by C type. Hence, on some
platforms np.int64 may be a `long` instead of `long long` even if the
specified dtype is `long long` because the two may have the same
precision. The resulting type depends on which C type numpy uses for
the given precision. The byteorder specification is also ignored; the
generated arrays are always in native byte order.

The dtype of the result could be made more explicit if desired without
changing the user-visible results.
---
 numpy/core/include/numpy/npy_common.h |   3 +
 numpy/random/mtrand/mt_compat.h       |  68 +++++
 numpy/random/mtrand/mtrand.pyx        | 390 +++++++++++++++++++++++---
 numpy/random/mtrand/numpy.pxd         |  16 ++
 numpy/random/mtrand/randomkit.c       | 224 ++++++++++++++-
 numpy/random/mtrand/randomkit.h       |  41 ++-
 6 files changed, 697 insertions(+), 45 deletions(-)
 create mode 100644 numpy/random/mtrand/mt_compat.h

diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 47ef94c9283c..baf5549d970f 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -7,6 +7,9 @@
 #include 
 #endif
 
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
 /*
  * gcc does not unroll even with -O3
  * use with care, unrolling on modern cpus rarely speeds things up
diff --git a/numpy/random/mtrand/mt_compat.h b/numpy/random/mtrand/mt_compat.h
new file mode 100644
index 000000000000..ab56a553c5cf
--- /dev/null
+++ b/numpy/random/mtrand/mt_compat.h
@@ -0,0 +1,68 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting Python 2 and Python 3 in the same code base.
+ *
+ * It can be removed when Python 2.6 is dropped as PyCapsule is available
+ * in both Python 3.1+ and Python 2.7.
+ */ + +#ifndef _MT_COMPAT_H_ +#define _MT_COMPAT_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... + */ +#if PY_VERSION_HEX >= 0x03000000 + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static NPY_INLINE void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +#else + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *)) +{ + return PyCObject_FromVoidPtr(ptr, dtor); +} + +static NPY_INLINE void * +NpyCapsule_AsVoidPtr(PyObject *ptr) +{ + return PyCObject_AsVoidPtr(ptr); +} + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _COMPAT_H_ */ diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 5120857d04b2..b83b2a588c64 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -67,6 +67,17 @@ cdef extern from "randomkit.h": rk_error rk_altfill(void *buffer, size_t size, int strong, rk_state *state) nogil double rk_gauss(rk_state *state) nogil + void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt, + npy_uint64 *out, rk_state *state) nogil + void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt, + npy_uint32 *out, rk_state *state) nogil + void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt, + npy_uint16 *out, rk_state *state) nogil + void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt, + npy_uint8 *out, rk_state *state) nogil + void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt, + npy_bool *out, rk_state *state) nogil + cdef extern from "distributions.h": # do not need the GIL, but they do need a lock on the state !! */ @@ -131,6 +142,7 @@ cimport cython import numpy as np import operator import warnings + try: from threading import Lock except ImportError: @@ -574,6 +586,304 @@ def _shape_from_size(size, d): shape = tuple(size) + (d,) return shape + +# Set up dictionary of integer types and relevant functions. +# +# The dictionary is keyed by dtype(...).name and the values +# are a tuple (low, high, function), where low and high are +# the bounds of the largest half open interval `[low, high)` +# and the function is the relevant function to call for +# that precision. +# +# The functions are all the same except for changed types in +# a few places. It would be easy to template them. + +def _rand_bool(low, high, size, rngstate): + """ + _rand_bool(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_bool off, rng, buf + cdef npy_bool *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_bool(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.bool_) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_bool(off, rng, cnt, out, state) + return array + + +def _rand_int8(low, high, size, rngstate): + """ + _rand_int8(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. 
+ + """ + cdef npy_uint8 off, rng, buf + cdef npy_uint8 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint8(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.int8) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint8(off, rng, cnt, out, state) + return array + + +def _rand_int16(low, high, size, rngstate): + """ + _rand_int16(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_uint16 off, rng, buf + cdef npy_uint16 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint16(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.int16) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint16(off, rng, cnt, out, state) + return array + + +def _rand_int32(low, high, size, rngstate): + """ + _rand_int32(self, low, high, size, rngstate) + + Return random np.int32 integers between `low` and `high`, inclusive. + + Return random integers from the "discrete uniform" distribution in the + closed interval [`low`, `high`). If `high` is None (the default), + then results are from [0, `low`). On entry the arguments are presumed + to have been validated for size and order for the np.int32 type. + + Parameters + ---------- + low : int + Lowest (signed) integer to be drawn from the distribution (unless + ``high=None``, in which case this parameter is the *highest* such + integer). + high : int + If provided, the largest (signed) integer to be drawn from the + distribution (see above for behavior if ``high=None``). + size : int or tuple of ints + Output shape. If the given shape is, e.g., ``(m, n, k)``, then + ``m * n * k`` samples are drawn. Default is None, in which case a + single value is returned. + rngstate : encapsulated pointer to rk_state + The specific type depends on the python version. In Python 2 it is + a PyCObject, in Python 3 a PyCapsule object. + + Returns + ------- + out : python scalar or ndarray of np.int32 + `size`-shaped array of random integers from the appropriate + distribution, or a single such random int if `size` not provided. + + """ + cdef npy_uint32 off, rng, buf + cdef npy_uint32 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint32(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.int32) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint32(off, rng, cnt, out, state) + return array + + +def _rand_int64(low, high, size, rngstate): + """ + _rand_int64(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. 
+ + """ + cdef npy_uint64 off, rng, buf + cdef npy_uint64 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint64(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.int64) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint64(off, rng, cnt, out, state) + return array + +def _rand_uint8(low, high, size, rngstate): + """ + _rand_uint8(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_uint8 off, rng, buf + cdef npy_uint8 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint8(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.uint8) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint8(off, rng, cnt, out, state) + return array + + +def _rand_uint16(low, high, size, rngstate): + """ + _rand_uint16(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_uint16 off, rng, buf + cdef npy_uint16 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint16(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.uint16) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint16(off, rng, cnt, out, state) + return array + + +def _rand_uint32(low, high, size, rngstate): + """ + _rand_uint32(self, low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_uint32 off, rng, buf + cdef npy_uint32 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint32(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.uint32) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint32(off, rng, cnt, out, state) + return array + + +def _rand_uint64(low, high, size, rngstate): + """ + _rand_uint64(low, high, size, rngstate) + + See `_rand_int32` for documentation, only the return type changes. + + """ + cdef npy_uint64 off, rng, buf + cdef npy_uint64 *out + cdef ndarray array "arrayObject" + cdef npy_intp cnt + cdef rk_state *state = NpyCapsule_AsVoidPtr(rngstate) + + rng = (high - low) + off = (low) + if size is None: + rk_random_uint64(off, rng, 1, &buf, state) + return buf + else: + array = np.empty(size, np.uint64) + cnt = PyArray_SIZE(array) + out = PyArray_DATA(array) + with nogil: + rk_random_uint64(off, rng, cnt, out, state) + return array + +# Look up table for randint functions keyed by type name. The stored data +# is a tuple (lbnd, ubnd, func), where lbnd is the smallest value for the +# type, ubnd is one greater than the largest value, and func is the +# function to call. 
+_randint_type = { + 'bool': (0, 2, _rand_bool), + 'int8': (-2**7, 2**7, _rand_int8), + 'int16': (-2**15, 2**15, _rand_int16), + 'int32': (-2**31, 2**31, _rand_int32), + 'int64': (-2**63, 2**63, _rand_int64), + 'uint8': (0, 2**8, _rand_uint8), + 'uint16': (0, 2**16, _rand_uint16), + 'uint32': (0, 2**32, _rand_uint32), + 'uint64': (0, 2**64, _rand_uint64) + } + + cdef class RandomState: """ RandomState(seed=None) @@ -618,11 +928,12 @@ cdef class RandomState: """ cdef rk_state *internal_state cdef object lock + cdef object state_address poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 def __init__(self, seed=None): self.internal_state = PyMem_Malloc(sizeof(rk_state)) - + self.state_address = NpyCapsule_FromVoidPtr(self.internal_state, NULL) self.lock = Lock() self.seed(seed) @@ -885,15 +1196,15 @@ cdef class RandomState: """ return disc0_array(self.internal_state, rk_long, size, self.lock) - def randint(self, low, high=None, size=None): + def randint(self, low, high=None, size=None, dtype='l'): """ - randint(low, high=None, size=None) + randint(low, high=None, size=None, dtype='l') Return random integers from `low` (inclusive) to `high` (exclusive). - Return random integers from the "discrete uniform" distribution in the - "half-open" interval [`low`, `high`). If `high` is None (the default), - then results are from [0, `low`). + Return random integers from the "discrete uniform" distribution of + the specified dtype in the "half-open" interval [`low`, `high`). If + `high` is None (the default), then results are from [0, `low`). Parameters ---------- @@ -908,6 +1219,13 @@ cdef class RandomState: Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. Default is None, in which case a single value is returned. + dtype : dtype, optional + Desired dtype of the result. All dtypes are determined by their + name, i.e., 'int64', 'int`, etc, so byteorder is not available + and a specific precision may have different C types depending + on the platform. The default value is 'l' (C long). + + .. versionadded:: 1.11.0 Returns ------- @@ -936,14 +1254,24 @@ cdef class RandomState: [3, 2, 2, 0]]) """ - if high is not None and low >= high: - raise ValueError("low >= high") - if high is None: high = low low = 0 - return self.random_integers(low, high - 1, size) + key = np.dtype(dtype).name + if not key in _randint_type: + raise TypeError('Unsupported dtype "%s" for randint' % key) + lowbnd, highbnd, randfunc = _randint_type[key] + + if low < lowbnd: + raise ValueError("low is out of bounds for %s" % (key,)) + if high > highbnd: + raise ValueError("high is out of bounds for %s" % (key,)) + if low >= high: + raise ValueError("low >= high") + + with self.lock: + return randfunc(low, high - 1, size, self.state_address) def bytes(self, npy_intp length): """ @@ -1356,11 +1684,13 @@ cdef class RandomState: """ random_integers(low, high=None, size=None) - Return random integers between `low` and `high`, inclusive. + Random integers of type np.int between `low` and `high`, inclusive. - Return random integers from the "discrete uniform" distribution in the - closed interval [`low`, `high`]. If `high` is None (the default), - then results are from [1, `low`]. + Return random integers of type np.int from the "discrete uniform" + distribution in the closed interval [`low`, `high`]. If `high` is + None (the default), then results are from [1, `low`]. 
The np.int + type translates to the C long type used by Python 2 for "short" + integers and its precision is platform dependent. Parameters ---------- @@ -1426,37 +1756,13 @@ cdef class RandomState: >>> plt.show() """ - if high is not None and low > high: - raise ValueError("low > high") + if high is None: + high = low + low = 1 - cdef long lo, hi, rv - cdef unsigned long diff - cdef long *array_data - cdef ndarray array "arrayObject" - cdef npy_intp length - cdef npy_intp i + return self.randint(low, high + 1, size=size, dtype='l') - if high is None: - lo = 1 - hi = low - else: - lo = low - hi = high - diff = hi - lo - if size is None: - with self.lock: - rv = lo + rk_interval(diff, self. internal_state) - return rv - else: - array = np.empty(size, int) - length = PyArray_SIZE(array) - array_data = PyArray_DATA(array) - with self.lock, nogil: - for i from 0 <= i < length: - rv = lo + rk_interval(diff, self. internal_state) - array_data[i] = rv - return array # Complicated, continuous distributions: def standard_normal(self, size=None): diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd index c54f79c0af38..488278d6c7d9 100644 --- a/numpy/random/mtrand/numpy.pxd +++ b/numpy/random/mtrand/numpy.pxd @@ -2,6 +2,12 @@ cdef extern from "numpy/npy_no_deprecated_api.h": pass +cdef extern from "mt_compat.h": + + object NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(object o)) + void * NpyCapsule_AsVoidPtr(object o) + + cdef extern from "numpy/arrayobject.h": cdef enum NPY_TYPES: @@ -71,7 +77,17 @@ cdef extern from "numpy/arrayobject.h": double real double imag + ctypedef int npy_int ctypedef int npy_intp + ctypedef int npy_int64 + ctypedef int npy_uint64 + ctypedef int npy_int32 + ctypedef int npy_uint32 + ctypedef int npy_int16 + ctypedef int npy_uint16 + ctypedef int npy_int8 + ctypedef int npy_uint8 + ctypedef int npy_bool ctypedef extern class numpy.dtype [object PyArray_Descr]: pass diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c index b18897e2c088..3a95efeeb204 100644 --- a/numpy/random/mtrand/randomkit.c +++ b/numpy/random/mtrand/randomkit.c @@ -70,6 +70,7 @@ #include #include #include +#include #ifdef _WIN32 /* @@ -115,6 +116,10 @@ #include #endif +/* + * Do not move this include. randomkit.h must be included + * after windows timeb.h is included. + */ #include "randomkit.h" #ifndef RK_DEV_URANDOM @@ -207,7 +212,11 @@ rk_randomseed(rk_state *state) #define UPPER_MASK 0x80000000UL #define LOWER_MASK 0x7fffffffUL -/* Slightly optimised reference implementation of the Mersenne Twister */ +/* + * Slightly optimised reference implementation of the Mersenne Twister + * Note that regardless of the precision of long, only 32 bit random + * integers are produced + */ unsigned long rk_random(rk_state *state) { @@ -240,6 +249,219 @@ rk_random(rk_state *state) return y; } + +/* + * Returns an unsigned 64 bit random integer. + */ +NPY_INLINE static npy_uint64 +rk_uint64(rk_state *state) +{ + npy_uint64 upper = (npy_uint64)rk_random(state) << 32; + npy_uint64 lower = (npy_uint64)rk_random(state); + return upper | lower; +} + + +/* + * Returns an unsigned 32 bit random integer. + */ +NPY_INLINE static npy_uint32 +rk_uint32(rk_state *state) +{ + return (npy_uint32)rk_random(state); +} + + +/* + * Fills an array with cnt random npy_uint64 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. 
+ */ +void +rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt, + npy_uint64 *out, rk_state *state) +{ + npy_uint64 val, mask = rng; + npy_intp i; + + if (rng == 0) { + for (i = 0; i < cnt; i++) { + out[i] = off; + } + return; + } + + /* Smallest bit mask >= max */ + mask |= mask >> 1; + mask |= mask >> 2; + mask |= mask >> 4; + mask |= mask >> 8; + mask |= mask >> 16; + mask |= mask >> 32; + + for (i = 0; i < cnt; i++) { + if (rng <= 0xffffffffUL) { + while ((val = (rk_uint32(state) & mask)) > rng); + } + else { + while ((val = (rk_uint64(state) & mask)) > rng); + } + out[i] = off + val; + } +} + + +/* + * Fills an array with cnt random npy_uint32 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +void +rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt, + npy_uint32 *out, rk_state *state) +{ + npy_uint32 val, mask = rng; + npy_intp i; + + if (rng == 0) { + for (i = 0; i < cnt; i++) { + out[i] = off; + } + return; + } + + /* Smallest bit mask >= max */ + mask |= mask >> 1; + mask |= mask >> 2; + mask |= mask >> 4; + mask |= mask >> 8; + mask |= mask >> 16; + + for (i = 0; i < cnt; i++) { + while ((val = (rk_uint32(state) & mask)) > rng); + out[i] = off + val; + } +} + + +/* + * Fills an array with cnt random npy_uint16 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +void +rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt, + npy_uint16 *out, rk_state *state) +{ + npy_uint16 val, mask = rng; + npy_intp i; + npy_uint32 buf; + int bcnt = 0; + + if (rng == 0) { + for (i = 0; i < cnt; i++) { + out[i] = off; + } + return; + } + + /* Smallest bit mask >= max */ + mask |= mask >> 1; + mask |= mask >> 2; + mask |= mask >> 4; + mask |= mask >> 8; + + for (i = 0; i < cnt; i++) { + do { + if (!bcnt) { + buf = rk_uint32(state); + bcnt = 1; + } + else { + buf >>= 16; + bcnt--; + } + val = (npy_uint16)buf & mask; + } while (val > rng); + out[i] = off + val; + } +} + + +/* + * Fills an array with cnt random npy_uint8 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +void +rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt, + npy_uint8 *out, rk_state *state) +{ + npy_uint8 val, mask = rng; + npy_intp i; + npy_uint32 buf; + int bcnt = 0; + + if (rng == 0) { + for (i = 0; i < cnt; i++) { + out[i] = off; + } + return; + } + + /* Smallest bit mask >= max */ + mask |= mask >> 1; + mask |= mask >> 2; + mask |= mask >> 4; + + for (i = 0; i < cnt; i++) { + do { + if (!bcnt) { + buf = rk_uint32(state); + bcnt = 3; + } + else { + buf >>= 8; + bcnt--; + } + val = (npy_uint8)buf & mask; + } while (val > rng); + out[i] = off + val; + } +} + + +/* + * Fills an array with cnt random npy_bool between off and off + rng + * inclusive. 
+ */ +void +rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt, + npy_bool *out, rk_state *state) +{ + npy_intp i; + npy_uint32 buf; + int bcnt = 0; + + if (rng == 0) { + for (i = 0; i < cnt; i++) { + out[i] = off; + } + return; + } + + /* If we reach here rng and mask are one and off is zero */ + assert(rng == 1 && off == 0); + for (i = 0; i < cnt; i++) { + if (!bcnt) { + buf = rk_uint32(state); + bcnt = 31; + } + else { + buf >>= 1; + bcnt--; + } + out[i] = (buf & 0x00000001) != 0; + } +} + + long rk_long(rk_state *state) { diff --git a/numpy/random/mtrand/randomkit.h b/numpy/random/mtrand/randomkit.h index e049488eeb14..fcdd606a14f5 100644 --- a/numpy/random/mtrand/randomkit.h +++ b/numpy/random/mtrand/randomkit.h @@ -56,11 +56,13 @@ * defaults to "/dev/urandom" */ -#include - #ifndef _RANDOMKIT_ #define _RANDOMKIT_ +#include +#include + + #define RK_STATE_LEN 624 typedef struct rk_state_ @@ -148,6 +150,41 @@ extern unsigned long rk_ulong(rk_state *state); */ extern unsigned long rk_interval(unsigned long max, rk_state *state); +/* + * Fills an array with cnt random npy_uint64 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +extern void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt, + npy_uint64 *out, rk_state *state); + +/* + * Fills an array with cnt random npy_uint32 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +extern void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt, + npy_uint32 *out, rk_state *state); + +/* + * Fills an array with cnt random npy_uint16 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +extern void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt, + npy_uint16 *out, rk_state *state); + +/* + * Fills an array with cnt random npy_uint8 between off and off + rng + * inclusive. The numbers wrap if rng is sufficiently large. + */ +extern void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt, + npy_uint8 *out, rk_state *state); + +/* + * Fills an array with cnt random npy_bool between off and off + rng + * inclusive. It is assumed tha npy_bool as the same size as npy_uint8. + */ +extern void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt, + npy_bool *out, rk_state *state); + /* * Returns a random double between 0.0 and 1.0, 1.0 excluded. */ From bba8647dbf7098ffcf6df70b959b6079b5682e0c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 2 Jan 2016 15:40:08 -0700 Subject: [PATCH 284/496] BUG: #6922: Fix segfault introduced in 23901aa. Revert troublesome parts of gh-5929. Copyswap cannot be relied upon for void types containing objects. 
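For context, a minimal Python sketch of the failing pattern, matching the regression test added in the follow-up patch: a structured dtype that puts an object field next to a two-byte integer leaves the void elements unaligned, and sorting such an array walked the broken copyswap path.

    import numpy as np

    # Before this fix, comparing unaligned void elements that contain
    # Python objects crashed; after it, the sort completes normally.
    a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
    a.sort()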
--- numpy/core/src/multiarray/arraytypes.c.src | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src index 060f250980c7..b2ba831f4eb6 100644 --- a/numpy/core/src/multiarray/arraytypes.c.src +++ b/numpy/core/src/multiarray/arraytypes.c.src @@ -2866,7 +2866,9 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (nip1 == NULL) { goto finish; } - new->f->copyswap(nip1, ip1 + offset, swap, dummy); + memcpy(nip1, ip1 + offset, new->elsize); + if (swap) + new->f->copyswap(nip1, NULL, swap, dummy); } if (swap || !npy_is_aligned(nip2, new->alignment)) { /* create buffer and copy */ @@ -2877,7 +2879,9 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) } goto finish; } - new->f->copyswap(nip2, ip2 + offset, swap, dummy); + memcpy(nip2, ip2 + offset, new->elsize); + if (swap) + new->f->copyswap(nip2, NULL, swap, dummy); } } res = new->f->compare(nip1, nip2, dummy); From fe46c47a96e191b028823280fe3451a48d0fc18e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 2 Jan 2016 14:12:00 -0700 Subject: [PATCH 285/496] TST: Add regression test for gh-6922. Sorting and unaligned void type should not segfault. --- numpy/core/tests/test_regression.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index ac34cfa5343b..a61e64d8de90 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2177,5 +2177,11 @@ def test_empty_percentile(self): # gh-6530 / gh-6553 assert_array_equal(np.percentile(np.arange(10), []), np.array([])) + def test_void_compare_segfault(self): + # gh-6922. The following should not segfault + a = np.ones(3, dtype=[('object', 'O'), ('int', ' Date: Fri, 1 Jan 2016 18:38:44 -0700 Subject: [PATCH 286/496] TST: Add tests for new randint functionality. * check exceptions * check extreme bounds are reachable * check that all values are in the specified bounds * check repeatability of sequences More exact statistical tests would be nice, but that is another project. --- numpy/random/tests/test_random.py | 77 +++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 0ce341eadb6b..c2e1adca3bd8 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -128,6 +128,83 @@ def test_negative_binomial(self): # arguments without truncation. 
self.prng.negative_binomial(0.5, 0.5) +class TestRandint(TestCase): + + rfunc = np.random.randint + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=np.float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1 , ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd , ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd , lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1 , 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + np.random.seed() + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a md5 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but np.bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', + 'int16': '1b7741b80964bb190c50d541dca1cac1', + 'int32': '4dc9fcc2b395577ebb51793e58ed1a05', + 'int64': '17db902806f448331b5a758d7d2ee672', + 'int8': '27dd30c4e08a797063dffac2490b0be6', + 'uint16': '1b7741b80964bb190c50d541dca1cac1', + 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05', + 'uint64': '17db902806f448331b5a758d7d2ee672', + 'uint8': '27dd30c4e08a797063dffac2490b0be6'} + + for dt in self.itype[1:]: + np.random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.md5(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianess + np.random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8) + res = hashlib.md5(val).hexdigest() + assert_(tgt[np.dtype(np.bool).name] == res) + + class TestRandomDist(TestCase): # Make sure the random distribution returns the correct value for a # given seed From 51726e59137aa0ea6cf216d95f23fe788a7164ea Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 30 Dec 2015 14:42:23 -0700 Subject: [PATCH 287/496] TST,BUG: Fix use of randint in test_mem_overlap.py. The default randint function returns a C long type which does not have enough range to test indexes on Windows 64. The fix here is to use specify a np.intp dtype for the randint call now that we have that option. Closes #6812. 
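A short illustration of the pitfall, with illustrative bounds: on 64-bit Windows the C long behind randint's default dtype is only 32 bits wide, so index-sized values need the dtype made explicit.

    import numpy as np

    rng = np.random.RandomState(1234)

    # With the default dtype ('l', the C long) this raises ValueError on
    # 64-bit Windows because 2**40 does not fit in a 32-bit long; asking
    # for np.intp instead sizes the draw to the platform's index width.
    idx = rng.randint(0, 2**40, dtype=np.intp)  # fine on 64-bit platforms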
--- numpy/core/tests/test_mem_overlap.py | 41 ++++++++++++++++------------ 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py index a8b29ecd1b39..82e66db5b798 100644 --- a/numpy/core/tests/test_mem_overlap.py +++ b/numpy/core/tests/test_mem_overlap.py @@ -110,17 +110,19 @@ def test_diophantine_fuzz(): numbers = [] while min(feasible_count, infeasible_count) < min_count: # Ensure big and small integer problems - A_max = 1 + rng.randint(0, 11)**6 - U_max = rng.randint(0, 11)**6 + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 A_max = min(max_int, A_max) U_max = min(max_int-1, U_max) - A = tuple(rng.randint(1, A_max+1) for j in range(ndim)) - U = tuple(rng.randint(0, U_max+2) for j in range(ndim)) + A = tuple(rng.randint(1, A_max+1, dtype=np.intp) + for j in range(ndim)) + U = tuple(rng.randint(0, U_max+2, dtype=np.intp) + for j in range(ndim)) b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) - b = rng.randint(-1, b_ub+2) + b = rng.randint(-1, b_ub+2, dtype=np.intp) if ndim == 0 and feasible_count < min_count: b = 0 @@ -258,9 +260,9 @@ def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): rng = np.random.RandomState(1234) def random_slice(n, step): - start = rng.randint(0, n+1) - stop = rng.randint(start, n+1) - if rng.randint(0, 2) == 0: + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 return slice(start, stop, step) @@ -269,12 +271,14 @@ def random_slice(n, step): infeasible = 0 while min(feasible, infeasible) < min_count: - steps = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1 + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) if same_steps: steps2 = steps else: - steps2 = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1 + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) t1 = np.arange(x.ndim) @@ -374,9 +378,9 @@ def test_internal_overlap_slices(): rng = np.random.RandomState(1234) def random_slice(n, step): - start = rng.randint(0, n+1) - stop = rng.randint(start, n+1) - if rng.randint(0, 2) == 0: + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 return slice(start, stop, step) @@ -385,7 +389,8 @@ def random_slice(n, step): min_count = 5000 while cases < min_count: - steps = tuple(rng.randint(1, 11) if rng.randint(0, 5) == 0 else 1 + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) t1 = np.arange(x.ndim) rng.shuffle(t1) @@ -469,10 +474,12 @@ def test_internal_overlap_fuzz(): rng = np.random.RandomState(1234) while min(overlap, no_overlap) < min_count: - ndim = rng.randint(1, 4) + ndim = rng.randint(1, 4, dtype=np.intp) - strides = tuple(rng.randint(-1000, 1000) for j in range(ndim)) - shape = tuple(rng.randint(1, 30) for j in range(ndim)) + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) a = as_strided(x, strides=strides, shape=shape) result = check_internal_overlap(a) From 5132ce0ad0ff4e7c448c80eeaa806678d558f81c Mon Sep 17 00:00:00 2001 
From: Charles Harris Date: Thu, 31 Dec 2015 21:51:59 -0700 Subject: [PATCH 288/496] BUG: Get rid of C++ style comment in multiarray_tests.c.src --- numpy/core/src/multiarray/multiarray_tests.c.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src index 5e247e15e145..45092dc0c403 100644 --- a/numpy/core/src/multiarray/multiarray_tests.c.src +++ b/numpy/core/src/multiarray/multiarray_tests.c.src @@ -778,7 +778,7 @@ static PyObject * test_as_c_array(PyObject *NPY_UNUSED(self), PyObject *args) { PyArrayObject *array_obj; - npy_intp dims[3]; // max 3-dim + npy_intp dims[3]; /* max 3-dim */ npy_intp i=0, j=0, k=0; npy_intp num_dims = 0; PyArray_Descr *descr = NULL; From 6a04b9217675a1a072b01ade0f2664b30f67bde6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 1 Jan 2016 10:57:27 -0700 Subject: [PATCH 289/496] TST: Increase the allowable warning count to 1 for i386 debug test. Cython generated C code contains the number '-2147483648L', which leads to a warning on 32 bit platforms: "Warning: this decimal constant is unsigned only in ISO C90" See the discussion at http://stackoverflow.com/questions/9941261/ The compiled code seems to run correctly despite the warning and if there are problems, they should turn up in the nose testing. --- tools/travis-test.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 939594d8ce27..40e266b2616d 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -50,8 +50,10 @@ setup_base() | grep -E "warning\>" \ | tee warnings # Check for an acceptable number of warnings. Some warnings are out of - # our control, so adjust the number as needed. - [[ $(wc -l < warnings) -lt 1 ]] + # our control, so adjust the number as needed. At the moment a + # cython generated code produces a warning about '-2147483648L', but + # the code seems to compile OK. + [[ $(wc -l < warnings) -lt 2 ]] fi else sysflags="$($PYTHON -c "from distutils import sysconfig; \ From 07fa8cc957880614319aeb5b88c88ae367d954d9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 1 Jan 2016 15:00:24 -0700 Subject: [PATCH 290/496] TST: Enable AppVeyor failure to be reported. AppVeyor failures were filtered out until numpy issues were fixed. The last issues were the test_mem_overlap failures on 64 bit windows, and those are fixed. --- appveyor.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 59389462d2b6..e9467e093961 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,12 +12,6 @@ environment: - PY_MAJOR_VER: 3 PYTHON_ARCH: "x86" -matrix: - #fast_finish: true - allow_failures: - - PY_MAJOR_VER: 2 - - PY_MAJOR_VER: 3 - build_script: - ps: Start-FileDownload "https://repo.continuum.io/miniconda/Miniconda$env:PY_MAJOR_VER-latest-Windows-$env:PYTHON_ARCH.exe" C:\Miniconda.exe; echo "Finished downloading miniconda" - cmd: C:\Miniconda.exe /S /D=C:\Py From bc2a97bde26a026e75adec7ec70566be3005c47c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 1 Jan 2016 19:00:27 -0700 Subject: [PATCH 291/496] DOC: Document new randint dtype parameter in 1.11.0 release notes. 
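The feature being documented can be exercised as below; this is a usage sketch only, with dtypes resolved by name and precision as the note describes, so an explicit byte order such as '>i4' is accepted but ignored.

    import numpy as np

    np.random.seed(1234)
    np.random.randint(0, 2**8, size=4, dtype=np.uint8)       # full uint8 range
    np.random.randint(-2**15, 2**15, size=4, dtype='int16')  # full int16 range
    np.random.randint(0, 2, size=4, dtype=np.bool_)          # random booleans
    np.random.randint(5, size=4, dtype='>i4')                # treated as native int32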
--- doc/release/1.11.0-notes.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 7c078eed972f..705ce73c1bc8 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -86,6 +86,23 @@ New Features the files can be specifies to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. +* A ``dtype`` parameter has been added to ``np.random.randint`` + Random ndarrays of the following types can now be generated: + + - np.bool, + - np.int8, np.uint8, + - np.int16, np.uint16, + - np.int32, np.uint32, + - np.int64, np.uint64, + - np.int_ (long), np.intp + + The specification is by precision rather than by C type. Hence, on some + platforms np.int64 may be a `long` instead of `long long` even if the + specified dtype is `long long` because the two may have the same + precision. The resulting type depends on which c type numpy uses for the + given precision. The byteorder specification is also ignored, the + generated arrays are always in native byte order. + Improvements ============ From 277eabe390c6f67962a48764df3d77443776b3cc Mon Sep 17 00:00:00 2001 From: Holger Kohr Date: Wed, 30 Dec 2015 01:39:05 +0100 Subject: [PATCH 292/496] ENH: clarify error message of broadcast --- numpy/core/src/multiarray/iterators.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 829994b1e517..702f9e21ac45 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1458,8 +1458,8 @@ PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) ntot = n + nadd; if (ntot < 2 || ntot > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, - "Need between 2 and (%d) " \ - "array objects (inclusive).", NPY_MAXARGS); + "Need at least 2 and at most %d " + "array objects.", NPY_MAXARGS); return NULL; } multi = PyArray_malloc(sizeof(PyArrayMultiIterObject)); @@ -1524,8 +1524,8 @@ PyArray_MultiIterNew(int n, ...) if (n < 2 || n > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, - "Need between 2 and (%d) " \ - "array objects (inclusive).", NPY_MAXARGS); + "Need at least 2 and at most %d " + "array objects.", NPY_MAXARGS); return NULL; } @@ -1608,7 +1608,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *k return NULL; } PyErr_Format(PyExc_ValueError, - "Need at least two and fewer than (%d) " + "Need at least 2 and at most %d " "array objects.", NPY_MAXARGS); return NULL; } From a1e9bf56d80e5f412729b938db427a87f39e1122 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 3 Jan 2016 15:48:50 -0700 Subject: [PATCH 293/496] MAINT: Fix typos in 1.11.0-notes.rst and mtrand.pyx documentation. --- doc/release/1.11.0-notes.rst | 2 +- numpy/random/mtrand/mtrand.pyx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 705ce73c1bc8..6de10b5539c0 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -99,7 +99,7 @@ New Features The specification is by precision rather than by C type. Hence, on some platforms np.int64 may be a `long` instead of `long long` even if the specified dtype is `long long` because the two may have the same - precision. The resulting type depends on which c type numpy uses for the + precision. The resulting type depends on which C type numpy uses for the given precision. 
The byteorder specification is also ignored, the generated arrays are always in native byte order. diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 110b60a9bc27..489cc9e6e24a 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1216,7 +1216,7 @@ cdef class RandomState: single value is returned. dtype : dtype, optional Desired dtype of the result. All dtypes are determined by their - name, i.e., 'int64', 'int`, etc, so byteorder is not available + name, i.e., 'int64', 'int', etc, so byteorder is not available and a specific precision may have different C types depending on the platform. The default value is 'l' (C long). From 4d129a4cc72d0d86d666545890cf010db27fcd8e Mon Sep 17 00:00:00 2001 From: gfyoung Date: Fri, 1 Jan 2016 19:33:56 -0800 Subject: [PATCH 294/496] DOC: Match Documentation to Behavior for MaskedArray.filled Closes gh-6647. --- numpy/ma/core.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index de716a6699aa..9b0b1cc79543 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3526,6 +3526,8 @@ def set_fill_value(self, value=None): def filled(self, fill_value=None): """ Return a copy of self, with masked values filled with a given value. + **However**, if there are no masked values to fill, self will be + returned instead as an ndarray. Parameters ---------- @@ -3537,7 +3539,9 @@ def filled(self, fill_value=None): ------- filled_array : ndarray A copy of ``self`` with invalid entries replaced by *fill_value* - (be it the function argument or the attribute of ``self``. + (be it the function argument or the attribute of ``self``), or + ``self`` itself as an ndarray if there are no invalid entries to + be replaced. Notes ----- From 450cb8c2d77a8becdeae30afd90d0ec743e6f3ec Mon Sep 17 00:00:00 2001 From: Holger Kohr Date: Wed, 30 Dec 2015 12:55:32 +0100 Subject: [PATCH 295/496] ENH: allow single input argument in numpy.broadcast --- numpy/core/src/multiarray/iterators.c | 12 ++++++------ numpy/lib/stride_tricks.py | 3 --- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 702f9e21ac45..5099e3e193c8 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -1456,9 +1456,9 @@ PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) int i, ntot, err=0; ntot = n + nadd; - if (ntot < 2 || ntot > NPY_MAXARGS) { + if (ntot < 1 || ntot > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, - "Need at least 2 and at most %d " + "Need at least 1 and at most %d " "array objects.", NPY_MAXARGS); return NULL; } @@ -1522,9 +1522,9 @@ PyArray_MultiIterNew(int n, ...) 
int i, err = 0; - if (n < 2 || n > NPY_MAXARGS) { + if (n < 1 || n > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, - "Need at least 2 and at most %d " + "Need at least 1 and at most %d " "array objects.", NPY_MAXARGS); return NULL; } @@ -1603,12 +1603,12 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *k ++n; } } - if (n < 2 || n > NPY_MAXARGS) { + if (n < 1 || n > NPY_MAXARGS) { if (PyErr_Occurred()) { return NULL; } PyErr_Format(PyExc_ValueError, - "Need at least 2 and at most %d " + "Need at least 1 and at most %d " "array objects.", NPY_MAXARGS); return NULL; } diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index f4b43a5a92d8..4c23ab355599 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -121,9 +121,6 @@ def _broadcast_shape(*args): """ if not args: raise ValueError('must provide at least one argument') - if len(args) == 1: - # a single argument does not work with np.broadcast - return np.asarray(args[0]).shape # use the old-iterator because np.nditer does not handle size 0 arrays # consistently b = np.broadcast(*args[:32]) From 361c0d5ce1dc3d9b68bad42ec05ef7a4b33285ce Mon Sep 17 00:00:00 2001 From: Holger Kohr Date: Sun, 3 Jan 2016 21:32:57 +0100 Subject: [PATCH 296/496] TST: add test for broadcast with one argument --- numpy/core/tests/test_indexing.py | 5 +---- numpy/core/tests/test_numeric.py | 11 ++++++++++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index 38280d05e452..deb2130b7c2d 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -895,10 +895,7 @@ def _get_multi_index(self, arr, indices): + arr.shape[ax + len(indx[1:]):])) # Check if broadcasting works - if len(indx[1:]) != 1: - res = np.broadcast(*indx[1:]) # raises ValueError... - else: - res = indx[1] + res = np.broadcast(*indx[1:]) # unfortunately the indices might be out of bounds. So check # that first, and use mode='wrap' then. However only if # there are any indices... 
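With the relaxed lower bound in place, the single-argument case simply mimics iteration over the lone array; a quick sketch mirroring the new test that follows:

    import numpy as np

    mit = np.broadcast(np.empty((5, 6, 7)))
    mit.shape    # -> (5, 6, 7)
    mit.nd       # -> 3
    mit.numiter  # -> 1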
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index b7e146b5a203..d631180809a2 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2207,11 +2207,20 @@ def test_broadcast_in_args(self): for a, ia in zip(arrs, mit.iters): assert_(a is ia.base) + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + def test_number_of_arguments(self): arr = np.empty((5,)) for j in range(35): arrs = [arr] * j - if j < 2 or j > 32: + if j < 1 or j > 32: assert_raises(ValueError, np.broadcast, *arrs) else: mit = np.broadcast(*arrs) From 600595ff67caff21b1a062be6215164af8525a49 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Mon, 4 Jan 2016 13:41:54 +0100 Subject: [PATCH 297/496] TST: make pointer-arith error fatal matches windows compiler behavior --- tools/travis-test.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 40e266b2616d..d105c15c71ea 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -25,7 +25,8 @@ if [ -n "$PYTHON_OO" ]; then fi # make some warnings fatal, mostly to match windows compilers -werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull" +werrors="-Werror=declaration-after-statement -Werror=vla " +werrors+="-Werror=nonnull -Werror=pointer-arith" setup_base() { From fad4dd7f25fcf9524ae2b20d3d012ebb7d3e2385 Mon Sep 17 00:00:00 2001 From: ldoddema Date: Mon, 4 Jan 2016 16:40:08 +0100 Subject: [PATCH 298/496] MAINT: Fix notation in mtrand.pyx documentation. --- numpy/random/mtrand/mtrand.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 489cc9e6e24a..91434cd76554 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -681,8 +681,8 @@ def _rand_int32(low, high, size, rngstate): Return random np.int32 integers between `low` and `high`, inclusive. Return random integers from the "discrete uniform" distribution in the - closed interval [`low`, `high`). If `high` is None (the default), - then results are from [0, `low`). On entry the arguments are presumed + closed interval [`low`, `high`]. If `high` is None (the default), + then results are from [0, `low`]. On entry the arguments are presumed to have been validated for size and order for the np.int32 type. Parameters From 2edc202a2a70a17893e523dd4eb56b6668522981 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 4 Jan 2016 15:58:55 +0000 Subject: [PATCH 299/496] DEP: deprecate np.testing.rand --- doc/release/1.11.0-notes.rst | 7 +++++++ numpy/testing/utils.py | 3 +++ 2 files changed, 10 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 6de10b5539c0..c15936cc3f57 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -158,3 +158,10 @@ parameter in methods like ```array.flatten``` or ```array.ravel``` that were not one of the following: 'C', 'F', 'A', 'K' (note that all of these possible values are unicode- and case-insensitive). Such behaviour will not be allowed in future releases. 
+ +Random number generator in the ``testing`` namespace +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Python standard library random number generator was previously exposed in the +``testing`` namespace as ``testing.rand``. Using this generator is not +recommended and it will be removed in a future release. Use generators from +``numpy.random`` namespace instead. diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 0c4ebe1b9a9c..f545cd3c2af1 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -16,6 +16,7 @@ from .nosetester import import_nose from numpy.core import float32, empty, arange, array_repr, ndarray +from numpy.lib.utils import deprecate if sys.version_info[0] >= 3: from io import StringIO @@ -122,6 +123,8 @@ def gisinf(x): raise TypeError("isinf not supported for this type") return st +@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " + "Use numpy.random.rand instead.") def rand(*args): """Returns an array of random numbers with the given shape. From 1cbc14e43560ce59b267839ca1e6b1e402fe0d7a Mon Sep 17 00:00:00 2001 From: ldoddema Date: Mon, 4 Jan 2016 19:23:59 +0100 Subject: [PATCH 300/496] DOC: high arg is mandatory for mtrand._rand_int32 --- numpy/random/mtrand/mtrand.pyx | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 91434cd76554..3a4e132ec7c0 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -681,19 +681,15 @@ def _rand_int32(low, high, size, rngstate): Return random np.int32 integers between `low` and `high`, inclusive. Return random integers from the "discrete uniform" distribution in the - closed interval [`low`, `high`]. If `high` is None (the default), - then results are from [0, `low`]. On entry the arguments are presumed + closed interval [`low`, `high`]. On entry the arguments are presumed to have been validated for size and order for the np.int32 type. Parameters ---------- low : int - Lowest (signed) integer to be drawn from the distribution (unless - ``high=None``, in which case this parameter is the *highest* such - integer). + Lowest (signed) integer to be drawn from the distribution. high : int - If provided, the largest (signed) integer to be drawn from the - distribution (see above for behavior if ``high=None``). + Highest (signed) integer to be drawn from the distribution. size : int or tuple of ints Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. Default is None, in which case a From 346c700ab16da1deaf95e9f0331ffd76fb2162fa Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 4 Jan 2016 19:02:25 -0700 Subject: [PATCH 301/496] DOC: Create Numpy 1.10.3 release notes. --- doc/release/1.10.3-notes.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 doc/release/1.10.3-notes.rst diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst new file mode 100644 index 000000000000..9a01dee76560 --- /dev/null +++ b/doc/release/1.10.3-notes.rst @@ -0,0 +1,22 @@ +NumPy 1.10.3 Release Notes +************************** + +This release is a bugfix release motivated by a segfault regression. + +Issues Fixed +============ + +* gh-6922 BUG: numpy.recarray.sort segfaults on Windows + +Merged PRs +========== + +The following PRs have been merged into 1.10.3. When the PR is a backport, +the PR number for the original PR against master is listed. 
+ +* gh-6840 TST: Update travis testing script in 1.10.x +* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py. +* gh-6884 REL: Update pavement.py and setup.py to reflect current version. +* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py. +* gh-6924 BUG: Fix segfault gh-6922. + From 8c55f58b7fe75d24ea9535a9dd1dffc65c0eae49 Mon Sep 17 00:00:00 2001 From: Mark Wiebe Date: Tue, 5 Jan 2016 14:19:56 -0800 Subject: [PATCH 302/496] TST: Add datetime test distinguishing modified following from preceding --- numpy/core/tests/test_datetime.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 563aa48fb49c..8a8eafee8a20 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1524,6 +1524,12 @@ def test_datetime_busday_offset(self): assert_equal( np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'), np.datetime64('2010-10-29')) + assert_equal( + np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'), + np.datetime64('2010-10-18')) + assert_equal( + np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'), + np.datetime64('2010-10-15')) # roll='raise' by default assert_raises(ValueError, np.busday_offset, '2011-06-04', 0) From 029b502bf4946b2b0943fed1ccbb6dadd7bcc394 Mon Sep 17 00:00:00 2001 From: Mark Wiebe Date: Tue, 5 Jan 2016 14:21:50 -0800 Subject: [PATCH 303/496] BUG: Fix copy/paste error treating modifiedpreceding as modifiedfollowing --- numpy/core/src/multiarray/datetime_busday.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index 331e104969ed..4fade4d20a7c 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -889,7 +889,7 @@ PyArray_BusDayRollConverter(PyObject *roll_in, NPY_BUSDAY_ROLL *roll) break; case 'p': if (strcmp(str, "modifiedpreceding") == 0) { - *roll = NPY_BUSDAY_MODIFIEDFOLLOWING; + *roll = NPY_BUSDAY_MODIFIEDPRECEDING; goto finish; } break; From 40e47e06e741797efef60211c58e30ff82c7191f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 5 Jan 2016 12:56:26 -0700 Subject: [PATCH 304/496] DOC,BUG: Fix some latex generation problems. Some of the documentation for newbyteorder, copy and pasted in several spots, had paragraphs ending in `::`, initiating a sphinx generated Verbatim environment and resulting in "LaTeX Error: Too deeply nested". The user_array.container class needed non-empty class documentation. That that caused a problem is probably a numpydoc bug, but it is easy to fix. [skip ci] --- doc/release/1.10.3-notes.rst | 1 + numpy/add_newdocs.py | 31 +++++++++++++++---------------- numpy/lib/user_array.py | 17 +++++++++++++++++ 3 files changed, 33 insertions(+), 16 deletions(-) diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst index 9a01dee76560..3b5dbc5e3433 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/release/1.10.3-notes.rst @@ -19,4 +19,5 @@ the PR number for the original PR against master is listed. * gh-6884 REL: Update pavement.py and setup.py to reflect current version. * gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py. * gh-6924 BUG: Fix segfault gh-6922. +* gh-6943 DOC,BUG: Fix some latex generation problems. 
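Schematically, the docstring markup at fault looks like the fragment below (a trimmed, hypothetical docstring, not the exact NumPy text): ending the lead-in sentence with ``::`` makes Sphinx wrap the following list in a verbatim environment, which nests too deeply in the LaTeX build, while a single ``:`` leaves the list as ordinary markup. The change to add_newdocs.py that follows applies exactly this fix.

    def newbyteorder(new_order='S'):
        """
        new_order : string, optional
            Byte order to force; codes can be any of:

            * 'S' - swap dtype from current to opposite endian
            * {'<', 'L'} - little endian
        """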
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 7eef07c4a2f0..e79720c773a8 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -3888,13 +3888,13 @@ def luf(lamdaexpr, *args, **kwargs): ---------- new_order : string, optional Byte order to force; a value from the byte order specifications - above. `new_order` codes can be any of:: + below. `new_order` codes can be any of: - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first @@ -6359,16 +6359,15 @@ def luf(lamdaexpr, *args, **kwargs): Parameters ---------- new_order : string, optional - Byte order to force; a value from the byte order - specifications below. The default value ('S') results in - swapping the current byte order. - `new_order` codes can be any of:: + Byte order to force; a value from the byte order specifications + below. The default value ('S') results in swapping the current + byte order. `new_order` codes can be any of: - * 'S' - swap dtype from current to opposite endian - * {'<', 'L'} - little endian - * {'>', 'B'} - big endian - * {'=', 'N'} - native order - * {'|', 'I'} - ignore (no change to byte order) + * 'S' - swap dtype from current to opposite endian + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * {'|', 'I'} - ignore (no change to byte order) The code does a case-insensitive check on the first letter of `new_order` for these alternatives. For example, any of '>' @@ -7231,10 +7230,10 @@ def luf(lamdaexpr, *args, **kwargs): The `new_order` code can be any from the following: + * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order - * 'S' - swap dtype from current to opposite endian * {'|', 'I'} - ignore (no change to byte order) Parameters diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py index bb5bec628f12..3103da57b7d0 100644 --- a/numpy/lib/user_array.py +++ b/numpy/lib/user_array.py @@ -1,5 +1,6 @@ """ Standard container-class for easy multiple-inheritance. + Try to inherit from the ndarray instead of using this class as this is not complete. @@ -16,7 +17,19 @@ class container(object): + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. + + Methods + ------- + copy + tostring + byteswap + astype + """ def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) @@ -219,15 +232,19 @@ def __ge__(self, other): return self._rc(greater_equal(self.array, other)) def copy(self): + "" return self._rc(self.array.copy()) def tostring(self): + "" return self.array.tostring() def byteswap(self): + "" return self._rc(self.array.byteswap()) def astype(self, typecode): + "" return self._rc(self.array.astype(typecode)) def _rc(self, a): From 8d8add29f198f5f174e8572337a82a203e4d4516 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 5 Jan 2016 16:58:39 -0700 Subject: [PATCH 305/496] DOC: Update the 1.10.3 release notes for release. 
[skip ci] --- doc/release/1.10.3-notes.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst index 3b5dbc5e3433..ae08394b501e 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/release/1.10.3-notes.rst @@ -1,12 +1,17 @@ NumPy 1.10.3 Release Notes ************************** -This release is a bugfix release motivated by a segfault regression. +This release is a bugfix source release motivated by a segfault regression. +No windows binaries are provided for this release, as there appear to be +bugs in the toolchain we use to generate those files. Hopefully that +problem will be fixed for the next release. In the meantime, we suggest +using one of the providers of windows binaries. Issues Fixed ============ * gh-6922 BUG: numpy.recarray.sort segfaults on Windows +* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll. Merged PRs ========== @@ -19,5 +24,5 @@ the PR number for the original PR against master is listed. * gh-6884 REL: Update pavement.py and setup.py to reflect current version. * gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py. * gh-6924 BUG: Fix segfault gh-6922. +* gh-6942 Fix datetime roll='modifiedpreceding' bug. * gh-6943 DOC,BUG: Fix some latex generation problems. - From e72e1510d892ce4464cf102000e89582327953a0 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 3 Jan 2016 18:11:15 -0500 Subject: [PATCH 306/496] TST: Verify that certain variations of dot products of a matrix with views of its self work correctly. --- numpy/core/tests/test_multiarray.py | 75 ++++++++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index d03c5f54727d..0541016d9ede 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -1936,7 +1936,80 @@ def test_dot(self): a = np.array([[1, 0], [0, 1]]) b = np.array([[0, 1], [1, 0]]) c = np.array([[9, 1], [1, -9]]) - + d = np.arange(24).reshape(4, 6) + ddt = np.array( + [[ 55, 145, 235, 325], + [ 145, 451, 757, 1063], + [ 235, 757, 1279, 1801], + [ 325, 1063, 1801, 2539]] + ) + dtd = np.array( + [[504, 540, 576, 612, 648, 684], + [540, 580, 620, 660, 700, 740], + [576, 620, 664, 708, 752, 796], + [612, 660, 708, 756, 804, 852], + [648, 700, 752, 804, 856, 908], + [684, 740, 796, 852, 908, 964]] + ) + + + # gemm vs syrk optimizations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + eaf = a.astype(et) + assert_equal(np.dot(eaf, eaf), eaf) + assert_equal(np.dot(eaf.T, eaf), eaf) + assert_equal(np.dot(eaf, eaf.T), eaf) + assert_equal(np.dot(eaf.T, eaf.T), eaf) + assert_equal(np.dot(eaf.T.copy(), eaf), eaf) + assert_equal(np.dot(eaf, eaf.T.copy()), eaf) + assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf) + + # syrk validations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + eaf = a.astype(et) + ebf = b.astype(et) + assert_equal(np.dot(ebf, ebf), eaf) + assert_equal(np.dot(ebf.T, ebf), eaf) + assert_equal(np.dot(ebf, ebf.T), eaf) + assert_equal(np.dot(ebf.T, ebf.T), eaf) + + # syrk - different shape, stride, and view validations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + edf = d.astype(et) + assert_equal( + np.dot(edf[::-1, :], edf.T), + np.dot(edf[::-1, :].copy(), edf.T.copy()) + ) + assert_equal( + np.dot(edf[:, ::-1], edf.T), + np.dot(edf[:, ::-1].copy(), edf.T.copy()) + ) + assert_equal( + np.dot(edf, edf[::-1, :].T), + 
np.dot(edf, edf[::-1, :].T.copy()) + ) + assert_equal( + np.dot(edf, edf[:, ::-1].T), + np.dot(edf, edf[:, ::-1].T.copy()) + ) + assert_equal( + np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T), + np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy()) + ) + assert_equal( + np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T), + np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy()) + ) + + # syrk - different shape + for et in [np.float32, np.float64, np.complex64, np.complex128]: + edf = d.astype(et) + eddtf = ddt.astype(et) + edtdf = dtd.astype(et) + assert_equal(np.dot(edf, edf.T), eddtf) + assert_equal(np.dot(edf.T, edf), edtdf) + + # function versus methods assert_equal(np.dot(a, b), a.dot(b)) assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) From 816cd4983b0c6cddf3c2e51331d822188ddc7aa0 Mon Sep 17 00:00:00 2001 From: Holger Kohr Date: Wed, 6 Jan 2016 16:16:32 +0100 Subject: [PATCH 307/496] DOC: Mention single-arg broadcast in release notes --- doc/release/1.11.0-notes.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 6de10b5539c0..da8ec3470845 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -133,6 +133,17 @@ diskspace on filesystems that support it. Changes ======= +*np.broadcast* can now be called with a single argument +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The resulting object in that case will simply mimic iteration over +a single array. This change obsoletes distinctions like + + if len(x) == 1: + shape = x[0].shape + else: + shape = np.broadcast(*x).shape + +Instead, ``np.broadcast`` can be used in all cases. Deprecations ============ From dbf3fcb19ec710732531c268aeba7aa348e872f1 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Wed, 6 Jan 2016 11:37:47 -0500 Subject: [PATCH 308/496] BUG trace is not subclass aware, such that np.trace(ma) != ma.trace(). --- numpy/core/fromnumeric.py | 6 +++++- numpy/core/tests/test_multiarray.py | 27 +++++++++++++++++++++++++++ numpy/ma/tests/test_core.py | 1 + 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 362c29cb825d..a2937c5c507a 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1367,7 +1367,11 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): (2, 3) """ - return asarray(a).trace(offset, axis1, axis2, dtype, out) + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. 
+ return asarray(a).trace(offset, axis1, axis2, dtype, out) + else: + return asanyarray(a).trace(offset, axis1, axis2, dtype, out) def ravel(a, order='C'): diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 0541016d9ede..c66e49e5fb70 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2083,6 +2083,33 @@ def test_diagonal_memleak(self): a.diagonal() assert_(sys.getrefcount(a) < 50) + def test_trace(self): + a = np.arange(12).reshape((3, 4)) + assert_equal(a.trace(), 15) + assert_equal(a.trace(0), 15) + assert_equal(a.trace(1), 18) + assert_equal(a.trace(-1), 13) + + b = np.arange(8).reshape((2, 2, 2)) + assert_equal(b.trace(), [6, 8]) + assert_equal(b.trace(0), [6, 8]) + assert_equal(b.trace(1), [2, 3]) + assert_equal(b.trace(-1), [4, 5]) + assert_equal(b.trace(0, 0, 1), [6, 8]) + assert_equal(b.trace(0, 0, 2), [5, 9]) + assert_equal(b.trace(0, 1, 2), [3, 11]) + assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3]) + + def test_trace_subclass(self): + # The class would need to overwrite trace to ensure single-element + # output also has the right subclass. + class MyArray(np.ndarray): + pass + + b = np.arange(8).reshape((2, 2, 2)).view(MyArray) + t = b.trace() + assert isinstance(t, MyArray) + def test_put(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 020bf1e62783..adbbb26d1a29 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -3198,6 +3198,7 @@ def test_trace(self): assert_almost_equal(mX.trace(), X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0)) + assert_equal(np.trace(mX), mX.trace()) def test_dot(self): # Tests dot on MaskedArrays. From 777a8241c8a810c5e7a1bb04afbb6be4259ff9d1 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Wed, 6 Jan 2016 12:35:45 -0500 Subject: [PATCH 309/496] BUG recarray slices should preserve subclass. --- numpy/core/records.py | 6 +++--- numpy/core/tests/test_records.py | 9 ++++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/numpy/core/records.py b/numpy/core/records.py index ca6070cf76a4..9f5dcc8110ca 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -425,7 +425,7 @@ def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, def __array_finalize__(self, obj): if self.dtype.type is not record: - # if self.dtype is not np.record, invoke __setattr__ which will + # if self.dtype is not np.record, invoke __setattr__ which will # convert it to a record if it is a void dtype. 
self.dtype = self.dtype @@ -496,13 +496,13 @@ def __setattr__(self, attr, val): return self.setfield(val, *res) def __getitem__(self, indx): - obj = ndarray.__getitem__(self, indx) + obj = super(recarray, self).__getitem__(indx) # copy behavior of getattr, except that here # we might also be returning a single element if isinstance(obj, ndarray): if obj.dtype.fields: - obj = obj.view(recarray) + obj = obj.view(type(self)) if issubclass(obj.dtype.type, nt.void): return obj.view(dtype=(self.dtype.type, obj.dtype)) return obj diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index e0f0a3a8ff51..9fbdf51d60aa 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -122,13 +122,20 @@ def test_recarray_views(self): assert_equal(rv.dtype.type, np.record) #check that getitem also preserves np.recarray and np.record - r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), + r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'i4,i4')])) assert_equal(r['c'].dtype.type, np.record) assert_equal(type(r['c']), np.recarray) assert_equal(r[['a', 'b']].dtype.type, np.record) assert_equal(type(r[['a', 'b']]), np.recarray) + #and that it preserves subclasses (gh-6949) + class C(np.recarray): + pass + + c = r.view(C) + assert_equal(type(c['c']), C) + # check that accessing nested structures keep record type, but # not for subarrays, non-void structures, non-structured voids test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)), From 0bdcd6ef394ec2adbbf5913e67860670f2a03a7c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 6 Jan 2016 12:34:08 -0700 Subject: [PATCH 310/496] DOC: Update 1.10.3 release notes with last minute additions. [skip ci] --- doc/release/1.10.3-notes.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst index ae08394b501e..b9a890d646e1 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/release/1.10.3-notes.rst @@ -10,8 +10,9 @@ using one of the providers of windows binaries. Issues Fixed ============ -* gh-6922 BUG: numpy.recarray.sort segfaults on Windows +* gh-6922 BUG: numpy.recarray.sort segfaults on Windows. * gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll. +* gh-6949 BUG: Type is lost when slicing a subclass of recarray. Merged PRs ========== @@ -26,3 +27,5 @@ the PR number for the original PR against master is listed. * gh-6924 BUG: Fix segfault gh-6922. * gh-6942 Fix datetime roll='modifiedpreceding' bug. * gh-6943 DOC,BUG: Fix some latex generation problems. +* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace(). +* gh-6952 BUG recarray slices should preserve subclass. From fee3ccbe8c2248bb8946d057fab94fa7012df41b Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 3 Jan 2016 19:42:22 -0500 Subject: [PATCH 311/496] ENH: Added the helper function `syrk` that computes `a.T @ a` or `a @ a.T`. --- numpy/core/src/multiarray/cblasfuncs.c | 68 ++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 67f325ba1577..1789b2cafa00 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -111,6 +111,74 @@ gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, } +/* + * Helper: dispatch to appropriate cblas_?syrk for typenum. 
+ */ +static void +syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, + int n, int k, + PyArrayObject *A, int lda, PyArrayObject *R) +{ + const void *Adata = PyArray_DATA(A); + void *Rdata = PyArray_DATA(R); + int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1; + + npy_intp i; + npy_intp j; + + switch (typenum) { + case NPY_DOUBLE: + cblas_dsyrk(order, CblasUpper, trans, n, k, 1., + Adata, lda, 0., Rdata, ldc); + + for (i = 0; i < n; i++) + { + for (j = i + 1; j < n; j++) + { + *((npy_double*)PyArray_GETPTR2(R, j, i)) = *((npy_double*)PyArray_GETPTR2(R, i, j)); + } + } + break; + case NPY_FLOAT: + cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f, + Adata, lda, 0.f, Rdata, ldc); + + for (i = 0; i < n; i++) + { + for (j = i + 1; j < n; j++) + { + *((npy_float*)PyArray_GETPTR2(R, j, i)) = *((npy_float*)PyArray_GETPTR2(R, i, j)); + } + } + break; + case NPY_CDOUBLE: + cblas_zsyrk(order, CblasUpper, trans, n, k, oneD, + Adata, lda, zeroD, Rdata, ldc); + + for (i = 0; i < n; i++) + { + for (j = i + 1; j < n; j++) + { + *((npy_cdouble*)PyArray_GETPTR2(R, j, i)) = *((npy_cdouble*)PyArray_GETPTR2(R, i, j)); + } + } + break; + case NPY_CFLOAT: + cblas_csyrk(order, CblasUpper, trans, n, k, oneF, + Adata, lda, zeroF, Rdata, ldc); + + for (i = 0; i < n; i++) + { + for (j = i + 1; j < n; j++) + { + *((npy_cfloat*)PyArray_GETPTR2(R, j, i)) = *((npy_cfloat*)PyArray_GETPTR2(R, i, j)); + } + } + break; + } +} + + typedef enum {_scalar, _column, _row, _matrix} MatrixShape; From 924e08f8eabe0aafb77d6c3ce435e2a6cf2df2e6 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 3 Jan 2016 19:42:51 -0500 Subject: [PATCH 312/496] ENH: Use the helper function `syrk` to compute `dot` more quickly and accurately in certain special cases. --- numpy/core/src/multiarray/cblasfuncs.c | 29 +++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 1789b2cafa00..516c6e8ae0be 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -715,7 +715,34 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, Trans2 = CblasTrans; ldb = (PyArray_DIM(ap2, 0) > 1 ? PyArray_DIM(ap2, 0) : 1); } - gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret); + + /* + * Use syrk if we have a case of a matrix times its transpose. + * Otherwise, use gemm for all other cases. + */ + if ( + (PyArray_BYTES(ap1) == PyArray_BYTES(ap2)) && + (PyArray_DIM(ap1, 0) == PyArray_DIM(ap2, 1)) && + (PyArray_DIM(ap1, 1) == PyArray_DIM(ap2, 0)) && + (PyArray_STRIDE(ap1, 0) == PyArray_STRIDE(ap2, 1)) && + (PyArray_STRIDE(ap1, 1) == PyArray_STRIDE(ap2, 0)) && + ((Trans1 == CblasTrans) ^ (Trans2 == CblasTrans)) && + ((Trans1 == CblasNoTrans) ^ (Trans2 == CblasNoTrans)) + ) + { + if (Trans1 == CblasNoTrans) + { + syrk(typenum, Order, Trans1, N, M, ap1, lda, ret); + } + else + { + syrk(typenum, Order, Trans1, N, M, ap2, ldb, ret); + } + } + else + { + gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret); + } NPY_END_ALLOW_THREADS; } From 01acef82fdd02c968601b41d27f3ac6461dbc459 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Tue, 5 Jan 2016 22:15:08 -0500 Subject: [PATCH 313/496] DOC: Update the 1.11.0 release notes to mention optimizations of `A.T @ A` and `A @ A.T`. 
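For context, a minimal sketch of the case this optimization targets (illustrative only, not part of the committed patch; the 500x300 shape is made up, and the syrk dispatch happens transparently inside ``np.dot``):

    import numpy as np

    a = np.random.rand(500, 300)

    # Both operands share data and are transposes of one another,
    # so np.dot can dispatch to BLAS syrk instead of gemm.
    gram = np.dot(a.T, a)     # A.T @ A -> symmetric (300, 300) result
    scatter = np.dot(a, a.T)  # A @ A.T -> symmetric (500, 500) result

    # A copy breaks the shared-data condition, so gemm is used,
    # but the results must agree up to rounding.
    assert np.allclose(gram, np.dot(a.T.copy(), a))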
--- doc/release/1.11.0-notes.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index c15936cc3f57..6f79e3c8b9d8 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -130,6 +130,14 @@ useless computations when printing a masked array. The function now uses the fallocate system call to reserve sufficient diskspace on filesystems that support it. +``np.dot`` optimized for operations of the form ``A.T @ A`` and ``A @ A.T`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Previously, ``gemm`` BLAS operations were used for all matrix products. Now, +if the matrix product is between a matrix and its transpose, it will use +``syrk`` BLAS operations for a performance boost. + +**Note:** Requires the transposed and non-transposed matrices to share data. + Changes ======= From 8d8a74d8b2f86d2548a04565744ab122537a4f62 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Wed, 6 Jan 2016 11:22:19 -0500 Subject: [PATCH 314/496] BENCH: Add benchmarks between an array and its transpose that share data, which should be optimized, versus the same configuration without shared data. --- benchmarks/benchmarks/bench_linalg.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index a323609b7594..c230d985a3be 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -8,6 +8,8 @@ class Eindot(Benchmark): def setup(self): self.a = np.arange(60000.0).reshape(150, 400) + self.at = self.a.T + self.atc = self.a.T.copy() self.b = np.arange(240000.0).reshape(400, 600) self.c = np.arange(600) self.d = np.arange(400) @@ -21,6 +23,18 @@ def time_einsum_ij_jk_a_b(self): def time_dot_a_b(self): np.dot(self.a, self.b) + def time_dot_trans_a_at(self): + np.dot(self.a, self.at) + + def time_dot_trans_a_atc(self): + np.dot(self.a, self.atc) + + def time_dot_trans_at_a(self): + np.dot(self.at, self.a) + + def time_dot_trans_atc_a(self): + np.dot(self.atc, self.a) + def time_einsum_i_ij_j(self): np.einsum('i,ij,j', self.d, self.b, self.c) From 1d1eaef896a66059163f0baaaaa162e1f7c225a9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 6 Jan 2016 20:31:02 +0100 Subject: [PATCH 315/496] MAINT: remove outdated Pyrex support from distutils (as far as possible). --- numpy/distutils/command/build_src.py | 47 ++++--------------- numpy/distutils/setup.py | 1 + numpy/distutils/tests/pyrex_ext/__init__.py | 1 - numpy/distutils/tests/pyrex_ext/primes.pyx | 22 --------- numpy/distutils/tests/pyrex_ext/setup.py | 14 ------ .../tests/pyrex_ext/tests/test_primes.py | 13 ----- numpy/distutils/tests/setup.py | 2 - numpy/testing/nosetester.py | 1 - 8 files changed, 9 insertions(+), 92 deletions(-) delete mode 100644 numpy/distutils/tests/pyrex_ext/__init__.py delete mode 100644 numpy/distutils/tests/pyrex_ext/primes.pyx delete mode 100644 numpy/distutils/tests/pyrex_ext/setup.py delete mode 100644 numpy/distutils/tests/pyrex_ext/tests/test_primes.py diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 7463a0e1745f..2efcdea60c2b 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -1,4 +1,4 @@ -""" Build swig, f2py, pyrex sources. +""" Build swig and f2py sources. 
""" from __future__ import division, absolute_import, print_function @@ -13,12 +13,6 @@ from distutils.util import get_platform from distutils.errors import DistutilsError, DistutilsSetupError -def have_pyrex(): - try: - import Pyrex.Compiler.Main - return True - except ImportError: - return False # this import can't be done here, as it uses numpy stuff only available # after it's installed @@ -327,13 +321,9 @@ def build_extension_sources(self, ext): self.ext_target_dir = self.get_package_dir(package) sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) sources, py_files = self.filter_py_files(sources) @@ -450,6 +440,7 @@ def template_sources(self, sources, extension): return new_sources def pyrex_sources(self, sources, extension): + """Pyrex not supported; this remains for Cython support (see below)""" new_sources = [] ext_name = extension.name.split('.')[-1] for source in sources: @@ -464,34 +455,12 @@ def pyrex_sources(self, sources, extension): return new_sources def generate_a_pyrex_source(self, base, ext_name, source, extension): - if self.inplace or not have_pyrex(): - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - target_file = os.path.join(target_dir, ext_name + '.c') - depends = [source] + extension.depends - if self.force or newer_group(depends, target_file, 'newer'): - if have_pyrex(): - import Pyrex.Compiler.Main - log.info("pyrexc:> %s" % (target_file)) - self.mkpath(target_dir) - options = Pyrex.Compiler.Main.CompilationOptions( - defaults=Pyrex.Compiler.Main.default_options, - include_path=extension.include_dirs, - output_file=target_file) - pyrex_result = Pyrex.Compiler.Main.compile(source, - options=options) - if pyrex_result.num_errors != 0: - raise DistutilsError("%d errors while compiling %r with Pyrex" \ - % (pyrex_result.num_errors, source)) - elif os.path.isfile(target_file): - log.warn("Pyrex required for compiling %r but not available,"\ - " using old target %r"\ - % (source, target_file)) - else: - raise DistutilsError("Pyrex required for compiling %r"\ - " but notavailable" % (source,)) - return target_file + """Pyrex is not supported, but some projects monkeypatch this method. + + That allows compiling Cython code, see gh-6955. + This method will remain here for compatibility reasons. 
+ """ + return [] def f2py_sources(self, sources, extension): new_sources = [] diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py index 82a53bd08dbe..eac9c0f18b03 100644 --- a/numpy/distutils/setup.py +++ b/numpy/distutils/setup.py @@ -6,6 +6,7 @@ def configuration(parent_package='',top_path=None): config = Configuration('distutils', parent_package, top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') + config.add_subpackage('tests') config.add_data_dir('tests') config.add_data_files('site.cfg') config.add_data_files('mingw/gfortran_vs2003_hack.c') diff --git a/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy/distutils/tests/pyrex_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8f..000000000000 --- a/numpy/distutils/tests/pyrex_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy/distutils/tests/pyrex_ext/primes.pyx b/numpy/distutils/tests/pyrex_ext/primes.pyx deleted file mode 100644 index 2ada0c5a08d4..000000000000 --- a/numpy/distutils/tests/pyrex_ext/primes.pyx +++ /dev/null @@ -1,22 +0,0 @@ -# -# Calculate prime numbers -# - -def primes(int kmax): - cdef int n, k, i - cdef int p[1000] - result = [] - if kmax > 1000: - kmax = 1000 - k = 0 - n = 2 - while k < kmax: - i = 0 - while i < k and n % p[i] <> 0: - i = i + 1 - if i == k: - p[k] = n - k = k + 1 - result.append(n) - n = n + 1 - return result diff --git a/numpy/distutils/tests/pyrex_ext/setup.py b/numpy/distutils/tests/pyrex_ext/setup.py deleted file mode 100644 index 819dd3154a11..000000000000 --- a/numpy/distutils/tests/pyrex_ext/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('pyrex_ext', parent_package, top_path) - config.add_extension('primes', - ['primes.pyx']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy/distutils/tests/pyrex_ext/tests/test_primes.py deleted file mode 100644 index 1ae436b65caf..000000000000 --- a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite, assert_equal -from pyrex_ext.primes import primes - -class TestPrimes(TestCase): - def test_simple(self, level=1): - l = primes(10) - assert_equal(l, [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py index 135de7c470d5..07bafb0b71c8 100644 --- a/numpy/distutils/tests/setup.py +++ b/numpy/distutils/tests/setup.py @@ -4,9 +4,7 @@ def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('testnumpydistutils', parent_package, top_path) - config.add_subpackage('pyrex_ext') config.add_subpackage('f2py_ext') - #config.add_subpackage('f2py_f90_ext') config.add_subpackage('swig_ext') config.add_subpackage('gen_ext') return config diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 42113676a6d0..23ba8cc3a500 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -171,7 +171,6 @@ class 
NoseTester(object): excludes = ['f2py_ext', 'f2py_f90_ext', 'gen_ext', - 'pyrex_ext', 'swig_ext'] def __init__(self, package=None, raise_warnings="release", depth=0): From 15fc41f5f9ecb5da72d6bd3a4932aee8a9d78f08 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 6 Jan 2016 20:33:18 +0100 Subject: [PATCH 316/496] MAINT: remove unused f2py and SWIG tests from numpy.distutils. --- numpy/distutils/setup.py | 1 - numpy/distutils/tests/f2py_ext/__init__.py | 1 - numpy/distutils/tests/f2py_ext/setup.py | 13 ----- numpy/distutils/tests/f2py_ext/src/fib1.f | 18 ------- numpy/distutils/tests/f2py_ext/src/fib2.pyf | 9 ---- .../tests/f2py_ext/tests/test_fib2.py | 12 ----- .../distutils/tests/f2py_f90_ext/__init__.py | 1 - .../tests/f2py_f90_ext/include/body.f90 | 5 -- numpy/distutils/tests/f2py_f90_ext/setup.py | 18 ------- .../tests/f2py_f90_ext/src/foo_free.f90 | 6 --- .../tests/f2py_f90_ext/tests/test_foo.py | 11 ----- numpy/distutils/tests/gen_ext/__init__.py | 1 - numpy/distutils/tests/gen_ext/setup.py | 48 ------------------- .../tests/gen_ext/tests/test_fib3.py | 11 ----- numpy/distutils/tests/setup.py | 14 ------ numpy/distutils/tests/swig_ext/__init__.py | 1 - numpy/distutils/tests/swig_ext/setup.py | 20 -------- numpy/distutils/tests/swig_ext/src/example.c | 14 ------ numpy/distutils/tests/swig_ext/src/example.i | 14 ------ numpy/distutils/tests/swig_ext/src/zoo.cc | 23 --------- numpy/distutils/tests/swig_ext/src/zoo.h | 9 ---- numpy/distutils/tests/swig_ext/src/zoo.i | 10 ---- .../tests/swig_ext/tests/test_example.py | 17 ------- .../tests/swig_ext/tests/test_example2.py | 15 ------ numpy/testing/nosetester.py | 9 ---- 25 files changed, 301 deletions(-) delete mode 100644 numpy/distutils/tests/f2py_ext/__init__.py delete mode 100644 numpy/distutils/tests/f2py_ext/setup.py delete mode 100644 numpy/distutils/tests/f2py_ext/src/fib1.f delete mode 100644 numpy/distutils/tests/f2py_ext/src/fib2.pyf delete mode 100644 numpy/distutils/tests/f2py_ext/tests/test_fib2.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/__init__.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/include/body.f90 delete mode 100644 numpy/distutils/tests/f2py_f90_ext/setup.py delete mode 100644 numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 delete mode 100644 numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py delete mode 100644 numpy/distutils/tests/gen_ext/__init__.py delete mode 100644 numpy/distutils/tests/gen_ext/setup.py delete mode 100644 numpy/distutils/tests/gen_ext/tests/test_fib3.py delete mode 100644 numpy/distutils/tests/setup.py delete mode 100644 numpy/distutils/tests/swig_ext/__init__.py delete mode 100644 numpy/distutils/tests/swig_ext/setup.py delete mode 100644 numpy/distutils/tests/swig_ext/src/example.c delete mode 100644 numpy/distutils/tests/swig_ext/src/example.i delete mode 100644 numpy/distutils/tests/swig_ext/src/zoo.cc delete mode 100644 numpy/distutils/tests/swig_ext/src/zoo.h delete mode 100644 numpy/distutils/tests/swig_ext/src/zoo.i delete mode 100644 numpy/distutils/tests/swig_ext/tests/test_example.py delete mode 100644 numpy/distutils/tests/swig_ext/tests/test_example2.py diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py index eac9c0f18b03..82a53bd08dbe 100644 --- a/numpy/distutils/setup.py +++ b/numpy/distutils/setup.py @@ -6,7 +6,6 @@ def configuration(parent_package='',top_path=None): config = Configuration('distutils', parent_package, top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') - config.add_subpackage('tests') 
config.add_data_dir('tests') config.add_data_files('site.cfg') config.add_data_files('mingw/gfortran_vs2003_hack.c') diff --git a/numpy/distutils/tests/f2py_ext/__init__.py b/numpy/distutils/tests/f2py_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8f..000000000000 --- a/numpy/distutils/tests/f2py_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py deleted file mode 100644 index bb7d4bc1c8c8..000000000000 --- a/numpy/distutils/tests/f2py_ext/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_ext', parent_package, top_path) - config.add_extension('fib2', ['src/fib2.pyf', 'src/fib1.f']) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/f2py_ext/src/fib1.f b/numpy/distutils/tests/f2py_ext/src/fib1.f deleted file mode 100644 index cfbb1eea0df7..000000000000 --- a/numpy/distutils/tests/f2py_ext/src/fib1.f +++ /dev/null @@ -1,18 +0,0 @@ -C FILE: FIB1.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB1.F diff --git a/numpy/distutils/tests/f2py_ext/src/fib2.pyf b/numpy/distutils/tests/f2py_ext/src/fib2.pyf deleted file mode 100644 index 90a8cf00cb47..000000000000 --- a/numpy/distutils/tests/f2py_ext/src/fib2.pyf +++ /dev/null @@ -1,9 +0,0 @@ -! 
-*- f90 -*- -python module fib2 - interface - subroutine fib(a,n) - real*8 dimension(n),intent(out),depend(n) :: a - integer intent(in) :: n - end subroutine fib - end interface -end python module fib2 diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py deleted file mode 100644 index 0e5bab9255ed..000000000000 --- a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite, assert_array_equal -from f2py_ext import fib2 - -class TestFib2(TestCase): - - def test_fib(self): - assert_array_equal(fib2.fib(6), [0, 1, 1, 2, 3, 5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy/distutils/tests/f2py_f90_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8f..000000000000 --- a/numpy/distutils/tests/f2py_f90_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 b/numpy/distutils/tests/f2py_f90_ext/include/body.f90 deleted file mode 100644 index 90b44e29dc85..000000000000 --- a/numpy/distutils/tests/f2py_f90_ext/include/body.f90 +++ /dev/null @@ -1,5 +0,0 @@ - subroutine bar13(a) - !f2py intent(out) a - integer a - a = 13 - end subroutine bar13 diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py deleted file mode 100644 index 7cca81637c57..000000000000 --- a/numpy/distutils/tests/f2py_f90_ext/setup.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_f90_ext', parent_package, top_path) - config.add_extension('foo', - ['src/foo_free.f90'], - include_dirs=['include'], - f2py_options=['--include_paths', - config.paths('include')[0]] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 b/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 deleted file mode 100644 index c7713be59e16..000000000000 --- a/numpy/distutils/tests/f2py_f90_ext/src/foo_free.f90 +++ /dev/null @@ -1,6 +0,0 @@ -module foo_free -contains - -include "body.f90" - -end module foo_free diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py deleted file mode 100644 index 499b9ebc34bd..000000000000 --- a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite, assert_equal -from f2py_f90_ext import foo - -class TestFoo(TestCase): - def test_foo_free(self): - assert_equal(foo.foo_free.bar13(), 13) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/distutils/tests/gen_ext/__init__.py b/numpy/distutils/tests/gen_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8f..000000000000 --- a/numpy/distutils/tests/gen_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy/distutils/tests/gen_ext/setup.py 
b/numpy/distutils/tests/gen_ext/setup.py deleted file mode 100644 index de6b941e07f0..000000000000 --- a/numpy/distutils/tests/gen_ext/setup.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -fib3_f = ''' -C FILE: FIB3.F - SUBROUTINE FIB(A,N) -C -C CALCULATE FIRST N FIBONACCI NUMBERS -C - INTEGER N - REAL*8 A(N) -Cf2py intent(in) n -Cf2py intent(out) a -Cf2py depend(n) a - DO I=1,N - IF (I.EQ.1) THEN - A(I) = 0.0D0 - ELSEIF (I.EQ.2) THEN - A(I) = 1.0D0 - ELSE - A(I) = A(I-1) + A(I-2) - ENDIF - ENDDO - END -C END FILE FIB3.F -''' - -def source_func(ext, build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir, 'fib3.f') - if newer(__file__, target): - f = open(target, 'w') - f.write(fib3_f) - f.close() - return [target] - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('gen_ext', parent_package, top_path) - config.add_extension('fib3', - [source_func] - ) - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py deleted file mode 100644 index e02ca81034c4..000000000000 --- a/numpy/distutils/tests/gen_ext/tests/test_fib3.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from gen_ext import fib3 -from numpy.testing import TestCase, run_module_suite, assert_array_equal - -class TestFib3(TestCase): - def test_fib(self): - assert_array_equal(fib3.fib(6), [0, 1, 1, 2, 3, 5]) - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py deleted file mode 100644 index 07bafb0b71c8..000000000000 --- a/numpy/distutils/tests/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('testnumpydistutils', parent_package, top_path) - config.add_subpackage('f2py_ext') - config.add_subpackage('swig_ext') - config.add_subpackage('gen_ext') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) diff --git a/numpy/distutils/tests/swig_ext/__init__.py b/numpy/distutils/tests/swig_ext/__init__.py deleted file mode 100644 index 1d0f69b67d8f..000000000000 --- a/numpy/distutils/tests/swig_ext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import division, absolute_import, print_function diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py deleted file mode 100644 index f6e07303bea6..000000000000 --- a/numpy/distutils/tests/swig_ext/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -from __future__ import division, print_function - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('swig_ext', parent_package, top_path) - config.add_extension('_example', - ['src/example.i', 'src/example.c'] - ) - config.add_extension('_example2', - ['src/zoo.i', 'src/zoo.cc'], - depends=['src/zoo.h'], - include_dirs=['src'] - ) - config.add_data_dir('tests') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - 
setup(configuration=configuration) diff --git a/numpy/distutils/tests/swig_ext/src/example.c b/numpy/distutils/tests/swig_ext/src/example.c deleted file mode 100644 index be151725ce7f..000000000000 --- a/numpy/distutils/tests/swig_ext/src/example.c +++ /dev/null @@ -1,14 +0,0 @@ -/* File : example.c */ - -double My_variable = 3.0; - -/* Compute factorial of n */ -int fact(int n) { - if (n <= 1) return 1; - else return n*fact(n-1); -} - -/* Compute n mod m */ -int my_mod(int n, int m) { - return(n % m); -} diff --git a/numpy/distutils/tests/swig_ext/src/example.i b/numpy/distutils/tests/swig_ext/src/example.i deleted file mode 100644 index f4fc11e66370..000000000000 --- a/numpy/distutils/tests/swig_ext/src/example.i +++ /dev/null @@ -1,14 +0,0 @@ -/* -*- c -*- */ - -/* File : example.i */ -%module example -%{ -/* Put headers and other declarations here */ -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); -%} - -extern double My_variable; -extern int fact(int); -extern int my_mod(int n, int m); diff --git a/numpy/distutils/tests/swig_ext/src/zoo.cc b/numpy/distutils/tests/swig_ext/src/zoo.cc deleted file mode 100644 index 0a643d1e5d4f..000000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.cc +++ /dev/null @@ -1,23 +0,0 @@ -#include "zoo.h" -#include -#include - -Zoo::Zoo() -{ - n = 0; -} - -void Zoo::shut_up(char *animal) -{ - if (n < 10) { - strcpy(animals[n], animal); - n++; - } -} - -void Zoo::display() -{ - int i; - for(i = 0; i < n; i++) - printf("%s\n", animals[i]); -} diff --git a/numpy/distutils/tests/swig_ext/src/zoo.h b/numpy/distutils/tests/swig_ext/src/zoo.h deleted file mode 100644 index cb26e6ceff5d..000000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.h +++ /dev/null @@ -1,9 +0,0 @@ - -class Zoo{ - int n; - char animals[10][50]; -public: - Zoo(); - void shut_up(char *animal); - void display(); -}; diff --git a/numpy/distutils/tests/swig_ext/src/zoo.i b/numpy/distutils/tests/swig_ext/src/zoo.i deleted file mode 100644 index a029c03e844b..000000000000 --- a/numpy/distutils/tests/swig_ext/src/zoo.i +++ /dev/null @@ -1,10 +0,0 @@ -// -*- c++ -*- -// Example copied from http://linuxgazette.net/issue49/pramode.html - -%module example2 - -%{ -#include "zoo.h" -%} - -%include "zoo.h" diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py deleted file mode 100644 index 81b82c849d5a..000000000000 --- a/numpy/distutils/tests/swig_ext/tests/test_example.py +++ /dev/null @@ -1,17 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite, assert_equal -from swig_ext import example - -class TestExample(TestCase): - def test_fact(self): - assert_equal(example.fact(10), 3628800) - - def test_cvar(self): - assert_equal(example.cvar.My_variable, 3.0) - example.cvar.My_variable = 5 - assert_equal(example.cvar.My_variable, 5.0) - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy/distutils/tests/swig_ext/tests/test_example2.py deleted file mode 100644 index 381b30d6a9df..000000000000 --- a/numpy/distutils/tests/swig_ext/tests/test_example2.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy.testing import TestCase, run_module_suite -from swig_ext import example2 - -class TestExample2(TestCase): - def test_zoo(self): - z = example2.Zoo() - z.shut_up('Tiger') - z.shut_up('Lion') - 
z.display() - - -if __name__ == "__main__": - run_module_suite() diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index 23ba8cc3a500..e3205837c94d 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -167,12 +167,6 @@ class NoseTester(object): want to initialize `NoseTester` objects on behalf of other code. """ - # Stuff to exclude from tests. These are from numpy.distutils - excludes = ['f2py_ext', - 'f2py_f90_ext', - 'gen_ext', - 'swig_ext'] - def __init__(self, package=None, raise_warnings="release", depth=0): # Back-compat: 'None' used to mean either "release" or "develop" # depending on whether this was a release or develop version of @@ -294,9 +288,6 @@ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, import_nose() # compile argv argv = self._test_argv(label, verbose, extra_argv) - # bypass tests noted for exclude - for ename in self.excludes: - argv += ['--exclude', ename] # our way of doing coverage if coverage: argv += ['--cover-package=%s' % self.package_name, '--with-coverage', From ee1c555d2fe025fd2bb87c87e0673575be90c251 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 6 Jan 2016 14:12:07 -0700 Subject: [PATCH 317/496] DOC: update 1.10.3 release notes. [ci skip] --- doc/release/1.10.3-notes.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst index ae08394b501e..67e646a0bacd 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/release/1.10.3-notes.rst @@ -7,6 +7,14 @@ bugs in the toolchain we use to generate those files. Hopefully that problem will be fixed for the next release. In the meantime, we suggest using one of the providers of windows binaries. +Compatibility notes +=================== + +* The trace function now calls the trace method on subclasses of ndarray, + except for matrix, for which the current behavior is preserved. This is + to help with the units package of AstroPy and hopefully will not cause + problems. + Issues Fixed ============ From fbdc1ae92edbb43e0c25bd3acc5a06723c3f7761 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Wed, 6 Jan 2016 16:50:44 -0500 Subject: [PATCH 318/496] Release note entry for change in behaviour for np.trace. --- doc/release/1.11.0-notes.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index e66e680d34d1..54b6874cc5c5 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -145,6 +145,12 @@ a single array. This change obsoletes distinctions like Instead, ``np.broadcast`` can be used in all cases. +*np.trace* now respects array subclasses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This behaviour mimics that of other functions such as ``np.diagonal`` and +ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give +the same result. + Deprecations ============ From 1c3615f16239808d518c0dc86bfc34541fd1ce4a Mon Sep 17 00:00:00 2001 From: rehassachdeva Date: Thu, 7 Jan 2016 08:16:05 +0530 Subject: [PATCH 319/496] MAINT, STY: Removed unused variable in f2py/f90mod_rules.py --- numpy/f2py/f90mod_rules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index ec3a248397f4..85eae8047928 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -49,7 +49,7 @@ def findf90modules(m): fgetdims1 = """\ external f2pysetdata logical ns - integer r,i,j + integer r,i integer(%d) s(*) ns = .FALSE. 
if (allocated(d)) then From b8f9418f4324b08be78bd80ea8ca45fe91579ecd Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 6 Jan 2016 22:27:26 -0700 Subject: [PATCH 320/496] DOC: Sync 1.10.3 and 1.10.4 release notes, update reference doc. --- doc/release/1.10.3-notes.rst | 37 +--------------------------------- doc/release/1.10.4-notes.rst | 39 ++++++++++++++++++++++++++++++++++++ doc/source/release.rst | 2 ++ 3 files changed, 42 insertions(+), 36 deletions(-) create mode 100644 doc/release/1.10.4-notes.rst diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst index 77b4d0a58b3b..036827274c27 100644 --- a/doc/release/1.10.3-notes.rst +++ b/doc/release/1.10.3-notes.rst @@ -1,39 +1,4 @@ NumPy 1.10.3 Release Notes ************************** -This release is a bugfix source release motivated by a segfault regression. -No windows binaries are provided for this release, as there appear to be -bugs in the toolchain we use to generate those files. Hopefully that -problem will be fixed for the next release. In the meantime, we suggest -using one of the providers of windows binaries. - -Compatibility notes -=================== - -* The trace function now calls the trace method on subclasses of ndarray, - except for matrix, for which the current behavior is preserved. This is - to help with the units package of AstroPy and hopefully will not cause - problems. - -Issues Fixed -============ - -* gh-6922 BUG: numpy.recarray.sort segfaults on Windows. -* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll. -* gh-6949 BUG: Type is lost when slicing a subclass of recarray. - -Merged PRs -========== - -The following PRs have been merged into 1.10.3. When the PR is a backport, -the PR number for the original PR against master is listed. - -* gh-6840 TST: Update travis testing script in 1.10.x -* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py. -* gh-6884 REL: Update pavement.py and setup.py to reflect current version. -* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py. -* gh-6924 BUG: Fix segfault gh-6922. -* gh-6942 Fix datetime roll='modifiedpreceding' bug. -* gh-6943 DOC,BUG: Fix some latex generation problems. -* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace(). -* gh-6952 BUG recarray slices should preserve subclass. +N/A this release did not happen due to various screwups involving PyPi. diff --git a/doc/release/1.10.4-notes.rst b/doc/release/1.10.4-notes.rst new file mode 100644 index 000000000000..03eaf5e6b5d2 --- /dev/null +++ b/doc/release/1.10.4-notes.rst @@ -0,0 +1,39 @@ +NumPy 1.10.4 Release Notes +************************** + +This release is a bugfix source release motivated by a segfault regression. +No windows binaries are provided for this release, as there appear to be +bugs in the toolchain we use to generate those files. Hopefully that +problem will be fixed for the next release. In the meantime, we suggest +using one of the providers of windows binaries. + +Compatibility notes +=================== + +* The trace function now calls the trace method on subclasses of ndarray, + except for matrix, for which the current behavior is preserved. This is + to help with the units package of AstroPy and hopefully will not cause + problems. + +Issues Fixed +============ + +* gh-6922 BUG: numpy.recarray.sort segfaults on Windows. +* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll. +* gh-6949 BUG: Type is lost when slicing a subclass of recarray. 
+ +Merged PRs +========== + +The following PRs have been merged into 1.10.3. When the PR is a backport, +the PR number for the original PR against master is listed. + +* gh-6840 TST: Update travis testing script in 1.10.x +* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py. +* gh-6884 REL: Update pavement.py and setup.py to reflect current version. +* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py. +* gh-6924 BUG: Fix segfault gh-6922. +* gh-6942 Fix datetime roll='modifiedpreceding' bug. +* gh-6943 DOC,BUG: Fix some latex generation problems. +* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace(). +* gh-6952 BUG recarray slices should preserve subclass. diff --git a/doc/source/release.rst b/doc/source/release.rst index 9e908dd98446..6da61763f832 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -3,6 +3,8 @@ Release Notes ************* .. include:: ../release/1.11.0-notes.rst +.. include:: ../release/1.10.4-notes.rst +.. include:: ../release/1.10.3-notes.rst .. include:: ../release/1.10.2-notes.rst .. include:: ../release/1.10.1-notes.rst .. include:: ../release/1.10.0-notes.rst From 04bee1c34e40bc6c82c8df476baeac8c2edd863c Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 7 Jan 2016 08:11:19 +0100 Subject: [PATCH 321/496] DOC: mention removal of Pyrex support in 1.11.0 release notes. [ci skip] --- doc/release/1.11.0-notes.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index c15936cc3f57..e1a52a5d4e7a 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -132,6 +132,10 @@ diskspace on filesystems that support it. Changes ======= +Pyrex support was removed from ``numpy.distutils``. The method +``build_src.generate_a_pyrex_source`` will remain available; it has been +monkeypatched by users to support Cython instead of Pyrex. It's recommended to +switch to a better supported method of build Cython extensions though. Deprecations From 931e2d1e8ba3fd6b129a6d74e3a1ad9984c1938a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 7 Jan 2016 17:53:21 -0700 Subject: [PATCH 322/496] ENH: Add benchmark tests for numpy.random.randint. This adds benchmarks for randint. There is one set of benchmarks for the default dtype, 'l', that can be tracked back through earlier releases, and another set for the new dtypes 'bool', 'uint8', 'uint16', 'uint32', and 'uint64'.
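The benchmarked calls look roughly like the following (a sketch, not part of the patch; the ``dtype`` keyword of ``randint`` is new in 1.11, and the fast/slow naming presumably reflects that an exact power-of-two range can be sampled by bit masking while one extra value forces some draws to be rejected):

    import numpy as np

    # "fast" case: the range is an exact power of two
    np.random.randint(0, 2**30, size=10**5)

    # "slow" case: one past a power of two, which needs rejection
    # of some raw draws to keep the distribution unbiased
    np.random.randint(0, 2**30 + 1, size=10**5)

    # new in 1.11: request a smaller output dtype directly
    np.random.randint(0, 2**7, size=10**5, dtype='uint8')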
--- benchmarks/benchmarks/bench_random.py | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index a3c3566b0109..18444b9a1052 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -3,6 +3,7 @@ from .common import Benchmark import numpy as np +from numpy.lib import NumpyVersion class Random(Benchmark): @@ -27,3 +28,40 @@ def setup(self): def time_100000(self): np.random.shuffle(self.a) + + +class Randint(Benchmark): + + def time_randint_fast(self): + """Compare to uint32 below""" + np.random.randint(0, 2**30, size=10**5) + + def time_randint_slow(self): + """Compare to uint32 below""" + np.random.randint(0, 2**30 + 1, size=10**5) + + +class Randint_dtype(Benchmark): + high = { + 'bool': 1, + 'uint8': 2**7, + 'uint16': 2**15, + 'uint32': 2**31, + 'uint64': 2**63 + } + + param_names = ['dtype'] + params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64'] + + def setup(self, name): + if NumpyVersion(np.__version__) < '1.11.0.dev0': + raise NotImplementedError + + def time_randint_fast(self, name): + high = self.high[name] + np.random.randint(0, high, size=10**5, dtype=name) + + def time_randint_slow(self, name): + high = self.high[name] + np.random.randint(0, high + 1, size=10**5, dtype=name) + From 52ee0f2ac972522e538d9d66471bf473f00e3ee0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 8 Jan 2016 13:09:57 -0700 Subject: [PATCH 323/496] BUG: Add more complex trig functions to glibc < 2.16 blacklist. Added functions are - cacos - cacosf - cacosl - cacosh - cacoshf - cacoshl Closes #6063. --- numpy/core/src/private/npy_config.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index fa20eb4f38f0..eb9c1e19d342 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -93,6 +93,12 @@ #undef HAVE_CATANH #undef HAVE_CATANHF #undef HAVE_CATANHL +#undef HAVE_CACOS +#undef HAVE_CACOSF +#undef HAVE_CACOSL +#undef HAVE_CACOSH +#undef HAVE_CACOSHF +#undef HAVE_CACOSHL #endif #undef TRIG_OK From 3af3753f8f518c8ea542511f8a117508b12e2802 Mon Sep 17 00:00:00 2001 From: Eric Moore Date: Fri, 8 Jan 2016 17:31:07 -0500 Subject: [PATCH 324/496] BUG: npy_acosh fallback too simple. Fixes gh-6712. --- numpy/core/src/npymath/npy_math.c.src | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src index 32fa41788e0b..4dcb01986d7b 100644 --- a/numpy/core/src/npymath/npy_math.c.src +++ b/numpy/core/src/npymath/npy_math.c.src @@ -221,7 +221,20 @@ double npy_hypot(double x, double y) #ifndef HAVE_ACOSH double npy_acosh(double x) { - return 2*npy_log(npy_sqrt((x + 1.0)/2) + npy_sqrt((x - 1.0)/2)); + if (x < 1.0) { + return NPY_NAN; + } + + if (npy_isfinite(x)) { + if (x > 1e8) { + return npy_log(x) + NPY_LOGE2; + } + else { + double u = x - 1.0; + return npy_log1p(u + npy_sqrt(2*u + u*u)); + } + } + return x; } #endif From 5a1d64e85724118714fa76bb67695c884c67ec92 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 8 Jan 2016 18:05:46 -0500 Subject: [PATCH 325/496] BENCH: Perform benchmarking for the computation of `inner` on a matrix with itself. 
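For context, ``np.inner`` contracts over the last axis of both operands, so on the 2-D benchmark array it computes the same product as ``np.dot(a, a.T)`` and can hit the same aliased-operand path (an illustrative sketch only):

    import numpy as np

    a = np.arange(60000.0).reshape(150, 400)

    # inner(a, a)[i, j] == sum over k of a[i, k] * a[j, k],
    # i.e. the same product as dot(a, a.T) for 2-D inputs.
    assert np.allclose(np.inner(a, a), np.dot(a, a.T))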
--- benchmarks/benchmarks/bench_linalg.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index c230d985a3be..3d26b800c646 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -8,6 +8,7 @@ class Eindot(Benchmark): def setup(self): self.a = np.arange(60000.0).reshape(150, 400) + self.ac = self.a.copy() self.at = self.a.T self.atc = self.a.T.copy() self.b = np.arange(240000.0).reshape(400, 600) @@ -35,6 +36,12 @@ def time_dot_trans_at_a(self): def time_dot_trans_atc_a(self): np.dot(self.atc, self.a) + def time_inner_trans_a_a(self): + np.inner(self.a, self.a) + + def time_inner_trans_a_ac(self): + np.inner(self.a, self.ac) + def time_einsum_i_ij_j(self): np.einsum('i,ij,j', self.d, self.b, self.c) From f54ed5d5ebaada0a214076977cc6c11484edd099 Mon Sep 17 00:00:00 2001 From: Konstantinos Psychas Date: Fri, 8 Jan 2016 22:49:00 -0500 Subject: [PATCH 326/496] fix for windows Related Issue http://stackoverflow.com/questions/33886558/numpy-installation-error-mingw32ccompiler-instance-has-no-attribute-compile-o --- numpy/core/setup_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index e0cb3f6305df..57ddf3396c2e 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -192,7 +192,7 @@ def check_long_double_representation(cmd): if sys.platform == "win32" and not mingw32(): try: cmd.compiler.compile_options.remove("/GL") - except ValueError: + except (AttributeError, ValueError): pass # We need to use _compile because we need the object filename From 5a07468dda02a386d1193ab01af8d31dfb5b47d6 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 04:43:26 -0500 Subject: [PATCH 327/496] STY: Place braces on the same lines as control structure requiring them. 
--- numpy/core/src/multiarray/cblasfuncs.c | 36 +++++++++----------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 516c6e8ae0be..90294670679f 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -131,10 +131,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, cblas_dsyrk(order, CblasUpper, trans, n, k, 1., Adata, lda, 0., Rdata, ldc); - for (i = 0; i < n; i++) - { - for (j = i + 1; j < n; j++) - { + for (i = 0; i < n; i++) { + for (j = i + 1; j < n; j++) { *((npy_double*)PyArray_GETPTR2(R, j, i)) = *((npy_double*)PyArray_GETPTR2(R, i, j)); } } @@ -143,10 +141,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f, Adata, lda, 0.f, Rdata, ldc); - for (i = 0; i < n; i++) - { - for (j = i + 1; j < n; j++) - { + for (i = 0; i < n; i++) { + for (j = i + 1; j < n; j++) { *((npy_float*)PyArray_GETPTR2(R, j, i)) = *((npy_float*)PyArray_GETPTR2(R, i, j)); } } @@ -155,10 +151,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, cblas_zsyrk(order, CblasUpper, trans, n, k, oneD, Adata, lda, zeroD, Rdata, ldc); - for (i = 0; i < n; i++) - { - for (j = i + 1; j < n; j++) - { + for (i = 0; i < n; i++) { + for (j = i + 1; j < n; j++) { *((npy_cdouble*)PyArray_GETPTR2(R, j, i)) = *((npy_cdouble*)PyArray_GETPTR2(R, i, j)); } } @@ -167,10 +161,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, cblas_csyrk(order, CblasUpper, trans, n, k, oneF, Adata, lda, zeroF, Rdata, ldc); - for (i = 0; i < n; i++) - { - for (j = i + 1; j < n; j++) - { + for (i = 0; i < n; i++) { + for (j = i + 1; j < n; j++) { *((npy_cfloat*)PyArray_GETPTR2(R, j, i)) = *((npy_cfloat*)PyArray_GETPTR2(R, i, j)); } } @@ -728,19 +720,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, (PyArray_STRIDE(ap1, 1) == PyArray_STRIDE(ap2, 0)) && ((Trans1 == CblasTrans) ^ (Trans2 == CblasTrans)) && ((Trans1 == CblasNoTrans) ^ (Trans2 == CblasNoTrans)) - ) - { - if (Trans1 == CblasNoTrans) - { + ) { + if (Trans1 == CblasNoTrans) { syrk(typenum, Order, Trans1, N, M, ap1, lda, ret); } - else - { + else { syrk(typenum, Order, Trans1, N, M, ap2, ldb, ret); } } - else - { + else { gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret); } NPY_END_ALLOW_THREADS; From a8b10bb18c1810efa3b63891e28250d931525c5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=A1s=20Della=20Penna?= Date: Fri, 8 Jan 2016 16:02:42 +1100 Subject: [PATCH 328/496] DOC: Update defmatrix.argmax docstring. [ci skip] --- numpy/matrixlib/defmatrix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 134f4d20341f..22855772aae1 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -783,7 +783,7 @@ def max(self, axis=None, out=None): def argmax(self, axis=None, out=None): """ - Indices of the maximum values along an axis. + Index of the maximum value along an axis, if multiple returns the first one Parameters ---------- From bacce4d2031b7e360bff8c7577e2b1e413caa217 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 9 Jan 2016 10:14:48 -0700 Subject: [PATCH 329/496] DOC: Clarify the docstrings of matrix.argmin and matrix.argmax. 
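A quick illustration of the "first occurrence" semantics the clarified docstrings describe (illustrative only, not part of the patch):

    import numpy as np

    m = np.matrix([[1, 3, 3],
                   [2, 2, 0]])

    m.argmax()          # 1: index into the flattened matrix; ties go to
                        # the first occurrence (the 3 at flat index 1)
    m.argmax(axis=0)    # matrix([[1, 0, 0]])
    m.argmin(axis=1)    # matrix([[0], [2]])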
[ci skip] --- numpy/matrixlib/defmatrix.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 22855772aae1..1a29fb67b063 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -783,7 +783,11 @@ def max(self, axis=None, out=None): def argmax(self, axis=None, out=None): """ - Index of the maximum value along an axis, if multiple returns the first one + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. Parameters ---------- @@ -853,7 +857,11 @@ def min(self, axis=None, out=None): def argmin(self, axis=None, out=None): """ - Return the indices of the minimum values along an axis. + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. Parameters ---------- From 6f1da5f8f5db99e7ca8397d925c8d2a013a305e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Boulogne?= Date: Fri, 8 Jan 2016 12:04:52 -0500 Subject: [PATCH 330/496] DOC: fix typos in trapz() [ci skip] --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c69185c1cb09..299af66a9ad1 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3648,7 +3648,7 @@ def trapz(y, x=None, dx=1.0, axis=-1): x : array_like, optional If `x` is None, then spacing between all `y` elements is `dx`. dx : scalar, optional - If `x` is None, spacing given by `dx` is assumed. Default is 1. + If `dx` is None, spacing given by `x` is assumed. Default is 1. axis : int, optional Specify the axis. From a8c97b94c7a8d41fa9a887d64b25b5253dd2ef4b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 9 Jan 2016 10:20:55 -0700 Subject: [PATCH 331/496] DOC: Update trapz docstring. [ci skip] --- numpy/lib/function_base.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 299af66a9ad1..9bc128f92db1 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3646,11 +3646,13 @@ def trapz(y, x=None, dx=1.0, axis=-1): y : array_like Input array to integrate. x : array_like, optional - If `x` is None, then spacing between all `y` elements is `dx`. + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. dx : scalar, optional - If `dx` is None, spacing given by `x` is assumed. Default is 1. + The spacing between sample points when `x` is None. The default is 1. axis : int, optional - Specify the axis. + The axis along which to integrate. Returns ------- From e1b77f1323e9e981b907390508883d9bccff3ebf Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 14:16:35 -0500 Subject: [PATCH 332/496] TST: Try using `inner` with some different orderings for matrix and vector products. Add some tests for matrix products. Include a `syrk` vs. `gemm` test case. 
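The new cases boil down to checks like these (a sketch of what the added tests assert, using one small concrete matrix):

    import numpy as np

    A = np.array([[1, 2],
                  [3, 4]])

    # The syrk path (both operands alias the same data) must agree
    # with the gemm path taken for a fresh copy.
    assert np.array_equal(np.inner(A, A), np.inner(A, A.copy()))
    assert np.array_equal(np.inner(A, A), [[5, 11], [11, 25]])

    # inner contracts the last axes, so swapping 2-D operands
    # simply transposes the result.
    assert np.array_equal(np.inner(A, A.T), np.inner(A.T, A).T)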
--- numpy/core/tests/test_multiarray.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index c66e49e5fb70..6fa4b6bbb9e6 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4849,7 +4849,16 @@ def test_inner_product_with_various_contiguities(self): C = np.array([1, 1], dtype=dt) desired = np.array([4, 6], dtype=dt) assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) # check an inner product involving an aliased and reversed view a = np.arange(5).astype(dt) b = a[::-1] From 50e4e3a2c81e6cb624c36e32b2526cec85d37efb Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 14:26:54 -0500 Subject: [PATCH 333/496] TST: Add more scalar tests to ensure `inner` keeps the answer with the right form. --- numpy/core/tests/test_multiarray.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 6fa4b6bbb9e6..7bb267cfb6de 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4825,6 +4825,22 @@ def test_matmul_inplace(): class TestInner(TestCase): + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_inner_scalar_and_matrix(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + def test_inner_scalar_and_matrix_of_objects(self): # Ticket #4482 arr = np.matrix([1, 2], dtype=object) From aaa16ed8ecae49c41507a99c8a6c196d13327bb0 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 14:40:54 -0500 Subject: [PATCH 334/496] TST: Add an `inner` test with two 3D tensors. 
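The expected 4-D array in the new test can be cross-checked against the element-wise definition of ``inner`` (a sketch; ``einsum`` spells out the same contraction explicitly):

    import numpy as np

    a = np.arange(24).reshape(2, 3, 4)
    b = np.arange(24, 48).reshape(2, 3, 4)

    r = np.inner(a, b)
    assert r.shape == (2, 3, 2, 3)  # last axes contracted, rest concatenated

    # inner(a, b)[i, j, l, m] == dot(a[i, j], b[l, m])
    assert np.array_equal(r, np.einsum('ijk,lmk->ijlm', a, b))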
--- numpy/core/tests/test_multiarray.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 7bb267cfb6de..04e09d37fea0 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4881,6 +4881,33 @@ def test_inner_product_with_various_contiguities(self): desired = np.array(10, dtype=dt).item() assert_equal(np.inner(b, a), desired) + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2,3,4).astype(dt) + b = np.arange(24, 48).reshape(2,3,4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]], + dtype=dt + ) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + class TestSummarization(TestCase): def test_1d(self): From 8ffde7f488eb583ed2a200702e85a6518c4f94ec Mon Sep 17 00:00:00 2001 From: Stephan Hoyer Date: Wed, 4 Nov 2015 20:08:57 -0800 Subject: [PATCH 335/496] ENH: moveaxis function Fixes GH2039 This function provides a much more intuitive interface than `np.rollaxis`, which has a confusing behavior with the position of the `start` argument: http://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing It was independently suggested several times over the years after discussions on the mailing list and GitHub (GH2039), but never made it into a pull request: https://mail.scipy.org/pipermail/numpy-discussion/2010-September/052882.html My version adds support for a sequence of axis arguments. I find this behavior to be very useful. It is often more intuitive than supplying a list of arguments to `transpose` and also nicely generalizes NumPy's existing axis manipulation routines, e.g., def transpose(a, order=None): if order is None: order = reversed(range(a.ndim)) return moveaxis(a, order, range(a.ndim)) def swapaxes(a, axis1, axis2): return moveaxis(a, [axis1, axis2], [axis2, axis1]) def rollaxis(a, axis, start=0): if axis < start: start -= 1 return moveaxis(a, axis, start) --- doc/release/1.11.0-notes.rst | 2 + .../reference/routines.array-manipulation.rst | 1 + numpy/core/fromnumeric.py | 2 +- numpy/core/numeric.py | 107 ++++++++++++++++-- numpy/core/tests/test_numeric.py | 77 ++++++++++++- 5 files changed, 178 insertions(+), 11 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index c83ca0861a31..b0ef00208a6a 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -103,6 +103,8 @@ New Features given precision. The byteorder specification is also ignored, the generated arrays are always in native byte order. +* ``np.moveaxis`` allows for moving one or more array axes to a new position + by explicitly providing source and destination axes. Improvements ============ diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index a8aa2d0d80ab..f3ce4889b8bf 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -26,6 +26,7 @@ Transpose-like operations ..
autosummary:: :toctree: generated/ + moveaxis rollaxis swapaxes ndarray.T diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index a2937c5c507a..67d2c5b4893d 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -518,7 +518,7 @@ def transpose(a, axes=None): See Also -------- - rollaxis + moveaxis argsort Notes diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 4f3d418e61d8..a18b380727b3 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1,6 +1,7 @@ from __future__ import division, absolute_import, print_function import sys +import operator import warnings import collections from numpy.core import multiarray @@ -15,8 +16,10 @@ if sys.version_info[0] >= 3: import pickle basestring = str + import builtins else: import cPickle as pickle + import __builtin__ as builtins loads = pickle.loads @@ -31,15 +34,15 @@ 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll', - 'rollaxis', 'cross', 'tensordot', 'array2string', 'get_printoptions', - 'set_printoptions', 'array_repr', 'array_str', 'set_string_function', - 'little_endian', 'require', 'fromiter', 'array_equal', 'array_equiv', - 'indices', 'fromfunction', 'isclose', 'load', 'loads', 'isscalar', - 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', - 'compare_chararrays', 'putmask', 'seterr', 'geterr', 'setbufsize', - 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero', - 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_', - 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string', + 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str', + 'set_string_function', 'little_endian', 'require', 'fromiter', + 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load', + 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', + 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr', + 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', + 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', + 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'TooHardError', @@ -1422,6 +1425,7 @@ def rollaxis(a, axis, start=0): See Also -------- + moveaxis : Move array axes to new positions. roll : Roll the elements of an array by a number of positions along a given axis. @@ -1457,6 +1461,91 @@ def rollaxis(a, axis, start=0): return a.transpose(axes) +def _validate_axis(axis, ndim, argname): + try: + axis = [operator.index(axis)] + except TypeError: + axis = list(axis) + axis = [a + ndim if a < 0 else a for a in axis] + if not builtins.all(0 <= a < ndim for a in axis): + raise ValueError('invalid axis for this array in `%s` argument' % + argname) + if len(set(axis)) != len(axis): + raise ValueError('repeated axis in `%s` argument' % argname) + return axis + + +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + .. versionadded::1.11.0 + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. 
+ destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose: Permute the dimensions of an array. + swapaxes: Interchange two axes of an array. + + Examples + -------- + + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = _validate_axis(source, a.ndim, 'source') + destination = _validate_axis(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + # fix hack in scipy which imports this function def _move_axis_to_0(a, axis): return rollaxis(a, axis, 0) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index d631180809a2..a8ad4c7639b5 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -11,7 +11,8 @@ from numpy.random import rand, randint, randn from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec + assert_raises_regex, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, dec ) @@ -2029,6 +2030,80 @@ def test_results(self): assert_(not res.flags['OWNDATA']) +class TestMoveaxis(TestCase): + def test_move_to_end(self): + x = np.random.randn(5, 6, 7) + for source, expected in [(0, (6, 7, 5)), + (1, (5, 7, 6)), + (2, (5, 6, 7)), + (-1, (5, 6, 7))]: + actual = np.moveaxis(x, source, -1).shape + assert_(actual, expected) + + def test_move_new_position(self): + x = np.random.randn(1, 2, 3, 4) + for source, destination, expected in [ + (0, 1, (2, 1, 3, 4)), + (1, 2, (1, 3, 2, 4)), + (1, -1, (1, 3, 4, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_preserve_order(self): + x = np.zeros((1, 2, 3, 4)) + for source, destination in [ + (0, 0), + (3, -1), + (-1, 3), + ([0, -1], [0, -1]), + ([2, 0], [2, 0]), + (range(4), range(4)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, (1, 2, 3, 4)) + + def test_move_multiples(self): + x = np.zeros((0, 1, 2, 3)) + for source, destination, expected in [ + ([0, 1], [2, 3], (2, 3, 0, 1)), + ([2, 3], [0, 1], (2, 3, 0, 1)), + ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)), + ([3, 0], [1, 0], (0, 3, 1, 2)), + ([0, 3], [0, 1], (0, 3, 1, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_errors(self): + x = np.random.randn(1, 2, 3) + assert_raises_regex(ValueError, 'invalid axis .* `source`', + np.moveaxis, x, 3, 0) + assert_raises_regex(ValueError, 'invalid axis .* `source`', + np.moveaxis, x, -4, 0) + assert_raises_regex(ValueError, 'invalid axis .*
`destination`', + np.moveaxis, x, 0, 5) + assert_raises_regex(ValueError, 'repeated axis in `source`', + np.moveaxis, x, [0, 0], [0, 1]) + assert_raises_regex(ValueError, 'repeated axis in `destination`', + np.moveaxis, x, [0, 1], [1, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, 0, [0, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, [0, 1], [0]) + + def test_array_likes(self): + x = np.ma.zeros((1, 2, 3)) + result = np.moveaxis(x, 0, 0) + assert_(x.shape, result.shape) + assert_(isinstance(result, np.ma.MaskedArray)) + + x = [1, 2, 3] + result = np.moveaxis(x, 0, 0) + assert_(x, list(result)) + assert_(isinstance(result, np.ndarray)) + + class TestCross(TestCase): def test_2x2(self): u = [1, 2] From c776f398fc72ae392e664b0aac48822522595eda Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Fri, 8 Jan 2016 22:31:10 +0100 Subject: [PATCH 336/496] ENH: vectorize isinf, isfinite and signbit isfinite is especially valuable as its needed to verify inputs are suitable for lapack. --- numpy/core/src/umath/loops.c.src | 5 +- numpy/core/src/umath/simd.inc.src | 166 ++++++++++++++++++++++++------ numpy/core/tests/test_numeric.py | 33 ++++++ 3 files changed, 169 insertions(+), 35 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index aff6180c7150..fc9ffec9480c 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1558,14 +1558,11 @@ NPY_NO_EXPORT void /**begin repeat1 * #kind = isnan, isinf, isfinite, signbit# * #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit# - * #isnan = 1, 0*3# **/ NPY_NO_EXPORT void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - char * margs[] = {args[0], args[0], args[1]}; - npy_intp msteps[] = {steps[0], steps[0], steps[1]}; - if (!@isnan@ || !run_binary_simd_not_equal_@TYPE@(margs, dimensions, msteps)) { + if (!run_@kind@_simd_@TYPE@(args, dimensions, steps)) { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; *((npy_bool *)op1) = @func@(in1) != 0; diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 84695f5d609c..21ff9778427d 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -25,6 +25,7 @@ #endif #include #include +#include #include /* for memcpy */ /* Figure out the right abs function for pointer addresses */ @@ -259,6 +260,32 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps /**end repeat1**/ +/**begin repeat1 + * #kind = isnan, isfinite, isinf, signbit# + */ + +#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS + +static void +sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n); + +#endif + +static NPY_INLINE int +run_@kind@_simd_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps) +{ +#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS + if (steps[0] == sizeof(@type@) && steps[1] == 1 && + npy_is_aligned(args[0], sizeof(@type@))) { + sse2_@kind@_@TYPE@((npy_bool*)args[1], (@type@*)args[0], dimensions[0]); + return 1; + } +#endif + return 0; +} + +/**end repeat1**/ + /**end repeat**/ /* @@ -528,11 +555,104 @@ sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4, #endif } +static void +sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) +{ + LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) { + op[i] = npy_signbit(ip1[i]); + } + LOOP_BLOCKED(@type@, 16) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); + int r = 
@vpre@_movemask_@vsuf@(a); + if (sizeof(@type@) == 8) { + op[i] = r & 1; + op[i + 1] = (r >> 1); + } + else { + op[i] = r & 1; + op[i + 1] = (r >> 1) & 1; + op[i + 2] = (r >> 2) & 1; + op[i + 3] = (r >> 3); + } + } + LOOP_BLOCKED_END { + op[i] = npy_signbit(ip1[i]); + } +} + +/**begin repeat1 + * #kind = isnan, isfinite, isinf# + * #var = 0, 1, 2# + */ + +static void +sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) +{ +#if @var@ != 0 /* isinf/isfinite */ + /* signbit mask 0x7FFFFFFF after andnot */ + const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@); + const @vtype@ ones = @vpre@_cmpeq_@vsuf@(@vpre@_setzero_@vsuf@(), + @vpre@_setzero_@vsuf@()); +#if @double@ + const @vtype@ fltmax = @vpre@_set1_@vsuf@(DBL_MAX); +#else + const @vtype@ fltmax = @vpre@_set1_@vsuf@(FLT_MAX); +#endif +#endif + LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) { + op[i] = npy_@kind@(ip1[i]); + } + LOOP_BLOCKED(@type@, 64) { + @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]); + @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]); + @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]); + @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]); + @vtype@ r1, r2, r3, r4; +#if @var@ != 0 /* isinf/isfinite */ + /* fabs via masking of sign bit */ + r1 = @vpre@_andnot_@vsuf@(mask, a); + r2 = @vpre@_andnot_@vsuf@(mask, b); + r3 = @vpre@_andnot_@vsuf@(mask, c); + r4 = @vpre@_andnot_@vsuf@(mask, d); +#if @var@ == 1 /* isfinite */ + /* negative compare against max float, nan is always true */ + r1 = @vpre@_cmpnle_@vsuf@(r1, fltmax); + r2 = @vpre@_cmpnle_@vsuf@(r2, fltmax); + r3 = @vpre@_cmpnle_@vsuf@(r3, fltmax); + r4 = @vpre@_cmpnle_@vsuf@(r4, fltmax); +#else /* isinf */ + r1 = @vpre@_cmpnlt_@vsuf@(fltmax, r1); + r2 = @vpre@_cmpnlt_@vsuf@(fltmax, r2); + r3 = @vpre@_cmpnlt_@vsuf@(fltmax, r3); + r4 = @vpre@_cmpnlt_@vsuf@(fltmax, r4); +#endif + /* flip results to what we want (andnot as there is no sse not) */ + r1 = @vpre@_andnot_@vsuf@(r1, ones); + r2 = @vpre@_andnot_@vsuf@(r2, ones); + r3 = @vpre@_andnot_@vsuf@(r3, ones); + r4 = @vpre@_andnot_@vsuf@(r4, ones); +#endif +#if @var@ == 0 /* isnan */ + r1 = @vpre@_cmpneq_@vsuf@(a, a); + r2 = @vpre@_cmpneq_@vsuf@(b, b); + r3 = @vpre@_cmpneq_@vsuf@(c, c); + r4 = @vpre@_cmpneq_@vsuf@(d, d); +#endif + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); + } + LOOP_BLOCKED_END { + op[i] = npy_@kind@(ip1[i]); + } + /* silence exceptions from comparisons */ + npy_clear_floatstatus(); +} + +/**end repeat1**/ + /**begin repeat1 * #kind = equal, not_equal, less, less_equal, greater, greater_equal# * #OP = ==, !=, <, <=, >, >=# * #VOP = cmpeq, cmpneq, cmplt, cmple, cmpgt, cmpge# - * #neq = 0, 1, 0*4# */ /* sets invalid fpu flag on QNaN for consistency with packed compare */ @@ -554,36 +674,20 @@ sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n) LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) { op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]); } - /* isnan special unary case */ - if (@neq@ && ip1 == ip2) { - LOOP_BLOCKED(@type@, 64) { - @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]); - @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]); - @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]); - @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]); - @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a, a); - @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, b); - @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, c); - @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d, d); - sse2_compress4_to_byte_@TYPE@(r1, r2, 
r3, &r4, &op[i]); - } - } - else { - LOOP_BLOCKED(@type@, 64) { - @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]); - @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]); - @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]); - @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]); - @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]); - @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]); - @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]); - @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]); - @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2); - @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2); - @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2); - @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2); - sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); - } + LOOP_BLOCKED(@type@, 64) { + @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]); + @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]); + @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]); + @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]); + @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]); + @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]); + @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]); + @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]); + @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2); + @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2); + @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2); + @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2); + sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } LOOP_BLOCKED_END { op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]); diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index d631180809a2..5602530dde8a 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -234,6 +234,31 @@ def setUp(self): self.nf[self.ef] = np.nan self.nd[self.ed] = np.nan + self.inff = self.f.copy() + self.infd = self.d.copy() + self.inff[::3][self.ef[::3]] = np.inf + self.infd[::3][self.ed[::3]] = np.inf + self.inff[1::3][self.ef[1::3]] = -np.inf + self.infd[1::3][self.ed[1::3]] = -np.inf + self.inff[2::3][self.ef[2::3]] = np.nan + self.infd[2::3][self.ed[2::3]] = np.nan + self.efnonan = self.ef.copy() + self.efnonan[2::3] = False + self.ednonan = self.ed.copy() + self.ednonan[2::3] = False + + self.signf = self.f.copy() + self.signd = self.d.copy() + self.signf[self.ef] *= -1. + self.signd[self.ed] *= -1. + self.signf[1::6][self.ef[1::6]] = -np.inf + self.signd[1::6][self.ed[1::6]] = -np.inf + self.signf[3::6][self.ef[3::6]] = -np.nan + self.signd[3::6][self.ed[3::6]] = -np.nan + self.signf[4::6][self.ef[4::6]] = -0. + self.signd[4::6][self.ed[4::6]] = -0. 
+ + def test_float(self): # offset for alignment test for i in range(4): @@ -255,6 +280,10 @@ def test_float(self): # isnan on amd64 takes the same codepath assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) + assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) + assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) + assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) def test_double(self): # offset for alignment test @@ -277,6 +306,10 @@ def test_double(self): # isnan on amd64 takes the same codepath assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) + assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) + assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) + assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) class TestSeterr(TestCase): From 599dee9d8c29bdf427d6d381d77583bb65177a13 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 10 Jan 2016 12:38:43 -0500 Subject: [PATCH 337/496] STY: Fix indentation in example from docs. --- doc/newdtype_example/floatint.c | 47 ++++++++++++++++----------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/doc/newdtype_example/floatint.c b/doc/newdtype_example/floatint.c index cf698a7f908f..fa97a882da24 100644 --- a/doc/newdtype_example/floatint.c +++ b/doc/newdtype_example/floatint.c @@ -14,10 +14,10 @@ typedef struct _floatint { } PyFloatIntObject; static PyTypeObject PyFloatInt_Type = { - PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "floatint.floatint", /*tp_name*/ - sizeof(PyFloatIntObject), /*tp_basicsize*/ + PyObject_HEAD_INIT(NULL) + 0, /*ob_size*/ + "floatint.floatint", /*tp_name*/ + sizeof(PyFloatIntObject), /*tp_basicsize*/ }; static PyArray_ArrFuncs _PyFloatInt_Funcs; @@ -45,17 +45,18 @@ static PyArray_Descr _PyFloatInt_Dtype = { static void twoint_copyswap(void *dst, void *src, int swap, void *arr) { - if (src != NULL) - memcpy(dst, src, sizeof(double)); - + if (src != NULL) { + memcpy(dst, src, sizeof(double)); + } + if (swap) { - register char *a, *b, c; - a = (char *)dst; - b = a + 7; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b-- = c; - c = *a; *a++ = *b; *b = c; + register char *a, *b, c; + a = (char *)dst; + b = a + 7; + c = *a; *a++ = *b; *b-- = c; + c = *a; *a++ = *b; *b-- = c; + c = *a; *a++ = *b; *b-- = c; + c = *a; *a++ = *b; *b = c; } } @@ -64,12 +65,11 @@ twoint_getitem(char *ip, PyArrayObject *ap) { npy_int32 a[2]; if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) { - a[0] = *((npy_int32 *)ip); - a[1] = *((npy_int32 *)ip + 1); + a[0] = *((npy_int32 *)ip); + a[1] = *((npy_int32 *)ip + 1); } else { - ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap), - ap); + ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap), ap); } return Py_BuildValue("(ii)", a[0], a[1]); } @@ -79,17 +79,16 @@ twoint_setitem(PyObject *op, char *ov, PyArrayObject *ap) { npy_int32 a[2]; if (!PyTuple_Check(op)) { - PyErr_SetString(PyExc_TypeError, "must be a tuple"); - return -1; + PyErr_SetString(PyExc_TypeError, "must be a tuple"); + return -1; } if (!PyArg_ParseTuple(op, "ii", a, a+1)) return -1; if (ap == NULL || PyArray_ISBEHAVED(ap)) { - memcpy(ov, a, sizeof(double)); + memcpy(ov, a, sizeof(double)); } else { - ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap), - ap); + ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap), ap); } return 0; } @@ -145,7 +144,7 @@ PyMODINIT_FUNC initfloatint(void) { dtype 
= _register_dtype(); Py_XINCREF(dtype); if (dtype != NULL) { - PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype); + PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype); } Py_INCREF(&PyFloatInt_Type); PyDict_SetItemString(d, "floatint", (PyObject *)&PyFloatInt_Type); From 5fd70ba6bb82d0505ac677118e69ce93a623e2cb Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 10 Jan 2016 12:41:59 -0500 Subject: [PATCH 338/496] DOC: Fix typos. --- doc/newdtype_example/floatint.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/newdtype_example/floatint.c b/doc/newdtype_example/floatint.c index fa97a882da24..0cc198388f97 100644 --- a/doc/newdtype_example/floatint.c +++ b/doc/newdtype_example/floatint.c @@ -1,10 +1,10 @@ #include "Python.h" -#include "structmember.h" /* for offsetof macro if needed */ +#include "structmember.h" /* for offset of macro if needed */ #include "numpy/arrayobject.h" -/* Use a Python float as the cannonical type being added +/* Use a Python float as the canonical type being added */ typedef struct _floatint { From b23ec897fcbf1a0279a599c1ba301a2b01c09c88 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 3 Jan 2016 15:22:40 -0800 Subject: [PATCH 339/496] DEP: Deprecate random_integers --- doc/release/1.11.0-notes.rst | 14 +++++++++++--- numpy/random/mtrand/mtrand.pyx | 12 ++++++++++++ numpy/random/tests/test_random.py | 16 ++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b0ef00208a6a..73beab52ee43 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -184,15 +184,23 @@ that will not be backward compatible. Invalid arguments for array ordering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -It is currently possible to pass in arguments for the ```order``` -parameter in methods like ```array.flatten``` or ```array.ravel``` +It is currently possible to pass in arguments for the ``order`` +parameter in methods like ``array.flatten`` or ``array.ravel`` that were not one of the following: 'C', 'F', 'A', 'K' (note that all of these possible values are unicode- and case-insensitive). Such behaviour will not be allowed in future releases. Random number generator in the ``testing`` namespace -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Python standard library random number generator was previously exposed in the ``testing`` namespace as ``testing.rand``. Using this generator is not recommended and it will be removed in a future release. Use generators from ``numpy.random`` namespace instead. + +Random integer generation on a closed interval +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In accordance with the Python C API, which gives preference to the half-open +interval over the closed one, ``np.random.random_integers`` is being +deprecated in favor of calling ``np.random.randint``, which has been +enhanced with the ``dtype`` parameter as described under "New Features". +However, ``np.random.random_integers`` will not be removed anytime soon. diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 3a4e132ec7c0..ff8171d45fa8 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1683,6 +1683,10 @@ cdef class RandomState: type translates to the C long type used by Python 2 for "short" integers and its precision is platform dependent. + This function has been deprecated. Use randint instead. + + .. 
deprecated:: 1.11.0 + Parameters ---------- low : int @@ -1748,9 +1752,17 @@ cdef class RandomState: """ if high is None: + warnings.warn(("This function is deprecated. Please call " + "randint(1, {low} + 1) instead".format(low=low)), + DeprecationWarning) high = low low = 1 + else: + warnings.warn(("This function is deprecated. Please call " + "randint({low}, {high} + 1) instead".format( + low=low, high=high)), DeprecationWarning) + return self.randint(low, high + 1, size=size, dtype='l') diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index a6783fe8f478..37c1876bf314 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -7,6 +7,8 @@ from numpy import random from numpy.compat import asbytes import sys +import warnings + class TestSeed(TestCase): def test_scalar(self): @@ -255,6 +257,20 @@ def test_random_integers_max_int(self): desired = np.iinfo('l').max np.testing.assert_equal(actual, desired) + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + def test_random_sample(self): np.random.seed(self.seed) actual = np.random.random_sample((3, 2)) From dde1112f4476862e1e028d048bde23027ec5441c Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Mon, 11 Jan 2016 20:55:46 +0100 Subject: [PATCH 340/496] BUG: make result of isfinite/isinf/signbit a boolean may return something else than one or zero and npy_bool is unfortunately an int8 not a c99 bool --- numpy/core/src/umath/simd.inc.src | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index 21ff9778427d..5da87ef60143 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -559,7 +559,7 @@ static void sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) { LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) { - op[i] = npy_signbit(ip1[i]); + op[i] = npy_signbit(ip1[i]) != 0; } LOOP_BLOCKED(@type@, 16) { @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]); @@ -576,7 +576,7 @@ sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) } } LOOP_BLOCKED_END { - op[i] = npy_signbit(ip1[i]); + op[i] = npy_signbit(ip1[i]) != 0; } } @@ -600,7 +600,7 @@ sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) #endif #endif LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) { - op[i] = npy_@kind@(ip1[i]); + op[i] = npy_@kind@(ip1[i]) != 0; } LOOP_BLOCKED(@type@, 64) { @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]); @@ -641,7 +641,7 @@ sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n) sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]); } LOOP_BLOCKED_END { - op[i] = npy_@kind@(ip1[i]); + op[i] = npy_@kind@(ip1[i]) != 0; } /* silence exceptions from comparisons */ npy_clear_floatstatus(); From b491cc8916e7ac1bf974fab8c2f9b65b17a90457 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 16:24:44 -0500 Subject: [PATCH 341/496] TST: Ensure `dot` fails correctly if array types cannot be coerced into a common type. 
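A structured dtype has no common type with a plain float, so the product has nowhere to accumulate and `dot` must refuse rather than crash. A rough illustration of the failure mode being tested (as the diff below asserts, the exact exception type depends on the operand order, so both are caught here):

    import numpy as np

    c = 1.
    A = np.array((1, 1), dtype='i,i')   # structured dtype
    try:
        np.dot(c, A)
    except (TypeError, ValueError) as exc:
        print('refused:', exc)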
--- numpy/core/tests/test_multiarray.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 04e09d37fea0..881c9c783891 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2041,6 +2041,13 @@ def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): assert_raises(TypeError, np.dot, b, c) assert_raises(TypeError, c.dot, b) + def test_dot_type_mismatch(self): + c = 1. + A = np.array((1,1), dtype='i,i') + + assert_raises(ValueError, np.dot, c, A) + assert_raises(TypeError, np.dot, A, c) + def test_diagonal(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.diagonal(), [0, 5, 10]) From 67592d34fa0bc09fb20686cc76565e3153c0c958 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 16:26:40 -0500 Subject: [PATCH 342/496] TST: Ensure `inner` fails correctly if array types cannot be coerced into a common type. --- numpy/core/tests/test_multiarray.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 881c9c783891..c9e610cbff12 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4832,6 +4832,13 @@ def test_matmul_inplace(): class TestInner(TestCase): + def test_inner_type_mismatch(self): + c = 1. + A = np.array((1,1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + def test_inner_scalar_and_vector(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': sca = np.array(3, dtype=dt)[()] From bab118da49f051aecf35296bb9d8a00edd5b4198 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sun, 10 Jan 2016 17:11:28 -0500 Subject: [PATCH 343/496] BUG: Clear error before constructing error message using calls to `PyObject_Repr`. Also, do a better job of handling any errors raised while constructing the error message. 
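One Python-level way to reach this C error path is a cast that violates the requested casting rule; the message is assembled from the `repr` of both dtypes, which is why any stale error must be cleared first. A sketch (the exact wording may vary between versions):

    import numpy as np

    a = np.linspace(0, 1, 5)
    try:
        a.astype(np.int32, casting='safe')
    except TypeError as exc:
        # e.g. "Cannot cast array data from dtype('float64') to
        # dtype('int32') according to the rule 'safe'"
        print(exc)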
--- numpy/core/src/multiarray/ctors.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index e23cbe3c9db2..2b8c35234304 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1926,14 +1926,34 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) /* Raise an error if the casting rule isn't followed */ if (!PyArray_CanCastArrayTo(arr, newtype, casting)) { PyObject *errmsg; + PyArray_Descr *arr_descr = NULL; + PyObject *arr_descr_repr = NULL; + PyObject *newtype_repr = NULL; + PyErr_Clear(); errmsg = PyUString_FromString("Cannot cast array data from "); - PyUString_ConcatAndDel(&errmsg, - PyObject_Repr((PyObject *)PyArray_DESCR(arr))); + arr_descr = PyArray_DESCR(arr); + if (arr_descr == NULL) { + Py_DECREF(newtype); + Py_DECREF(errmsg); + return NULL; + } + arr_descr_repr = PyObject_Repr((PyObject *)arr_descr); + if (arr_descr_repr == NULL) { + Py_DECREF(newtype); + Py_DECREF(errmsg); + return NULL; + } + PyUString_ConcatAndDel(&errmsg, arr_descr_repr); PyUString_ConcatAndDel(&errmsg, PyUString_FromString(" to ")); - PyUString_ConcatAndDel(&errmsg, - PyObject_Repr((PyObject *)newtype)); + newtype_repr = PyObject_Repr((PyObject *)newtype); + if (newtype_repr == NULL) { + Py_DECREF(newtype); + Py_DECREF(errmsg); + return NULL; + } + PyUString_ConcatAndDel(&errmsg, newtype_repr); PyUString_ConcatAndDel(&errmsg, PyUString_FromFormat(" according to the rule %s", npy_casting_to_string(casting))); From 88c8a9c22013eb6aa876adfe895b339bc602e6ac Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Thu, 7 Jan 2016 15:53:13 -0500 Subject: [PATCH 344/496] MAINT: Refactor `cblas_innerproduct` to use `cblas_matrixproduct`. 
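The refactor rests on the identity that an inner product over the last axes is simply a matrix product against the transposed second operand, which lets the existing `cblas_matrixproduct` machinery be reused. In Python terms (a sketch of the identity, not of the C code itself):

    import numpy as np

    a = np.arange(12.).reshape(3, 4)
    b = np.arange(20.).reshape(5, 4)
    assert np.allclose(np.inner(a, b), np.dot(a, b.T))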
--- numpy/core/src/multiarray/cblasfuncs.c | 138 ++----------------------- 1 file changed, 9 insertions(+), 129 deletions(-) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 90294670679f..180d96e30fca 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -765,148 +765,28 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_NO_EXPORT PyObject * cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2) { - int j, l, lda, ldb; - int nd; - double prior1, prior2; - PyArrayObject *ret = NULL; - npy_intp dimensions[NPY_MAXDIMS]; - PyTypeObject *subtype; + PyArrayObject* ap2t = NULL; + PyArrayObject* ret = NULL; - /* assure contiguous arrays */ - if (!PyArray_IS_C_CONTIGUOUS(ap1)) { - PyObject *op1 = PyArray_NewCopy(ap1, NPY_CORDER); - Py_DECREF(ap1); - ap1 = (PyArrayObject *)op1; - if (ap1 == NULL) { - goto fail; - } - } - if (!PyArray_IS_C_CONTIGUOUS(ap2)) { - PyObject *op2 = PyArray_NewCopy(ap2, NPY_CORDER); - Py_DECREF(ap2); - ap2 = (PyArrayObject *)op2; - if (ap2 == NULL) { - goto fail; - } - } - - if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { - /* One of ap1 or ap2 is a scalar */ - if (PyArray_NDIM(ap1) == 0) { - /* Make ap2 the scalar */ - PyArrayObject *t = ap1; - ap1 = ap2; - ap2 = t; - } - for (l = 1, j = 0; j < PyArray_NDIM(ap1); j++) { - dimensions[j] = PyArray_DIM(ap1, j); - l *= dimensions[j]; - } - nd = PyArray_NDIM(ap1); + if ((ap1 == NULL) || (ap2 == NULL)) { + goto fail; } - else { - /* - * (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2) - * Both ap1 and ap2 are vectors or matrices - */ - l = PyArray_DIM(ap1, PyArray_NDIM(ap1) - 1); - - if (PyArray_DIM(ap2, PyArray_NDIM(ap2) - 1) != l) { - dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, - ap2, PyArray_NDIM(ap2) - 1); - goto fail; - } - nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; - if (nd == 1) - dimensions[0] = (PyArray_NDIM(ap1) == 2) ? - PyArray_DIM(ap1, 0) : PyArray_DIM(ap2, 0); - else if (nd == 2) { - dimensions[0] = PyArray_DIM(ap1, 0); - dimensions[1] = PyArray_DIM(ap2, 0); - } + ap2t = (PyArrayObject *)PyArray_Transpose(ap2, NULL); + if (ap2t == NULL) { + goto fail; } - /* Choose which subtype to return */ - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - - ret = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? 
ap2 : ap1)); - + ret = (PyArrayObject *)cblas_matrixproduct(typenum, ap1, ap2t, NULL); if (ret == NULL) { goto fail; } - NPY_BEGIN_ALLOW_THREADS; - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - - if (PyArray_NDIM(ap2) == 0) { - /* Multiplication by a scalar -- Level 1 BLAS */ - if (typenum == NPY_DOUBLE) { - cblas_daxpy(l, - *((double *)PyArray_DATA(ap2)), - (double *)PyArray_DATA(ap1), 1, - (double *)PyArray_DATA(ret), 1); - } - else if (typenum == NPY_CDOUBLE) { - cblas_zaxpy(l, - (double *)PyArray_DATA(ap2), - (double *)PyArray_DATA(ap1), 1, - (double *)PyArray_DATA(ret), 1); - } - else if (typenum == NPY_FLOAT) { - cblas_saxpy(l, - *((float *)PyArray_DATA(ap2)), - (float *)PyArray_DATA(ap1), 1, - (float *)PyArray_DATA(ret), 1); - } - else if (typenum == NPY_CFLOAT) { - cblas_caxpy(l, - (float *)PyArray_DATA(ap2), - (float *)PyArray_DATA(ap1), 1, - (float *)PyArray_DATA(ret), 1); - } - } - else if (PyArray_NDIM(ap1) == 1 && PyArray_NDIM(ap2) == 1) { - /* Dot product between two vectors -- Level 1 BLAS */ - blas_dot(typenum, l, - PyArray_DATA(ap1), PyArray_ITEMSIZE(ap1), - PyArray_DATA(ap2), PyArray_ITEMSIZE(ap2), - PyArray_DATA(ret)); - } - else if (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 1) { - /* Matrix-vector multiplication -- Level 2 BLAS */ - lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1); - gemv(typenum, CblasRowMajor, CblasNoTrans, ap1, lda, ap2, 1, ret); - } - else if (PyArray_NDIM(ap1) == 1 && PyArray_NDIM(ap2) == 2) { - /* Vector matrix multiplication -- Level 2 BLAS */ - lda = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1); - gemv(typenum, CblasRowMajor, CblasNoTrans, ap2, lda, ap1, 1, ret); - } - else { - /* - * (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 2) - * Matrix matrix multiplication -- Level 3 BLAS - */ - lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1); - ldb = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1); - gemm(typenum, CblasRowMajor, CblasNoTrans, CblasTrans, - PyArray_DIM(ap1, 0), PyArray_DIM(ap2, 0), PyArray_DIM(ap1, 1), - ap1, lda, ap2, ldb, ret); - } - NPY_END_ALLOW_THREADS; - Py_DECREF(ap1); Py_DECREF(ap2); return PyArray_Return(ret); - fail: - Py_XDECREF(ap1); +fail: Py_XDECREF(ap2); Py_XDECREF(ret); return NULL; From 223513a24d7e28d471d8697016dbb035c959e12a Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 8 Jan 2016 11:24:44 -0500 Subject: [PATCH 345/496] MAINT: Refactor `PyArray_InnerProduct` so that it just performs a transpose and calls `PyArray_MatrixProduct2`. --- numpy/core/src/multiarray/cblasfuncs.c | 46 ------- numpy/core/src/multiarray/cblasfuncs.h | 3 - numpy/core/src/multiarray/multiarraymodule.c | 120 ++++++------------- 3 files changed, 34 insertions(+), 135 deletions(-) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index 180d96e30fca..b11505c0e56a 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -745,49 +745,3 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, Py_XDECREF(ret); return NULL; } - - -/* - * innerproduct(a,b) - * - * Returns the inner product of a and b for arrays of - * floating point types. Like the generic NumPy equivalent the product - * sum is over the last dimension of a and b. - * NB: The first argument is not conjugated. - * - * This is for use by PyArray_InnerProduct. It is assumed on entry that the - * arrays ap1 and ap2 have a common data type given by typenum that is - * float, double, cfloat, or cdouble and have dimension <= 2. 
- * The * __numpy_ufunc__ nonsense is also assumed to - * have been taken care of. - */ - -NPY_NO_EXPORT PyObject * -cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2) -{ - PyArrayObject* ap2t = NULL; - PyArrayObject* ret = NULL; - - if ((ap1 == NULL) || (ap2 == NULL)) { - goto fail; - } - - ap2t = (PyArrayObject *)PyArray_Transpose(ap2, NULL); - if (ap2t == NULL) { - goto fail; - } - - ret = (PyArrayObject *)cblas_matrixproduct(typenum, ap1, ap2t, NULL); - if (ret == NULL) { - goto fail; - } - - - Py_DECREF(ap2); - return PyArray_Return(ret); - -fail: - Py_XDECREF(ap2); - Py_XDECREF(ret); - return NULL; -} diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/multiarray/cblasfuncs.h index d3ec08db608b..66ce4ca5becb 100644 --- a/numpy/core/src/multiarray/cblasfuncs.h +++ b/numpy/core/src/multiarray/cblasfuncs.h @@ -4,7 +4,4 @@ NPY_NO_EXPORT PyObject * cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *); -NPY_NO_EXPORT PyObject * -cblas_innerproduct(int, PyArrayObject *, PyArrayObject *); - #endif diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index b9d79029eb2f..2c17ebe09790 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -813,121 +813,69 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, NPY_NO_EXPORT PyObject * PyArray_InnerProduct(PyObject *op1, PyObject *op2) { - PyArrayObject *ap1, *ap2, *ret = NULL; - PyArrayIterObject *it1, *it2; - npy_intp i, j, l; - int typenum, nd, axis; - npy_intp is1, is2, os; - char *op; - npy_intp dimensions[NPY_MAXDIMS]; - PyArray_DotFunc *dot; - PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; + PyArrayObject *ap1 = NULL; + PyArrayObject *ap2 = NULL; + int typenum; + PyArray_Descr *typec = NULL; + PyObject* ap2t = NULL; + npy_intp dims[NPY_MAXDIMS]; + PyArray_Dims newaxes = {dims, 0}; + int i; + PyObject* ret = NULL; typenum = PyArray_ObjectType(op1, 0); typenum = PyArray_ObjectType(op2, typenum); - typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - return NULL; + goto fail; } + Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - NPY_ARRAY_ALIGNED, NULL); + NPY_ARRAY_ALIGNED, NULL); if (ap1 == NULL) { Py_DECREF(typec); - return NULL; + goto fail; } ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - NPY_ARRAY_ALIGNED, NULL); + NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { - Py_DECREF(ap1); - return NULL; - } - -#if defined(HAVE_CBLAS) - if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && - (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || - NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { - return cblas_innerproduct(typenum, ap1, ap2); - } -#endif - - if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) { - ret = (PyArray_NDIM(ap1) == 0 ? 
ap1 : ap2); - ret = (PyArrayObject *)Py_TYPE(ret)->tp_as_number->nb_multiply( - (PyObject *)ap1, (PyObject *)ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - return (PyObject *)ret; - } - - l = PyArray_DIMS(ap1)[PyArray_NDIM(ap1) - 1]; - if (PyArray_DIMS(ap2)[PyArray_NDIM(ap2) - 1] != l) { - dot_alignment_error(ap1, PyArray_NDIM(ap1) - 1, - ap2, PyArray_NDIM(ap2) - 1); goto fail; } - nd = PyArray_NDIM(ap1) + PyArray_NDIM(ap2) - 2; - j = 0; - for (i = 0; i < PyArray_NDIM(ap1) - 1; i++) { - dimensions[j++] = PyArray_DIMS(ap1)[i]; + newaxes.len = PyArray_NDIM(ap2); + if ((PyArray_NDIM(ap1) >= 1) && (newaxes.len >= 2)) { + for (i = 0; i < newaxes.len - 2; i++) { + dims[i] = (npy_intp)i; + } + dims[newaxes.len - 2] = newaxes.len - 1; + dims[newaxes.len - 1] = newaxes.len - 2; + + ap2t = PyArray_Transpose(ap2, &newaxes); + if (ap2t == NULL) { + goto fail; + } } - for (i = 0; i < PyArray_NDIM(ap2) - 1; i++) { - dimensions[j++] = PyArray_DIMS(ap2)[i]; + else { + ap2t = (PyObject *)ap2; + Py_INCREF(ap2); } - /* - * Need to choose an output array that can hold a sum - * -- use priority to determine which subtype. - */ - ret = new_array_for_sum(ap1, ap2, NULL, nd, dimensions, typenum); + ret = PyArray_MatrixProduct2((PyObject *)ap1, ap2t, NULL); if (ret == NULL) { goto fail; } - /* Ensure that multiarray.inner(,) -> zeros((N,M)) */ - if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) { - memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); - } - dot = (PyArray_DESCR(ret)->f->dotfunc); - if (dot == NULL) { - PyErr_SetString(PyExc_ValueError, - "dot not available for this type"); - goto fail; - } - is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1) - 1]; - is2 = PyArray_STRIDES(ap2)[PyArray_NDIM(ap2) - 1]; - op = PyArray_DATA(ret); - os = PyArray_DESCR(ret)->elsize; - axis = PyArray_NDIM(ap1) - 1; - it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); - axis = PyArray_NDIM(ap2) - 1; - it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis); - NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2)); - while (it1->index < it1->size) { - while (it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - PyArray_ITER_RESET(it2); - } - NPY_END_THREADS_DESCR(PyArray_DESCR(ap2)); - Py_DECREF(it1); - Py_DECREF(it2); - if (PyErr_Occurred()) { - goto fail; - } + + Py_DECREF(ap1); Py_DECREF(ap2); - return (PyObject *)ret; + Py_DECREF(ap2t); + return ret; fail: Py_XDECREF(ap1); Py_XDECREF(ap2); + Py_XDECREF(ap2t); Py_XDECREF(ret); return NULL; } From 6e699948c2b6098ec1a6e135241bc24e4df9a4d1 Mon Sep 17 00:00:00 2001 From: David Freese Date: Thu, 18 Jun 2015 11:53:11 -0700 Subject: [PATCH 346/496] BUG: Fix nanpercentile crash on all-NaN slices Fix a bug where nanpercentile would crash on an all-NaN slice when given multiple percentiles. Also correct the behavior so that, with keepdims enabled, the returned array sizes match those of numpy.percentile.
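The effect of the fix, roughly (shapes follow `numpy.percentile`'s conventions; an all-NaN slice now produces a RuntimeWarning plus NaN output instead of a crash):

    import numpy as np
    import warnings

    nan_mat = np.full((4, 3), np.nan)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        res = np.nanpercentile(nan_mat, [50, 100], axis=1)
    assert res.shape == (2, 4)   # matches np.percentile's output shape
    assert np.isnan(res).all()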
Fix #5760 --- numpy/lib/nanfunctions.py | 11 +++++++++-- numpy/lib/tests/test_nanfunctions.py | 26 +++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 1e3208ac8e85..6b28b4a35874 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -975,7 +975,11 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False, else: result = np.apply_along_axis(_nanpercentile1d, axis, a, q, overwrite_input, interpolation) - + # apply_along_axis fills in collapsed axis with results. + # Move that axis to the beginning to match percentile's + # convention. + if q.ndim != 0: + result = np.swapaxes(result, 0, axis) if out is not None: out[...] = result return result @@ -991,7 +995,10 @@ def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'): s = np.where(c)[0] if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning) - return np.nan + if q.ndim == 0: + return np.nan + else: + return np.nan * np.ones((len(q),)) elif s.size == 0: return np.percentile(arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 7a7b37b98c8d..51cdf2af2957 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -654,7 +654,8 @@ def test_result_values(self): tgt = [np.percentile(d, 28) for d in _rdat] res = np.nanpercentile(_ndat, 28, axis=1) assert_almost_equal(res, tgt) - tgt = [np.percentile(d, (28, 98)) for d in _rdat] + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) res = np.nanpercentile(_ndat, (28, 98), axis=1) assert_almost_equal(res, tgt) @@ -702,6 +703,29 @@ def test_extended_axis_invalid(self): assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4)) assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] = 2*large_mat[:, :, 3:] + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + if __name__ == "__main__": run_module_suite() From df70490874e33e1fad18720f5c74fb5e319c9e06 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Wed, 13 Jan 2016 19:00:11 +0100 Subject: [PATCH 347/496] BUG: skip invalid path distutils warning for empty strings empty strings are the default for the new rpath, extra_compile_args and extra_link_args sections --- numpy/distutils/system_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index dde18dfa5619..d7b9bfbed69a 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -629,7 +629,7 @@ def get_paths(self, section, key): dirs.extend(default_dirs) ret = [] for d in dirs: - if not 
os.path.isdir(d): + if len(d) > 0 and not os.path.isdir(d): warnings.warn('Specified path %s is invalid.' % d) continue From 2b732ba7b51dd137a9ed8f55f83f48b9f8743bf5 Mon Sep 17 00:00:00 2001 From: Abdullah Alrasheed Date: Thu, 14 Jan 2016 17:25:48 +0800 Subject: [PATCH 348/496] Fix number sequence I have found that there are two missing numbers in a sequence in the documentation. http://docs.scipy.org/doc/numpy/user/misc.html#interfacing-to-c It goes 1,2,3,5,7,8 with missing 4 and 6. --- numpy/doc/misc.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py index e30caf0cb555..37ebca572411 100644 --- a/numpy/doc/misc.py +++ b/numpy/doc/misc.py @@ -138,7 +138,7 @@ - Can write code in non-standard form which may become obsolete - Not as flexible as manual wrapping -4) ctypes +3) ctypes - Plusses: @@ -160,7 +160,7 @@ - can't use for writing code to be turned into C extensions, only a wrapper tool. -5) SWIG (automatic wrapper generator) +4) SWIG (automatic wrapper generator) - Plusses: @@ -178,7 +178,7 @@ - doesn't necessarily avoid reference counting issues or needing to know API's -7) scipy.weave +5) scipy.weave - Plusses: @@ -192,7 +192,7 @@ - Future very uncertain: it's the only part of Scipy not ported to Python 3 and is effectively deprecated in favor of Cython. -8) Psyco +6) Psyco - Plusses: From 02bcbd7e99f7b73c2abcb2726f79ea01a6bba2da Mon Sep 17 00:00:00 2001 From: gfyoung Date: Tue, 12 Jan 2016 23:16:57 +0000 Subject: [PATCH 349/496] DOC, MAINT: Enforce np.ndarray arg for np.put and np.place np.put and np.place do something only when the first argument is an instance of np.ndarray. These changes will cause a TypeError to be thrown in either function should that requirement not be satisfied. 
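What changes at the call site, schematically (the error text is the one added in the diff below):

    import numpy as np

    a = np.array([1, 2, 3])
    np.put(a, [0, 2], 5)          # fine: a is an ndarray -> array([5, 2, 5])
    try:
        np.put([1, 2, 3], [0, 2], 5)
    except TypeError as exc:
        print(exc)   # argument 1 must be numpy.ndarray, not list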
--- numpy/core/fromnumeric.py | 8 +++++++- numpy/core/tests/test_fromnumeric.py | 17 +++++++++++++++++ numpy/lib/function_base.py | 6 +++++- numpy/lib/tests/test_function_base.py | 4 ++++ numpy/lib/tests/test_regression.py | 4 ---- 5 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 numpy/core/tests/test_fromnumeric.py diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 67d2c5b4893d..1bb9738fbdd7 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -445,7 +445,13 @@ def put(a, ind, v, mode='raise'): array([ 0, 1, 2, 3, -5]) """ - return a.put(ind, v, mode) + try: + put = a.put + except AttributeError: + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(a).__name__)) + + return put(ind, v, mode) def swapaxes(a, axis1, axis2): diff --git a/numpy/core/tests/test_fromnumeric.py b/numpy/core/tests/test_fromnumeric.py new file mode 100644 index 000000000000..0fba10b6e8b9 --- /dev/null +++ b/numpy/core/tests/test_fromnumeric.py @@ -0,0 +1,17 @@ +from __future__ import division, absolute_import, print_function + +from numpy import put +from numpy.testing import TestCase, assert_raises + + +class TestPut(TestCase): + + def test_bad_array(self): + # We want to raise a TypeError in the + # case that a non-ndarray object is passed + # in since `np.put` modifies in place and + # hence would do nothing to a non-ndarray + v = 5 + indx = [0, 2] + bad_array = [1, 2, 3] + assert_raises(TypeError, put, bad_array, indx, v) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 9bc128f92db1..844c069c0bfc 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1779,7 +1779,7 @@ def place(arr, mask, vals): Parameters ---------- - arr : array_like + arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. 
@@ -1801,6 +1801,10 @@ def place(arr, mask, vals): [44, 55, 44]]) """ + if not isinstance(arr, np.ndarray): + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(arr).__name__)) + return _insert(arr, mask, vals) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a5ac78e33719..88a590517f02 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -674,6 +674,10 @@ def test_basic(self): assert_array_equal(b, [3, 2, 2, 3, 3]) def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + a = np.array([1, 4, 3, 2, 5, 8, 7]) place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 00fa3f195a5d..ee50dcfa4e62 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -85,10 +85,6 @@ def test_poly_eq(self, level=rlevel): assert_(x != y) assert_(x == x) - def test_mem_insert(self, level=rlevel): - # Ticket #572 - np.lib.place(1, 1, 1) - def test_polyfit_build(self): # Ticket #628 ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01, From 53ad26a84ac2aa6f5a37f09aa9feae5afed44f79 Mon Sep 17 00:00:00 2001 From: Stephan Hoyer Date: Tue, 12 Jan 2016 20:44:34 -0800 Subject: [PATCH 350/496] TST, ENH: make all comparisons with NaT false Now, NaT compares like NaN: - NaT != NaT -> True - NaT == NaT (and all other comparisons) -> False We discussed this on the mailing list back in October: https://mail.scipy.org/pipermail/numpy-discussion/2015-October/073968.html --- numpy/core/arrayprint.py | 6 +-- numpy/core/src/multiarray/scalartypes.c.src | 2 +- numpy/core/src/umath/loops.c.src | 26 +++++++++-- numpy/core/tests/test_datetime.py | 36 ++++++++++++--- numpy/ma/tests/test_extras.py | 2 +- numpy/ma/testutils.py | 5 +-- numpy/testing/tests/test_utils.py | 33 +++++++++++++- numpy/testing/utils.py | 49 +++++++++++++++++++-- 8 files changed, 135 insertions(+), 24 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index fefcb649393d..c5b5b5a8f0ea 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -739,8 +739,8 @@ def __call__(self, x): class TimedeltaFormat(object): def __init__(self, data): if data.dtype.kind == 'm': - nat_value = array(['NaT'], dtype=data.dtype)[0] - v = data[not_equal(data, nat_value)].view('i8') + # select non-NaT elements + v = data[data == data].view('i8') if len(v) > 0: # Max str length of non-NaT elements max_str_len = max(len(str(maximum.reduce(v))), @@ -754,7 +754,7 @@ def __init__(self, data): self._nat = "'NaT'".rjust(max_str_len) def __call__(self, x): - if x + 1 == x: + if x != x: return self._nat else: return self.format % x.astype('i8') diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 1bd5b22d2124..7c73822dd2f4 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1673,7 +1673,7 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) * However, as a special case, void-scalar assignment broadcasts * differently from ndarrays when assigning to an object field: Assignment * to an ndarray object field broadcasts, but assignment to a void-scalar - * object-field should not, in order to allow nested ndarrays. 
+ * object-field should not, in order to allow nested ndarrays. * These lines should then behave identically: * * b = np.zeros(1, dtype=[('x', 'O')]) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index fc9ffec9480c..563761bc0679 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1117,8 +1117,8 @@ NPY_NO_EXPORT void } /**begin repeat1 - * #kind = equal, not_equal, greater, greater_equal, less, less_equal# - * #OP = ==, !=, >, >=, <, <=# + * #kind = equal, greater, greater_equal, less, less_equal# + * #OP = ==, >, >=, <, <=# */ NPY_NO_EXPORT void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) @@ -1126,11 +1126,31 @@ NPY_NO_EXPORT void BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((npy_bool *)op1) = in1 @OP@ in2; + if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { + *((npy_bool *)op1) = NPY_FALSE; + } + else { + *((npy_bool *)op1) = in1 @OP@ in2; + } } } /**end repeat1**/ +NPY_NO_EXPORT void +@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { + *((npy_bool *)op1) = NPY_TRUE; + } + else { + *((npy_bool *)op1) = in1 != in2; + } + } +} + /**begin repeat1 * #kind = maximum, minimum# * #OP = >, <# diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 360463d381c4..65b1d460a3c3 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -130,10 +130,11 @@ def test_compare_generic_nat(self): # regression tests for GH6452 assert_equal(np.datetime64('NaT'), np.datetime64('2000') + np.timedelta64('NaT')) - # nb. 
we may want to make NaT != NaT true in the future; this test - # verifies the existing behavior (and that it should not warn) - assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us')) - assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT')) + assert_equal(np.datetime64('NaT'), np.datetime64('NaT', 'us')) + assert_equal(np.timedelta64('NaT'), np.timedelta64('NaT', 'us')) + # neither of these should issue a warning + assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us')) + assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) def test_datetime_scalar_construction(self): # Construct with different units @@ -552,6 +553,9 @@ def test_datetime_array_str(self): "'%s'" % np.datetime_as_string(x, timezone='UTC')}), "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") + a = np.array(['NaT', 'NaT'], dtype='datetime64[ns]') + assert_equal(str(a), "['NaT' 'NaT']") + # Check that one NaT doesn't corrupt subsequent entries a = np.array(['2010', 'NaT', '2030']).astype('M') assert_equal(str(a), "['2010' 'NaT' '2030']") @@ -658,7 +662,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + "Error roundtripping unit %s" % unit) # With time units for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: @@ -674,7 +678,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + "Error roundtripping unit %s" % unit) def test_month_truncation(self): # Make sure that months are truncating correctly @@ -1081,6 +1085,26 @@ def test_datetime_compare(self): assert_equal(np.greater(a, b), [0, 1, 0, 1, 0]) assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0]) + def test_datetime_compare_nat(self): + dt_nat = np.datetime64('NaT', 'D') + dt_other = np.datetime64('2000-01-01') + td_nat = np.timedelta64('NaT', 'h') + td_other = np.timedelta64(1, 'h') + for op in [np.equal, np.less, np.less_equal, + np.greater, np.greater_equal]: + assert_(not op(dt_nat, dt_nat)) + assert_(not op(dt_nat, dt_other)) + assert_(not op(dt_other, dt_nat)) + assert_(not op(td_nat, td_nat)) + assert_(not op(td_nat, td_other)) + assert_(not op(td_other, td_nat)) + assert_(np.not_equal(dt_nat, dt_nat)) + assert_(np.not_equal(dt_nat, dt_other)) + assert_(np.not_equal(dt_other, dt_nat)) + assert_(np.not_equal(td_nat, td_nat)) + assert_(np.not_equal(td_nat, td_other)) + assert_(np.not_equal(td_other, td_nat)) + def test_datetime_minmax(self): # The metadata of the result should become the GCD # of the operand metadata diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 6138d0573967..c2428fa10ccf 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -154,7 +154,7 @@ def test_testAverage1(self): ott = ott.reshape(2, 2) ott[:, 1] = masked assert_equal(average(ott, axis=0), [2.0, 0.0]) - assert_equal(average(ott, axis=1).mask[0], [True]) + assert_equal(average(ott, axis=1).mask[0], True) assert_equal([2., 0.], average(ott, axis=0)) result, wts = average(ott, axis=0, returned=1) assert_equal(wts, [1., 0.]) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 8dc8218784c9..40b9fa1be487 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -125,10 +125,7 @@ def assert_equal(actual, desired, err_msg=''): if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): return _assert_equal_on_sequences(actual, desired, err_msg='') if not (isinstance(actual, ndarray) or 
isinstance(desired, ndarray)): - msg = build_err_msg([actual, desired], err_msg,) - if not desired == actual: - raise AssertionError(msg) - return + return utils.assert_equal(actual, desired) # Case #4. arrays or equivalent if ((actual is masked) and not (desired is masked)) or \ ((desired is masked) and not (actual is masked)): diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 23bd491bc1e8..92a00f71213f 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -11,7 +11,7 @@ assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, clear_and_catch_warnings, run_module_suite, - assert_string_equal, assert_, tempdir, temppath, + assert_string_equal, assert_, tempdir, temppath, ) import unittest @@ -119,6 +119,25 @@ def test_nan_array(self): c = np.array([1, 2, 3]) self._test_not_equal(c, b) + def test_nat_array_datetime(self): + a = np.array([np.datetime64('2000-01'), np.datetime64('NaT')]) + b = np.array([np.datetime64('2000-01'), np.datetime64('NaT')]) + self._test_equal(a, b) + + c = np.array([np.datetime64('NaT'), np.datetime64('NaT')]) + self._test_not_equal(c, b) + + def test_nat_array_timedelta(self): + a = np.array([np.timedelta64(1, 'h'), np.timedelta64('NaT')]) + b = np.array([np.timedelta64(1, 'h'), np.timedelta64('NaT')]) + self._test_equal(a, b) + + c = np.array([np.timedelta64('NaT'), np.timedelta64('NaT')]) + self._test_not_equal(c, b) + + d = np.array([np.datetime64('NaT'), np.datetime64('NaT')]) + self._test_not_equal(c, d) + def test_string_arrays(self): """Test two arrays with different shapes are found not equal.""" a = np.array(['floupi', 'floupa']) @@ -227,6 +246,16 @@ def test_complex(self): self._assert_func(x, x) self._test_not_equal(x, y) + def test_nat(self): + dt = np.datetime64('2000-01-01') + dt_nat = np.datetime64('NaT') + td_nat = np.timedelta64('NaT') + self._assert_func(dt_nat, dt_nat) + self._assert_func(td_nat, td_nat) + self._test_not_equal(dt_nat, td_nat) + self._test_not_equal(dt, td_nat) + self._test_not_equal(dt, dt_nat) + class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): @@ -457,7 +486,7 @@ def f(): class TestAssertAllclose(unittest.TestCase): - + def test_simple(self): x = 1e-3 y = 1e-9 diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index f545cd3c2af1..8e71a3399f96 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -15,7 +15,7 @@ from tempfile import mkdtemp, mkstemp from .nosetester import import_nose -from numpy.core import float32, empty, arange, array_repr, ndarray +from numpy.core import float32, empty, arange, array_repr, ndarray, dtype from numpy.lib.utils import deprecate if sys.version_info[0] >= 3: @@ -343,16 +343,31 @@ def assert_equal(actual,desired,err_msg='',verbose=True): except AssertionError: raise AssertionError(msg) + def isnat(x): + return (hasattr(x, 'dtype') + and getattr(x.dtype, 'kind', '_') in 'mM' + and x != x) + # Inf/nan/negative zero handling try: # isscalar test to check cases such as [np.nan] != np.nan - if isscalar(desired) != isscalar(actual): + # dtypes compare equal to strings, but unlike strings aren't scalars, + # so we need to exclude them from this check + if (isscalar(desired) != isscalar(actual) + and not (isinstance(desired, dtype) + or isinstance(actual, dtype))): raise AssertionError(msg) + # check NaT before NaN, because isfinite errors on datetime dtypes + if isnat(desired) and isnat(actual): + if desired.dtype.kind != 
actual.dtype.kind: + # datetime64 and timedelta64 NaT should not be comparable + raise AssertionError(msg) + return # If one of desired/actual is not finite, handle it specially here: # check that both are nan if any is a nan, and test for equality # otherwise - if not (gisfinite(desired) and gisfinite(actual)): + elif not (gisfinite(desired) and gisfinite(actual)): isdesnan = gisnan(desired) isactnan = gisnan(actual) if isdesnan or isactnan: @@ -663,6 +678,9 @@ def safe_comparison(*args, **kwargs): def isnumber(x): return x.dtype.char in '?bhilqpBHILQPefdgFDG' + def isdatetime(x): + return x.dtype.char in 'mM' + def chk_same_position(x_id, y_id, hasval='nan'): """Handling nan/inf: check that x and y have the nan/inf at the same locations.""" @@ -675,6 +693,15 @@ def chk_same_position(x_id, y_id, hasval='nan'): names=('x', 'y'), precision=precision) raise AssertionError(msg) + def chk_same_dtype(x_dt, y_dt): + try: + assert_equal(x_dt, y_dt) + except AssertionError: + msg = build_err_msg([x, y], err_msg + '\nx and y dtype mismatch', + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + try: cond = (x.shape == () or y.shape == ()) or x.shape == y.shape if not cond: @@ -712,6 +739,20 @@ def chk_same_position(x_id, y_id, hasval='nan'): val = safe_comparison(x[~x_id], y[~y_id]) else: val = safe_comparison(x, y) + elif isdatetime(x) and isdatetime(y): + x_isnat, y_isnat = (x != x), (y != y) + + if any(x_isnat) or any(y_isnat): + # cannot mix timedelta64/datetime64 NaT + chk_same_dtype(x.dtype, y.dtype) + chk_same_position(x_isnat, y_isnat, hasval='nat') + + if all(x_isnat): + return + if any(x_isnat): + val = safe_comparison(x[~x_isnat], y[~y_isnat]) + else: + val = safe_comparison(x, y) else: val = safe_comparison(x, y) @@ -1826,7 +1867,7 @@ def temppath(*args, **kwargs): parameters are the same as for tempfile.mkstemp and are passed directly to that function. The underlying file is removed when the context is exited, so it should be closed at that time. - + Windows does not allow a temporary file to be opened if it is already open, so the underlying file must be closed after opening before it can be opened again. From 01046460ddc1bef7aa5d3a6ce2e10202a3c954c5 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Wed, 30 Dec 2015 17:19:06 -0800 Subject: [PATCH 351/496] BUG: Enforce order param for MaskedArray construction Adds the 'order' parameter to the __new__ override in MaskedArray construction, enabling it to be enforced in methods like np.ma.core.array and np.ma.core.asarray. Closes gh-6646. --- doc/release/1.11.0-notes.rst | 9 ++++++++ numpy/ma/core.py | 41 ++++++++++++++++++++++-------------- numpy/ma/tests/test_core.py | 16 ++++++++++++++ 3 files changed, 50 insertions(+), 16 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 73beab52ee43..b5d22d77051c 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -120,6 +120,15 @@ The function now internally calls the generic ``npy_amergesort`` when the type does not implement a merge-sort kind of ``argsort`` method. +*np.ma.core.MaskedArray* now supports an ``order`` argument +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +When constructing a new ``MaskedArray`` instance, it can be +configured with an ``order`` argument analogous to the one +when calling ``np.ndarray``. 
The addition of this argument +allows for the proper processing of an ``order`` argument +in several MaskedArray-related utility functions such as +``np.ma.core.array`` and ``np.ma.core.asarray``. + Memory and speed improvements for masked arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 25b926e6b2bb..6b1f09f19c08 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2642,9 +2642,9 @@ class MaskedArray(ndarray): Construction:: - x = MaskedArray(data, mask=nomask, dtype=None, - copy=False, subok=True, ndmin=0, fill_value=None, - keep_mask=True, hard_mask=None, shrink=True) + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) Parameters ---------- @@ -2677,6 +2677,14 @@ class MaskedArray(ndarray): cannot be unmasked. Default is False. shrink : bool, optional Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. """ @@ -2688,9 +2696,8 @@ class MaskedArray(ndarray): _print_width = 100 def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, - subok=True, ndmin=0, fill_value=None, - keep_mask=True, hard_mask=None, shrink=True, - **options): + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None, **options): """ Create a new masked array from scratch. @@ -2700,7 +2707,8 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, """ # Process data. - _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin) + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we're not erasing the mask. if isinstance(data, MaskedArray) and (data.shape != _data.shape): @@ -6086,10 +6094,9 @@ def __reduce__(self): masked_array = MaskedArray -def array(data, dtype=None, copy=False, order=False, - mask=nomask, fill_value=None, - keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0, - ): +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): """ Shortcut to MaskedArray. @@ -6097,10 +6104,10 @@ def array(data, dtype=None, copy=False, order=False, compatibility. 
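A minimal sketch of the effect of the new ``order`` argument, assuming the ``np.ma.asarray`` entry point and mirroring the ``asarray`` tests added in this patch:

    >>> import numpy as np
    >>> m = np.eye(3).T                  # Fortran-ordered data
    >>> m.flags.c_contiguous
    False
    >>> np.ma.asarray(m, order='C').flags.c_contiguous
    True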
""" - # we should try to put 'order' somewhere - return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, - keep_mask=keep_mask, hard_mask=hard_mask, - fill_value=fill_value, ndmin=ndmin, shrink=shrink) + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) array.__doc__ = masked_array.__doc__ @@ -7530,7 +7537,9 @@ def asarray(a, dtype=None, order=None): """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) + order = order or 'C' + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, + subok=False, order=order) def asanyarray(a, dtype=None): diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 9e5ad51bd727..b163d3b2642a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -222,6 +222,22 @@ def test_asarray(self): assert_equal(xmm.fill_value, xm.fill_value) assert_equal(xmm._hardmask, xm._hardmask) + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + self.assertFalse(m.flags.c_contiguous) + + new_m = asarray(m) + self.assertTrue(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + self.assertFalse(m.flags.c_contiguous) + + new_m = asarray(m, order='C') + self.assertTrue(new_m.flags.c_contiguous) + def test_fix_invalid(self): # Checks fix_invalid. with np.errstate(invalid='ignore'): From c0f6c3744ff6a54d4e9be5a98aa2d4253f537f49 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Thu, 14 Jan 2016 17:27:40 -0800 Subject: [PATCH 352/496] DOC: Clean up/fix several references to the "future" 1.10 release Fixes gh-7010 --- doc/source/reference/ufuncs.rst | 4 ++-- numpy/add_newdocs.py | 2 +- numpy/core/fromnumeric.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index a975346125d0..f14a2c16802a 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -333,8 +333,8 @@ advanced usage and will not typically be used. with previous versions of NumPy, this defaults to 'unsafe' for numpy < 1.7. In numpy 1.7 a transition to 'same_kind' was begun where ufuncs produce a DeprecationWarning for calls which are allowed under the 'unsafe' - rules, but not under the 'same_kind' rules. In numpy 1.10 the default - will be 'same_kind'. + rules, but not under the 'same_kind' rules. From numpy 1.10 and + onwards, the default is 'same_kind'. *order* diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index e79720c773a8..8940f537d508 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -3466,7 +3466,7 @@ def luf(lamdaexpr, *args, **kwargs): Return specified diagonals. In NumPy 1.9 the returned array is a read-only view instead of a copy as in previous NumPy versions. In - NumPy 1.10 the read-only restriction will be removed. + a future version the read-only restriction will be removed. Refer to :func:`numpy.diagonal` for full documentation. diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 67d2c5b4893d..5d74bbda0aba 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1217,11 +1217,11 @@ def diagonal(a, offset=0, axis1=0, axis2=1): but depending on this fact is deprecated. Writing to the resulting array continues to work as it used to, but a FutureWarning is issued. 
- In NumPy 1.9 it returns a read-only view on the original array. + Starting in NumPy 1.9 it returns a read-only view on the original array. Attempting to write to the resulting array will produce an error. - In NumPy 1.10, it will return a read/write view and writing to the - returned array will alter your original array. The returned array + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array will have the same type as the input array. If you don't write to the array returned by this function, then you can From 544be6da1b572b4bcff0355bfdaa573887a60830 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Fri, 15 Jan 2016 15:49:55 +1100 Subject: [PATCH 353/496] MAINT: ensureisclose returns scalar when called with two scalars --- numpy/core/numeric.py | 6 +++++- numpy/core/tests/test_numeric.py | 14 +++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index a18b380727b3..0b728f8043f2 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2467,7 +2467,11 @@ def within_tol(x, y, atol, rtol): # Make NaN == NaN both_nan = isnan(x) & isnan(y) cond[both_nan] = both_nan[both_nan] - return cond + + if isscalar(a) and isscalar(b): + return bool(cond) + else: + return cond def array_equal(a1, a2): """ diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index a114d5a5a3df..17ea6212c959 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1589,7 +1589,11 @@ def tst_none_isclose(self, x, y): def tst_isclose_allclose(self, x, y): msg = "isclose.all() and allclose aren't same for %s and %s" - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) def test_ip_all_isclose(self): self.setup() @@ -1650,6 +1654,14 @@ def test_no_parameter_modification(self): assert_array_equal(x, np.array([np.inf, 1])) assert_array_equal(y, np.array([0, np.inf])) + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is False) + assert_(np.isclose(0, np.inf) is False) + assert_(type(np.isclose(0, np.inf)) is bool) + + class TestStdVar(TestCase): def setUp(self): self.A = np.array([1, -1, 1, -1]) From d588b48a0e2fd4a78cadc1336571f59ba6be83c6 Mon Sep 17 00:00:00 2001 From: Stephan Hoyer Date: Thu, 14 Jan 2016 22:06:15 -0800 Subject: [PATCH 354/496] TST: Make assert_warns an optional contextmanager --- numpy/testing/tests/test_utils.py | 15 +++++++ numpy/testing/utils.py | 69 +++++++++++++++++++++++-------- 2 files changed, 66 insertions(+), 18 deletions(-) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 92a00f71213f..46c7fde5be16 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -465,6 +465,21 @@ def f(): assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") + def test_context_manager(self): + + before_filters = sys.modules['warnings'].filters[:] + with assert_warns(UserWarning): + warnings.warn("yo") + after_filters = sys.modules['warnings'].filters + + def no_warnings(): + with assert_no_warnings(): + 
warnings.warn("yo") + + assert_raises(AssertionError, no_warnings) + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + def test_warn_wrong_warning(self): def f(): warnings.warn("yo", DeprecationWarning) diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 8e71a3399f96..72105ca31a2f 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -1706,7 +1706,22 @@ def __exit__(self): self._module.showwarning = self._showwarning -def assert_warns(warning_class, func, *args, **kw): +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if not len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("No warning raised" + name_str) + if not l[0].category is warning_class: + name_str = "%s " % name if name is not None else "" + raise AssertionError("First warning %sis not a %s (is %s)" + % (name_str, warning_class, l[0])) + + +def assert_warns(warning_class, *args, **kwargs): """ Fail unless the given callable throws the specified warning. @@ -1715,6 +1730,12 @@ def assert_warns(warning_class, func, *args, **kw): If a different type of warning is thrown, it will not be caught, and the test case will be deemed to have suffered an error. + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + .. versionadded:: 1.4.0 Parameters @@ -1733,22 +1754,35 @@ def assert_warns(warning_class, func, *args, **kw): The value returned by `func`. """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): __tracebackhide__ = True # Hide traceback for py.test with warnings.catch_warnings(record=True) as l: warnings.simplefilter('always') - result = func(*args, **kw) - if not len(l) > 0: - raise AssertionError("No warning raised when calling %s" - % func.__name__) - if not l[0].category is warning_class: - raise AssertionError("First warning for %s is not a " - "%s( is %s)" % (func.__name__, warning_class, l[0])) - return result + yield + if len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("Got warnings%s: %s" % (name_str, l)) + -def assert_no_warnings(func, *args, **kw): +def assert_no_warnings(*args, **kwargs): """ Fail if the given callable produces any warnings. + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + .. versionadded:: 1.7.0 Parameters @@ -1765,14 +1799,13 @@ def assert_no_warnings(func, *args, **kw): The value returned by `func`. 
""" - __tracebackhide__ = True # Hide traceback for py.test - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - result = func(*args, **kw) - if len(l) > 0: - raise AssertionError("Got warnings when calling %s: %s" - % (func.__name__, l)) - return result + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) def _gen_alignment_data(dtype=float32, type='binary', max_size=24): From 34c236939116c5ad75b79b1ccfbecb9c9157a165 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 14 Jan 2016 20:18:03 -0700 Subject: [PATCH 355/496] BUG: Make divmod behave better under roundoff error. This is apropos #6127. The fix is to make the functions floor_division and remainder consistent, i.e., b * floor_division(a, b) + remainder(a, b) == a Previous to this fix remainder was computed a the C level using the '%' operator, and the result was not always consistent with the floor function. The current approach is to compute the remainder using b * (a/b - floor(a/b)) which is both consistent with the Python '%' operator and numerically consistent with floor_division implemented using the floor function. Closes #6127. --- numpy/core/src/umath/loops.c.src | 9 ++------- numpy/core/src/umath/scalarmath.c.src | 25 +++++++++---------------- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 563761bc0679..e74ac9d40e7f 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1688,13 +1688,8 @@ NPY_NO_EXPORT void BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - const @type@ res = npy_fmod@c@(in1,in2); - if (res && ((in2 < 0) != (res < 0))) { - *((@type@ *)op1) = res + in2; - } - else { - *((@type@ *)op1) = res; - } + const @type@ div = in1/in2; + *((@type@ *)op1) = in2*(div - npy_floor@c@(div)); } } diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index c371a079f654..c35b96d5ce10 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -272,10 +272,10 @@ static void static @type@ (*_basic_@name@_floor)(@type@); static @type@ (*_basic_@name@_sqrt)(@type@); static @type@ (*_basic_@name@_fmod)(@type@, @type@); -#define @name@_ctype_add(a, b, outp) *(outp) = a + b -#define @name@_ctype_subtract(a, b, outp) *(outp) = a - b -#define @name@_ctype_multiply(a, b, outp) *(outp) = a * b -#define @name@_ctype_divide(a, b, outp) *(outp) = a / b +#define @name@_ctype_add(a, b, outp) *(outp) = (a) + (b) +#define @name@_ctype_subtract(a, b, outp) *(outp) = (a) - (b) +#define @name@_ctype_multiply(a, b, outp) *(outp) = (a) * (b) +#define @name@_ctype_divide(a, b, outp) *(outp) = (a) / (b) #define @name@_ctype_true_divide @name@_ctype_divide #define @name@_ctype_floor_divide(a, b, outp) \ *(outp) = _basic_@name@_floor((a) / (b)) @@ -343,23 +343,16 @@ static npy_half (*_basic_half_fmod)(npy_half, npy_half); */ static void @name@_ctype_remainder(@type@ a, @type@ b, @type@ *out) { - @type@ mod; - mod = _basic_@name@_fmod(a, b); - if (mod && (((b < 0) != (mod < 0)))) { - mod += b; - } - *out = mod; + @type@ tmp = a/b; + *out = b * (tmp - _basic_@name@_floor(tmp)); } /**end repeat**/ static void half_ctype_remainder(npy_half a, npy_half b, npy_half *out) { - float mod, fa = npy_half_to_float(a), fb = npy_half_to_float(b); - mod = _basic_float_fmod(fa, 
fb); - if (mod && (((fb < 0) != (mod < 0)))) { - mod += fb; - } - *out = npy_float_to_half(mod); + float tmp, fa = npy_half_to_float(a), fb = npy_half_to_float(b); + float_ctype_remainder(fa, fb, &tmp); + *out = npy_float_to_half(tmp); } From 5fc07a2f5357a638a979abfae9f208784a00a5d7 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 9 Jan 2016 16:26:40 -0500 Subject: [PATCH 356/496] MAINT: Ensure `inner` is raising a ValueError just as `dot` does in the same case. --- numpy/core/src/multiarray/multiarraymodule.c | 3 ++- numpy/core/tests/test_multiarray.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 2c17ebe09790..1df3d653da49 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -827,6 +827,7 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) typenum = PyArray_ObjectType(op2, typenum); typec = PyArray_DescrFromType(typenum); if (typec == NULL) { + PyErr_SetString(PyExc_TypeError, "Cannot find a common data type."); goto fail; } @@ -912,7 +913,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) typenum = PyArray_ObjectType(op2, typenum); typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - PyErr_SetString(PyExc_ValueError, "Cannot find a common data type."); + PyErr_SetString(PyExc_TypeError, "Cannot find a common data type."); return NULL; } diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index c9e610cbff12..26617c1fc7bd 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2045,7 +2045,7 @@ def test_dot_type_mismatch(self): c = 1. A = np.array((1,1), dtype='i,i') - assert_raises(ValueError, np.dot, c, A) + assert_raises(TypeError, np.dot, c, A) assert_raises(TypeError, np.dot, A, c) def test_diagonal(self): From fb41f0047c5dbd33b344f51dc3faca4acba45293 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 15 Jan 2016 15:38:32 -0500 Subject: [PATCH 357/496] DOC: Fix markdown style inline code to restructured text style inline code. --- doc/release/1.11.0-notes.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b5d22d77051c..3965b52dcd06 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -63,7 +63,7 @@ mention it here for completeness. New Features ============ -* `np.histogram` now provides plugin estimators for automatically +* ``np.histogram`` now provides plugin estimators for automatically estimating the optimal number of bins. Passing one of ['auto', 'fd', 'scott', 'rice', 'sturges'] as the argument to 'bins' results in the corresponding estimator being used. @@ -97,8 +97,8 @@ New Features - np.int_ (long), np.intp The specification is by precision rather than by C type. Hence, on some - platforms np.int64 may be a `long` instead of `long long` even if the - specified dtype is `long long` because the two may have the same + platforms np.int64 may be a ``long`` instead of ``long long`` even if the + specified dtype is ``long long`` because the two may have the same precision. The resulting type depends on which C type numpy uses for the given precision. The byteorder specification is also ignored, the generated arrays are always in native byte order. @@ -187,7 +187,7 @@ more such dual contiguous arrays and breaks some existing code as a result. 
Note that this also affects changing the dtype by assigning to the dtype attribute of an array. The aim of this deprecation is to restrict views to c_contiguous arrays at some future time. A work around that is backward -compatible is to use `a.T.view(...).T` instead. A parameter will also be +compatible is to use ``a.T.view(...).T`` instead. A parameter will also be added to the view method to explicitly ask for Fortran order views, but that will not be backward compatible. From ef09a84e10fa3439ea17f8531431ae874df3afa1 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 15 Jan 2016 15:00:07 -0500 Subject: [PATCH 358/496] DOC: Explain the new exception behavior of `np.dot` when its types cannot be cast to a common type. --- doc/release/1.11.0-notes.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b5d22d77051c..cd4f835577cc 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -59,6 +59,13 @@ to preserve struct layout). These were never used for anything, so it's unlikely that any third-party code is using them either, but we mention it here for completeness. +*np.dot* now raises ``TypeError`` instead of ``ValueError`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This behaviour mimics that of other functions such as ``np.inner``. If the two +arguments cannot be cast to a common type, it could have raised a ``TypeError`` +or ``ValueError`` depending on their order. Now, ``np.dot`` will always +raise a ``TypeError``. + New Features ============ @@ -174,6 +181,13 @@ This behaviour mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. +*np.dot* now raises ``TypeError`` instead of ``ValueError`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This behaviour mimics that of other functions such as ``np.inner``. If the two +arguments cannot be cast to a common type, it could have raised a ``TypeError`` +or ``ValueError`` depending on their order. Now, ``np.dot`` will always +raise a ``TypeError``. + Deprecations ============ From 2f7e491aeeb77fb9d40c9108e57922b876077c3c Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 15 Jan 2016 16:11:10 -0500 Subject: [PATCH 359/496] DEP: Add warnings to `__getitem__` and `__setitem__` to point out the behavior of `MaskedArray`'s masks is changing. --- numpy/ma/core.py | 17 +++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 6b1f09f19c08..690655b361b8 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3105,6 +3105,14 @@ def __getitem__(self, indx): Return the item described by i, as a masked array. """ + # 2016.01.15 -- v1.11.0 + warnings.warn( + "Currently, slicing will try to return a view of the data," + + " but will return a copy of the mask. In the future, it will try" + + " to return both as views.", + FutureWarning + ) + dout = self.data[indx] # We could directly use ndarray.__getitem__ on self. # But then we would have to modify __array_finalize__ to prevent the @@ -3175,6 +3183,15 @@ def __setitem__(self, indx, value): locations. """ + # 2016.01.15 -- v1.11.0 + warnings.warn( + "Currently, slicing will try to return a view of the data," + + " but will return a copy of the mask. In the future, it will try" + + " to return both as views. 
This means that using `__setitem__`" + + " will propagate values back through all masks that are present.", + FutureWarning + ) + if self is masked: raise MaskError('Cannot alter the masked element.') _data = self._data From 852eabaa98962d4eb203e1eb2e4d6468cb20ecd0 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 15 Jan 2016 16:12:20 -0500 Subject: [PATCH 360/496] TEST: Ignore `FutureWarning` if raised from running masked array operations. --- numpy/lib/tests/test_nanfunctions.py | 1 + numpy/ma/tests/test_core.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 7a7b37b98c8d..dafc194eb80a 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -539,6 +539,7 @@ def test_allnans(self): for axis in [None, 0, 1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') + warnings.simplefilter('ignore', FutureWarning) assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) if axis is None: assert_(len(w) == 1) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index b163d3b2642a..d68e63358e57 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2223,6 +2223,7 @@ def test_inplace_addition_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) xm[2] = masked x += t(1) @@ -2237,6 +2238,7 @@ def test_inplace_addition_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2267,6 +2269,7 @@ def test_inplace_subtraction_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2297,6 +2300,7 @@ def test_inplace_multiplication_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2314,6 +2318,7 @@ def test_inplace_floor_division_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) @@ -2330,6 +2335,7 @@ def test_inplace_floor_division_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2350,6 +2356,7 @@ def test_inplace_division_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) @@ -2385,6 +2392,7 @@ def 
test_inplace_division_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") + warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) From 4989360f6bb57e45e1a6f624144117bfd3511313 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 15 Jan 2016 16:13:01 -0500 Subject: [PATCH 361/496] DOC: Explain that `MaskedArray`s will try to consistently return view of their masks when they are also returning views of their data. --- doc/release/1.11.0-notes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b5d22d77051c..7f78e387a604 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -20,6 +20,7 @@ Future Changes * Relaxed stride checking will become the default in 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped in 1.12.0. +* ``MaskedArray``s take views of data **and** masks when slicing in 1.12.0. Compatibility notes From 426114879da49bf9a586b1991dcaf38ce594c4b6 Mon Sep 17 00:00:00 2001 From: Stephan Hoyer Date: Thu, 14 Jan 2016 23:47:37 -0800 Subject: [PATCH 362/496] API: Make datetime64 timezone naive Fixes GH3290 With apologies to mwiebe, this rips out most of the time zone parsing from the datetime64 type. I think we mostly sorted out the API design in discussions last year, but I'll be posting this to the mailing list shortly to get feedback. Old behavior: # string parsing and printing defaults to your local timezone :( >>> np.datetime64('2000-01-01T00') numpy.datetime64('2000-01-01T00:00-0800','h') New behavior: # datetime64 is parsed and printed as timezone naive >>> np.datetime64('2000-01-01T00') numpy.datetime64('2000-01-01T00','h') # you can still supply a timezone, but you get a deprecation warning >>> np.datetime64('2000-01-01T00Z') DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future numpy.datetime64('2000-01-01T00','h') --- doc/release/1.11.0-notes.rst | 37 ++ doc/source/reference/arrays.datetime.rst | 62 ++-- numpy/core/arrayprint.py | 14 +- numpy/core/src/multiarray/datetime.c | 32 +- numpy/core/src/multiarray/datetime_strings.c | 228 ++---------- numpy/core/src/multiarray/datetime_strings.h | 7 +- numpy/core/src/multiarray/dtype_transfer.c | 6 +- numpy/core/src/multiarray/scalartypes.c.src | 22 +- numpy/core/tests/test_datetime.py | 369 ++++++++++--------- numpy/core/tests/test_deprecations.py | 29 +- 10 files changed, 349 insertions(+), 457 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index b5d22d77051c..ac3c1578c18f 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -7,6 +7,8 @@ This release supports Python 2.6 - 2.7 and 3.2 - 3.5. Highlights ========== +* The datetime64 type is now timezone naive. See "datetime64 changes" below + for more details. Dropped Support =============== @@ -25,6 +27,41 @@ Future Changes Compatibility notes =================== +datetime64 changes +~~~~~~~~~~~~~~~~~~ + +In prior versions of NumPy the experimental datetime64 type always stored +times in UTC. 
By default, creating a datetime64 object from a string or +printing it would convert from or to local time:: + + # old behavior + >>> np.datetime64('2000-01-01T00:00:00') + numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00 + +A consensus of datetime64 users agreed that this behavior is undesirable +and at odds with how datetime64 is usually used (e.g., by pandas_). For +most use cases, a timezone naive datetime type is preferred, similar to the +``datetime.datetime`` type in the Python standard library. Accordingly, +datetime64 no longer assumes that input is in local time, nor does it print +local times:: + + >>> np.datetime64('2000-01-01T00:00:00') + numpy.datetime64('2000-01-01T00:00:00') + +For backwards compatibility, datetime64 still parses timezone offsets, which +it handles by converting to UTC. However, the resulting datetime is timezone +naive:: + + >>> np.datetime64('2000-01-01T00:00:00-08') + DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future + numpy.datetime64('2000-01-01T08:00:00') + +As a corollary to this change, we no longer prohibit casting between datetimes +with date units and datetimes with time units. With timezone naive datetimes, +the rule for casting from dates to times is no longer ambiguous. + +.. _pandas: http://pandas.pydata.org + DeprecationWarning to error ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 0e8050b01ef0..f5b454875d19 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -45,16 +45,10 @@ some additional SI-prefix seconds-based units. >>> np.datetime64('2005-02', 'D') numpy.datetime64('2005-02-01') - Using UTC "Zulu" time: - - >>> np.datetime64('2005-02-25T03:30Z') - numpy.datetime64('2005-02-24T21:30-0600') - - ISO 8601 specifies to use the local time zone - if none is explicitly given: + From a date and time: >>> np.datetime64('2005-02-25T03:30') - numpy.datetime64('2005-02-25T03:30-0600') + numpy.datetime64('2005-02-25T03:30') When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the @@ -100,23 +94,6 @@ because the moment of time is still being represented exactly. >>> np.datetime64('2010-03-14T15Z') == np.datetime64('2010-03-14T15:00:00.00Z') True -An important exception to this rule is between datetimes with -:ref:`date units ` and datetimes with -:ref:`time units `. This is because this kind -of conversion generally requires a choice of timezone and -particular time of day on the given date. - -.. admonition:: Example - - >>> np.datetime64('2003-12-25', 's') - Traceback (most recent call last): - File "", line 1, in - TypeError: Cannot parse "2003-12-25" as unit 's' using casting rule 'same_kind' - - >>> np.datetime64('2003-12-25') == np.datetime64('2003-12-25T00Z') - False - - Datetime and Timedelta Arithmetic ================================= @@ -353,6 +330,41 @@ Some examples:: # any amount of whitespace is allowed; abbreviations are case-sensitive. weekmask = "Mon Tue Wed Thu\tFri" +Changes with NumPy 1.11 +======================= + +In prior versions of NumPy, the datetime64 type always stored +times in UTC.
By default, creating a datetime64 object from a string or +printing it would convert from or to local time:: + + # old behavior + >>> np.datetime64('2000-01-01T00:00:00') + numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00 + +A consensus of datetime64 users agreed that this behavior is undesirable +and at odds with how datetime64 is usually used (e.g., by pandas_). For +most use cases, a timezone naive datetime type is preferred, similar to the +``datetime.datetime`` type in the Python standard library. Accordingly, +datetime64 no longer assumes that input is in local time, nor does it print +local times:: + + >>> np.datetime64('2000-01-01T00:00:00') + numpy.datetime64('2000-01-01T00:00:00') + +For backwards compatibility, datetime64 still parses timezone offsets, which +it handles by converting to UTC. However, the resulting datetime is timezone +naive:: + + >>> np.datetime64('2000-01-01T00:00:00-08') + DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future + numpy.datetime64('2000-01-01T08:00:00') + +As a corollary to this change, we no longer prohibit casting between datetimes +with date units and datetimes with time units. With timezone naive datetimes, +the rule for casting from dates to times is no longer ambiguous. + +.. _pandas: http://pandas.pydata.org + Differences Between 1.6 and 1.7 Datetimes ========================================= diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index c5b5b5a8f0ea..74a9d3da346f 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -708,9 +708,9 @@ def __call__(self, x): i = i + 'j' return r + i + class DatetimeFormat(object): - def __init__(self, x, unit=None, - timezone=None, casting='same_kind'): + def __init__(self, x, unit=None, timezone=None, casting='same_kind'): # Get the unit from the dtype if unit is None: if x.dtype.kind == 'M': @@ -718,15 +718,9 @@ def __init__(self, x, unit=None, else: unit = 's' - # If timezone is default, make it 'local' or 'UTC' based on the unit if timezone is None: - # Date units -> UTC, time units -> local - if unit in ('Y', 'M', 'W', 'D'): - self.timezone = 'UTC' - else: - self.timezone = 'local' - else: - self.timezone = timezone + timezone = 'naive' + self.timezone = timezone self.unit = unit self.casting = casting diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 264178d30bcf..3cf9a2bd5991 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -1316,9 +1316,6 @@ datetime_metadata_divides( /* * This provides the casting rules for the DATETIME data type units. - * - * Notably, there is a barrier between 'date units' and 'time units' - * for all but 'unsafe' casting. */ NPY_NO_EXPORT npy_bool can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, @@ -1331,31 +1328,26 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, return 1; /* - * Only enforce the 'date units' vs 'time units' barrier with - * 'same_kind' casting. + * Can cast between all units with 'same_kind' casting. */ case NPY_SAME_KIND_CASTING: if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) { return src_unit == NPY_FR_GENERIC; } else { - return (src_unit <= NPY_FR_D && dst_unit <= NPY_FR_D) || - (src_unit > NPY_FR_D && dst_unit > NPY_FR_D); + return 1; } /* - * Enforce the 'date units' vs 'time units' barrier and that - * casting is only allowed towards more precise units with - * 'safe' casting.
+ * Casting is only allowed towards more precise units with 'safe' + * casting. */ case NPY_SAFE_CASTING: if (src_unit == NPY_FR_GENERIC || dst_unit == NPY_FR_GENERIC) { return src_unit == NPY_FR_GENERIC; } else { - return (src_unit <= dst_unit) && - ((src_unit <= NPY_FR_D && dst_unit <= NPY_FR_D) || - (src_unit > NPY_FR_D && dst_unit > NPY_FR_D)); + return (src_unit <= dst_unit); } /* Enforce equality with 'no' or 'equiv' casting */ @@ -2254,6 +2246,14 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out, PyObject *offset; int seconds_offset, minutes_offset; + /* 2016-01-14, 1.11 */ + PyErr_Clear(); + if (DEPRECATE( + "parsing timezone aware datetimes is deprecated; " + "this will raise an error in the future") < 0) { + return -1; + } + /* The utcoffset function should return a timedelta */ offset = PyObject_CallMethod(tmp, "utcoffset", "O", obj); if (offset == NULL) { @@ -2386,7 +2386,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, /* Parse the ISO date */ if (parse_iso_8601_datetime(str, len, meta->base, casting, - &dts, NULL, &bestunit, NULL) < 0) { + &dts, &bestunit, NULL) < 0) { Py_DECREF(bytes); return -1; } @@ -3500,7 +3500,7 @@ find_string_array_datetime64_type(PyArrayObject *arr, tmp_meta.base = -1; if (parse_iso_8601_datetime(tmp_buffer, maxlen, -1, - NPY_UNSAFE_CASTING, &dts, NULL, + NPY_UNSAFE_CASTING, &dts, &tmp_meta.base, NULL) < 0) { goto fail; } @@ -3509,7 +3509,7 @@ find_string_array_datetime64_type(PyArrayObject *arr, else { tmp_meta.base = -1; if (parse_iso_8601_datetime(data, tmp - data, -1, - NPY_UNSAFE_CASTING, &dts, NULL, + NPY_UNSAFE_CASTING, &dts, &tmp_meta.base, NULL) < 0) { goto fail; } diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c index 54587cb5c309..09ddc46d4120 100644 --- a/numpy/core/src/multiarray/datetime_strings.c +++ b/numpy/core/src/multiarray/datetime_strings.c @@ -110,82 +110,6 @@ get_localtime(NPY_TIME_T *ts, struct tm *tms) return -1; } -/* - * Wraps `gmtime` functionality for multiple platforms. This - * converts a time value to a time structure in UTC. - * - * Returns 0 on success, -1 on failure. - */ -static int -get_gmtime(NPY_TIME_T *ts, struct tm *tms) -{ - char *func_name = ""; -#if defined(_WIN32) - #if defined(_MSC_VER) && (_MSC_VER >= 1400) - if (gmtime_s(tms, ts) != 0) { - func_name = "gmtime_s"; - goto fail; - } - #elif defined(NPY_MINGW_USE_CUSTOM_MSVCR) - if (_gmtime64_s(tms, ts) != 0) { - func_name = "_gmtime64_s"; - goto fail; - } - #else - struct tm *tms_tmp; - tms_tmp = gmtime(ts); - if (tms_tmp == NULL) { - func_name = "gmtime"; - goto fail; - } - memcpy(tms, tms_tmp, sizeof(struct tm)); - #endif -#else - if (gmtime_r(ts, tms) == NULL) { - func_name = "gmtime_r"; - goto fail; - } -#endif - - return 0; - -fail: - PyErr_Format(PyExc_OSError, "Failed to use '%s' to convert " - "to a UTC time", func_name); - return -1; -} - -/* - * Wraps `mktime` functionality for multiple platforms. This - * converts a local time struct to an UTC value. - * - * Returns timestamp on success, -1 on failure. 
- */ -static NPY_TIME_T -get_mktime(struct tm *tms) -{ - char *func_name = ""; - NPY_TIME_T ts; -#if defined(NPY_MINGW_USE_CUSTOM_MSVCR) - ts = _mktime64(tms); - if (ts == -1) { - func_name = "_mktime64"; - goto fail; - } -#else - ts = mktime(tms); - if (ts == -1) { - func_name = "mktime"; - goto fail; - } -#endif - return ts; -fail: - PyErr_Format(PyExc_OSError, "Failed to use '%s' to convert " - "local time to UTC", func_name); - return -1; -} - /* * Converts a datetimestruct in UTC to a datetimestruct in local time, * also returning the timezone offset applied. This function works for any year @@ -262,85 +186,6 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local, return 0; } -/* - * Converts a datetimestruct in local time to a datetimestruct in UTC. - * - * Returns 0 on success, -1 on failure. - */ -static int -convert_datetimestruct_local_to_utc(npy_datetimestruct *out_dts_utc, - const npy_datetimestruct *dts_local) -{ - npy_int64 year_correction = 0; - - /* Make a copy of the input 'dts' to modify */ - *out_dts_utc = *dts_local; - - /* - * For 32 bit NPY_TIME_T, the get_mktime()/get_gmtime() functions do not - * work for years later than 2038. So if the year >= 2038, we instead call - * get_mktime()/get_gmtime() for the year 2036 or 2037 (depending on the - * leap year) which must work and at the end we add the 'year_correction' - * back. - */ - if (sizeof(NPY_TIME_T) == 4 && out_dts_utc->year >= 2038) { - if (is_leapyear(out_dts_utc->year)) { - /* 2036 is a leap year */ - year_correction = out_dts_utc->year - 2036; - out_dts_utc->year -= year_correction; /* = 2036 */ - } - else { - /* 2037 is not a leap year */ - year_correction = out_dts_utc->year - 2037; - out_dts_utc->year -= year_correction; /* = 2037 */ - } - } - - /* - * ISO 8601 states to treat date-times without a timezone offset - * or 'Z' for UTC as local time. The C standard libary functions - * mktime and gmtime allow us to do this conversion. - * - * Only do this timezone adjustment for recent and future years. - * In this case, "recent" is defined to be 1970 and later, because - * on MS Windows, mktime raises an error when given an earlier date. - */ - if (out_dts_utc->year >= 1970) { - NPY_TIME_T rawtime = 0; - struct tm tm_; - - tm_.tm_sec = out_dts_utc->sec; - tm_.tm_min = out_dts_utc->min; - tm_.tm_hour = out_dts_utc->hour; - tm_.tm_mday = out_dts_utc->day; - tm_.tm_mon = out_dts_utc->month - 1; - tm_.tm_year = out_dts_utc->year - 1900; - tm_.tm_isdst = -1; - - /* mktime converts a local 'struct tm' into a time_t */ - rawtime = get_mktime(&tm_); - if (rawtime == -1) { - return -1; - } - - /* gmtime converts a 'time_t' into a UTC 'struct tm' */ - if (get_gmtime(&rawtime, &tm_) < 0) { - return -1; - } - out_dts_utc->sec = tm_.tm_sec; - out_dts_utc->min = tm_.tm_min; - out_dts_utc->hour = tm_.tm_hour; - out_dts_utc->day = tm_.tm_mday; - out_dts_utc->month = tm_.tm_mon + 1; - out_dts_utc->year = tm_.tm_year + 1900; - } - - /* Reapply the year 2038 year correction */ - out_dts_utc->year += year_correction; - - return 0; -} - /* * Parses (almost) standard ISO 8601 date strings. The differences are: * @@ -363,10 +208,6 @@ convert_datetimestruct_local_to_utc(npy_datetimestruct *out_dts_utc, * to be cast to the 'unit' parameter. * * 'out' gets filled with the parsed date-time. - * 'out_local' gets set to 1 if the parsed time was in local time, - * to 0 otherwise. 
The values 'now' and 'today' don't get counted - * as local, and neither do UTC +/-#### timezone offsets, because - * they aren't using the computer's local timezone offset. * 'out_bestunit' gives a suggested unit based on the amount of * resolution provided in the string, or -1 for NaT. * 'out_special' gets set to 1 if the parsed time was 'today', @@ -381,7 +222,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, NPY_DATETIMEUNIT unit, NPY_CASTING casting, npy_datetimestruct *out, - npy_bool *out_local, NPY_DATETIMEUNIT *out_bestunit, npy_bool *out_special) { @@ -411,9 +251,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, * Indicate that this was a special value, and * recommend generic units. */ - if (out_local != NULL) { - *out_local = 0; - } if (out_bestunit != NULL) { *out_bestunit = NPY_FR_GENERIC; } @@ -462,9 +299,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, * Indicate that this was a special value, and * is a date (unit 'D'). */ - if (out_local != NULL) { - *out_local = 0; - } if (out_bestunit != NULL) { *out_bestunit = bestunit; } @@ -505,9 +339,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, * use 's' because the time() function has resolution * seconds. */ - if (out_local != NULL) { - *out_local = 0; - } if (out_bestunit != NULL) { *out_bestunit = bestunit; } @@ -569,9 +400,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, /* Next character must be a '-' or the end of the string */ if (sublen == 0) { - if (out_local != NULL) { - *out_local = 0; - } bestunit = NPY_FR_Y; goto finish; } @@ -606,9 +434,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, /* Next character must be a '-' or the end of the string */ if (sublen == 0) { - if (out_local != NULL) { - *out_local = 0; - } bestunit = NPY_FR_M; goto finish; } @@ -644,9 +469,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, /* Next character must be a 'T', ' ', or end of string */ if (sublen == 0) { - if (out_local != NULL) { - *out_local = 0; - } bestunit = NPY_FR_D; goto finish; } @@ -811,25 +633,20 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, parse_timezone: if (sublen == 0) { - if (convert_datetimestruct_local_to_utc(out, out) < 0) { - goto error; - } - - /* Since neither "Z" nor a time-zone was specified, it's local */ - if (out_local != NULL) { - *out_local = 1; - } - goto finish; } + else { + /* 2016-01-14, 1.11 */ + PyErr_Clear(); + if (DEPRECATE( + "parsing timezone aware datetimes is deprecated; " + "this will raise an error in the future") < 0) { + return -1; + } + } /* UTC specifier */ if (*substr == 'Z') { - /* "Z" means not local */ - if (out_local != NULL) { - *out_local = 0; - } - if (sublen == 1) { goto finish; } @@ -842,14 +659,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, else if (*substr == '-' || *substr == '+') { int offset_neg = 0, offset_hour = 0, offset_minute = 0; - /* - * Since "local" means local with respect to the current - * machine, we say this is non-local. - */ - if (out_local != NULL) { - *out_local = 0; - } - if (*substr == '-') { offset_neg = 1; } @@ -1056,7 +865,9 @@ lossless_unit_from_datetimestruct(npy_datetimestruct *dts) * the number of year digits is >= 4 instead of strictly 4. * * If 'local' is non-zero, it produces a string in local time with - * a +-#### timezone offset, otherwise it uses timezone Z (UTC). + * a +-#### timezone offset. If 'local' is zero and 'utc' is non-zero, + * produce a string ending with 'Z' to denote UTC. By default, no time + * zone information is attached. 
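+ * For example (illustrative values only): a minute-resolution timestamp might render as "2000-01-01T11:30" by default, as "2000-01-01T11:30Z" when 'utc' is non-zero, or as "2000-01-01T03:30-0800" when 'local' is non-zero (the exact offset depends on the machine's timezone).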
* * 'base' restricts the output to that unit. Set 'base' to * -1 to auto-detect a base after which all the values are zero. @@ -1075,7 +886,7 @@ lossless_unit_from_datetimestruct(npy_datetimestruct *dts) */ NPY_NO_EXPORT int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - int local, NPY_DATETIMEUNIT base, int tzoffset, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, NPY_CASTING casting) { npy_datetimestruct dts_local; @@ -1491,7 +1302,7 @@ make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, sublen -= 4; } /* UTC "Zulu" time */ - else { + else if (utc) { if (sublen < 1) { goto string_too_short; } @@ -1528,6 +1339,7 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args, NPY_CASTING casting = NPY_SAME_KIND_CASTING; int local = 0; + int utc = 0; PyArray_DatetimeMetaData *meta; int strsize; @@ -1643,11 +1455,19 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args, if (strcmp(str, "local") == 0) { local = 1; + utc = 0; Py_DECREF(timezone_obj); timezone_obj = NULL; } else if (strcmp(str, "UTC") == 0) { local = 0; + utc = 1; + Py_DECREF(timezone_obj); + timezone_obj = NULL; + } + else if (strcmp(str, "naive") == 0) { + local = 0; + utc = 0; Py_DECREF(timezone_obj); timezone_obj = NULL; } @@ -1738,7 +1558,7 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args, memset(dataptr[1], 0, strsize); /* Convert that into a string */ if (make_iso_8601_datetime(&dts, (char *)dataptr[1], strsize, - local, unit, tzoffset, casting) < 0) { + local, utc, unit, tzoffset, casting) < 0) { goto fail; } } while(iternext(iter)); diff --git a/numpy/core/src/multiarray/datetime_strings.h b/numpy/core/src/multiarray/datetime_strings.h index 4280f6de44a3..d7608565c125 100644 --- a/numpy/core/src/multiarray/datetime_strings.h +++ b/numpy/core/src/multiarray/datetime_strings.h @@ -23,10 +23,6 @@ * to be cast to the 'unit' parameter. * * 'out' gets filled with the parsed date-time. - * 'out_local' gets set to 1 if the parsed time was in local time, - * to 0 otherwise. The values 'now' and 'today' don't get counted - * as local, and neither do UTC +/-#### timezone offsets, because - * they aren't using the computer's local timezone offset. * 'out_bestunit' gives a suggested unit based on the amount of * resolution provided in the string, or -1 for NaT. * 'out_special' gets set to 1 if the parsed time was 'today', @@ -41,7 +37,6 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, NPY_DATETIMEUNIT unit, NPY_CASTING casting, npy_datetimestruct *out, - npy_bool *out_local, NPY_DATETIMEUNIT *out_bestunit, npy_bool *out_special); @@ -76,7 +71,7 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); */ NPY_NO_EXPORT int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - int local, NPY_DATETIMEUNIT base, int tzoffset, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, NPY_CASTING casting); /* diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index f11ea395f9ed..bfb22ac30f0d 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -868,7 +868,7 @@ _strided_to_strided_datetime_to_string(char *dst, npy_intp dst_stride, * to use PyErr_Occurred(). 
*/ make_iso_8601_datetime(&dts, dst, dst_itemsize, - 0, d->src_meta.base, -1, + 0, 0, d->src_meta.base, -1, NPY_UNSAFE_CASTING); dst += dst_stride; @@ -901,7 +901,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride, if (parse_iso_8601_datetime(tmp_buffer, src_itemsize, d->dst_meta.base, NPY_SAME_KIND_CASTING, - &dts, NULL, NULL, NULL) < 0) { + &dts, NULL, NULL) < 0) { dt = NPY_DATETIME_NAT; } } @@ -909,7 +909,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride, else { if (parse_iso_8601_datetime(src, tmp - src, d->dst_meta.base, NPY_SAME_KIND_CASTING, - &dts, NULL, NULL, NULL) < 0) { + &dts, NULL, NULL) < 0) { dt = NPY_DATETIME_NAT; } } diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 7c73822dd2f4..2b3dc18172a1 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -647,7 +647,6 @@ datetimetype_repr(PyObject *self) npy_datetimestruct dts; PyObject *ret; char iso[NPY_DATETIME_MAX_ISO8601_STRLEN]; - int local; NPY_DATETIMEUNIT unit; if (!PyArray_IsScalar(self, Datetime)) { @@ -663,16 +662,8 @@ datetimetype_repr(PyObject *self) return NULL; } - local = (scal->obmeta.base > NPY_FR_D); - /* - * Because we're defaulting to local time, display hours with - * minutes precision, so that 30-minute timezone offsets can work. - */ unit = scal->obmeta.base; - if (unit == NPY_FR_h) { - unit = NPY_FR_m; - } - if (make_iso_8601_datetime(&dts, iso, sizeof(iso), local, + if (make_iso_8601_datetime(&dts, iso, sizeof(iso), 0, 0, unit, -1, NPY_SAFE_CASTING) < 0) { return NULL; } @@ -758,7 +749,6 @@ datetimetype_str(PyObject *self) PyDatetimeScalarObject *scal; npy_datetimestruct dts; char iso[NPY_DATETIME_MAX_ISO8601_STRLEN]; - int local; NPY_DATETIMEUNIT unit; if (!PyArray_IsScalar(self, Datetime)) { @@ -774,16 +764,8 @@ datetimetype_str(PyObject *self) return NULL; } - local = (scal->obmeta.base > NPY_FR_D); - /* - * Because we're defaulting to local time, display hours with - * minutes precision, so that 30-minute timezone offsets can work. 
- */ unit = scal->obmeta.base; - if (unit == NPY_FR_h) { - unit = NPY_FR_m; - } - if (make_iso_8601_datetime(&dts, iso, sizeof(iso), local, + if (make_iso_8601_datetime(&dts, iso, sizeof(iso), 0, 0, unit, -1, NPY_SAFE_CASTING) < 0) { return NULL; } diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 65b1d460a3c3..3a4dcc8d35db 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -9,7 +9,7 @@ from numpy.compat import asbytes from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, - dec + assert_warns, dec ) # Use pytz to test out various time zones if available @@ -99,9 +99,8 @@ def test_datetime_casting_rules(self): # Can't cast timedelta same_kind from months/years to days assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind')) assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind')) - # Can't cast datetime same_kind across the date/time boundary - assert_(not np.can_cast('M8[D]', 'M8[h]', casting='same_kind')) - assert_(not np.can_cast('M8[h]', 'M8[D]', casting='same_kind')) + # Can cast datetime same_kind across the date/time boundary + assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind')) # Can cast timedelta same_kind across the date/time boundary assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind')) assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind')) @@ -140,8 +139,8 @@ def test_datetime_scalar_construction(self): # Construct with different units assert_equal(np.datetime64('1950-03-12', 'D'), np.datetime64('1950-03-12')) - assert_equal(np.datetime64('1950-03-12T13Z', 's'), - np.datetime64('1950-03-12T13Z', 'm')) + assert_equal(np.datetime64('1950-03-12T13', 's'), + np.datetime64('1950-03-12T13', 'm')) # Default construction means NaT assert_equal(np.datetime64(), np.datetime64('NaT')) @@ -166,8 +165,8 @@ def test_datetime_scalar_construction(self): # When constructing from a scalar or zero-dimensional array, # it either keeps the units or you can override them. 
- a = np.datetime64('2000-03-18T16Z', 'h') - b = np.array('2000-03-18T16Z', dtype='M8[h]') + a = np.datetime64('2000-03-18T16', 'h') + b = np.array('2000-03-18T16', dtype='M8[h]') assert_equal(a.dtype, np.dtype('M8[h]')) assert_equal(b.dtype, np.dtype('M8[h]')) @@ -190,22 +189,36 @@ def test_datetime_scalar_construction(self): assert_equal(np.datetime64('2045-03-25', 'D'), np.datetime64(datetime.date(2045, 3, 25), 'D')) # Construction from datetime.datetime - assert_equal(np.datetime64('1980-01-25T14:36:22.5Z'), + assert_equal(np.datetime64('1980-01-25T14:36:22.5'), np.datetime64(datetime.datetime(1980, 1, 25, 14, 36, 22, 500000))) - # Construction with time units from a date raises - assert_raises(TypeError, np.datetime64, '1920-03-13', 'h') - assert_raises(TypeError, np.datetime64, '1920-03', 'm') - assert_raises(TypeError, np.datetime64, '1920', 's') - assert_raises(TypeError, np.datetime64, datetime.date(2045, 3, 25), 'ms') - # Construction with date units from a datetime raises - assert_raises(TypeError, np.datetime64, '1920-03-13T18Z', 'D') - assert_raises(TypeError, np.datetime64, '1920-03-13T18:33Z', 'W') - assert_raises(TypeError, np.datetime64, '1920-03-13T18:33:12Z', 'M') - assert_raises(TypeError, np.datetime64, '1920-03-13T18:33:12.5Z', 'Y') - assert_raises(TypeError, np.datetime64, - datetime.datetime(1920, 4, 14, 13, 20), 'D') + # Construction with time units from a date is okay + assert_equal(np.datetime64('1920-03-13', 'h'), + np.datetime64('1920-03-13T00')) + assert_equal(np.datetime64('1920-03', 'm'), + np.datetime64('1920-03-01T00:00')) + assert_equal(np.datetime64('1920', 's'), + np.datetime64('1920-01-01T00:00:00')) + assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'), + np.datetime64('2045-03-25T00:00:00.000')) + + # Construction with date units from a datetime is also okay + assert_equal(np.datetime64('1920-03-13T18', 'D'), + np.datetime64('1920-03-13')) + assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'), + np.datetime64('1920-03')) + assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'), + np.datetime64('1920')) + + def test_datetime_scalar_construction_timezone(self): + # verify that supplying an explicit timezone works, but is deprecated + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('2000-01-01T00Z'), + np.datetime64('2000-01-01T00')) + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('2000-01-01T00-08'), + np.datetime64('2000-01-01T08')) def test_datetime_array_find_type(self): dt = np.datetime64('1970-01-01', 'M') @@ -324,57 +337,57 @@ def test_timedelta_scalar_construction_units(self): np.dtype('M8[D]')) assert_equal(np.datetime64('2010-03-12T17').dtype, np.dtype('M8[h]')) - assert_equal(np.datetime64('2010-03-12T17:15Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15').dtype, np.dtype('M8[m]')) - assert_equal(np.datetime64('2010-03-12T17:15:08Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08').dtype, np.dtype('M8[s]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.1Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype, np.dtype('M8[ms]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.12Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype, np.dtype('M8[ms]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.123Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype, np.dtype('M8[ms]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.1234Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype, 
np.dtype('M8[us]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.12345Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype, np.dtype('M8[us]')) - assert_equal(np.datetime64('2010-03-12T17:15:08.123456Z').dtype, + assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype, np.dtype('M8[us]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.1234567Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype, np.dtype('M8[ns]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.12345678Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype, np.dtype('M8[ns]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.123456789Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype, np.dtype('M8[ns]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype, np.dtype('M8[ps]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype, np.dtype('M8[ps]')) - assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012Z').dtype, + assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype, np.dtype('M8[ps]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.1234567890123Z').dtype, + '1970-01-01T00:00:02.1234567890123').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.12345678901234Z').dtype, + '1970-01-01T00:00:02.12345678901234').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.123456789012345Z').dtype, + '1970-01-01T00:00:02.123456789012345').dtype, np.dtype('M8[fs]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.1234567890123456Z').dtype, + '1970-01-01T00:00:02.1234567890123456').dtype, np.dtype('M8[as]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.12345678901234567Z').dtype, + '1970-01-01T00:00:02.12345678901234567').dtype, np.dtype('M8[as]')) assert_equal(np.datetime64( - '1970-01-01T00:00:02.123456789012345678Z').dtype, + '1970-01-01T00:00:02.123456789012345678').dtype, np.dtype('M8[as]')) # Python date object @@ -390,18 +403,10 @@ def test_timedelta_scalar_construction_units(self): assert_equal(np.datetime64('today').dtype, np.dtype('M8[D]')) - assert_raises(TypeError, np.datetime64, 'today', 'h') - assert_raises(TypeError, np.datetime64, 'today', 's') - assert_raises(TypeError, np.datetime64, 'today', 'as') - # 'now' special value assert_equal(np.datetime64('now').dtype, np.dtype('M8[s]')) - assert_raises(TypeError, np.datetime64, 'now', 'Y') - assert_raises(TypeError, np.datetime64, 'now', 'M') - assert_raises(TypeError, np.datetime64, 'now', 'D') - def test_datetime_nat_casting(self): a = np.array('NaT', dtype='M8[D]') b = np.datetime64('NaT', '[D]') @@ -508,9 +513,9 @@ def test_pydatetime_creation(self): #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') #assert_equal(a[0], a[1]) - # A datetime.date will raise if you try to give it time units - assert_raises(TypeError, np.array, datetime.date(1960, 3, 12), - dtype='M8[s]') + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) def test_datetime_string_conversion(self): a = ['2011-03-16', '1920-01-01', '2013-05-19'] @@ -547,7 +552,7 @@ def test_datetime_array_str(self): a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') assert_equal(str(a), "['2011-03-16' '1920-01-01' 
'2013-05-19']") - a = np.array(['2011-03-16T13:55Z', '1920-01-01T03:12Z'], dtype='M') + a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') assert_equal(np.array2string(a, separator=', ', formatter={'datetime': lambda x: "'%s'" % np.datetime_as_string(x, timezone='UTC')}), @@ -667,14 +672,14 @@ def test_pyobject_roundtrip(self): for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: b = a.copy().view(dtype=unit) - b[0] = '-0001-01-01T00Z' - b[1] = '-0001-12-31T00Z' - b[2] = '0000-01-01T00Z' - b[3] = '0001-01-01T00Z' - b[4] = '1969-12-31T23:59:59.999999Z' - b[5] = '1970-01-01T00Z' - b[6] = '9999-12-31T23:59:59.999999Z' - b[7] = '10000-01-01T00Z' + b[0] = '-0001-01-01T00' + b[1] = '-0001-12-31T00' + b[2] = '0000-01-01T00' + b[3] = '0001-01-01T00' + b[4] = '1969-12-31T23:59:59.999999' + b[5] = '1970-01-01T00' + b[6] = '9999-12-31T23:59:59.999999' + b[7] = '10000-01-01T00' b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, @@ -685,13 +690,13 @@ def test_month_truncation(self): assert_equal(np.array('1945-03-01', dtype='M8[M]'), np.array('1945-03-31', dtype='M8[M]')) assert_equal(np.array('1969-11-01', dtype='M8[M]'), - np.array('1969-11-30T23:59:59.99999Z', dtype='M').astype('M8[M]')) + np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1969-12-01', dtype='M8[M]'), - np.array('1969-12-31T23:59:59.99999Z', dtype='M').astype('M8[M]')) + np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1970-01-01', dtype='M8[M]'), - np.array('1970-01-31T23:59:59.99999Z', dtype='M').astype('M8[M]')) + np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]')) assert_equal(np.array('1980-02-01', dtype='M8[M]'), - np.array('1980-02-29T23:59:59.99999Z', dtype='M').astype('M8[M]')) + np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]')) def test_different_unit_comparison(self): # Check some years with date units @@ -720,32 +725,32 @@ def test_different_unit_comparison(self): dt1 = np.dtype('M8[%s]' % unit1) for unit2 in ['h', 'm', 's', 'ms', 'us']: dt2 = np.dtype('M8[%s]' % unit2) - assert_equal(np.array('1945-03-12T18Z', dtype=dt1), - np.array('1945-03-12T18Z', dtype=dt2)) - assert_equal(np.array('1970-03-12T18Z', dtype=dt1), - np.array('1970-03-12T18Z', dtype=dt2)) - assert_equal(np.array('9999-03-12T18Z', dtype=dt1), - np.array('9999-03-12T18Z', dtype=dt2)) - assert_equal(np.array('10000-01-01T00Z', dtype=dt1), - np.array('10000-01-01T00Z', dtype=dt2)) - assert_equal(np.datetime64('1945-03-12T18Z', unit1), - np.datetime64('1945-03-12T18Z', unit2)) - assert_equal(np.datetime64('1970-03-12T18Z', unit1), - np.datetime64('1970-03-12T18Z', unit2)) - assert_equal(np.datetime64('9999-03-12T18Z', unit1), - np.datetime64('9999-03-12T18Z', unit2)) - assert_equal(np.datetime64('10000-01-01T00Z', unit1), - np.datetime64('10000-01-01T00Z', unit2)) + assert_equal(np.array('1945-03-12T18', dtype=dt1), + np.array('1945-03-12T18', dtype=dt2)) + assert_equal(np.array('1970-03-12T18', dtype=dt1), + np.array('1970-03-12T18', dtype=dt2)) + assert_equal(np.array('9999-03-12T18', dtype=dt1), + np.array('9999-03-12T18', dtype=dt2)) + assert_equal(np.array('10000-01-01T00', dtype=dt1), + np.array('10000-01-01T00', dtype=dt2)) + assert_equal(np.datetime64('1945-03-12T18', unit1), + np.datetime64('1945-03-12T18', unit2)) + assert_equal(np.datetime64('1970-03-12T18', unit1), + np.datetime64('1970-03-12T18', unit2)) + assert_equal(np.datetime64('9999-03-12T18', unit1), + 
np.datetime64('9999-03-12T18', unit2)) + assert_equal(np.datetime64('10000-01-01T00', unit1), + np.datetime64('10000-01-01T00', unit2)) # Check some days with units that won't overflow for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']: dt1 = np.dtype('M8[%s]' % unit1) for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']: dt2 = np.dtype('M8[%s]' % unit2) assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1), - np.array('1932-02-17T00:00:00Z', dtype='M').astype(dt2), + np.array('1932-02-17T00:00:00', dtype='M').astype(dt2), casting='unsafe')) assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1), - np.array('10000-04-27T00:00:00Z', dtype='M').astype(dt2), + np.array('10000-04-27T00:00:00', dtype='M').astype(dt2), casting='unsafe')) # Shouldn't be able to compare datetime and timedelta @@ -807,7 +812,7 @@ def test_datetime_add(self): # One-dimensional arrays (np.array(['2012-12-21'], dtype='M8[D]'), np.array(['2012-12-24'], dtype='M8[D]'), - np.array(['2012-12-21T11Z'], dtype='M8[h]'), + np.array(['2012-12-21T11'], dtype='M8[h]'), np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), @@ -815,7 +820,7 @@ def test_datetime_add(self): # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), - np.datetime64('2012-12-21T11Z', '[h]'), + np.datetime64('2012-12-21T11', '[h]'), np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), @@ -878,8 +883,8 @@ def test_datetime_subtract(self): (np.array(['2012-12-21'], dtype='M8[D]'), np.array(['2012-12-24'], dtype='M8[D]'), np.array(['1940-12-24'], dtype='M8[D]'), - np.array(['1940-12-24T00Z'], dtype='M8[h]'), - np.array(['1940-12-23T13Z'], dtype='M8[h]'), + np.array(['1940-12-24T00'], dtype='M8[h]'), + np.array(['1940-12-23T13'], dtype='M8[h]'), np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), @@ -888,8 +893,8 @@ def test_datetime_subtract(self): (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), np.datetime64('1940-12-24', '[D]'), - np.datetime64('1940-12-24T00Z', '[h]'), - np.datetime64('1940-12-23T13Z', '[h]'), + np.datetime64('1940-12-24T00', '[h]'), + np.datetime64('1940-12-23T13', '[h]'), np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), @@ -1071,12 +1076,12 @@ def test_datetime_divide(self): def test_datetime_compare(self): # Test all the comparison operators - a = np.datetime64('2000-03-12T18:00:00.000000-0600') - b = np.array(['2000-03-12T18:00:00.000000-0600', - '2000-03-12T17:59:59.999999-0600', - '2000-03-12T18:00:00.000001-0600', - '1970-01-11T12:00:00.909090-0600', - '2016-01-11T12:00:00.909090-0600'], + a = np.datetime64('2000-03-12T18:00:00.000000') + b = np.array(['2000-03-12T18:00:00.000000', + '2000-03-12T17:59:59.999999', + '2000-03-12T18:00:00.000001', + '1970-01-11T12:00:00.909090', + '2016-01-11T12:00:00.909090'], dtype='datetime64[us]') assert_equal(np.equal(a, b), [1, 0, 0, 0, 0]) assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1]) @@ -1108,8 +1113,8 @@ def test_datetime_compare_nat(self): def test_datetime_minmax(self): # The metadata of the result should become the GCD # of the operand metadata - a = np.array('1999-03-12T13Z', dtype='M8[2m]') - b = np.array('1999-03-12T12Z', dtype='M8[s]') + a = np.array('1999-03-12T13', dtype='M8[2m]') + b = np.array('1999-03-12T12', dtype='M8[s]') assert_equal(np.minimum(a, b), b) assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]')) assert_equal(np.fmin(a, b), b) @@ -1123,7 
+1128,7 @@ def test_datetime_minmax(self): assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) # Interaction with NaT - a = np.array('1999-03-12T13Z', dtype='M8[2m]') + a = np.array('1999-03-12T13', dtype='M8[2m]') dtnat = np.array('NaT', dtype='M8[h]') assert_equal(np.minimum(a, dtnat), a) assert_equal(np.minimum(dtnat, a), a) @@ -1150,7 +1155,7 @@ def test_datetime_minmax(self): # TODO: Allowing unsafe casting by # default in ufuncs strikes again... :( a = np.array(3, dtype='m8[h]') - b = np.array('1999-03-12T12Z', dtype='M8[s]') + b = np.array('1999-03-12T12', dtype='M8[s]') #assert_raises(TypeError, np.minimum, a, b) #assert_raises(TypeError, np.maximum, a, b) #assert_raises(TypeError, np.fmin, a, b) @@ -1212,17 +1217,26 @@ def test_string_parser_variants(self): assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')), np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]'))) # UTC specifier - assert_equal(np.array(['-1980-02-29T01:02:03Z'], np.dtype('M8[s]')), - np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')), + np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) # Time zone offset - assert_equal(np.array(['1980-02-29T02:02:03Z'], np.dtype('M8[s]')), - np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]'))) - assert_equal(np.array(['1980-02-28T22:32:03Z'], np.dtype('M8[s]')), - np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]'))) - assert_equal(np.array(['1980-02-29T02:32:03.506Z'], np.dtype('M8[s]')), - np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]'))) - assert_equal(np.datetime64('1977-03-02T12:30-0230'), - np.datetime64('1977-03-02T15:00Z')) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('1977-03-02T12:30-0230'), + np.datetime64('1977-03-02T15:00')) def test_string_parser_error_check(self): # Arbitrary bad string @@ -1291,19 +1305,24 @@ def test_string_parser_error_check(self): assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'], np.dtype('M8[us]')) # Timezone offset must within a reasonable range - assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'], - np.dtype('M8[us]')) - assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'], - np.dtype('M8[us]')) - assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'], - np.dtype('M8[us]')) - assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'], - np.dtype('M8[us]')) - assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'], - np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + 
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'], + np.dtype('M8[us]')) def test_creation_overflow(self): - date = '1980-03-23 20:00:00Z' + date = '1980-03-23 20:00:00' timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) for unit in ['ms', 'us', 'ns']: timesteps *= 1000 @@ -1317,7 +1336,7 @@ def test_creation_overflow(self): def test_datetime_as_string(self): # Check all the units with default string conversion date = '1959-10-13' - datetime = '1959-10-13T12:34:56.789012345678901234Z' + datetime = '1959-10-13T12:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')), '1959') @@ -1326,45 +1345,45 @@ def test_datetime_as_string(self): assert_equal(np.datetime_as_string(np.datetime64(date, 'D')), '1959-10-13') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')), - '1959-10-13T12Z') + '1959-10-13T12') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')), - '1959-10-13T12:34Z') + '1959-10-13T12:34') assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')), - '1959-10-13T12:34:56Z') + '1959-10-13T12:34:56') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')), - '1959-10-13T12:34:56.789Z') + '1959-10-13T12:34:56.789') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'us')), - '1959-10-13T12:34:56.789012Z') + '1959-10-13T12:34:56.789012') - datetime = '1969-12-31T23:34:56.789012345678901234Z' + datetime = '1969-12-31T23:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), - '1969-12-31T23:34:56.789012345Z') + '1969-12-31T23:34:56.789012345') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), - '1969-12-31T23:34:56.789012345678Z') + '1969-12-31T23:34:56.789012345678') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')), - '1969-12-31T23:34:56.789012345678901Z') + '1969-12-31T23:34:56.789012345678901') - datetime = '1969-12-31T23:59:57.789012345678901234Z' + datetime = '1969-12-31T23:59:57.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), datetime) - datetime = '1970-01-01T00:34:56.789012345678901234Z' + datetime = '1970-01-01T00:34:56.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), - '1970-01-01T00:34:56.789012345Z') + '1970-01-01T00:34:56.789012345') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), - '1970-01-01T00:34:56.789012345678Z') + '1970-01-01T00:34:56.789012345678') assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')), - '1970-01-01T00:34:56.789012345678901Z') + '1970-01-01T00:34:56.789012345678901') - datetime = '1970-01-01T00:00:05.789012345678901234Z' + datetime = '1970-01-01T00:00:05.789012345678901234' assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), datetime) # String conversion with the unit= parameter - a = np.datetime64('2032-07-18T12:23:34.123456Z', 'us') + a = np.datetime64('2032-07-18T12:23:34.123456', 'us') assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'), '2032') assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'), @@ -1373,62 +1392,66 @@ def test_datetime_as_string(self): '2032-07-18') assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') - assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12Z') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') 
assert_equal(np.datetime_as_string(a, unit='m'), - '2032-07-18T12:23Z') + '2032-07-18T12:23') assert_equal(np.datetime_as_string(a, unit='s'), - '2032-07-18T12:23:34Z') + '2032-07-18T12:23:34') assert_equal(np.datetime_as_string(a, unit='ms'), - '2032-07-18T12:23:34.123Z') + '2032-07-18T12:23:34.123') assert_equal(np.datetime_as_string(a, unit='us'), - '2032-07-18T12:23:34.123456Z') + '2032-07-18T12:23:34.123456') assert_equal(np.datetime_as_string(a, unit='ns'), - '2032-07-18T12:23:34.123456000Z') + '2032-07-18T12:23:34.123456000') assert_equal(np.datetime_as_string(a, unit='ps'), - '2032-07-18T12:23:34.123456000000Z') + '2032-07-18T12:23:34.123456000000') assert_equal(np.datetime_as_string(a, unit='fs'), - '2032-07-18T12:23:34.123456000000000Z') + '2032-07-18T12:23:34.123456000000000') assert_equal(np.datetime_as_string(a, unit='as'), - '2032-07-18T12:23:34.123456000000000000Z') + '2032-07-18T12:23:34.123456000000000000') # unit='auto' parameter assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T12:23:34.123456Z', 'us'), unit='auto'), - '2032-07-18T12:23:34.123456Z') + np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'), + '2032-07-18T12:23:34.123456') assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T12:23:34.12Z', 'us'), unit='auto'), - '2032-07-18T12:23:34.120Z') + np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'), + '2032-07-18T12:23:34.120') assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T12:23:34Z', 'us'), unit='auto'), - '2032-07-18T12:23:34Z') + np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'), + '2032-07-18T12:23:34') assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T12:23:00Z', 'us'), unit='auto'), - '2032-07-18T12:23Z') + np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'), + '2032-07-18T12:23') # 'auto' doesn't split up hour and minute assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T12:00:00Z', 'us'), unit='auto'), - '2032-07-18T12:00Z') + np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'), + '2032-07-18T12:00') assert_equal(np.datetime_as_string( - np.datetime64('2032-07-18T00:00:00Z', 'us'), unit='auto'), + np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'), '2032-07-18') # 'auto' doesn't split up the date assert_equal(np.datetime_as_string( - np.datetime64('2032-07-01T00:00:00Z', 'us'), unit='auto'), + np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'), '2032-07-01') assert_equal(np.datetime_as_string( - np.datetime64('2032-01-01T00:00:00Z', 'us'), unit='auto'), + np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') @dec.skipif(not _has_pytz, "The pytz module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' - a = np.datetime64('2010-03-15T06:30Z', 'm') + a = np.datetime64('2010-03-15T06:30', 'm') + assert_equal(np.datetime_as_string(a), + '2010-03-15T06:30') + assert_equal(np.datetime_as_string(a, timezone='naive'), + '2010-03-15T06:30') assert_equal(np.datetime_as_string(a, timezone='UTC'), '2010-03-15T06:30Z') assert_(np.datetime_as_string(a, timezone='local') != - '2010-03-15T06:30Z') + '2010-03-15T06:30') - b = np.datetime64('2010-02-15T06:30Z', 'm') + b = np.datetime64('2010-02-15T06:30', 'm') assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), '2010-03-15T01:30-0500') @@ -1493,7 +1516,7 @@ def test_datetime_arange(self): assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'), np.timedelta64(5, 'M')) assert_raises(TypeError, np.arange, - 
np.datetime64('2012-02-03T14Z', 's'), + np.datetime64('2012-02-03T14', 's'), np.timedelta64(5, 'Y')) def test_datetime_arange_no_dtype(self): @@ -1845,21 +1868,23 @@ def test_datetime_is_busday(self): def test_datetime_y2038(self): # Test parsing on either side of the Y2038 boundary - a = np.datetime64('2038-01-19T03:14:07Z') + a = np.datetime64('2038-01-19T03:14:07') assert_equal(a.view(np.int64), 2**31 - 1) - a = np.datetime64('2038-01-19T03:14:08Z') + a = np.datetime64('2038-01-19T03:14:08') assert_equal(a.view(np.int64), 2**31) # Test parsing on either side of the Y2038 boundary with # a manually specified timezone offset - a = np.datetime64('2038-01-19T04:14:07+0100') - assert_equal(a.view(np.int64), 2**31 - 1) - a = np.datetime64('2038-01-19T04:14:08+0100') - assert_equal(a.view(np.int64), 2**31) - - # Test parsing a date after Y2038 in the local timezone + with assert_warns(DeprecationWarning): + a = np.datetime64('2038-01-19T04:14:07+0100') + assert_equal(a.view(np.int64), 2**31 - 1) + with assert_warns(DeprecationWarning): + a = np.datetime64('2038-01-19T04:14:08+0100') + assert_equal(a.view(np.int64), 2**31) + + # Test parsing a date after Y2038 a = np.datetime64('2038-01-20T13:21:14') - assert_equal(str(a)[:-5], '2038-01-20T13:21:14') + assert_equal(str(a), '2038-01-20T13:21:14') class TestDateTimeData(TestCase): diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 65ddc1e77387..f0998901d172 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -5,6 +5,7 @@ """ from __future__ import division, absolute_import, print_function +import datetime import sys import operator import warnings @@ -12,7 +13,13 @@ import numpy as np from numpy.testing import ( run_module_suite, assert_raises, assert_warns, assert_no_warnings, - assert_array_equal, assert_) + assert_array_equal, assert_, dec) + +try: + import pytz + _has_pytz = True +except ImportError: + _has_pytz = False class _DeprecationTestCase(object): @@ -386,6 +393,26 @@ def test_full_default_dtype(self): assert_no_warnings(np.full, 1, 1, float) +class TestDatetime64Timezone(_DeprecationTestCase): + """Parsing of datetime64 with timezones deprecated in 1.11.0, because + datetime64 is now timezone naive rather than UTC only. + + It will be quite a while before we can remove this, because, at the very + least, a lot of existing code uses the 'Z' modifier to avoid conversion + from local time to UTC, even if otherwise it handles time in a timezone + naive fashion. + """ + def test_string(self): + self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) + self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) + + @dec.skipif(not _has_pytz, "The pytz module is not available.") + def test_datetime(self): + tz = pytz.timezone('US/Eastern') + dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) + self.assert_deprecated(np.datetime64, args=(dt,)) + + class TestNonCContiguousViewDeprecation(_DeprecationTestCase): """View of non-C-contiguous arrays deprecated in 1.11.0. 
From 2fbc3f4f86b5b621853d3326a89fd565fd795f9b Mon Sep 17 00:00:00 2001 From: = <=> Date: Sat, 16 Jan 2016 16:06:54 +0530 Subject: [PATCH 363/496] Fix issue 7021 --- numpy/core/src/multiarray/datetime_busday.c | 1 + numpy/core/tests/test_datetime.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c index 4fade4d20a7c..c04a6c125ca2 100644 --- a/numpy/core/src/multiarray/datetime_busday.c +++ b/numpy/core/src/multiarray/datetime_busday.c @@ -288,6 +288,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset, /* If we get a NaT, just return it */ if (date == NPY_DATETIME_NAT) { + *out = NPY_DATETIME_NAT; return 0; } diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 65b1d460a3c3..c79f59c7091f 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1588,6 +1588,15 @@ def test_datetime_busday_offset(self): assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'), np.datetime64('2007-02-25')) + # NaT values when roll is not raise + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'), + np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'), + np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), + np.datetime64('NaT')) + + def test_datetime_busdaycalendar(self): # Check that it removes NaT, duplicates, and weekends # and sorts the result. From 4b0ed79a959ea8b6c5dfef67f32a0d5d7370fb91 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 11:29:38 +0100 Subject: [PATCH 364/496] BLD: require setuptools for Numpy builds. Closes gh-6551. --- setup.py | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/setup.py b/setup.py index 80ddd8ac386c..e5697bc8edc7 100755 --- a/setup.py +++ b/setup.py @@ -21,6 +21,8 @@ import sys import subprocess +from setuptools import setup + if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2): raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.") @@ -86,9 +88,11 @@ def _minimal_ext_cmd(cmd): return GIT_REVISION -# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly -# update it when the contents of directories change. -if os.path.exists('MANIFEST'): os.remove('MANIFEST') +# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be +# properly updated when the contents of directories change (true for distutils, +# not sure about setuptools). 
+if os.path.exists('MANIFEST'): + os.remove('MANIFEST') # This is a bit hackish: we are setting a global variable so that the main # numpy __init__ can detect if it is being loaded by the setup routine, to @@ -159,6 +163,7 @@ def configuration(parent_package='',top_path=None): return config + def check_submodules(): """ verify that the submodules are checked out and clean use `git submodule update --init`; on failure @@ -181,13 +186,15 @@ def check_submodules(): if line.startswith('-') or line.startswith('+'): raise ValueError('Submodule not clean: %s' % line) -from distutils.command.sdist import sdist + +from setuptools.command.sdist import sdist class sdist_checked(sdist): """ check submodules on sdist to prevent incomplete tarballs """ def run(self): check_submodules() sdist.run(self) + def generate_cython(): cwd = os.path.abspath(os.path.dirname(__file__)) print("Cythonizing sources") @@ -198,6 +205,7 @@ def generate_cython(): if p != 0: raise RuntimeError("Running cythonize failed!") + def setup_package(): src_path = os.path.dirname(os.path.abspath(sys.argv[0])) old_path = os.getcwd() @@ -223,32 +231,20 @@ def setup_package(): cmdclass={"sdist": sdist_checked}, ) + FULLVERSION, GIT_REVISION = get_version_info() + metadata['version'] = FULLVERSION # Run build if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): - # Use setuptools for these commands (they don't work well or at all - # with distutils). For normal builds use distutils. - try: - from setuptools import setup - except ImportError: - from distutils.core import setup - - FULLVERSION, GIT_REVISION = get_version_info() - metadata['version'] = FULLVERSION + pass else: - if (len(sys.argv) >= 2 and sys.argv[1] in ('bdist_wheel', 'bdist_egg') or - sys.version_info[0] < 3 and sys.platform == "win32"): - # bdist_wheel, bdist_egg and the MS python2.7 VS sdk needs setuptools - # the latter can also be triggered by (see python issue23246) - # SET DISTUTILS_USE_SDK=1 - # SET MSSdk=1 - import setuptools from numpy.distutils.core import setup cwd = os.path.abspath(os.path.dirname(__file__)) if not os.path.exists(os.path.join(cwd, 'PKG-INFO')): # Generate Cython sources, unless building from source release generate_cython() + metadata['configuration'] = configuration try: From f820c521b3d95a6c6642120b215d1fc5da803af9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 11:32:06 +0100 Subject: [PATCH 365/496] MAINT: remove unnecessary setupegg.py file, now that we depend on setuptools. --- setupegg.py | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100755 setupegg.py diff --git a/setupegg.py b/setupegg.py deleted file mode 100755 index 36185db488fb..000000000000 --- a/setupegg.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -""" -A setup.py script to use setuptools, which gives egg goodness, etc. - -This is used to build installers for OS X through bdist_mpkg. - -Notes ------ -Using ``python setupegg.py install`` directly results in file permissions being -set wrong, with nose refusing to run any tests. 
To run the tests anyway, use:: - - >>> np.test(extra_argv=['--exe']) - -""" -from __future__ import division, absolute_import, print_function - -import sys -from setuptools import setup - -if sys.version_info[0] >= 3: - import imp - setupfile = imp.load_source('setupfile', 'setup.py') - setupfile.setup_package() -else: - exec(compile(open('setup.py').read(), 'setup.py', 'exec')) From b9f480928551e8ef719e4020206ce3a6298c54ec Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 12:05:30 +0100 Subject: [PATCH 366/496] BLD: disable (half-)broken setuptools commands and give clear error messages. --- setup.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index e5697bc8edc7..6c6f0bbf183e 100755 --- a/setup.py +++ b/setup.py @@ -21,8 +21,6 @@ import sys import subprocess -from setuptools import setup - if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2): raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.") @@ -206,6 +204,64 @@ def generate_cython(): raise RuntimeError("Running cythonize failed!") +def parse_setuppy_commands(): + """Check the commands and respond appropriately. Disable broken commands.""" + if len(sys.argv) < 2: + # User forgot to give an argument probably, let setuptools handle that. + return + + # TODO: 'alias' seems broken, but check after the rest works + # TODO: 'rotate' command - not sure if it works or is useful + # TODO: 'saveopts' and 'setopt' commands - not sure if they work or are useful + # TODO: 'bdist_*' commands + good_commands = ('--help-commands', 'egg_info', '--version', 'develop', + 'install', 'install_egg_info', 'sdist', 'build', + 'build_ext', 'build_clib', 'bdist_wheel', 'bdist_rpm', + 'bdist_wininst', 'bdist_msi', 'bdist_mpkg') + for command in good_commands: + if command in sys.argv[1:]: + return + + bad_commands = dict( + test=""" + `setup.py test` is not supported. Use one of the following + instead: + + - `python runtests.py` (to build and test) + - `python runtests.py --no-build` (to test installed numpy) + - `>>> numpy.test()` (run tests for installed numpy + from within an interpreter) + """, + upload=""" + `setup.py upload` is not supported, because it's insecure. + Instead, build what you want to upload and upload those files + with `twine upload -s ` instead. + """, + upload_docs="`setup.py upload_docs` is not supported", + easy_install="`setup.py easy_install` is not supported", + clean=""" + `setup.py clean` is not supported, use one of the following instead: + + - `git clean -xdf` (cleans all files) + - `git clean -Xdf` (cleans all versioned files, doesn't touch + files that aren't checked into the git repo) + """, + check="`setup.py check` is not supported", + register="`setup.py register` is not supported", + bdist_dumb="`setup.py bdist_dumb` is not supported", + + ) + for command in bad_commands.keys(): + if command in sys.argv[1:]: + import textwrap + print(textwrap.dedent(bad_commands[command]) + + "\nAdd `--force` to your command to use it anyway if you " + "must (unsupported).\n") + sys.exit(1) + + modify_commands = dict() + + def setup_package(): src_path = os.path.dirname(os.path.abspath(sys.argv[0])) old_path = os.getcwd() @@ -233,6 +289,12 @@ def setup_package(): FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION + + from setuptools import setup + # Raise errors for unsupported commands, improve help output, etc. 
+    if not "--force" in sys.argv:
+        parse_setuppy_commands()
+
     # Run build
     if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
             sys.argv[1] in ('--help-commands', 'egg_info', '--version',

From 983eb78b906a7b9a07e8ee300359377e63014fec Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Tue, 29 Dec 2015 14:59:36 +0100
Subject: [PATCH 367/496] BUG: make distutils.misc_util.is_string recognize
 unicode.

This was triggered by the numpy version string (which is unicode) being
included in metadata. This could also solve other problems for external
users of numpy.distutils.

---
 numpy/distutils/misc_util.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 345e60f26e47..79b6f25f3bf9 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -38,6 +38,7 @@ def clean_up_temporary_directory():
 from sets import Set as set

 from numpy.distutils.compat import get_exception
+from numpy.compat import basestring

 __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
            'dict_append', 'appendpath', 'generate_config_py',
@@ -429,7 +430,7 @@ def _get_f90_modules(source):
     return modules

 def is_string(s):
-    return isinstance(s, str)
+    return isinstance(s, basestring)

 def all_strings(lst):
     """Return True if all items in lst are string objects. """

From 99e99e90db4c73a6baae178879937f4baebd3241 Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Tue, 29 Dec 2015 15:24:22 +0100
Subject: [PATCH 368/496] BLD: finish handling of setuptools commands.

Also ignore setup.cfg: this file is created/modified by the
alias/setopt/saveopts commands, and therefore needs to be in .gitignore.

---
 .gitignore | 1 +
 setup.py   | 115 +++++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 90 insertions(+), 26 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1f04f254e709..72d31d74cd6c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -105,6 +105,7 @@ numpy/core/include/numpy/__ufunc_api.h
 numpy/core/include/numpy/_numpyconfig.h
 numpy/version.py
 site.cfg
+setup.cfg
 .tox
 numpy/core/include/numpy/__multiarray_api.c
 numpy/core/include/numpy/__ufunc_api.c
diff --git a/setup.py b/setup.py
index 6c6f0bbf183e..c0646597020d 100755
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@
 import os
 import sys
 import subprocess
+import textwrap


 if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
@@ -205,23 +206,74 @@ def generate_cython():


 def parse_setuppy_commands():
-    """Check the commands and respond appropriately. Disable broken commands."""
+    """Check the commands and respond appropriately. Disable broken commands.
+
+    Return a boolean value for whether or not to run the build (this avoids
+    parsing Cython and template files if False).
+    """
     if len(sys.argv) < 2:
         # User forgot to give an argument probably, let setuptools handle that.
-        return
+        return True
+
+    info_commands = ['--help-commands', '--name', '--version', '-V',
+                     '--fullname', '--author', '--author-email',
+                     '--maintainer', '--maintainer-email', '--contact',
+                     '--contact-email', '--url', '--license', '--description',
+                     '--long-description', '--platforms', '--classifiers',
+                     '--keywords', '--provides', '--requires', '--obsoletes']
+    # Add commands that do more than print info, but also don't need Cython and
+    # template parsing.
+    info_commands.extend(['egg_info', 'install_egg_info', 'rotate'])
+
+    for command in info_commands:
+        if command in sys.argv[1:]:
+            return False

-    # TODO: 'alias' seems broken, but check after the rest works
-    # TODO: 'rotate' command - not sure if it works or is useful
-    # TODO: 'saveopts' and 'setopt' commands - not sure if they work or are useful
-    # TODO: 'bdist_*' commands
-    good_commands = ('--help-commands', 'egg_info', '--version', 'develop',
-                     'install', 'install_egg_info', 'sdist', 'build',
-                     'build_ext', 'build_clib', 'bdist_wheel', 'bdist_rpm',
+    # Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
+    # fine as they are, but are usually used together with one of the commands
+    # below and not standalone. Hence they're not added to good_commands.
+    good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
+                     'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
                      'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
+
     for command in good_commands:
         if command in sys.argv[1:]:
-            return
-
+            return True
+
+    # The following commands are supported, but we need to show some more
+    # useful messages to the user
+    if 'install' in sys.argv[1:]:
+        print(textwrap.dedent("""
+            Note: if you need reliable uninstall behavior, then install
+            with pip instead of using `setup.py install`:
+
+              - `pip install .`       (from a git repo or downloaded source
+                release)
+              - `pip install numpy`   (last Numpy release on PyPi)
+
+            """))
+        return True
+
+    if '--help' in sys.argv[1:] or '-h' in sys.argv[1:]:
+        print(textwrap.dedent("""
+            Numpy-specific help
+            -------------------
+
+            To install Numpy from here with reliable uninstall, we recommend
+            that you use `pip install .`. To install the latest Numpy release
+            from PyPi, use `pip install numpy`.
+
+            For help with build/installation issues, please ask on the
+            numpy-discussion mailing list. If you are sure that you have run
+            into a bug, please report it at https://github.com/numpy/numpy/issues.
+
+            Setuptools commands help
+            ------------------------
+            """))
+        return False
+
+    # The following commands aren't supported. They can only be executed when
+    # the user explicitly adds a --force command-line argument.
     bad_commands = dict(
         test="""
            `setup.py test` is not supported. Use one of the following
@@ -249,17 +301,30 @@ def parse_setuppy_commands():
         check="`setup.py check` is not supported",
         register="`setup.py register` is not supported",
         bdist_dumb="`setup.py bdist_dumb` is not supported",
-
+        bdist="`setup.py bdist` is not supported",
+        build_sphinx="""
+            `setup.py build_sphinx` is not supported, use the
+            Makefile under doc/""",
+        flake8="`setup.py flake8` is not supported, use flake8 standalone",
     )
+    bad_commands['nosetests'] = bad_commands['test']
+    for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
+                    'register', 'check', 'install_data', 'install_headers',
+                    'install_lib', 'install_scripts', ):
+        bad_commands[command] = "`setup.py %s` is not supported" % command
+
     for command in bad_commands.keys():
         if command in sys.argv[1:]:
-            import textwrap
             print(textwrap.dedent(bad_commands[command]) +
                   "\nAdd `--force` to your command to use it anyway if you "
                   "must (unsupported).\n")
             sys.exit(1)

-    modify_commands = dict()
+    # If we got here, we didn't detect what setup.py command was given
+    import warnings
+    warnings.warn("Unrecognized setuptools command, proceeding with "
+                  "generating Cython sources and expanding templates")
+    return True


 def setup_package():
@@ -287,20 +352,14 @@ def setup_package():
         cmdclass={"sdist": sdist_checked},
     )

-    FULLVERSION, GIT_REVISION = get_version_info()
-    metadata['version'] = FULLVERSION
+    if "--force" in sys.argv:
+        run_build = True
+    else:
+        # Raise errors for unsupported commands, improve help output, etc.
+        run_build = parse_setuppy_commands()

     from setuptools import setup
-    # Raise errors for unsupported commands, improve help output, etc.
-    if not "--force" in sys.argv:
-        parse_setuppy_commands()
-
-    # Run build
-    if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
-            sys.argv[1] in ('--help-commands', 'egg_info', '--version',
-            'clean')):
-        pass
+    if run_build:
         from numpy.distutils.core import setup
         cwd = os.path.abspath(os.path.dirname(__file__))
         if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
             # Generate Cython sources, unless building from source release
             generate_cython()
         metadata['configuration'] = configuration
+    else:
+        # Version number is added to metadata inside configuration() if build
+        # is run.
+        metadata['version'] = get_version_info()[0]

     try:
         setup(**metadata)

From 4b43d20fec0eda4a8f02e843b3e6e454bb243f49 Mon Sep 17 00:00:00 2001
From: Ralf Gommers
Date: Tue, 29 Dec 2015 17:24:51 +0100
Subject: [PATCH 369/496] BUG: fix TravisCI test issues when using setuptools
 unconditionally.

Also remove all mentions of setupegg.py from the documentation.
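
For readers skimming the two setup.py patches above: the net effect of
parse_setuppy_commands() is a three-way gate on sys.argv. A condensed,
illustrative sketch of the pattern (simplified names and command lists,
not the actual implementation)::

    import sys
    import textwrap

    def should_run_build(argv):
        # Info-only commands don't need the expensive Cython/template step.
        if any(c in argv[1:] for c in ('--version', 'egg_info', '--author')):
            return False
        # Known-good build commands do need it.
        if any(c in argv[1:] for c in ('build', 'build_ext', 'sdist', 'install')):
            return True
        # Broken or discouraged commands exit with a helpful message,
        # unless the user insists with --force.
        if 'test' in argv[1:] and '--force' not in argv:
            print(textwrap.dedent("""
                `setup.py test` is not supported, use `python runtests.py`.
                """))
            sys.exit(1)
        # Anything unrecognized: warn (as the real code does) and build anyway.
        return True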
---
 .travis.yml                                | 1 +
 MANIFEST.in                                | 1 -
 doc/HOWTO_BUILD_DOCS.rst.txt               | 4 +---
 doc/Makefile                               | 2 +-
 doc/source/dev/development_environment.rst | 7 ++++---
 doc/source/user/building.rst               | 6 +++---
 pavement.py                                | 4 ++--
 tools/travis-test.sh                       | 1 +
 8 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index e0887a82a3f3..1832e317c70f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -48,6 +48,7 @@ matrix:
           - python3-dbg
           - python3-dev
           - python3-nose
+          - python3-setuptools
     - python: 2.7
       env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1
     - python: 3.5
diff --git a/MANIFEST.in b/MANIFEST.in
index 3695dfe57cfa..4e5206b942b2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,7 +5,6 @@
 #   include MANIFEST.in
 include *.txt
-include setupegg.py
 include site.cfg.example
 include numpy/random/mtrand/generate_mtrand_c.py
 recursive-include numpy/random/mtrand *.pyx *.pxd
diff --git a/doc/HOWTO_BUILD_DOCS.rst.txt b/doc/HOWTO_BUILD_DOCS.rst.txt
index 8107aaa81012..dc0145855282 100644
--- a/doc/HOWTO_BUILD_DOCS.rst.txt
+++ b/doc/HOWTO_BUILD_DOCS.rst.txt
@@ -36,9 +36,7 @@ that the correct version is imported by
     >>> import numpy

 Note that you can eg. install Numpy to a temporary location and set
-the PYTHONPATH environment variable appropriately. Also note that if
-you have a system Numpy installed via Python eggs, you will also need
-to use ``setupegg.py`` to install the temporary Numpy.
+the PYTHONPATH environment variable appropriately.

 After Numpy is installed, write::

diff --git a/doc/Makefile b/doc/Makefile
index 063ab0db83bc..52840be92571 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -82,7 +82,7 @@ real-dist: dist-build html html-scipyorg

 dist-build:
 	rm -f ../dist/*.egg
-	cd .. && $(PYTHON) setupegg.py bdist_egg
+	cd .. && $(PYTHON) setup.py bdist_egg
 	install -d $(subst :, ,$(INSTALL_PPH))
 	$(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg

diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 0fb5a666d976..baf8972cdd0d 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -62,9 +62,10 @@ test and use your changes (in ``.py`` files), by simply restarting the
 interpreter.

 Note that another way to do an inplace build visible outside the repo base dir
-is with ``python setup.py develop``. This doesn't work for NumPy, because
-NumPy builds don't use ``setuptools`` by default. ``python setupegg.py
-develop`` will work though.
+is with ``python setup.py develop``. The difference is that instead of
+adjusting ``PYTHONPATH``, this installs a ``.egg-link`` file into your
+site-packages and adjusts ``easy-install.pth`` there, so it's a more
+permanent (and magical) operation.


 Other build options
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index 8acb2fa3b319..d6ca622f9139 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -56,9 +56,9 @@ To perform an in-place build that can be run from the source folder run::

     python setup.py build_ext --inplace

-The NumPy build system uses ``distutils`` and ``numpy.distutils``.
-``setuptools`` is only used when building via ``pip`` or with ``python
-setupegg.py``. Using ``virtualenv`` should work as expected.
+The NumPy build system uses ``setuptools`` (from numpy 1.11.0, before that it
+was plain ``distutils``) and ``numpy.distutils``.
+Using ``virtualenv`` should work as expected.
*Note: for build instructions to do development work on NumPy itself, see* :ref:`development-environment`. diff --git a/pavement.py b/pavement.py index ef6c6af52574..45a6943fe4de 100644 --- a/pavement.py +++ b/pavement.py @@ -18,7 +18,7 @@ paver bootstrap && source bootstrap/bin/activate # Installing numpy is necessary to build the correct documentation (because # of autodoc) - python setupegg.py install + python setup.py install paver dmg Building a simple (no-superpack) windows installer from wine @@ -440,7 +440,7 @@ def _build_mpkg(pyver): ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first" ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build") - sh("LDFLAGS='%s' %s setupegg.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver]))) + sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver]))) @task def simple_dmg(): diff --git a/tools/travis-test.sh b/tools/travis-test.sh index d105c15c71ea..3de1ca78de2b 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -132,6 +132,7 @@ run_test() export PYTHON export PIP +$PIP install setuptools if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then # Build wheel $PIP install wheel From 4aa9d578b85889530baec8a79a5570435eb0d5e1 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 20:47:51 +0100 Subject: [PATCH 370/496] DOC: add Cython to list of dependencies in INSTALL.txt. Closes gh-6682. --- INSTALL.txt | 4 +++- doc/source/user/building.rst | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/INSTALL.txt b/INSTALL.txt index 6339cbb8746d..863acbd53bf3 100644 --- a/INSTALL.txt +++ b/INSTALL.txt @@ -34,7 +34,9 @@ Building NumPy requires the following software installed: Python must also be compiled with the zlib module enabled. -2) nose__ (optional) 1.0 or later +2) Cython >= 0.19 (for development versions of numpy, not for released + versions) +3) nose__ (optional) 1.0 or later This is required for testing numpy, but not for using it. diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index d6ca622f9139..6d5f8c1b36c0 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -45,6 +45,12 @@ Building NumPy requires the following software installed: can be used, including optimized LAPACK libraries such as ATLAS, MKL or the Accelerate/vecLib framework on OS X. +4) Cython + + To build development versions of Numpy, you'll need a recent version of + Cython. Released Numpy sources on PyPi include the C files generated from + Cython code, so for released versions having Cython installed isn't needed. + Basic Installation ------------------ From 08e75cad2b736cbed625ab3a831bb6f4887ca52e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 21:50:09 +0100 Subject: [PATCH 371/496] TST: add test to check for correct version string format. Implements idea suggested in gh-6431. 
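
For context on the new test below: examples of the version strings it is
meant to accept and reject (illustrative values; the patterns are the
raw-string forms from the follow-up fix further down)::

    import re

    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
    dev_suffix = r"(\.dev0\+([0-9a-f]{7}|Unknown))"

    # Releases and prereleases match the base pattern...
    assert re.match(version_pattern, '1.11.0')
    assert re.match(version_pattern, '1.11.0rc1')
    # ...development builds additionally carry '.dev0+' plus a git hash...
    assert re.match(version_pattern + dev_suffix, '1.11.0.dev0+1a2b3c4')
    # ...while a stray '.post' suffix fails the development form (gh-6431).
    assert re.match(version_pattern + dev_suffix, '1.11.0.post1') is None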
--- numpy/tests/test_numpy_version.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 numpy/tests/test_numpy_version.py diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py new file mode 100644 index 000000000000..68d1508cea1e --- /dev/null +++ b/numpy/tests/test_numpy_version.py @@ -0,0 +1,23 @@ +from __future__ import division, absolute_import, print_function + +import re + +import numpy as np +from numpy.testing import assert_, run_module_suite + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. + version_pattern = "^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" + dev_suffix = "+(\.dev0\+([0-9a-f]{7}|Unknown))" + if np.version.release: + res = re.match(version_pattern + "?$", np.__version__) + else: + res = re.match(version_pattern + dev_suffix + "?$", np.__version__) + + assert_(res is not None, np.__version__) + + +if __name__ == "__main__": + run_module_suite() From 105a498fcc987f9592b7a8a632f50544fe23ff31 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Dec 2015 21:58:36 +0100 Subject: [PATCH 372/496] DOC: add NumpyVersion to the docs, and mention it in version.py --- doc/source/reference/routines.other.rst | 7 +++++++ setup.py | 2 ++ 2 files changed, 9 insertions(+) diff --git a/doc/source/reference/routines.other.rst b/doc/source/reference/routines.other.rst index a3a1f8a06f6d..b7a924eba78e 100644 --- a/doc/source/reference/routines.other.rst +++ b/doc/source/reference/routines.other.rst @@ -31,3 +31,10 @@ Memory ranges shares_memory may_share_memory + +Numpy version comparison +------------------------ +.. autosummary:: + :toctree: generated/ + + lib.NumpyVersion diff --git a/setup.py b/setup.py index c0646597020d..ff8f96247c4a 100755 --- a/setup.py +++ b/setup.py @@ -126,6 +126,8 @@ def get_version_info(): def write_version_py(filename='numpy/version.py'): cnt = """ # THIS FILE IS GENERATED FROM NUMPY SETUP.PY +# +# To compare versions robustly, use `numpy.lib.NumpyVersion` short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' From a003d8da113365065bb8e84e6980e7e16693f6e5 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 11:25:52 +0100 Subject: [PATCH 373/496] TST: fix issues with test for correctness of numpy version string. Addresses comments of @pv on gh-6895. --- numpy/tests/test_numpy_version.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 68d1508cea1e..b61d0d5f193e 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -9,12 +9,12 @@ def test_valid_numpy_version(): # Verify that the numpy version is a valid one (no .post suffix or other # nonsense). See gh-6431 for an issue caused by an invalid version. 
- version_pattern = "^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" - dev_suffix = "+(\.dev0\+([0-9a-f]{7}|Unknown))" + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" + dev_suffix = r"(\.dev0\+([0-9a-f]{7}|Unknown))" if np.version.release: - res = re.match(version_pattern + "?$", np.__version__) + res = re.match(version_pattern, np.__version__) else: - res = re.match(version_pattern + dev_suffix + "?$", np.__version__) + res = re.match(version_pattern + dev_suffix, np.__version__) assert_(res is not None, np.__version__) From 036e41efe92222e2be68db1c9c0740c7ef9b8ef4 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 12:00:32 +0100 Subject: [PATCH 374/496] DOC: update INSTALL.txt. --- INSTALL.txt | 159 ++++++++++++++++++++++------------------------------ 1 file changed, 68 insertions(+), 91 deletions(-) diff --git a/INSTALL.txt b/INSTALL.txt index 863acbd53bf3..915f4ed5e0bb 100644 --- a/INSTALL.txt +++ b/INSTALL.txt @@ -10,7 +10,7 @@ Building and installing NumPy **IMPORTANT**: the below notes are about building Numpy, which for most users is *not* the recommended way to install Numpy. Instead, use either a complete -scientific Python distribution or a binary installer - see +scientific Python distribution (recommended) or a binary installer - see http://scipy.org/install.html. @@ -43,128 +43,105 @@ Building NumPy requires the following software installed: Python__ http://www.python.org nose__ http://somethingaboutorange.com/mrl/projects/nose/ + +.. note:: + + If you want to build Numpy in order to work on Numpy itself, use + ``runtests.py``. For more details, see + http://docs.scipy.org/doc/numpy-dev/dev/development_environment.html + +.. note:: + + More extensive information on building Numpy (and Scipy) is maintained at + http://scipy.org/scipylib/building/index.html + + Basic Installation ================== -To install numpy run: +To install numpy run:: python setup.py build -j 4 install --prefix $HOME/.local This will compile numpy on 4 CPUs and install it into the specified prefix. -To perform an inplace build that can be run from the source folder run: +To perform an inplace build that can be run from the source folder run:: - python setup.py build_ext --inplace -j 4 + python setup.py build_ext --inplace -j 4 The number of build jobs can also be specified via the environment variable NPY_NUM_BUILD_JOBS. -Fortran ABI mismatch -==================== -The two most popular open source fortran compilers are g77 and gfortran. -Unfortunately, they are not ABI compatible, which means that concretely you -should avoid mixing libraries built with one with another. In particular, -if your blas/lapack/atlas is built with g77, you *must* use g77 when -building numpy and scipy; on the contrary, if your atlas is built with -gfortran, you *must* build numpy/scipy with gfortran. +Choosing compilers +================== -Choosing the fortran compiler ------------------------------ +On OS X and Linux, all common compilers will work. Note that for Fortran, +``gfortran`` is strongly preferred over ``g77``, but if you happen to have both +installed then ``g77`` will be detected and used first. To explicitly select +``gfortran`` in that case, do:: -To build with g77: + python setup.py build --fcompiler=gnu95 - python setup.py build --fcompiler=gnu +Windows +------- -To build with gfortran: +On Windows, building from source can be difficult. 
Currently the most robust +option is to use the Intel compilers, or alternatively MSVC (the same version +as used to build Python itself) with Intel ifort. Intel itself maintains a +good `application note `_ +on this. - python setup.py build --fcompiler=gnu95 +If you want to use a free compiler toolchain, the recommended compiler is MingwPy__. +The older MinGW32 compiler set used to produce older .exe installers for Numpy +itself is still available at https://github.com/numpy/numpy-vendor, but not +recommended for use anymore. -How to check the ABI of blas/lapack/atlas ------------------------------------------ +MingwPy__ http://mingwpy.github.io -One relatively simple and reliable way to check for the compiler used to -build a library is to use ldd on the library. If libg2c.so is a dependency, -this means that g77 has been used. If libgfortran.so is a dependency, -gfortran has been used. If both are dependencies, this means both have been -used, which is almost always a very bad idea. Building with optimized BLAS support ==================================== -Ubuntu/Debian -------------- - -In order to build with optimized a BLAS providing development package must be installed. -Options are for example: +Configuring which BLAS/LAPACK is used if you have multiple libraries installed, +or you have only one installed but in a non-standard location, is done via a +``site.cfg`` file. See the ``site.cfg.example`` shipped with Numpy for more +details. - - libblas-dev - reference BLAS not very optimized - - libatlas-base-dev - generic tuned ATLAS, it is recommended to tune it to the available hardware, - see /usr/share/doc/libatlas3-base/README.Debian for instructions - - libopenblas-base - fast and runtime detected so no tuning required but as of version 2.11 still - suffers from correctness issues on some CPUs, test your applications - thoughly. +Windows +------- -The actual implementation can be exchanged also after installation via the -alternatives mechanism: +The Intel compilers work with Intel MKL, see the application note linked above. +MingwPy__ works with OpenBLAS. +For an overview of the state of BLAS/LAPACK libraries on Windows, see +`here `_. - update-alternatives --config libblas.so.3 - update-alternatives --config liblapack.so.3 +OS X +---- -Or by preloading a specific BLAS library with - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... +OS X ships the Accelerate framework, which Numpy can build against without any +manual configuration. Other BLAS/LAPACK implementations (OpenBLAS, Intel MKL, +ATLAS) will also work. +Ubuntu/Debian +------------- -Windows 32 bits notes -===================== - -The MinGW compilers used to build the official Numpy binary installers for -32-bit Python on Windows can be found in https://github.com/numpy/numpy-vendor. -That repo also contains pre-built ATLAS binarues. The command to build and -install Numpy is: - - $ python setup.py config --compiler=mingw32 build --compiler=mingw32 install - -Typically, one needs to use a site.cfg file that looks like: - - [atlas] - library_dirs = C:\local\lib\atlas - include_dirs = C:\local\lib\atlas - -Windows 64 bits notes -===================== - -Note: only AMD64 is supported (IA64 is not) - AMD64 is the version most -people want. - -Free compilers (mingw-w64) --------------------------- - -http://mingw-w64.sourceforge.net/ - -To use the free compilers (mingw-w64), you need to build your own -toolchain, as the mingw project only distribute cross-compilers -(cross-compilation is not supported by numpy). 
Since this toolchain is -still being worked on, serious compiler bugs can be expected. binutil 2.19 -+ gcc 4.3.3 + mingw-w64 runtime gives you a working C compiler (but the C++ -is broken). gcc 4.4 will hopefully be able to run natively. +In order to build with optimized a BLAS providing development package must be installed. +Options are for example: -This is the only tested way to get a numpy with a FULL blas/lapack (scipy -does not work because of C++). + - ``libblas-dev``: reference BLAS (not very optimized) + - ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to + the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for + instructions + - ``libopenblas-base``: fast and runtime detected so no tuning required but a + very recent version is needed (>=0.2.15 is recommended). Older versions of + OpenBLAS suffered from correctness issues on some CPUs. -MS compilers ------------- +The actual implementation can be exchanged also after installation via the +alternatives mechanism:: -If you are familiar with MS tools, that's obviously the easiest path, and -the compilers are hopefully more mature (although in my experience, they -are quite fragile, and often segfault on invalid C code). The main drawback -is that mingw-w64 gfortran + MSVC does not work at all (it is unclear -whether it ever will). MSVC + ifort + MKL does work. + update-alternatives --config libblas.so.3 + update-alternatives --config liblapack.so.3 -For python 2.6, you need VS 2008. The freely available version does not -contains 64 bits compilers (you also need the PSDK, v6.1). +Or by preloading a specific BLAS library with:: -It is crucial to use the right MS compiler version. For python 2.6, you -must use version 15. You can check the compiler version with cl.exe /?. + LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... From 88ffedf439b25c567503e74d9f67935a6af55aff Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 12:12:38 +0100 Subject: [PATCH 375/496] DOC: some tweaks to the install and build info in the user guide. --- doc/source/user/building.rst | 4 ---- doc/source/user/install.rst | 5 ++--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 6d5f8c1b36c0..78dbc9fa27ac 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -101,10 +101,6 @@ where different FORTRAN compilers might have been used. Choosing the fortran compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To build with g77:: - - python setup.py build --fcompiler=gnu - To build with gfortran:: python setup.py build --fcompiler=gnu95 diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst index ebb6bce623fa..a9ac735b8eb6 100644 --- a/doc/source/user/install.rst +++ b/doc/source/user/install.rst @@ -3,9 +3,8 @@ Installing NumPy **************** In most use cases the best way to install NumPy on your system is by using an -pre-built package for your operating system. - -Please see http://scipy.org/install.html for links to available options. +pre-built package for your operating system. Please see +http://scipy.org/install.html for links to available options. For instructions on building for source package, see :doc:`building`. This information is useful mainly for advanced users. 
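A practical companion to the BLAS notes in the INSTALL.txt rewrite above: which BLAS/LAPACK an existing numpy build linked against, and what a rebuild would pick up (including ``site.cfg`` overrides), can be inspected from Python. A minimal sketch using introspection helpers available in numpy of this vintage::

    import numpy as np

    # Configuration baked in at build time (BLAS/LAPACK sections and paths).
    np.__config__.show()

    # What numpy.distutils would detect for a fresh build right now;
    # this honours site.cfg and environment overrides.
    from numpy.distutils import system_info
    print(system_info.get_info('blas_opt'))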
From 1316a8a17fd83daeb39f5245b4df4ef8f3e7f012 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 12:17:35 +0100 Subject: [PATCH 376/496] DOC: some more cleanup in INSTALL.txt, and rename to INSTALL.rst.txt --- INSTALL.txt => INSTALL.rst.txt | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) rename INSTALL.txt => INSTALL.rst.txt (93%) diff --git a/INSTALL.txt b/INSTALL.rst.txt similarity index 93% rename from INSTALL.txt rename to INSTALL.rst.txt index 915f4ed5e0bb..41f23b8d02d7 100644 --- a/INSTALL.txt +++ b/INSTALL.rst.txt @@ -1,13 +1,6 @@ -.. -*- rest -*- -.. vim:syntax=rest -.. NB! Keep this document a valid restructured document. - Building and installing NumPy +++++++++++++++++++++++++++++ -:Authors: Numpy Developers -:Discussions to: numpy-discussion@scipy.org - **IMPORTANT**: the below notes are about building Numpy, which for most users is *not* the recommended way to install Numpy. Instead, use either a complete scientific Python distribution (recommended) or a binary installer - see http://scipy.org/install.html. .. Contents:: -PREREQUISITES +Prerequisites ============= Building NumPy requires the following software installed: @@ -145,3 +138,12 @@ alternatives mechanism:: Or by preloading a specific BLAS library with:: LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... + + +Build issues +============ + +If you run into build issues and need help, the Numpy +`mailing list `_ is the best +place to ask. If the issue is clearly a bug in Numpy, please file an issue (or +even better, a pull request) at https://github.com/numpy/numpy. From b27e9b12c9d94492a273162704ace3a23351db25 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 12:53:33 +0100 Subject: [PATCH 377/496] BLD: fix rebuilding after a failed build. Closes gh-5467. Also remove bench(), which doesn't do anything here after the move to asv. --- numpy/distutils/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py index 766439d92546..602a3d1170d1 100644 --- a/numpy/distutils/__init__.py +++ b/numpy/distutils/__init__.py @@ -11,13 +11,13 @@ from .info import __doc__ from .npy_pkg_config import * +# If numpy is installed, add distutils.test() try: from . import __config__ - _INSTALLED = True -except ImportError: - _INSTALLED = False - -if _INSTALLED: + # Normally numpy is installed if the above import works, but an interrupted + # in-place build could also have left a __config__.py. In that case the + # next import may still fail, so keep it inside the try block. from numpy.testing.nosetester import _numpy_tester test = _numpy_tester().test - bench = _numpy_tester().bench +except ImportError: + pass From ab5c6d01da88c05255427a8b6db72c66f67c849a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 16 Jan 2016 15:21:23 +0100 Subject: [PATCH 378/496] DOC/BUG: textual improvements in install docs, and fix a typo in setup.py Address comments of @charris on gh-6895. --- INSTALL.rst.txt | 28 +++++++++++++--------- doc/source/dev/development_environment.rst | 7 +++--- setup.py | 4 ++-- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt index 41f23b8d02d7..0b778d9174b0 100644 --- a/INSTALL.rst.txt +++ b/INSTALL.rst.txt @@ -68,6 +68,12 @@ NPY_NUM_BUILD_JOBS. Choosing compilers ================== +Numpy needs a C compiler, and for development versions also Cython.
A Fortran +compiler isn't needed to build Numpy itself; the ``numpy.f2py`` tests will be +skipped when running the test suite if no Fortran compiler is available. For +building Scipy a Fortran compiler is needed though, so we include some details +on Fortran compilers in the rest of this section. + On OS X and Linux, all common compilers will work. Note that for Fortran, ``gfortran`` is strongly preferred over ``g77``, but if you happen to have both installed then ``g77`` will be detected and used first. To explicitly select @@ -118,19 +124,19 @@ ATLAS) will also work. Ubuntu/Debian ------------- -In order to build with optimized a BLAS providing development package must be installed. -Options are for example: +For best performance a development package providing BLAS and CBLAS should be +installed. Some of the options available are: - - ``libblas-dev``: reference BLAS (not very optimized) - - ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions - - ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libblas-dev``: reference BLAS (not very optimized) +- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to + the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for + instructions +- ``libopenblas-base``: fast and runtime detected so no tuning required but a + very recent version is needed (>=0.2.15 is recommended). Older versions of + OpenBLAS suffered from correctness issues on some CPUs. -The actual implementation can be exchanged also after installation via the -alternatives mechanism:: +The package linked to when numpy is loaded can be chosen after installation via +the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index baf8972cdd0d..f3f24aab7a92 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -62,10 +62,9 @@ test and use your changes (in ``.py`` files), by simply restarting the interpreter. Note that another way to do an inplace build visible outside the repo base dir -is with ``python setup.py develop``. The difference is that this instead of -adjusting ``PYTHONPATH``, this installs a ``.egg-link`` file into your -site-packages as well as adjusts ``easy-install.pth`` there, so its a more -permanent (and magical) operation. +is with ``python setup.py develop``. Instead of adjusting ``PYTHONPATH``, this +installs a ``.egg-link`` file into your site-packages as well as adjusts the +``easy-install.pth`` there, so its a more permanent (and magical) operation. Other build options diff --git a/setup.py b/setup.py index ff8f96247c4a..ded914b11dc4 100755 --- a/setup.py +++ b/setup.py @@ -235,14 +235,14 @@ def parse_setuppy_commands(): # fine as they are, but are usually used together with one of the commands # below and not standalone. Hence they're not added to good_commands. 
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py', - 'build_clib', 'buld_scripts', 'bdist_wheel', 'bdist_rpm', + 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm', 'bdist_wininst', 'bdist_msi', 'bdist_mpkg') for command in good_commands: if command in sys.argv[1:]: return True - # The following commands are supported, but we need to show some more + # The following commands are supported, but we need to show more # useful messages to the user if 'install' in sys.argv[1:]: print(textwrap.dedent(""" From 8b8f4648cbef9c64152d6f1f95703a400b156d83 Mon Sep 17 00:00:00 2001 From: Stuart Archibald Date: Tue, 17 Nov 2015 16:30:02 +0000 Subject: [PATCH 379/496] BUG: fix issues with signed zeros in scalar math complex division. The current algorithm used in scalar math complex division appears to incorrectly handle signed zeros. This patch duplicates the algorithm used for complex division in the loops.c.src file into the scalarmath.c.src file so the algorithms are consistent regardless of context. Unit tests are added in the scalar context for testing the correctness of sign when zeros are encountered and also to trip the new branches in the now consistent algorithm. --- numpy/core/src/umath/scalarmath.c.src | 30 +++++++++++---- numpy/core/tests/test_scalarmath.py | 53 +++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src index c371a079f654..d7ce767dadad 100644 --- a/numpy/core/src/umath/scalarmath.c.src +++ b/numpy/core/src/umath/scalarmath.c.src @@ -316,16 +316,32 @@ static npy_half (*_basic_half_fmod)(npy_half, npy_half); (outp)->real = (a).real * (b).real - (a).imag * (b).imag; \ (outp)->imag = (a).real * (b).imag + (a).imag * (b).real; \ } while(0) -/* Note: complex division by zero must yield some complex inf */ +/* Algorithm identical to that in loops.c.src, for consistency */ #define @name@_ctype_divide(a, b, outp) do{ \ - @rtype@ d = (b).real*(b).real + (b).imag*(b).imag; \ - if (d != 0) { \ - (outp)->real = ((a).real*(b).real + (a).imag*(b).imag)/d; \ - (outp)->imag = ((a).imag*(b).real - (a).real*(b).imag)/d; \ + @rtype@ in1r = (a).real; \ + @rtype@ in1i = (a).imag; \ + @rtype@ in2r = (b).real; \ + @rtype@ in2i = (b).imag; \ + @rtype@ in2r_abs = npy_fabs@c@(in2r); \ + @rtype@ in2i_abs = npy_fabs@c@(in2i); \ + if (in2r_abs >= in2i_abs) { \ + if (in2r_abs == 0 && in2i_abs == 0) { \ + /* divide by zero should yield a complex inf or nan */ \ + (outp)->real = in1r/in2r_abs; \ + (outp)->imag = in1i/in2i_abs; \ + } \ + else { \ + @rtype@ rat = in2i/in2r; \ + @rtype@ scl = 1.0@c@/(in2r + in2i*rat); \ + (outp)->real = (in1r + in1i*rat)*scl; \ + (outp)->imag = (in1i - in1r*rat)*scl; \ + } \ } \ else { \ - (outp)->real = (a).real/d; \ - (outp)->imag = (a).imag/d; \ + @rtype@ rat = in2r/in2i; \ + @rtype@ scl = 1.0@c@/(in2i + in2r*rat); \ + (outp)->real = (in1r*rat + in1i)*scl; \ + (outp)->imag = (in1i*rat - in1r)*scl; \ } \ } while(0) #define @name@_ctype_true_divide @name@_ctype_divide diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 6dd9aa4550ad..5298b0387398 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -153,6 +153,59 @@ def test_zero_division(self): b = t(0.) 
assert_(np.isnan(b/a)) + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), + (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = list() + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + class TestConversion(TestCase): def test_int_from_long(self): From 18aeffd93797df872539a9bd16efa4f67887f568 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 14 Jan 2016 19:33:29 -0700 Subject: [PATCH 380/496] TST: Add tests for divmod --- numpy/core/tests/test_multiarray.py | 41 +++++++++++++++++++++++++++-- numpy/core/tests/test_scalarmath.py | 41 ++++++++++++++++++++++++++++- 2 files changed, 79 insertions(+), 3 deletions(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index c9e610cbff12..f16222f120b0 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -143,7 +143,8 @@ def test_int_subclassing(self): numpy_int = np.int_(0) if sys.version_info[0] >= 3: - # On Py3k int_ should not inherit from int, because it's not fixed-width anymore + # On Py3k int_ should not inherit from int, because it's not + # fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... 
@@ -175,7 +176,8 @@ def test_set_stridesattr(self): def make_array(size, offset, strides): try: - r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset*x.itemsize) except: raise RuntimeError(getexception()) r.strides = strides = strides*x.itemsize @@ -2327,6 +2329,41 @@ def test_conjugate(self): assert_raises(AttributeError, lambda: a.conj()) assert_raises(AttributeError, lambda: a.conjugate()) + def test_divmod_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']: + continue + if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']: + continue + fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1) + b = np.array(sg2*19, dtype=dt2) + div, rem = divmod(a, b) + assert_allclose(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_divmod_roundoff(self): + # gh-6127 + dt = 'fdg' + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1) + b = np.array(sg2*6e-8, dtype=dt2) + div, rem = divmod(a, b) + assert_allclose(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + class TestBinop(object): def test_inplace(self): diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 6dd9aa4550ad..f54e3d76ad03 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -1,12 +1,13 @@ from __future__ import division, absolute_import, print_function import sys +import itertools import numpy as np from numpy.testing.utils import _gen_alignment_data from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, - assert_almost_equal + assert_almost_equal, assert_allclose ) types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -135,6 +136,44 @@ def test_mixed_types(self): else: assert_almost_equal(result, 9, err_msg=msg) + +class TestDivmod(TestCase): + def test_divmod_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']: + continue + if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']: + continue + fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1)[()] + b = np.array(sg2*19, dtype=dt2)[()] + div, rem = divmod(a, b) + assert_allclose(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_divmod_roundoff(self): + # gh-6127 + dt = 'fdg' + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1)[()] + b = np.array(sg2*6e-8, dtype=dt2)[()] + div, rem = divmod(a, b) + assert_allclose(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + class 
TestComplexDivision(TestCase): def test_zero_division(self): with np.errstate(all="ignore"): From 4a000fb061e89b289c0af145332589e0491143c9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 16 Jan 2016 10:31:46 -0700 Subject: [PATCH 381/496] REL: Do not include merge commits in Changelog. Cleans up the Changelog. [ci skip] --- pavement.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pavement.py b/pavement.py index 45a6943fe4de..ace0a5c022fc 100644 --- a/pavement.py +++ b/pavement.py @@ -617,8 +617,9 @@ def write_release_task(options, filename='NOTES.txt'): def write_log_task(options, filename='Changelog'): st = subprocess.Popen( - ['git', 'log', '%s..%s' % (LOG_START, LOG_END)], - stdout=subprocess.PIPE) + ['git', 'log', '--no-merges', '--use-mailmap', + '%s..%s' % (LOG_START, LOG_END)], + stdout=subprocess.PIPE) out = st.communicate()[0] a = open(filename, 'w') From 4c504072b9914c0bf16ba70e41c2afbf4807f0e6 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 16 Jan 2016 13:50:50 -0500 Subject: [PATCH 382/496] BENCH: Reorganize existing benchmarks by the order they show up when run in the benchmarking suite. --- benchmarks/benchmarks/bench_linalg.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 3d26b800c646..6cccf74f8218 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -18,12 +18,12 @@ def setup(self): self.a3 = np.arange(480000.).reshape(60, 80, 100) self.b3 = np.arange(192000.).reshape(80, 60, 40) - def time_einsum_ij_jk_a_b(self): - np.einsum('ij,jk', self.a, self.b) - def time_dot_a_b(self): np.dot(self.a, self.b) + def time_dot_d_dot_b_c(self): + np.dot(self.d, np.dot(self.b, self.c)) + def time_dot_trans_a_at(self): np.dot(self.a, self.at) @@ -36,21 +36,21 @@ def time_dot_trans_at_a(self): def time_dot_trans_atc_a(self): np.dot(self.atc, self.a) - def time_inner_trans_a_a(self): - np.inner(self.a, self.a) - - def time_inner_trans_a_ac(self): - np.inner(self.a, self.ac) - def time_einsum_i_ij_j(self): np.einsum('i,ij,j', self.d, self.b, self.c) - def time_dot_d_dot_b_c(self): - np.dot(self.d, np.dot(self.b, self.c)) + def time_einsum_ij_jk_a_b(self): + np.einsum('ij,jk', self.a, self.b) def time_einsum_ijk_jil_kl(self): np.einsum('ijk,jil->kl', self.a3, self.b3) + def time_inner_trans_a_a(self): + np.inner(self.a, self.a) + + def time_inner_trans_a_ac(self): + np.inner(self.a, self.ac) + def time_tensordot_a_b_axes_1_0_0_1(self): np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1])) From e5b108c8f3fe5d60decb6a43b57c994909c8d3a8 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 16 Jan 2016 13:53:20 -0500 Subject: [PATCH 383/496] BENCH: Add some benchmarks for `matmul`. 
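A note on the scalar complex-division fix in PATCH 379 above, before the matmul benchmark diff: the macro ports the scaled division used in loops.c.src (commonly known as Smith's algorithm), which divides through by the larger-magnitude component of the denominator instead of forming ``in2r*in2r + in2i*in2i``, avoiding spurious overflow and keeping signed zeros intact. A rough Python sketch of the same control flow, not the actual implementation, using ``np.float64`` so that division by zero yields IEEE inf/nan instead of raising::

    import numpy as np

    def cdiv(a, b):
        # Mirrors the branch structure of the @name@_ctype_divide macro.
        ar, ai = np.float64(a.real), np.float64(a.imag)
        br, bi = np.float64(b.real), np.float64(b.imag)
        with np.errstate(divide='ignore', invalid='ignore'):
            if abs(br) >= abs(bi):
                if br == 0 and bi == 0:
                    # divide by zero yields a complex inf or nan
                    return complex(ar / abs(br), ai / abs(bi))
                rat = bi / br
                scl = 1.0 / (br + bi * rat)
                return complex((ar + ai * rat) * scl, (ai - ar * rat) * scl)
            rat = br / bi
            scl = 1.0 / (bi + br * rat)
            return complex((ar * rat + ai) * scl, (ai * rat - ar) * scl)

    assert cdiv(1 + 0j, 2 + 1j) == 0.4 - 0.2j   # (2 - 1j)/5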
--- benchmarks/benchmarks/bench_linalg.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 6cccf74f8218..a65d510be276 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -51,6 +51,24 @@ def time_inner_trans_a_a(self): def time_inner_trans_a_ac(self): np.inner(self.a, self.ac) + def time_matmul_a_b(self): + np.matmul(self.a, self.b) + + def time_matmul_d_matmul_b_c(self): + np.matmul(self.d, np.matmul(self.b, self.c)) + + def time_matmul_trans_a_at(self): + np.matmul(self.a, self.at) + + def time_matmul_trans_a_atc(self): + np.matmul(self.a, self.atc) + + def time_matmul_trans_at_a(self): + np.matmul(self.at, self.a) + + def time_matmul_trans_atc_a(self): + np.matmul(self.atc, self.a) + def time_tensordot_a_b_axes_1_0_0_1(self): np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1])) From 363c5021f7428f19a56cd081a5475e9d2b5e884d Mon Sep 17 00:00:00 2001 From: Olivier Grisel Date: Sat, 16 Jan 2016 19:21:27 +0100 Subject: [PATCH 384/496] BLD: build travis dev wheels for py27 --- .travis.yml | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1832e317c70f..3066cbbaa207 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,16 @@ cache: directories: - $HOME/.cache/pip +env: + global: + - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy + # The following is generated with the command: + # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY + - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\ + XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\ + iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\ + ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" + python: - 2.6 - 2.7 @@ -51,16 +61,10 @@ matrix: - python3-setuptools - python: 2.7 env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1 + - python: 2.7 + env: USE_WHEEL=1 - python: 3.5 - env: - - USE_WHEEL=1 - - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy - # The following is generated with the command: - # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY - - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9Tr\ - XrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPow\ - iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\ - ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" + env: USE_WHEEL=1 - python: 2.7 env: - PYTHONOPTIMIZE=2 From 150497560047956f3c179a5ff10e9b2f5ec83508 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 16 Jan 2016 13:56:09 -0500 Subject: [PATCH 385/496] DOC: Update the release notes to state that the `A.T @ A` optimization has been extended to several NumPy operations. --- doc/release/1.11.0-notes.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 16af02440dd8..c4ff892305fc 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -149,11 +149,12 @@ useless computations when printing a masked array. The function now uses the fallocate system call to reserve sufficient diskspace on filesystems that support it. -``np.dot`` optimized for operations of the form ``A.T @ A`` and ``A @ A.T`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Optimizations for operations of the form ``A.T @ A`` and ``A @ A.T`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Previously, ``gemm`` BLAS operations were used for all matrix products. 
Now, if the matrix product is between a matrix and its transpose, it will use -``syrk`` BLAS operations for a performance boost. +``syrk`` BLAS operations for a performance boost. This optimization has been +extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``. **Note:** Requires the transposed and non-transposed matrices to share data. From 22af11f70da01306ec6034b1d4d4bcf68362c5ed Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 16 Jan 2016 17:27:04 -0500 Subject: [PATCH 386/496] DOC: Fix typo. --- doc/release/1.10.4-notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/1.10.4-notes.rst b/doc/release/1.10.4-notes.rst index 03eaf5e6b5d2..7de732a22495 100644 --- a/doc/release/1.10.4-notes.rst +++ b/doc/release/1.10.4-notes.rst @@ -25,7 +25,7 @@ Issues Fixed Merged PRs ========== -The following PRs have been merged into 1.10.3. When the PR is a backport, +The following PRs have been merged into 1.10.4. When the PR is a backport, the PR number for the original PR against master is listed. * gh-6840 TST: Update travis testing script in 1.10.x From ab6ba2cbc460c8a7c000815bee8595776ce4ec98 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Sat, 16 Jan 2016 17:42:22 -0500 Subject: [PATCH 387/496] STY: Wrap some long lines. --- numpy/core/src/multiarray/cblasfuncs.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c index b11505c0e56a..ef05c72057f4 100644 --- a/numpy/core/src/multiarray/cblasfuncs.c +++ b/numpy/core/src/multiarray/cblasfuncs.c @@ -133,7 +133,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, for (i = 0; i < n; i++) { for (j = i + 1; j < n; j++) { - *((npy_double*)PyArray_GETPTR2(R, j, i)) = *((npy_double*)PyArray_GETPTR2(R, i, j)); + *((npy_double*)PyArray_GETPTR2(R, j, i)) = + *((npy_double*)PyArray_GETPTR2(R, i, j)); } } break; @@ -143,7 +144,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, for (i = 0; i < n; i++) { for (j = i + 1; j < n; j++) { - *((npy_float*)PyArray_GETPTR2(R, j, i)) = *((npy_float*)PyArray_GETPTR2(R, i, j)); + *((npy_float*)PyArray_GETPTR2(R, j, i)) = + *((npy_float*)PyArray_GETPTR2(R, i, j)); } } break; @@ -153,7 +155,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, for (i = 0; i < n; i++) { for (j = i + 1; j < n; j++) { - *((npy_cdouble*)PyArray_GETPTR2(R, j, i)) = *((npy_cdouble*)PyArray_GETPTR2(R, i, j)); + *((npy_cdouble*)PyArray_GETPTR2(R, j, i)) = + *((npy_cdouble*)PyArray_GETPTR2(R, i, j)); } } break; @@ -163,7 +166,8 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans, for (i = 0; i < n; i++) { for (j = i + 1; j < n; j++) { - *((npy_cfloat*)PyArray_GETPTR2(R, j, i)) = *((npy_cfloat*)PyArray_GETPTR2(R, i, j)); + *((npy_cfloat*)PyArray_GETPTR2(R, j, i)) = + *((npy_cfloat*)PyArray_GETPTR2(R, i, j)); } } break; @@ -319,8 +323,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, */ if (nd == 1) { /* - * Either PyArray_NDIM(ap1) is 1 dim or PyArray_NDIM(ap2) is 1 dim - * and the other is 2-dim + * Either PyArray_NDIM(ap1) is 1 dim or PyArray_NDIM(ap2) is + * 1 dim and the other is 2 dim */ dimensions[0] = (PyArray_NDIM(oap1) == 2) ? 
PyArray_DIM(oap1, 0) : PyArray_DIM(oap2, 1); @@ -729,7 +733,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, } } else { - gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, ret); + gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, + ret); } NPY_END_ALLOW_THREADS; } From 9def5a0019c1a4d42186e1523d7f39c796f937f0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 17 Jan 2016 01:19:07 +0100 Subject: [PATCH 388/496] BLD: fix runtests.py, was broken by the move to setuptools. Issue was introduced by gh-6895. --- runtests.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/runtests.py b/runtests.py index 957cbef100bc..52905a8fc87c 100755 --- a/runtests.py +++ b/runtests.py @@ -135,8 +135,13 @@ def main(argv): if not args.no_build: site_dir = build_project(args) - sys.path.insert(0, site_dir) - os.environ['PYTHONPATH'] = site_dir + for dirname in os.listdir(site_dir): + if dirname.startswith('numpy'): + # The .pth file isn't re-parsed, so need to put the numpy egg + # produced by easy-install on the path manually. + egg_dir = os.path.join(site_dir, dirname) + sys.path.insert(0, egg_dir) + os.environ['PYTHONPATH'] = egg_dir extra_argv = args.args[:] if extra_argv and extra_argv[0] == '--': @@ -346,6 +351,14 @@ def build_project(args): cmd += ["-j", str(args.parallel)] cmd += ['install', '--prefix=' + dst_dir] + from distutils.sysconfig import get_python_lib + site_dir = get_python_lib(prefix=dst_dir, plat_specific=True) + # easy_install won't install to a path that Python by default cannot see + # and isn't on the PYTHONPATH. Plus, it has to exist. + if not os.path.exists(site_dir): + os.makedirs(site_dir) + env['PYTHONPATH'] = site_dir + log_filename = os.path.join(ROOT_DIR, 'build.log') if args.show_build_log: @@ -383,9 +396,6 @@ def build_project(args): print("Build failed!") sys.exit(1) - from distutils.sysconfig import get_python_lib - site_dir = get_python_lib(prefix=dst_dir, plat_specific=True) - return site_dir @@ -421,8 +431,8 @@ def lcov_generate(): '--output-file', LCOV_OUTPUT_FILE]) print("Generating lcov HTML output...") - ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE, - '--output-directory', LCOV_HTML_DIR, + ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE, + '--output-directory', LCOV_HTML_DIR, '--legend', '--highlight']) if ret != 0: print("genhtml failed!") From c4c4178c7c9e34c9a86992c649469d52447f28b9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 17 Jan 2016 01:32:37 +0100 Subject: [PATCH 389/496] DOC: add build system changes to 1.11.0 release notes. [ci skip] --- doc/release/1.11.0-notes.rst | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index c4ff892305fc..0305688d8b0e 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -8,9 +8,14 @@ Highlights ========== -Dropped Support -=============== - +Build System Changes +==================== + +* Numpy now uses ``setuptools`` for its builds instead of plain distutils. + This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of + projects that depend on Numpy (see gh-6551). It potentially affects the way + that build/install methods for Numpy itself behave though. Please report any + unexpected behavior on the Numpy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. 
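The runtests.py change in PATCH 388 above is easiest to read as a standalone helper: after a setuptools ``setup.py install --prefix=...``, numpy lands in an egg directory recorded in ``easy-install.pth``, but ``.pth`` files are only parsed at interpreter startup, so the egg directory has to be pushed onto ``sys.path`` by hand. A sketch of that idea, with a function name invented here rather than taken from NumPy::

    import os
    import sys

    def add_numpy_egg(site_dir):
        # Find the numpy-<version>-<tag>.egg directory produced by
        # easy_install and make it importable in this process.
        for dirname in os.listdir(site_dir):
            if dirname.startswith('numpy'):
                egg_dir = os.path.join(site_dir, dirname)
                sys.path.insert(0, egg_dir)
                os.environ['PYTHONPATH'] = egg_dir
                return egg_dir
        raise RuntimeError('no numpy egg found in %s' % site_dir)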
From da98bbc030c272edb1a8548a458b3957e29ce346 Mon Sep 17 00:00:00 2001 From: Stephan Hoyer Date: Sat, 16 Jan 2016 16:35:28 -0800 Subject: [PATCH 390/496] DOC: document changes to assert_warns --- doc/release/1.11.0-notes.rst | 4 ++++ numpy/testing/utils.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index ac3c1578c18f..8ac8e963f7b9 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -186,6 +186,10 @@ if the matrix product is between a matrix and its transpose, it will use **Note:** Requires the transposed and non-transposed matrices to share data. +*np.testing.assert_warns* can now be used as a context manager +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This matches the behavior of ``assert_raises``. + Changes ======= Pyrex support was removed from ``numpy.distutils``. The method diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 72105ca31a2f..10a48ad79f49 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -1736,6 +1736,8 @@ def assert_warns(warning_class, *args, **kwargs): with assert_warns(SomeWarning): do_something() + The ability to be used as a context manager is new in NumPy v1.11.0. + .. versionadded:: 1.4.0 Parameters @@ -1783,6 +1785,8 @@ def assert_no_warnings(*args, **kwargs): with assert_no_warnings(): do_something() + The ability to be used as a context manager is new in NumPy v1.11.0. + .. versionadded:: 1.7.0 Parameters From e0cb3f79936656d6d2f48cbad46a3a9f2bad5ae1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 16 Jan 2016 19:00:43 -0700 Subject: [PATCH 391/496] Revert "Merge pull request #7001 from shoyer/NaT-comparison" This reverts commit 7141f40b58ed1e7071cde78ab7bc8ab37e9c5983, reversing changes made to 8fa6e3bef26a6d4a2c92f2824129aa4409be2590. The original broke some pandas tests. The current plan to get this in is * reversion * issue FutureWarning in 1.11 and 1.12 * make the change in 1.13. 
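To spell out the comparison semantics this revert restores, and the endgame described in the plan above: the first assertion below holds only on the reverted (1.11-era) code, while the second shows the NaN-style model that 1.13 is meant to adopt. A short sketch::

    import numpy as np

    # Restored behavior (also asserted in the test diff below): NaT compares
    # equal to NaT, across units, without warning.
    assert np.datetime64('NaT') == np.datetime64('NaT', 'us')

    # The eventual model (1.13, after FutureWarnings in 1.11/1.12): NaT is to
    # behave like NaN, where == is False and only != is True.
    assert np.nan != np.nan

On a tree with gh-7001 still applied the first assertion fails, which is exactly the change being deferred.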
--- numpy/core/arrayprint.py | 6 +-- numpy/core/src/multiarray/scalartypes.c.src | 2 +- numpy/core/src/umath/loops.c.src | 26 ++--------- numpy/core/tests/test_datetime.py | 36 +++------------ numpy/ma/tests/test_extras.py | 2 +- numpy/ma/testutils.py | 5 ++- numpy/testing/tests/test_utils.py | 33 +------------- numpy/testing/utils.py | 49 ++------------------- 8 files changed, 24 insertions(+), 135 deletions(-) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index c5b5b5a8f0ea..fefcb649393d 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -739,8 +739,8 @@ def __call__(self, x): class TimedeltaFormat(object): def __init__(self, data): if data.dtype.kind == 'm': - # select non-NaT elements - v = data[data == data].view('i8') + nat_value = array(['NaT'], dtype=data.dtype)[0] + v = data[not_equal(data, nat_value)].view('i8') if len(v) > 0: # Max str length of non-NaT elements max_str_len = max(len(str(maximum.reduce(v))), @@ -754,7 +754,7 @@ def __init__(self, data): self._nat = "'NaT'".rjust(max_str_len) def __call__(self, x): - if x != x: + if x + 1 == x: return self._nat else: return self.format % x.astype('i8') diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index 7c73822dd2f4..1bd5b22d2124 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -1673,7 +1673,7 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) * However, as a special case, void-scalar assignment broadcasts * differently from ndarrays when assigning to an object field: Assignment * to an ndarray object field broadcasts, but assignment to a void-scalar - * object-field should not, in order to allow nested ndarrays. + * object-field should not, in order to allow nested ndarrays. 
* These lines should then behave identically: * * b = np.zeros(1, dtype=[('x', 'O')]) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index e74ac9d40e7f..2261a80dbeab 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1117,8 +1117,8 @@ NPY_NO_EXPORT void } /**begin repeat1 - * #kind = equal, greater, greater_equal, less, less_equal# - * #OP = ==, >, >=, <, <=# + * #kind = equal, not_equal, greater, greater_equal, less, less_equal# + * #OP = ==, !=, >, >=, <, <=# */ NPY_NO_EXPORT void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) @@ -1126,31 +1126,11 @@ NPY_NO_EXPORT void BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { - *((npy_bool *)op1) = NPY_FALSE; - } - else { - *((npy_bool *)op1) = in1 @OP@ in2; - } + *((npy_bool *)op1) = in1 @OP@ in2; } } /**end repeat1**/ -NPY_NO_EXPORT void -@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) { - *((npy_bool *)op1) = NPY_TRUE; - } - else { - *((npy_bool *)op1) = in1 != in2; - } - } -} - /**begin repeat1 * #kind = maximum, minimum# * #OP = >, <# diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index c79f59c7091f..25a56767ff3b 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -130,11 +130,10 @@ def test_compare_generic_nat(self): # regression tests for GH6452 assert_equal(np.datetime64('NaT'), np.datetime64('2000') + np.timedelta64('NaT')) - assert_equal(np.datetime64('NaT'), np.datetime64('NaT', 'us')) - assert_equal(np.timedelta64('NaT'), np.timedelta64('NaT', 'us')) - # neither of these should issue a warning - assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us')) - assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) + # nb. 
we may want to make NaT != NaT true in the future; this test + # verifies the existing behavior (and that it should not warn) + assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us')) + assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT')) def test_datetime_scalar_construction(self): # Construct with different units @@ -553,9 +552,6 @@ def test_datetime_array_str(self): "'%s'" % np.datetime_as_string(x, timezone='UTC')}), "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") - a = np.array(['NaT', 'NaT'], dtype='datetime64[ns]') - assert_equal(str(a), "['NaT' 'NaT']") - # Check that one NaT doesn't corrupt subsequent entries a = np.array(['2010', 'NaT', '2030']).astype('M') assert_equal(str(a), "['2010' 'NaT' '2030']") @@ -662,7 +658,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + "Error roundtripping unit %s" % unit) # With time units for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: @@ -678,7 +674,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + "Error roundtripping unit %s" % unit) def test_month_truncation(self): # Make sure that months are truncating correctly @@ -1085,26 +1081,6 @@ def test_datetime_compare(self): assert_equal(np.greater(a, b), [0, 1, 0, 1, 0]) assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0]) - def test_datetime_compare_nat(self): - dt_nat = np.datetime64('NaT', 'D') - dt_other = np.datetime64('2000-01-01') - td_nat = np.timedelta64('NaT', 'h') - td_other = np.timedelta64(1, 'h') - for op in [np.equal, np.less, np.less_equal, - np.greater, np.greater_equal]: - assert_(not op(dt_nat, dt_nat)) - assert_(not op(dt_nat, dt_other)) - assert_(not op(dt_other, dt_nat)) - assert_(not op(td_nat, td_nat)) - assert_(not op(td_nat, td_other)) - assert_(not op(td_other, td_nat)) - assert_(np.not_equal(dt_nat, dt_nat)) - assert_(np.not_equal(dt_nat, dt_other)) - assert_(np.not_equal(dt_other, dt_nat)) - assert_(np.not_equal(td_nat, td_nat)) - assert_(np.not_equal(td_nat, td_other)) - assert_(np.not_equal(td_other, td_nat)) - def test_datetime_minmax(self): # The metadata of the result should become the GCD # of the operand metadata diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index c2428fa10ccf..6138d0573967 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -154,7 +154,7 @@ def test_testAverage1(self): ott = ott.reshape(2, 2) ott[:, 1] = masked assert_equal(average(ott, axis=0), [2.0, 0.0]) - assert_equal(average(ott, axis=1).mask[0], True) + assert_equal(average(ott, axis=1).mask[0], [True]) assert_equal([2., 0.], average(ott, axis=0)) result, wts = average(ott, axis=0, returned=1) assert_equal(wts, [1., 0.]) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 40b9fa1be487..8dc8218784c9 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -125,7 +125,10 @@ def assert_equal(actual, desired, err_msg=''): if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): return _assert_equal_on_sequences(actual, desired, err_msg='') if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): - return utils.assert_equal(actual, desired) + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return # Case #4. 
arrays or equivalent if ((actual is masked) and not (desired is masked)) or \ ((desired is masked) and not (actual is masked)): diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 92a00f71213f..23bd491bc1e8 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -11,7 +11,7 @@ assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, clear_and_catch_warnings, run_module_suite, - assert_string_equal, assert_, tempdir, temppath, + assert_string_equal, assert_, tempdir, temppath, ) import unittest @@ -119,25 +119,6 @@ def test_nan_array(self): c = np.array([1, 2, 3]) self._test_not_equal(c, b) - def test_nat_array_datetime(self): - a = np.array([np.datetime64('2000-01'), np.datetime64('NaT')]) - b = np.array([np.datetime64('2000-01'), np.datetime64('NaT')]) - self._test_equal(a, b) - - c = np.array([np.datetime64('NaT'), np.datetime64('NaT')]) - self._test_not_equal(c, b) - - def test_nat_array_timedelta(self): - a = np.array([np.timedelta64(1, 'h'), np.timedelta64('NaT')]) - b = np.array([np.timedelta64(1, 'h'), np.timedelta64('NaT')]) - self._test_equal(a, b) - - c = np.array([np.timedelta64('NaT'), np.timedelta64('NaT')]) - self._test_not_equal(c, b) - - d = np.array([np.datetime64('NaT'), np.datetime64('NaT')]) - self._test_not_equal(c, d) - def test_string_arrays(self): """Test two arrays with different shapes are found not equal.""" a = np.array(['floupi', 'floupa']) @@ -246,16 +227,6 @@ def test_complex(self): self._assert_func(x, x) self._test_not_equal(x, y) - def test_nat(self): - dt = np.datetime64('2000-01-01') - dt_nat = np.datetime64('NaT') - td_nat = np.timedelta64('NaT') - self._assert_func(dt_nat, dt_nat) - self._assert_func(td_nat, td_nat) - self._test_not_equal(dt_nat, td_nat) - self._test_not_equal(dt, td_nat) - self._test_not_equal(dt, dt_nat) - class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): @@ -486,7 +457,7 @@ def f(): class TestAssertAllclose(unittest.TestCase): - + def test_simple(self): x = 1e-3 y = 1e-9 diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 8e71a3399f96..f545cd3c2af1 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -15,7 +15,7 @@ from tempfile import mkdtemp, mkstemp from .nosetester import import_nose -from numpy.core import float32, empty, arange, array_repr, ndarray, dtype +from numpy.core import float32, empty, arange, array_repr, ndarray from numpy.lib.utils import deprecate if sys.version_info[0] >= 3: @@ -343,31 +343,16 @@ def assert_equal(actual,desired,err_msg='',verbose=True): except AssertionError: raise AssertionError(msg) - def isnat(x): - return (hasattr(x, 'dtype') - and getattr(x.dtype, 'kind', '_') in 'mM' - and x != x) - # Inf/nan/negative zero handling try: # isscalar test to check cases such as [np.nan] != np.nan - # dtypes compare equal to strings, but unlike strings aren't scalars, - # so we need to exclude them from this check - if (isscalar(desired) != isscalar(actual) - and not (isinstance(desired, dtype) - or isinstance(actual, dtype))): + if isscalar(desired) != isscalar(actual): raise AssertionError(msg) - # check NaT before NaN, because isfinite errors on datetime dtypes - if isnat(desired) and isnat(actual): - if desired.dtype.kind != actual.dtype.kind: - # datetime64 and timedelta64 NaT should not be comparable - raise AssertionError(msg) - return # If one of desired/actual is not finite, handle it specially here: # check that both are nan 
if any is a nan, and test for equality # otherwise - elif not (gisfinite(desired) and gisfinite(actual)): + if not (gisfinite(desired) and gisfinite(actual)): isdesnan = gisnan(desired) isactnan = gisnan(actual) if isdesnan or isactnan: @@ -678,9 +663,6 @@ def safe_comparison(*args, **kwargs): def isnumber(x): return x.dtype.char in '?bhilqpBHILQPefdgFDG' - def isdatetime(x): - return x.dtype.char in 'mM' - def chk_same_position(x_id, y_id, hasval='nan'): """Handling nan/inf: check that x and y have the nan/inf at the same locations.""" @@ -693,15 +675,6 @@ def chk_same_position(x_id, y_id, hasval='nan'): names=('x', 'y'), precision=precision) raise AssertionError(msg) - def chk_same_dtype(x_dt, y_dt): - try: - assert_equal(x_dt, y_dt) - except AssertionError: - msg = build_err_msg([x, y], err_msg + '\nx and y dtype mismatch', - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - try: cond = (x.shape == () or y.shape == ()) or x.shape == y.shape if not cond: @@ -739,20 +712,6 @@ def chk_same_dtype(x_dt, y_dt): val = safe_comparison(x[~x_id], y[~y_id]) else: val = safe_comparison(x, y) - elif isdatetime(x) and isdatetime(y): - x_isnat, y_isnat = (x != x), (y != y) - - if any(x_isnat) or any(y_isnat): - # cannot mix timedelta64/datetime64 NaT - chk_same_dtype(x.dtype, y.dtype) - chk_same_position(x_isnat, y_isnat, hasval='nat') - - if all(x_isnat): - return - if any(x_isnat): - val = safe_comparison(x[~x_isnat], y[~y_isnat]) - else: - val = safe_comparison(x, y) else: val = safe_comparison(x, y) @@ -1867,7 +1826,7 @@ def temppath(*args, **kwargs): parameters are the same as for tempfile.mkstemp and are passed directly to that function. The underlying file is removed when the context is exited, so it should be closed at that time. - + Windows does not allow a temporary file to be opened if it is already open, so the underlying file must be closed after opening before it can be opened again. From 5be93a2580a232705e897984d0f920bc6346990e Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Sat, 5 Dec 2015 20:46:09 -0800 Subject: [PATCH 392/496] MAINT: memcpy-based ~4x faster, typed shuffle. Only for 1d-ndarrays exactly, as subtypes (e.g. masked arrays) may not allow direct shuffle of the underlying buffer (in fact, the old implementation destroyed the underlying values of masked arrays while shuffling). Also handles struct-containing-object 1d ndarrays properly. See #6776 for an earlier, less general (but even faster: ~6x) improvement attempt, #5514 for the original issue. --- doc/release/1.11.0-notes.rst | 4 +++ numpy/random/mtrand/mtrand.pyx | 45 ++++++++++++++++----------- numpy/random/tests/test_random.py | 51 +++++++++++++++++-------------- 3 files changed, 60 insertions(+), 40 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 770938b60f75..e541a4739754 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -204,6 +204,10 @@ extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This matches the behavior of ``assert_raises``. +Speed improvement for np.random.shuffle +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``np.random.shuffle`` is now much faster for 1d ndarrays. + Changes ======= Pyrex support was removed from ``numpy.distutils``. 
The method diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index ff8171d45fa8..f70f578cc8e0 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -24,6 +24,8 @@ include "Python.pxi" include "numpy.pxd" +from libc cimport string + cdef extern from "math.h": double exp(double x) double log(double x) @@ -4979,33 +4981,42 @@ cdef class RandomState: [0, 1, 2]]) """ - cdef npy_intp i, j - - i = len(x) - 1 - - # Logic adapted from random.shuffle() - if isinstance(x, np.ndarray) and \ - (x.ndim > 1 or x.dtype.fields is not None): - # For a multi-dimensional ndarray, indexing returns a view onto - # each row. So we can't just use ordinary assignment to swap the - # rows; we need a bounce buffer. + cdef: + npy_intp i, j, n = len(x) + size_t stride, nbytes + char* x_ptr + char* buf_ptr + + if type(x) is np.ndarray and x.ndim == 1 and x.size: + # Fast, statically typed path: shuffle the underlying buffer. + # Only for non-empty, 1d objects of class ndarray (subclasses such + # as MaskedArrays may not support this approach). + x_ptr = x.ctypes.data + stride = x.strides[0] + nbytes = x[:1].nbytes + buf = np.empty_like(x[0]) # GC'd at function exit + buf_ptr = buf.ctypes.data + with self.lock: + for i in reversed(range(1, n)): + j = rk_interval(i, self.internal_state) + string.memcpy(buf_ptr, x_ptr + j * stride, nbytes) + string.memcpy(x_ptr + j * stride, x_ptr + i * stride, nbytes) + string.memcpy(x_ptr + i * stride, buf_ptr, nbytes) + elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size: + # Multidimensional ndarrays require a bounce buffer. buf = np.empty_like(x[0]) with self.lock: - while i > 0: + for i in reversed(range(1, n)): j = rk_interval(i, self.internal_state) buf[...] = x[j] x[j] = x[i] x[i] = buf - i = i - 1 else: - # For single-dimensional arrays, lists, and any other Python - # sequence types, indexing returns a real object that's - # independent of the array contents, so we can just swap directly. + # Untyped path. with self.lock: - while i > 0: + for i in reversed(range(1, n)): j = rk_interval(i, self.internal_state) x[i], x[j] = x[j], x[i] - i = i - 1 def permutation(self, object x): """ diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 37c1876bf314..e3391a9a20eb 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -10,6 +10,7 @@ import warnings + class TestSeed(TestCase): def test_scalar(self): s = np.random.RandomState(0) @@ -40,6 +41,7 @@ def test_invalid_array(self): assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + class TestBinomial(TestCase): def test_n_zero(self): # Tests the corner case of n == 0 for the binomial distribution. @@ -130,6 +132,7 @@ def test_negative_binomial(self): # arguments without truncation. 
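
# An illustrative, pure-Python sketch (not the shipped code) of the
# memcpy-based Fisher-Yates loop that the Cython fast path above implements:
# byte-slice copies on a bytearray stand in for string.memcpy, and
# random.randint stands in for rk_interval, which draws j from [0, i].
import random

def shuffle_buffer(data, n, itemsize):
    # data: a bytearray holding n contiguous items of itemsize bytes each
    buf = bytearray(itemsize)  # bounce buffer, playing the role of buf_ptr
    for i in reversed(range(1, n)):
        j = random.randint(0, i)
        buf[:] = data[j*itemsize:(j + 1)*itemsize]
        data[j*itemsize:(j + 1)*itemsize] = data[i*itemsize:(i + 1)*itemsize]
        data[i*itemsize:(i + 1)*itemsize] = buf

# e.g. eight one-byte items:
items = bytearray(range(8))
shuffle_buffer(items, n=8, itemsize=1)
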
self.prng.negative_binomial(0.5, 0.5) + class TestRandint(TestCase): rfunc = np.random.randint @@ -207,12 +210,13 @@ def test_repeatability(self): assert_(tgt[np.dtype(np.bool).name] == res) -class TestRandomDist(TestCase): +class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - def setUp(self): - self.seed = 1234567890 + @classmethod + def setup_class(cls): + cls.seed = 1234567890 def test_rand(self): np.random.seed(self.seed) @@ -368,40 +372,41 @@ def test_bytes(self): np.testing.assert_equal(actual, desired) def test_shuffle(self): - # Test lists, arrays, and multidimensional versions of both: - for conv in [lambda x: x, - np.asarray, + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), lambda x: [(i, i) for i in x], - lambda x: np.asarray([(i, i) for i in x])]: + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, 1), + ("b", np.int32, 1)])]: np.random.seed(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) np.random.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - np.testing.assert_array_equal(actual, desired) - - def test_shuffle_flexible(self): - # gh-4270 - arr = [(0, 1), (2, 3)] - dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)]) - nparr = np.array(arr, dtype=dt) - a, b = nparr[0].copy(), nparr[1].copy() - for i in range(50): - np.random.shuffle(nparr) - assert_(a in nparr) - assert_(b in nparr) + yield np.testing.assert_array_equal, actual, desired def test_shuffle_masked(self): # gh-3263 a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1) b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) - ma = np.ma.count_masked(a) - mb = np.ma.count_masked(b) + a_orig = a.copy() + b_orig = b.copy() for i in range(50): np.random.shuffle(a) - self.assertEqual(ma, np.ma.count_masked(a)) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) np.random.shuffle(b) - self.assertEqual(mb, np.ma.count_masked(b)) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) def test_beta(self): np.random.seed(self.seed) From b8cf7f904974294d4e3af43c68ef23f87385f2f6 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Wed, 6 Jan 2016 21:04:59 -0800 Subject: [PATCH 393/496] Top shuffle speed for machine-sized ints/floats. Apparently gcc only specializes one branch (the last one) so I went for another 33% performance increase (matching #6776) in what's likely the most common use case. --- numpy/random/mtrand/mtrand.pyx | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index f70f578cc8e0..c8738cf6f5f3 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -4982,8 +4982,7 @@ cdef class RandomState: """ cdef: - npy_intp i, j, n = len(x) - size_t stride, nbytes + npy_intp i, j, n = len(x), stride, itemsize char* x_ptr char* buf_ptr @@ -4993,15 +4992,17 @@ cdef class RandomState: # as MaskedArrays may not support this approach). 
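
# Context for the subclass restriction noted above: a masked array must be
# shuffled through its __getitem__/__setitem__ so the mask travels with the
# data, which is why it is routed to the untyped fallback path rather than
# the raw-buffer path. A small sketch of the invariant this preserves:
import numpy as np

m = np.ma.masked_values([1, -1, 2, -1, 3], -1)
np.random.shuffle(m)                  # object path, not memcpy
assert np.ma.count_masked(m) == 2     # both masked slots survive the shuffle
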
x_ptr = x.ctypes.data stride = x.strides[0] - nbytes = x[:1].nbytes + itemsize = x.dtype.itemsize buf = np.empty_like(x[0]) # GC'd at function exit buf_ptr = buf.ctypes.data with self.lock: - for i in reversed(range(1, n)): - j = rk_interval(i, self.internal_state) - string.memcpy(buf_ptr, x_ptr + j * stride, nbytes) - string.memcpy(x_ptr + j * stride, x_ptr + i * stride, nbytes) - string.memcpy(x_ptr + i * stride, buf_ptr, nbytes) + # We trick gcc into providing a specialized implementation for + # the most common case, yielding a ~33% performance improvement. + # Note that apparently, only one branch can ever be specialized. + if itemsize == sizeof(npy_intp): + self._shuffle_raw(n, sizeof(npy_intp), stride, x_ptr, buf_ptr) + else: + self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size: # Multidimensional ndarrays require a bounce buffer. buf = np.empty_like(x[0]) @@ -5018,6 +5019,15 @@ cdef class RandomState: j = rk_interval(i, self.internal_state) x[i], x[j] = x[j], x[i] + cdef inline _shuffle_raw(self, npy_intp n, npy_intp itemsize, + npy_intp stride, char* data, char* buf): + cdef npy_intp i, j + for i in reversed(range(1, n)): + j = rk_interval(i, self.internal_state) + string.memcpy(buf, data + j * stride, itemsize) + string.memcpy(data + j * stride, data + i * stride, itemsize) + string.memcpy(data + i * stride, buf, itemsize) + def permutation(self, object x): """ permutation(x) From 309fdd4cd1400fc392acfa418226bd7f8b10073d Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Tue, 12 Jan 2016 17:43:22 -0800 Subject: [PATCH 394/496] Revert to non-generative test. --- numpy/random/tests/test_random.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index e3391a9a20eb..96aa3790f4d4 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -210,13 +210,12 @@ def test_repeatability(self): assert_(tgt[np.dtype(np.bool).name] == res) -class TestRandomDist: +class TestRandomDist(TestCase): # Make sure the random distribution returns the correct value for a # given seed - @classmethod - def setup_class(cls): - cls.seed = 1234567890 + def setUp(self): + self.seed = 1234567890 def test_rand(self): np.random.seed(self.seed) @@ -392,7 +391,7 @@ def test_shuffle(self): np.random.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - yield np.testing.assert_array_equal, actual, desired + np.testing.assert_array_equal(actual, desired) def test_shuffle_masked(self): # gh-3263 From 0ed7960e32fa0b5cd2191dc9c7347c9929e8e22b Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 17 Jan 2016 16:15:24 +0100 Subject: [PATCH 395/496] TST: de-uglify the setuptools appeasement in runtests.py a bit. This is a cleaner fix than the one committed in gh-7040. --- runtests.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/runtests.py b/runtests.py index 52905a8fc87c..2a836d9cfc21 100755 --- a/runtests.py +++ b/runtests.py @@ -135,13 +135,8 @@ def main(argv): if not args.no_build: site_dir = build_project(args) - for dirname in os.listdir(site_dir): - if dirname.startswith('numpy'): - # The .pth file isn't re-parsed, so need to put the numpy egg - # produced by easy-install on the path manually. 
-                egg_dir = os.path.join(site_dir, dirname)
-                sys.path.insert(0, egg_dir)
-                os.environ['PYTHONPATH'] = egg_dir
+        sys.path.insert(0, site_dir)
+        os.environ['PYTHONPATH'] = site_dir
 
     extra_argv = args.args[:]
     if extra_argv and extra_argv[0] == '--':
@@ -349,7 +344,10 @@ def build_project(args):
     cmd += ["build"]
     if args.parallel > 1:
         cmd += ["-j", str(args.parallel)]
-    cmd += ['install', '--prefix=' + dst_dir]
+    # Install; avoid producing eggs so numpy can be imported from dst_dir.
+    cmd += ['install', '--prefix=' + dst_dir,
+            '--single-version-externally-managed',
+            '--record=' + dst_dir + 'tmp_install_log.txt']
 
     from distutils.sysconfig import get_python_lib
     site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)

From 91ae5fbfe5664a4bdb4522dc7a5dda75040ce35c Mon Sep 17 00:00:00 2001
From: Stuart Berg
Date: Sun, 1 Nov 2015 23:49:54 -0500
Subject: [PATCH 396/496] MAINT: Add '0x' to API version error for clarity.

---
 numpy/core/code_generators/generate_numpy_api.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index a97564fa40a9..d376ffd29286 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -84,13 +84,13 @@
   /* Perform runtime check of C API version */
   if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
     PyErr_Format(PyExc_RuntimeError, "module compiled against "\
-        "ABI version %%x but this version of numpy is %%x", \
+        "ABI version 0x%%x but this version of numpy is 0x%%x", \
         (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
     return -1;
   }
   if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
     PyErr_Format(PyExc_RuntimeError, "module compiled against "\
-        "API version %%x but this version of numpy is %%x", \
+        "API version 0x%%x but this version of numpy is 0x%%x", \
         (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
     return -1;
   }

From 2da37a0e49c169f79f98e0e9d5b0370e9344029f Mon Sep 17 00:00:00 2001
From: Nick Papior
Date: Sun, 17 Jan 2016 19:23:59 +0100
Subject: [PATCH 397/496] ENH: Allow site.cfg information with libraries key

This PR fixes the case where a user's site.cfg overrides the library
locations but does not change the library names.
Now numpy.distutils correctly checks all options related to libraries by defaulting to the library from the class via _lib_names Signed-off-by: Nick Papior --- numpy/distutils/system_info.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index d7b9bfbed69a..a541bfae2744 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -663,7 +663,10 @@ def get_libs(self, key, default): return [b for b in [a.strip() for a in libs.split(',')] if b] def get_libraries(self, key='libraries'): - return self.get_libs(key, '') + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') def library_extensions(self): static_exts = ['.a'] From 7f0d97c2b6b1cf5001dc7af88f029adf01b14eff Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 18 Jan 2016 09:47:16 +1100 Subject: [PATCH 398/496] TST: np.isclose, correct error msg on test --- numpy/core/tests/test_numeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 17ea6212c959..dafdbc48be73 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1591,7 +1591,7 @@ def tst_isclose_allclose(self, x, y): msg = "isclose.all() and allclose aren't same for %s and %s" msg2 = "isclose and allclose aren't same for %s and %s" if np.isscalar(x) and np.isscalar(y): - assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg % (x, y)) + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) From d54e7351aeda804de6e7b5963bb2b9fa5c76d027 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Fri, 25 Sep 2015 16:29:09 +0100 Subject: [PATCH 399/496] ENH: Allow specification of terms to fit in legfit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/legendre.py | 47 ++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index c91cb72ec5f4..2035ba6e930a 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1418,8 +1418,14 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1440,9 +1446,11 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): Returns ------- coef : ndarray, shape (M,) or (M, K) - Legendre coefficients ordered from low to high. 
If `y` was 2-D, - the coefficients for the data in column k of `y` are in column - `k`. + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. [residuals, rank, singular_values, rcond] : list These values are only returned if `full` = True @@ -1511,12 +1519,14 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): -------- """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. - if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1527,8 +1537,20 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = legvander(x, deg).T + van = legvander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1556,6 +1578,15 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From 40951167a51392205427c59a9d8425c36e89df08 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Fri, 25 Sep 2015 23:01:57 +0100 Subject: [PATCH 400/496] TST: Add tests for legfit with deg specified as list --- numpy/polynomial/tests/test_legendre.py | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 8ac1feb589d4..9c259d14c4bf 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -388,6 +388,9 @@ def test_legfit(self): def f(x): return x*(x - 1)*(x - 2) + def f2(x): + return x**4 + x**2 + 1 + # Test exceptions assert_raises(ValueError, leg.legfit, [1], [1], -1) assert_raises(TypeError, leg.legfit, [[1]], [1], 0) @@ -397,6 +400,9 @@ def f(x): assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -405,13 +411,25 @@ def f(x): coef3 = leg.legfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) # coef4 = leg.legfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + 
assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) # coef2d = leg.legfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -419,13 +437,26 @@ def f(x): y[0::2] = 0 wcoef3 = leg.legfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) # wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) class TestCompanion(TestCase): From 942f294c06b0285ea3cf2bf223a63700a1ed50f5 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Mon, 12 Oct 2015 20:04:36 +0100 Subject: [PATCH 401/496] ENH: Allow specification of terms to fit in chebfit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/chebyshev.py | 39 ++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index de5cbb734194..9db613b78b23 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1617,8 +1617,14 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting series + deg : int or array_like + Degree of the fitting series. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1710,12 +1716,14 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): -------- """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
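
# A sketch of the usage this enables (mirroring the tests further on): the
# list form of `deg` restricts the fit to the named Chebyshev terms, here
# the even-order terms of an even function, with zeros returned elsewhere.
import numpy as np
from numpy.polynomial import chebyshev as C

x = np.linspace(-1, 1, 50)
y = x**4 + x**2 + 1
coef = C.chebfit(x, y, deg=[0, 2, 4])   # fit only T_0, T_2 and T_4
assert coef[1] == 0 and coef[3] == 0    # unfitted odd terms are exact zeros
np.testing.assert_almost_equal(C.chebval(x, coef), y)
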
- if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1726,8 +1734,20 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = chebvander(x, deg).T + van = chebvander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1755,6 +1775,15 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From 1a9fb061bb4f217f335616c65abd36644c2f2ac7 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Mon, 12 Oct 2015 20:23:56 +0100 Subject: [PATCH 402/496] TST: Add tests for chebfit with deg specified as list --- numpy/polynomial/tests/test_chebyshev.py | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index a596905f6771..8d992c4f0930 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -399,6 +399,9 @@ def test_chebfit(self): def f(x): return x*(x - 1)*(x - 2) + def f2(x): + return x**4 + x**2 + 1 + # Test exceptions assert_raises(ValueError, cheb.chebfit, [1], [1], -1) assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) @@ -408,6 +411,9 @@ def f(x): assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -416,13 +422,25 @@ def f(x): coef3 = cheb.chebfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) # coef4 = cheb.chebfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) # coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -430,13 +448,26 @@ def f(x): y[0::2] = 0 wcoef3 = cheb.chebfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], 
w=w) + assert_almost_equal(wcoef3, coef3) # wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) class TestCompanion(TestCase): From acc294dcd7d3998f8f2b82cad8f3ed6db48c1f00 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Mon, 12 Oct 2015 21:03:09 +0100 Subject: [PATCH 403/496] ENH: Allow specification of terms to fit in hermfit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/hermite.py | 39 ++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 2ce1d97a887e..5d4b357fed7e 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1388,8 +1388,14 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1486,12 +1492,14 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): array([ 0.97902637, 1.99849131, 3.00006 ]) """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
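
# The list-valued `deg` composes with the existing 2-D `y` support described
# in the docstring above: each column of `y` is fitted independently against
# the requested Hermite terms. A sketch (the data here is illustrative):
import numpy as np
from numpy.polynomial import hermite as H

x = np.linspace(-1, 1, 40)
y2d = np.array([x**2, 2*x**2 + 1]).T   # two data sets, one per column
c2d = H.hermfit(x, y2d, [0, 2])        # fit only H_0 and H_2
assert c2d.shape == (3, 2)             # rows 0..2; row 1 is all zeros
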
- if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1502,8 +1510,20 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = hermvander(x, deg).T + van = hermvander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1531,6 +1551,15 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From b8180bbf97505a544324c90f407cdf2c5913c612 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Mon, 12 Oct 2015 21:10:22 +0100 Subject: [PATCH 404/496] TST: Add tests for hermfit with deg specified as list --- numpy/polynomial/tests/test_hermite.py | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index e67625a88139..04da72b26560 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -387,6 +387,9 @@ def test_hermfit(self): def f(x): return x*(x - 1)*(x - 2) + def f2(x): + return x**4 + x**2 + 1 + # Test exceptions assert_raises(ValueError, herm.hermfit, [1], [1], -1) assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) @@ -396,6 +399,9 @@ def f(x): assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -404,13 +410,25 @@ def f(x): coef3 = herm.hermfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) # coef4 = herm.hermfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) # coef2d = herm.hermfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -418,13 +436,26 @@ def f(x): y[0::2] = 0 wcoef3 = herm.hermfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + 
assert_almost_equal(wcoef3, coef3) # wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herm.hermfit(x, y, 4) + assert_almost_equal(herm.hermval(x, coef1), y) + coef2 = herm.hermfit(x, y, [0, 2, 4]) + assert_almost_equal(herm.hermval(x, coef2), y) + assert_almost_equal(coef1, coef2) class TestCompanion(TestCase): From 8dcf03358a97bf3a577025c03438d381c8b4cb72 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 5 Nov 2015 15:03:27 +0000 Subject: [PATCH 405/496] ENH: Allow specification of terms to fit in lagfit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/laguerre.py | 39 +++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index fffe9e6b6546..280e28159451 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1387,8 +1387,14 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1485,12 +1491,14 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): array([ 0.96971004, 2.00193749, 3.00288744]) """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
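
# The validation just below turns malformed `deg` lists into immediate,
# descriptive errors instead of silent misbehavior; a sketch of the intended
# behavior (the exception types match the raise statements that follow):
from numpy.polynomial import laguerre as L

try:
    L.lagfit([1, 2, 3], [1, 2, 3], [])       # empty deg
except TypeError:
    pass   # "expected deg to be one or more integers"
try:
    L.lagfit([1, 2, 3], [1, 2, 3], [2, -1])  # negative term index
except ValueError:
    pass   # "expected deg >= 0"
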
- if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1501,8 +1509,20 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = lagvander(x, deg).T + van = lagvander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1530,6 +1550,15 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From 84e0b6ec1b686b908eb2d8bba38337a13f02cb82 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 5 Nov 2015 15:40:25 +0000 Subject: [PATCH 406/496] TST: Add tests for lagfit with deg specified as list --- numpy/polynomial/tests/test_laguerre.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 1dc57a960294..c25476088817 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -393,6 +393,9 @@ def f(x): assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -401,13 +404,21 @@ def f(x): coef3 = lag.lagfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) # coef4 = lag.lagfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) # coef2d = lag.lagfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -415,13 +426,18 @@ def f(x): y[0::2] = 0 wcoef3 = lag.lagfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) # wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. 
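
# Why these four points are a useful edge case: their squares sum to zero,
# so a column scaling computed from a plain (unconjugated) sum of squares
# would degenerate; the fit must use magnitudes for complex input. Premise:
import numpy as np
assert np.sum(np.array([1, 1j, -1, -1j])**2) == 0
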
x = [1, 1j, -1, -1j] assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) class TestCompanion(TestCase): From b904ef19b55796ea6d0e43d00a551fc841833b78 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Wed, 2 Dec 2015 20:27:25 +0000 Subject: [PATCH 407/496] ENH: Allow specification of terms to fit in polyfit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/polynomial.py | 39 +++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 60e339a1d2ca..7c922c11bf7f 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1217,8 +1217,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. - deg : int - Degree of the polynomial(s) to be fit. + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be @@ -1332,12 +1338,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
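
# With this change an integer degree n and the explicit list range(n + 1)
# request the same least-squares problem, so the two spellings should agree
# (a sketch; the tests that follow check the same equivalence):
import numpy as np
from numpy.polynomial import polynomial as P

x = np.linspace(0, 2, 30)
y = x*(x - 1)*(x - 2)
np.testing.assert_almost_equal(P.polyfit(x, y, 3),
                               P.polyfit(x, y, [0, 1, 2, 3]))
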
- if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1348,8 +1356,20 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = polyvander(x, deg).T + van = polyvander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1377,6 +1397,15 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From fd5d1a4893d1a4b04d6df94ff83e09a6e4f12df6 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 3 Dec 2015 14:11:59 +0000 Subject: [PATCH 408/496] TST: Add tests for polyfit with deg specified as list --- numpy/polynomial/tests/test_polynomial.py | 27 +++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index c806a8497492..00a52ebce8b7 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -419,6 +419,9 @@ def test_polyfit(self): def f(x): return x*(x - 1)*(x - 2) + def f2(x): + return x**4 + x**2 + 1 + # Test exceptions assert_raises(ValueError, poly.polyfit, [1], [1], -1) assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) @@ -428,6 +431,9 @@ def f(x): assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -436,13 +442,21 @@ def f(x): coef3 = poly.polyfit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) # coef4 = poly.polyfit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) # coef2d = poly.polyfit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -450,13 +464,26 @@ def f(x): yw[0::2] = 0 wcoef3 = poly.polyfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) + wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) # wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = poly.polyfit(x, 
np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) + assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Polyendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = poly.polyfit(x, y, 4) + assert_almost_equal(poly.polyval(x, coef1), y) + coef2 = poly.polyfit(x, y, [0, 2, 4]) + assert_almost_equal(poly.polyval(x, coef2), y) + assert_almost_equal(coef1, coef2) def test_polytrim(self): coef = [2, -1, 1, 0] From 6411ec505ed7b36c9768accd338a96f5a64eba93 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 3 Dec 2015 15:30:16 +0000 Subject: [PATCH 409/496] ENH: Allow specification of terms to fit in hermefit The argument `deg` is enhanced to allow an array_like argument to past which specifies which terms to include in the fit. The returned coef array is exapnded to have entries of 0 for all coefficients which were not included in the fit. --- numpy/polynomial/hermite_e.py | 39 ++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 394f256a59cf..da441af8316b 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1385,8 +1385,14 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1483,12 +1489,14 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): array([ 1.01690445, 1.99951418, 2.99948696]) """ - order = int(deg) + 1 x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 + deg = np.asarray([deg,], dtype=int).flatten() # check arguments. 
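
# The zero-fill step later in this function means the result of a restricted
# fit can still be indexed by term number; e.g. (illustrative values):
import numpy as np
from numpy.polynomial import hermite_e as He

x = np.linspace(-1, 1, 30)
c = He.hermefit(x, x**2, [0, 2])   # He_0 and He_2 only
assert len(c) == 3 and c[1] == 0   # slot 1 is an explicit zero
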
- if deg < 0: + if deg.size < 1: + raise TypeError("expected deg to be one or more integers") + if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") @@ -1499,8 +1507,20 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") + if deg.size == 1: + restricted_fit = False + lmax = deg[0] + order = lmax + 1 + else: + restricted_fit = True + lmax = deg.max() + order = deg.size + # set up the least squares matrices in transposed form - lhs = hermevander(x, deg).T + van = hermevander(x, lmax) + if restricted_fit: + van = van[:, deg] + lhs = van.T rhs = y.T if w is not None: w = np.asarray(w) + 0.0 @@ -1528,6 +1548,15 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) c = (c.T/scl).T + # Expand c to include non-fitted coefficients which are set to zero + if restricted_fit: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" From c65093c96746fd5513a648522e105d7df8e8c912 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 3 Dec 2015 15:45:38 +0000 Subject: [PATCH 410/496] TST: Add tests for hermefit with deg specified as list --- numpy/polynomial/tests/test_hermite_e.py | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index f8601a82846a..1162502dc5f1 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -388,6 +388,9 @@ def test_hermefit(self): def f(x): return x*(x - 1)*(x - 2) + def f2(x): + return x**4 + x**2 + 1 + # Test exceptions assert_raises(ValueError, herme.hermefit, [1], [1], -1) assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) @@ -397,6 +400,9 @@ def f(x): assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) # Test fit x = np.linspace(0, 2) @@ -405,13 +411,25 @@ def f(x): coef3 = herme.hermefit(x, y, 3) assert_equal(len(coef3), 4) assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) # coef4 = herme.hermefit(x, y, 4) assert_equal(len(coef4), 5) assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) # coef2d = herme.hermefit(x, np.array([y, y]).T, 3) assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -419,13 +437,26 @@ def f(x): y[0::2] = 0 wcoef3 = herme.hermefit(x, yw, 3, w=w) assert_almost_equal(wcoef3, 
coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) # wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) class TestCompanion(TestCase): From 5f7b1af4e652c2ab631634d9778a3a4015e41ced Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 3 Dec 2015 14:41:00 +0000 Subject: [PATCH 411/496] ENH: Allow specification of terms to fit in fit method --- numpy/polynomial/_polybase.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 41bdf0484949..37eb59f039aa 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -739,8 +739,14 @@ def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int - Degree of the fitting polynomial. + deg : int or array_like + Degree of the fitting polynomial. If `deg` is a single integer + all terms up to and including the `deg`'th term are included. + `deg` may alternatively be a list or array specifying which + terms in the Legendre expansion to include in the fit. + + .. versionchanged:: 1.11.0 + `deg` may be a list specifying which terms to fit domain : {None, [beg, end], []}, optional Domain to use for the returned series. If ``None``, then a minimal domain that covers the points `x` is chosen. If From 4dd71a3ab7bd019e36998e7c5f98ec2345539f18 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Thu, 3 Dec 2015 14:49:31 +0000 Subject: [PATCH 412/496] TST: Add tests for check_fit with deg specified as list --- numpy/polynomial/tests/test_classes.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index cd5a54687939..a7cf7209c6bf 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -173,11 +173,18 @@ def f(x): assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) # check with class domain default p = Poly.fit(x, y, 3, []) assert_equal(p.domain, Poly.domain) assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) # check that fit accepts weights. 
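
# Because the convenience classes route through the same machinery, the list
# form of `deg` also works from the class interface changed above, with the
# unfitted terms of the returned series set to zero (a sketch):
import numpy as np
from numpy.polynomial import Legendre

x = np.linspace(-1, 1, 50)
y = x**4 + x**2 + 1
p = Legendre.fit(x, y, deg=[0, 2, 4])
np.testing.assert_almost_equal(p(x), y)
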
w = np.zeros_like(x) @@ -185,7 +192,9 @@ def f(x): w[::2] = 1 p1 = Poly.fit(x[::2], z[::2], 3) p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) def check_equal(Poly): From 082e1a8e690f304d1ce5b73aa03d179a25cdf3c6 Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Mon, 18 Jan 2016 14:30:49 +0000 Subject: [PATCH 413/496] Add release note for polynomial fit deg changes --- doc/release/1.11.0-notes.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index e541a4739754..ea0e41694bbb 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -240,6 +240,15 @@ arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. +numpy.polynomial.*fit now supports restricted fitting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The ``deg`` parameter was extended to allow restricted fitting of +specified terms in the polynomial expansion for all polynomial +types. This change is backward compatible and it is still possible to +specify ``deg`` as a single integer to specify the maximum +order/degree of polynomial used in the fit, but it is now possible for +``deg`` to be a list specifying which terms in the series to fit. + Deprecations ============ From 44c49f311f3c2e0fa6440ddde1c8fef9a4b5a93e Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sat, 16 Jan 2016 10:25:28 +0000 Subject: [PATCH 414/496] TST: Added lots of new tests for fromnumeric.py --- numpy/core/tests/test_fromnumeric.py | 17 -- numpy/core/tests/test_multiarray.py | 313 +++++++++++++++++++++----- numpy/core/tests/test_numeric.py | 297 ++++++++++++++++-------- numpy/lib/tests/test_function_base.py | 61 +++-- 4 files changed, 490 insertions(+), 198 deletions(-) delete mode 100644 numpy/core/tests/test_fromnumeric.py diff --git a/numpy/core/tests/test_fromnumeric.py b/numpy/core/tests/test_fromnumeric.py deleted file mode 100644 index 0fba10b6e8b9..000000000000 --- a/numpy/core/tests/test_fromnumeric.py +++ /dev/null @@ -1,17 +0,0 @@ -from __future__ import division, absolute_import, print_function - -from numpy import put -from numpy.testing import TestCase, assert_raises - - -class TestPut(TestCase): - - def test_bad_array(self): - # We want to raise a TypeError in the - # case that a non-ndarray object is passed - # in since `np.put` modifies in place and - # hence would do nothing to a non-ndarray - v = 5 - indx = [0, 2] - bad_array = [1, 2, 3] - assert_raises(TypeError, put, bad_array, indx, v) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 1666af4f1dc8..f432aa975885 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -37,7 +37,7 @@ if sys.version_info[:2] > (3, 2): - # In Python 3.3 the representation of empty shape, strides and suboffsets + # In Python 3.3 the representation of empty shape, strides and sub-offsets # is an empty tuple instead of None. 
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes EMPTY = () @@ -72,7 +72,7 @@ def test_otherflags(self): def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) - # not power of two are accessed bytewise and thus considered aligned + # not power of two are accessed byte-wise and thus considered aligned a = np.zeros(5, dtype=np.dtype('|S4')) assert_(a.flags.aligned) @@ -80,6 +80,7 @@ def test_void_align(self): a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) + class TestHash(TestCase): # see #3793 def test_int(self): @@ -101,6 +102,7 @@ def test_int(self): assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (ut, i)) + class TestAttributes(TestCase): def setUp(self): self.one = np.arange(10) @@ -159,8 +161,8 @@ def test_stridesattr(self): def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) + offset=offset*x.itemsize, + strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) self.assertRaises(ValueError, make_array, 4, 4, -2) @@ -351,6 +353,7 @@ def assign(v): assert_raises((AttributeError, TypeError), assign, C()) assert_raises(ValueError, assign, [1]) + class TestDtypedescr(TestCase): def test_construction(self): d1 = np.dtype('i4') @@ -362,6 +365,7 @@ def test_byteorders(self): self.assertNotEqual(np.dtype('i4')) self.assertNotEqual(np.dtype([('a', 'i4')])) + class TestZeroRank(TestCase): def setUp(self): self.d = np.array(0), np.array('x', object) @@ -539,6 +543,7 @@ def test_overlapping_assignment(self): a[:1:-1] = a[2::-1] assert_equal(a, [0, 1, 0, 1, 2]) + class TestCreation(TestCase): def test_from_attribute(self): class x(object): @@ -583,7 +588,7 @@ def test_zeros(self): @dec.slow def test_zeros_big(self): - # test big array as they might be allocated different by the sytem + # test big array as they might be allocated different by the system types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((30 * 1024**2,), dtype=dt) @@ -867,7 +872,7 @@ def test_setfield_object(self): b[0]['x'] = np.arange(3) assert_equal(b[0]['x'], np.arange(3)) - #check that broadcasting check still works + # check that broadcasting check still works c = np.zeros(1, dtype=[('x', 'O', 5)]) def testassign(): @@ -875,6 +880,7 @@ def testassign(): assert_raises(ValueError, testassign) + class TestBool(TestCase): def test_test_interning(self): a0 = np.bool_(0) @@ -933,7 +939,107 @@ def test_count_nonzero_unaligned(self): a[:o] = False self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) + class TestMethods(TestCase): + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2*np.ones((3,), dtype=int) + y = 3*np.ones((3,), dtype=int) + x2 = 2*np.ones((2, 3), dtype=int) + y2 = 3*np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 
3]]) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + self.assertRaises(ArithmeticError, a.prod) + self.assertRaises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + def test_repeat(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + def test_round(self): def check_round(arr, expected, *round_args): assert_equal(arr.round(*round_args), expected) @@ -951,6 +1057,13 @@ def check_round(arr, expected, *round_args): check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + def test_transpose(self): a = np.array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) @@ -960,7 +1073,7 @@ def test_transpose(self): def test_sort(self): # test ordering for floats and complex containing nans. It is only - # necessary to check the lessthan comparison, so sorts that + # necessary to check the less-than comparison, so sorts that # only follow the insertion sort path are sufficient. We only # test doubles and complex doubles as the logic is the same. @@ -1198,7 +1311,7 @@ def test_argsort(self): assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars - # but the compare fuction differs. + # but the compare function differs. 
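As background for the complex argsort cases: NumPy orders complex values lexicographically, comparing real parts first and imaginary parts second, which is why the tests build `a*1j + 1` to exercise the imaginary-part comparisons. An illustrative snippet (not part of the patch):

    import numpy as np

    c = np.array([2 + 1j, 1 + 3j, 1 + 2j])
    np.sort(c)      # array([ 1.+2.j,  1.+3.j,  2.+1.j])
    np.argsort(c)   # array([2, 1, 0])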
ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: @@ -1292,8 +1405,6 @@ def test_argsort(self): assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) - # using None is known fail at this point - #assert_equal(a.copy().argsort(axis=None, c) # check axis handling for multidimensional empty arrays a = np.array([]) @@ -1440,7 +1551,7 @@ def test_searchsorted_unicode(self): 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'], - dtype=np.unicode) + dtype=np.unicode) ind = np.arange(len(a)) assert_equal([a.searchsorted(v, 'left') for v in a], ind) assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1) @@ -1596,7 +1707,7 @@ def test_partition(self): d = np.array([]) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(np.argpartition(d, 0, kind=k), d) - d = np.ones((1)) + d = np.ones(1) assert_array_equal(np.partition(d, 0, kind=k)[0], d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) @@ -1637,13 +1748,13 @@ def test_partition(self): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) - d = np.ones((50)) + d = np.ones(50) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # sorted - d = np.arange((49)) + d = np.arange(49) self.assertEqual(np.partition(d, 5, kind=k)[5], 5) self.assertEqual(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], @@ -1652,7 +1763,7 @@ def test_partition(self): np.partition(d, 15, kind=k)) # rsorted - d = np.arange((47))[::-1] + d = np.arange(47)[::-1] self.assertEqual(np.partition(d, 6, kind=k)[6], 6) self.assertEqual(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], @@ -1690,8 +1801,8 @@ def test_partition(self): assert_(np.isnan(np.partition(d, (2, -1))[-1])) # equal elements - d = np.arange((47)) % 7 - tgt = np.sort(np.arange((47)) % 7) + d = np.arange(47) % 7 + tgt = np.sort(np.arange(47) % 7) np.random.shuffle(d) for i in range(d.size): self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i]) @@ -1743,7 +1854,7 @@ def test_partition(self): assert_raises(ValueError, np.argpartition, d, 11, axis=None) td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] - for s in (9, 16)] + for s in (9, 16)] for dt, s in td: aae = assert_array_equal at = self.assertTrue @@ -1773,15 +1884,14 @@ def test_partition(self): np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) - aae(p[i,:], np.array([i] * d1.shape[0], - dtype=dt)) + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right - at((p[:i,:] <= p[i,:]).all(), - msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) - at((p[i + 1:,:] > p[i,:]).all(), - msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), - np.arange(d0.shape[1])[None,:]]) + np.arange(d0.shape[1])[None, :]]) # check inplace dc = d.copy() @@ -2153,6 +2263,12 @@ def test_put(self): a.flags.writeable = False assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) + # when calling np.put, make sure a + # TypeError is raised if the object + # isn't an ndarray + bad_array = [1, 2, 3] + assert_raises(TypeError, np.put, bad_array, [0, 2], 
5) + def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) @@ -2778,7 +2894,7 @@ def test_version0_float32(self): def test_version0_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' - a = np.array([{'a':1}, {'b':2}]) + a = np.array([{'a': 1}, {'b': 2}]) p = self._loads(asbytes(s)) assert_equal(a, p) @@ -2797,7 +2913,7 @@ def test_version1_float32(self): def test_version1_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' - a = np.array([{'a':1}, {'b':2}]) + a = np.array([{'a': 1}, {'b': 2}]) p = self._loads(asbytes(s)) assert_equal(a, p) @@ -2815,7 +2931,7 @@ def test_list(self): assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) - x[:,:, [0]] = 2.0 + x[:, :, [0]] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_tuple(self): @@ -2823,7 +2939,7 @@ def test_tuple(self): x[:, (0,)] = 2.0 assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) - x[:,:, (0,)] = 2.0 + x[:, :, (0,)] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_mask(self): @@ -2957,11 +3073,6 @@ class TestArgmax(TestCase): ([False, False, False, True, False], 3), ([True, False, False, False, False], 0), ([True, False, True, False, False], 0), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 3), - #(['zz', 'a', 'aa', 'a'], 0), - #(['aa', 'z', 'zz', 'a'], 2), ] def test_all(self): @@ -2990,7 +3101,7 @@ def test_output_shape(self): # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmax, -1, np.ones((1, 10))) + assert_raises(ValueError, a.argmax, -1, out) out = np.ones(10, dtype=np.int_) a.argmax(-1, out=out) @@ -3005,13 +3116,13 @@ def test_np_vs_ndarray(self): # make sure both ndarray.argmax and numpy.argmax support out/axis args a = np.random.normal(size=(2,3)) - #check positional args + # check positional args out1 = np.zeros(2, dtype=int) out2 = np.zeros(2, dtype=int) assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2)) assert_equal(out1, out2) - #check keyword args + # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.zeros(3, dtype=int) assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0)) @@ -3091,11 +3202,6 @@ class TestArgmin(TestCase): ([True, True, True, False, True], 3), ([False, True, True, True, True], 0), ([False, True, False, True, True], 0), - - # Can't reduce a "flexible type" - #(['a', 'z', 'aa', 'zz'], 0), - #(['zz', 'a', 'aa', 'a'], 1), - #(['aa', 'z', 'zz', 'a'], 3), ] def test_all(self): @@ -3138,7 +3244,7 @@ def test_output_shape(self): # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) - assert_raises(ValueError, a.argmin, -1, np.ones((1, 10))) + assert_raises(ValueError, a.argmin, -1, out) out = np.ones(10, dtype=np.int_) a.argmin(-1, out=out) @@ -3151,15 +3257,15 @@ def test_argmin_unicode(self): def test_np_vs_ndarray(self): # make sure both ndarray.argmin and numpy.argmin support out/axis args - a = np.random.normal(size=(2,3)) + a = np.random.normal(size=(2, 3)) - #check positional args + # check 
positional args out1 = np.zeros(2, dtype=int) out2 = np.ones(2, dtype=int) assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2)) assert_equal(out1, out2) - #check keyword args + # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.ones(3, dtype=int) assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0)) @@ -3281,6 +3387,29 @@ def test_max_or_min(self): assert_(np.all(x <= 4)) +class TestCompress(TestCase): + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) @@ -3324,12 +3453,6 @@ def test_record_array(self): assert_array_equal(rec['y'], [11, 4]) assert_array_equal(rec['z'], [3, 3]) - def test_masked_array(self): - ## x = np.array([1,2,3]) - ## z = np.ma.array(x,mask=[True,False,False]) - ## np.putmask(z,[True,True,True],3) - pass - class TestTake(object): def tst_basic(self, x): @@ -3774,6 +3897,7 @@ def test___array__(self): assert_(f.flags.updateifcopy is True) assert_(f.base is self.b0) + class TestResize(TestCase): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) @@ -3919,7 +4043,7 @@ def test_field_names(self): assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) assert_raises(ValueError, b[fn3].__getitem__, fnn) - # multiple Subfields + # multiple subfields fn2 = func('f2') b[fn2] = 3 assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) @@ -4010,15 +4134,19 @@ def test_basic(self): assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) + def _mean(a, **args): return a.mean(**args) + def _var(a, **args): return a.var(**args) + def _std(a, **args): return a.std(**args) + class TestStats(TestCase): funcs = [_mean, _var, _std] @@ -4111,7 +4239,7 @@ def test_dtype_from_dtype(self): # this needs definition as there are lots places along the line # where type casting may take place. 
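The commented-out check below concerns how the `dtype` argument pins down the result type of the statistics functions; the behavior it would verify looks like this (illustrative only):

    import numpy as np

    mat = np.eye(3, dtype=np.int16)
    mat.mean(axis=1).dtype                     # dtype('float64'): integer input promotes
    mat.mean(axis=1, dtype=np.float32).dtype   # dtype('float32'): dtype fixes the result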
- #for f in self.funcs: + # for f in self.funcs: # for c in np.typecodes['AllInteger']: # tgt = np.dtype(c).type # res = f(mat, axis=1, dtype=c).dtype.type @@ -4867,6 +4995,7 @@ def test_matmul_inplace(): exec_ = getattr(builtins, "exec") assert_raises(TypeError, exec_, "a @= b", globals(), locals()) + class TestInner(TestCase): def test_inner_type_mismatch(self): @@ -4979,6 +5108,25 @@ def test_2d(self): ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) + +class TestAlen(TestCase): + def test_basic(self): + m = np.array([1, 2, 3]) + self.assertEqual(np.alen(m), 3) + + m = np.array([[1, 2, 3], [4, 5, 7]]) + self.assertEqual(np.alen(m), 2) + + m = [1, 2, 3] + self.assertEqual(np.alen(m), 3) + + m = [[1, 2, 3], [4, 5, 7]] + self.assertEqual(np.alen(m), 2) + + def test_singleton(self): + self.assertEqual(np.alen(5), 1) + + class TestChoose(TestCase): def setUp(self): self.x = 2*np.ones((3,), dtype=int) @@ -5000,8 +5148,47 @@ def test_broadcast2(self): assert_equal(A, [[2, 2, 3], [2, 2, 3]]) +class TestRepeat(TestCase): + def setUp(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + class TestNeighborhoodIter(TestCase): # Simple, 2d tests def _test_simple2d(self, dt): @@ -5266,6 +5453,7 @@ def test_complex_warning(self): assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) assert_equal(x, [1, 2]) + class TestMinScalarType(object): def test_usigned_shortshort(self): @@ -5299,6 +5487,7 @@ def test_object(self): from numpy.core._internal import _dtype_from_pep3118 + class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) @@ -5381,6 +5570,7 @@ def VV(n): self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) + class TestNewBufferProtocol(object): def _check_roundtrip(self, obj): obj = np.asarray(obj) @@ -5762,10 +5952,12 @@ def test_flat_element_deletion(): except: raise AssertionError + def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') + class TestMemEventHook(TestCase): def test_mem_seteventhook(self): # The actual tests are within the C code in @@ -5818,7 +6010,7 @@ def test_3darray(self): class TestConversion(TestCase): def test_array_scalar_relational_operation(self): - #All integer + # All integer for dt1 in np.typecodes['AllInteger']: assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) @@ -5829,13 +6021,13 @@ def test_array_scalar_relational_operation(self): assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) - #Unsigned integers + # Unsigned 
integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - #unsigned vs signed + # Unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) @@ -5844,7 +6036,7 @@ def test_array_scalar_relational_operation(self): assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) - #Signed integers and floats + # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) @@ -5902,7 +6094,8 @@ def test_exotic(self): e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], dtype=object) - m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) r = e[:] r[np.where(m)] = d[np.where(m)] @@ -5971,7 +6164,7 @@ def test_foreign(self): dtype=np.float64) a = np.ones(1, dtype='>i4') b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], - dtype=np.float64) + dtype=np.float64) assert_equal(np.where(c, a, b), r) b = b.astype('>f8') diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index dafdbc48be73..34be84135a9f 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -44,19 +44,41 @@ def test_reshape_from_zero(self): class TestNonarrayArgs(TestCase): # check that non-array arguments to functions wrap them in arrays - def test_squeeze(self): - A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] - assert_(np.squeeze(A).shape == (3, 3)) + def test_choose(self): + choices = [[0, 1, 2], + [3, 4, 5], + [5, 6, 7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) def test_cumproduct(self): A = [[1, 2, 3], [4, 5, 6]] assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720]))) - def test_size(self): - A = [[1, 2, 3], [4, 5, 6]] - assert_(np.size(A) == 6) - assert_(np.size(A, 0) == 2) - assert_(np.size(A, 1) == 3) + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) def test_mean(self): A = [[1, 2, 3], [4, 5, 6]] @@ -69,6 +91,55 @@ def test_mean(self): assert_(np.isnan(np.mean([]))) assert_(w[0].category is RuntimeWarning) + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 
12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_(np.squeeze(A).shape == (3, 3)) + def test_std(self): A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(np.std(A), 1.707825127659933) @@ -80,6 +151,38 @@ def test_std(self): assert_(np.isnan(np.std([]))) assert_(w[0].category is RuntimeWarning) + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + assert_equal(np.transpose(arr, (1, 0)), tgt) + def test_var(self): A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(np.var(A), 2.9166666666666665) @@ -259,7 +362,6 @@ def setUp(self): self.signf[4::6][self.ef[4::6]] = -0. self.signd[4::6][self.ed[4::6]] = -0. - def test_float(self): # offset for alignment test for i in range(4): @@ -279,7 +381,7 @@ def test_float(self): assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - # isnan on amd64 takes the same codepath + # isnan on amd64 takes the same code path assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) @@ -305,7 +407,7 @@ def test_double(self): assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) - # isnan on amd64 takes the same codepath + # isnan on amd64 takes the same code path assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) @@ -385,7 +487,7 @@ def test_errobj_noerrmask(self): # set errobj to something non default np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT + 1, None]) - #call a ufunc + # call a ufunc np.isnan(np.array([6])) # same with the default, lots of times to get rid of possible # pre-existing stack in the code @@ -413,7 +515,7 @@ def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): # # Given a floating operation `flop` and two scalar values, check that # the operation raises the floating point exception specified by - #`fpeerr`. Tests all variants with 0-d array scalars as well. + # `fpeerr`. Tests all variants with 0-d array scalars as well. 
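These helpers drive NumPy's floating-point error control; the same effect can be reproduced directly with np.errstate (an illustrative sketch, not the suite's helper):

    import numpy as np

    fi = np.finfo(np.float32)
    with np.errstate(over='raise'):
        try:
            np.float32(fi.max) * np.float32(2)   # overflows float32
        except FloatingPointError as exc:
            print(exc)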
self.assert_raises_fpe(fpeerr, flop, sc1, sc2) self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) @@ -449,31 +551,31 @@ def test_floating_exceptions(self): invalid = 'invalid' self.assert_raises_fpe(underflow, - lambda a, b:a/b, ft_tiny, ft_max) + lambda a, b: a/b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, - lambda a, b:a*b, ft_tiny, ft_tiny) + lambda a, b: a*b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, - lambda a, b:a*b, ft_max, ftype(2)) + lambda a, b: a*b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, - lambda a, b:a/b, ft_max, ftype(0.5)) + lambda a, b: a/b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, - lambda a, b:a+b, ft_max, ft_max*ft_eps) + lambda a, b: a+b, ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, - lambda a, b:a-b, -ft_max, ft_max*ft_eps) + lambda a, b: a-b, -ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, - lambda a, b:a/b, ftype(1), ftype(0)) + lambda a, b: a/b, ftype(1), ftype(0)) self.assert_raises_fpe(invalid, - lambda a, b:a/b, ftype(np.inf), ftype(np.inf)) + lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, - lambda a, b:a/b, ftype(0), ftype(0)) + lambda a, b: a/b, ftype(0), ftype(0)) self.assert_raises_fpe(invalid, - lambda a, b:a-b, ftype(np.inf), ftype(np.inf)) + lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, - lambda a, b:a+b, ftype(np.inf), ftype(-np.inf)) + lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) self.assert_raises_fpe(invalid, - lambda a, b:a*b, ftype(0), ftype(np.inf)) + lambda a, b: a*b, ftype(0), ftype(np.inf)) def test_warnings(self): # test warning code path @@ -496,7 +598,7 @@ def test_warnings(self): class TestTypes(TestCase): def check_promotion_cases(self, promote_func): - #Tests that the scalars get coerced correctly. + # tests that the scalars get coerced correctly. b = np.bool_(0) i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) @@ -582,7 +684,7 @@ def res_type(a, b): assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) b = np.longdouble(1.234) * a assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) + "array type %s" % a.dtype) b = np.float64(1.234) * a assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) b = np.float32(1.234) * a @@ -594,7 +696,7 @@ def res_type(a, b): assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) b = np.clongdouble(1.234j) * a assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) + "array type %s" % a.dtype) b = np.complex128(1.234j) * a assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) b = np.complex64(1.234j) * a @@ -603,22 +705,25 @@ def res_type(a, b): # The following use-case is problematic, and to resolve its # tricky side-effects requires more changes. 
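The promotion rules behind the problematic use-case discussed below can be inspected with np.result_type (illustrative only):

    import numpy as np

    np.result_type(np.bool_, np.float32)   # float32: a bool operand does not widen floats
    np.result_type(np.int64, np.float32)   # float64: an integer operand does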
# - ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is - ## a float32, shouldn't promote to float64 - #a = np.array([1.0, 1.5], dtype=np.float32) - #t = np.array([True, False]) - #b = t*a - #assert_equal(b, [1.0, 0.0]) - #assert_equal(b.dtype, np.dtype('f4')) - #b = (1-t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) - ## Probably ~t (bitwise negation) is more proper to use here, - ## but this is arguably less intuitive to understand at a glance, and - ## would fail if 't' is actually an integer array instead of boolean: - #b = (~t)*a - #assert_equal(b, [0.0, 1.5]) - #assert_equal(b.dtype, np.dtype('f4')) + # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is + # a float32, shouldn't promote to float64 + # + # a = np.array([1.0, 1.5], dtype=np.float32) + # t = np.array([True, False]) + # b = t*a + # assert_equal(b, [1.0, 0.0]) + # assert_equal(b.dtype, np.dtype('f4')) + # b = (1-t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + # + # Probably ~t (bitwise negation) is more proper to use here, + # but this is arguably less intuitive to understand at a glance, and + # would fail if 't' is actually an integer array instead of boolean: + # + # b = (~t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) def test_result_type(self): self.check_promotion_cases(np.result_type) @@ -831,7 +936,7 @@ def test_nonzero_onedim(self): assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], - dtype=[('a', 'i4'), ('b', 'i2')]) + dtype=[('a', 'i4'), ('b', 'i2')]) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) @@ -886,6 +991,14 @@ class C(np.ndarray): assert_(type(nzx_i) is np.ndarray) assert_(nzx_i.flags.writeable) + # Tests that the array method + # call works + def test_array_method(self): + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + class TestIndex(TestCase): def test_boolean(self): @@ -931,6 +1044,7 @@ def test_negative(self): assert_equal(np.base_repr(-12, 10, 4), '-000012') assert_equal(np.base_repr(-12, 4), '-30') + class TestArrayComparisons(TestCase): def test_array_equal(self): res = np.array_equal(np.array([1, 2]), np.array([1, 2])) @@ -1049,7 +1163,7 @@ def _generate_int32_data(self, n, m): # Now the real test cases def test_simple_double(self): - #Test native double input with scalar min/max. + # Test native double input with scalar min/max. a = self._generate_data(self.nr, self.nc) m = 0.1 M = 0.6 @@ -1058,7 +1172,7 @@ def test_simple_double(self): assert_array_strict_equal(ac, act) def test_simple_int(self): - #Test native int input with scalar min/max. + # Test native int input with scalar min/max. a = self._generate_int_data(self.nr, self.nc) a = a.astype(int) m = -2 @@ -1068,7 +1182,7 @@ def test_simple_int(self): assert_array_strict_equal(ac, act) def test_array_double(self): - #Test native double input with array min/max. + # Test native double input with array min/max. a = self._generate_data(self.nr, self.nc) m = np.zeros(a.shape) M = m + 0.5 @@ -1077,8 +1191,8 @@ def test_array_double(self): assert_array_strict_equal(ac, act) def test_simple_nonnative(self): - #Test non native double input with scalar min/max. - #Test native double input with non native double scalar min/max. + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. 
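The "non native" inputs in these clip tests are byte-swapped arrays; such data can be produced along these lines (a sketch, not the suite's _generate_non_native_data helper):

    import numpy as np

    a = np.arange(5, dtype='<f8')               # little-endian doubles
    b = a.astype(a.dtype.newbyteorder('>'))     # same values, opposite byte order
    np.clip(b, 1, 3)                            # array([ 1.,  1.,  2.,  3.,  3.])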
a = self._generate_non_native_data(self.nr, self.nc) m = -0.5 M = 0.6 @@ -1086,7 +1200,7 @@ def test_simple_nonnative(self): act = self.clip(a, m, M) assert_array_equal(ac, act) - #Test native double input with non native double scalar min/max. + # Test native double input with non native double scalar min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 M = self._neg_byteorder(0.6) @@ -1096,8 +1210,8 @@ def test_simple_nonnative(self): assert_array_equal(ac, act) def test_simple_complex(self): - #Test native complex input with native double scalar min/max. - #Test native input with complex double scalar min/max. + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. a = 3 * self._generate_data_complex(self.nr, self.nc) m = -0.5 M = 1. @@ -1105,7 +1219,7 @@ def test_simple_complex(self): act = self.clip(a, m, M) assert_array_strict_equal(ac, act) - #Test native input with complex double scalar min/max. + # Test native input with complex double scalar min/max. a = 3 * self._generate_data(self.nr, self.nc) m = -0.5 + 1.j M = 1. + 2.j @@ -1126,7 +1240,7 @@ def test_clip_complex(self): assert_array_strict_equal(aM, a) def test_clip_non_contig(self): - #Test clip for non contiguous native input and native scalar min/max. + # Test clip for non contiguous native input and native scalar min/max. a = self._generate_data(self.nr * 2, self.nc * 3) a = a[::2, ::3] assert_(not a.flags['F_CONTIGUOUS']) @@ -1136,7 +1250,7 @@ def test_clip_non_contig(self): assert_array_strict_equal(ac, act) def test_simple_out(self): - #Test native double input with scalar min/max. + # Test native double input with scalar min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 M = 0.6 @@ -1147,7 +1261,7 @@ def test_simple_out(self): assert_array_strict_equal(ac, act) def test_simple_int32_inout(self): - #Test native int32 input with double min/max and int32 out. + # Test native int32 input with double min/max and int32 out. a = self._generate_int32_data(self.nr, self.nc) m = np.float64(0) M = np.float64(2) @@ -1158,7 +1272,7 @@ def test_simple_int32_inout(self): assert_array_strict_equal(ac, act) def test_simple_int64_out(self): - #Test native int32 input with int32 scalar min/max and int64 out. + # Test native int32 input with int32 scalar min/max and int64 out. a = self._generate_int32_data(self.nr, self.nc) m = np.int32(-1) M = np.int32(1) @@ -1169,7 +1283,7 @@ def test_simple_int64_out(self): assert_array_strict_equal(ac, act) def test_simple_int64_inout(self): - #Test native int32 input with double array min/max and int32 out. + # Test native int32 input with double array min/max and int32 out. a = self._generate_int32_data(self.nr, self.nc) m = np.zeros(a.shape, np.float64) M = np.float64(1) @@ -1180,7 +1294,7 @@ def test_simple_int64_inout(self): assert_array_strict_equal(ac, act) def test_simple_int32_out(self): - #Test native double input with scalar min/max and int out. + # Test native double input with scalar min/max and int out. a = self._generate_data(self.nr, self.nc) m = -1.0 M = 2.0 @@ -1191,7 +1305,7 @@ def test_simple_int32_out(self): assert_array_strict_equal(ac, act) def test_simple_inplace_01(self): - #Test native double input with array min/max in-place. + # Test native double input with array min/max in-place. 
a = self._generate_data(self.nr, self.nc) ac = a.copy() m = np.zeros(a.shape) @@ -1201,7 +1315,7 @@ def test_simple_inplace_01(self): assert_array_strict_equal(a, ac) def test_simple_inplace_02(self): - #Test native double input with scalar min/max in-place. + # Test native double input with scalar min/max in-place. a = self._generate_data(self.nr, self.nc) ac = a.copy() m = -0.5 @@ -1211,7 +1325,7 @@ def test_simple_inplace_02(self): assert_array_strict_equal(a, ac) def test_noncontig_inplace(self): - #Test non contiguous double input with double scalar min/max in-place. + # Test non contiguous double input with double scalar min/max in-place. a = self._generate_data(self.nr * 2, self.nc * 3) a = a[::2, ::3] assert_(not a.flags['F_CONTIGUOUS']) @@ -1224,7 +1338,7 @@ def test_noncontig_inplace(self): assert_array_equal(a, ac) def test_type_cast_01(self): - #Test native double input with scalar min/max. + # Test native double input with scalar min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 M = 0.6 @@ -1233,7 +1347,7 @@ def test_type_cast_01(self): assert_array_strict_equal(ac, act) def test_type_cast_02(self): - #Test native int32 input with int32 scalar min/max. + # Test native int32 input with int32 scalar min/max. a = self._generate_int_data(self.nr, self.nc) a = a.astype(np.int32) m = -2 @@ -1243,7 +1357,7 @@ def test_type_cast_02(self): assert_array_strict_equal(ac, act) def test_type_cast_03(self): - #Test native int32 input with float64 scalar min/max. + # Test native int32 input with float64 scalar min/max. a = self._generate_int32_data(self.nr, self.nc) m = -2 M = 4 @@ -1252,7 +1366,7 @@ def test_type_cast_03(self): assert_array_strict_equal(ac, act) def test_type_cast_04(self): - #Test native int32 input with float32 scalar min/max. + # Test native int32 input with float32 scalar min/max. a = self._generate_int32_data(self.nr, self.nc) m = np.float32(-2) M = np.float32(4) @@ -1261,7 +1375,7 @@ def test_type_cast_04(self): assert_array_strict_equal(ac, act) def test_type_cast_05(self): - #Test native int32 with double arrays min/max. + # Test native int32 with double arrays min/max. a = self._generate_int_data(self.nr, self.nc) m = -0.5 M = 1. @@ -1270,7 +1384,7 @@ def test_type_cast_05(self): assert_array_strict_equal(ac, act) def test_type_cast_06(self): - #Test native with NON native scalar min/max. + # Test native with NON native scalar min/max. a = self._generate_data(self.nr, self.nc) m = 0.5 m_s = self._neg_byteorder(m) @@ -1280,7 +1394,7 @@ def test_type_cast_06(self): assert_array_strict_equal(ac, act) def test_type_cast_07(self): - #Test NON native with native array min/max. + # Test NON native with native array min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 * np.ones(a.shape) M = 1. @@ -1291,7 +1405,7 @@ def test_type_cast_07(self): assert_array_strict_equal(ac, act) def test_type_cast_08(self): - #Test NON native with native scalar min/max. + # Test NON native with native scalar min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 M = 1. @@ -1302,7 +1416,7 @@ def test_type_cast_08(self): assert_array_strict_equal(ac, act) def test_type_cast_09(self): - #Test native with NON native array min/max. + # Test native with NON native array min/max. a = self._generate_data(self.nr, self.nc) m = -0.5 * np.ones(a.shape) M = 1. @@ -1313,7 +1427,7 @@ def test_type_cast_09(self): assert_array_strict_equal(ac, act) def test_type_cast_10(self): - #Test native int32 with float min/max and float out for output argument. 
+ # Test native int32 with float min/max and float out for output argument. a = self._generate_int_data(self.nr, self.nc) b = np.zeros(a.shape, dtype=np.float32) m = np.float32(-0.5) @@ -1323,7 +1437,7 @@ def test_type_cast_10(self): assert_array_strict_equal(ac, act) def test_type_cast_11(self): - #Test non native with native scalar, min/max, out non native + # Test non native with native scalar, min/max, out non native a = self._generate_non_native_data(self.nr, self.nc) b = a.copy() b = b.astype(b.dtype.newbyteorder('>')) @@ -1335,7 +1449,7 @@ def test_type_cast_11(self): assert_array_strict_equal(b, bt) def test_type_cast_12(self): - #Test native int32 input and min/max and float out + # Test native int32 input and min/max and float out a = self._generate_int_data(self.nr, self.nc) b = np.zeros(a.shape, dtype=np.float32) m = np.int32(0) @@ -1345,7 +1459,7 @@ def test_type_cast_12(self): assert_array_strict_equal(ac, act) def test_clip_with_out_simple(self): - #Test native double input with scalar min/max + # Test native double input with scalar min/max a = self._generate_data(self.nr, self.nc) m = -0.5 M = 0.6 @@ -1356,7 +1470,7 @@ def test_clip_with_out_simple(self): assert_array_strict_equal(ac, act) def test_clip_with_out_simple2(self): - #Test native int32 input with double min/max and int32 out + # Test native int32 input with double min/max and int32 out a = self._generate_int32_data(self.nr, self.nc) m = np.float64(0) M = np.float64(2) @@ -1367,7 +1481,7 @@ def test_clip_with_out_simple2(self): assert_array_strict_equal(ac, act) def test_clip_with_out_simple_int32(self): - #Test native int32 input with int32 scalar min/max and int64 out + # Test native int32 input with int32 scalar min/max and int64 out a = self._generate_int32_data(self.nr, self.nc) m = np.int32(-1) M = np.int32(1) @@ -1378,7 +1492,7 @@ def test_clip_with_out_simple_int32(self): assert_array_strict_equal(ac, act) def test_clip_with_out_array_int32(self): - #Test native int32 input with double array min/max and int32 out + # Test native int32 input with double array min/max and int32 out a = self._generate_int32_data(self.nr, self.nc) m = np.zeros(a.shape, np.float64) M = np.float64(1) @@ -1389,7 +1503,7 @@ def test_clip_with_out_array_int32(self): assert_array_strict_equal(ac, act) def test_clip_with_out_array_outint32(self): - #Test native double input with scalar min/max and int out + # Test native double input with scalar min/max and int out a = self._generate_data(self.nr, self.nc) m = -1.0 M = 2.0 @@ -1400,7 +1514,7 @@ def test_clip_with_out_array_outint32(self): assert_array_strict_equal(ac, act) def test_clip_inplace_array(self): - #Test native double input with array min/max + # Test native double input with array min/max a = self._generate_data(self.nr, self.nc) ac = a.copy() m = np.zeros(a.shape) @@ -1410,7 +1524,7 @@ def test_clip_inplace_array(self): assert_array_strict_equal(a, ac) def test_clip_inplace_simple(self): - #Test native double input with scalar min/max + # Test native double input with scalar min/max a = self._generate_data(self.nr, self.nc) ac = a.copy() m = -0.5 @@ -1420,7 +1534,7 @@ def test_clip_inplace_simple(self): assert_array_strict_equal(a, ac) def test_clip_func_takes_out(self): - # Ensure that the clip() function takes an out= argument. + # Ensure that the clip() function takes an out=argument. 
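When out= has a different dtype, the clipped values are cast into it; a same-kind example follows (the int-output cases above additionally relied on the looser output casting of this era):

    import numpy as np

    a = np.array([-1.7, 0.3, 2.9])            # float64 input
    out = np.zeros(3, dtype=np.float32)
    np.clip(a, -1.0, 2.0, out=out)
    # out is now array([-1. ,  0.3,  2. ], dtype=float32)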
a = self._generate_data(self.nr, self.nc) ac = a.copy() m = -0.5 @@ -1456,7 +1570,7 @@ def tst_not_allclose(self, x, y): assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) def test_ip_allclose(self): - #Parametric test factory. + # Parametric test factory. arr = np.array([100, 1000]) aran = np.arange(125).reshape((5, 5, 5)) @@ -1476,7 +1590,7 @@ def test_ip_allclose(self): yield (self.tst_allclose, x, y) def test_ip_not_allclose(self): - #Parametric test factory. + # Parametric test factory. aran = np.arange(125).reshape((5, 5, 5)) atol = self.atol @@ -1714,7 +1828,7 @@ def test_scalars(self): class TestCreationFuncs(TestCase): - #Test ones, zeros, empty and filled + # Test ones, zeros, empty and filled def setUp(self): self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V') @@ -1902,6 +2016,7 @@ def test_filled_like(self): self.check_like_function(np.full_like, 123.456, True) self.check_like_function(np.full_like, np.inf, True) + class TestCorrelate(TestCase): def _setup(self, dt): self.x = np.array([1, 2, 3, 4, 5], dtype=dt) @@ -1966,6 +2081,7 @@ def test_no_overwrite(self): assert_array_equal(d, np.ones(100)) assert_array_equal(k, np.ones(3)) + class TestArgwhere(object): def test_2D(self): x = np.arange(6).reshape((2, 3)) @@ -1978,7 +2094,9 @@ def test_2D(self): def test_list(self): assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + class TestStringFunction(object): + def test_set_string_function(self): a = np.array([1]) np.set_string_function(lambda x: "FOO", repr=True) @@ -1991,6 +2109,7 @@ def test_set_string_function(self): np.set_string_function(None, repr=False) assert_equal(str(a), "[1]") + class TestRoll(TestCase): def test_roll1d(self): x = np.arange(10) @@ -2299,7 +2418,7 @@ def test_ensure_array(self): class ArraySubclass(np.ndarray): pass - a = ArraySubclass((2,2)) + a = ArraySubclass((2, 2)) b = np.require(a, None, ['E']) assert_(type(b) is np.ndarray) @@ -2308,7 +2427,7 @@ class ArraySubclass(np.ndarray): pass for flag in self.flag_names: - a = ArraySubclass((2,2)) + a = ArraySubclass((2, 2)) yield self.set_and_check_flag, flag, None, a diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 88a590517f02..d6a838f3a9a5 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -255,7 +255,7 @@ def test_basic(self): assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) assert_equal(insert(b, [], []), b) # Bools will be treated differently in the future: - #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9]) + # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', FutureWarning) assert_equal( @@ -294,15 +294,15 @@ def test_multidim(self): insert(a, 1, a[:, 2,:], axis=1)) # invalid axis value - assert_raises(IndexError, insert, a, 1, a[:, 2,:], axis=3) - assert_raises(IndexError, insert, a, 1, a[:, 2,:], axis=-4) + assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3) + assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4) # negative axis value a = np.arange(24).reshape((2, 3, 4)) - assert_equal(insert(a, 1, a[:,:, 3], axis=-1), - insert(a, 1, a[:,:, 3], axis=2)) - assert_equal(insert(a, 1, a[:, 2,:], axis=-2), - insert(a, 1, a[:, 2,:], axis=1)) + assert_equal(insert(a, 1, a[:, :, 3], axis=-1), + insert(a, 1, a[:, :, 3], axis=2)) + assert_equal(insert(a, 1, a[:, 2, :], axis=-2), + insert(a, 1, a[:, 2, :], axis=1)) def test_0d(self): # 
This is an error in the future @@ -368,13 +368,13 @@ def test_basic(self): class TestPtp(TestCase): def test_basic(self): - a = [3, 4, 5, 10, -3, -5, 6.0] - assert_equal(np.ptp(a, axis=0), 15.0) - b = [[3, 6.0, 9.0], - [4, 10.0, 5.0], - [8, 3.0, 2.0]] - assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0]) - assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0]) + a = np.array([3, 4, 5, 10, -3, -5, 6.0]) + assert_equal(a.ptp(axis=0), 15.0) + b = np.array([[3, 6.0, 9.0], + [4, 10.0, 5.0], + [8, 3.0, 2.0]]) + assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0]) + assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0]) class TestCumsum(TestCase): @@ -411,12 +411,11 @@ def test_basic(self): if ctype in ['1', 'b']: self.assertRaises(ArithmeticError, np.prod, a) self.assertRaises(ArithmeticError, np.prod, a2, 1) - self.assertRaises(ArithmeticError, np.prod, a) else: - assert_equal(np.prod(a, axis=0), 26400) - assert_array_equal(np.prod(a2, axis=0), + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), np.array([50, 36, 84, 180], ctype)) - assert_array_equal(np.prod(a2, axis=-1), + assert_array_equal(a2.prod(axis=-1), np.array([24, 1890, 600], ctype)) @@ -460,10 +459,10 @@ def test_basic(self): def test_nd(self): x = 20 * rand(10, 20, 30) - out1 = x[:,:, 1:] - x[:,:, :-1] - out2 = out1[:,:, 1:] - out1[:,:, :-1] - out3 = x[1:,:,:] - x[:-1,:,:] - out4 = out3[1:,:,:] - out3[:-1,:,:] + out1 = x[:, :, 1:] - x[:, :, :-1] + out2 = out1[:, :, 1:] - out1[:, :, :-1] + out3 = x[1:, :, :] - x[:-1, :, :] + out4 = out3[1:, :, :] - out3[:-1, :, :] assert_array_equal(diff(x), out1) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, axis=0), out3) @@ -610,7 +609,7 @@ def test_specific_axes(self): assert_array_equal(gradient(x, axis=0), dx[0]) assert_array_equal(gradient(x, axis=1), dx[1]) assert_array_equal(gradient(x, axis=-1), dx[1]) - assert_array_equal(gradient(x, axis=(1,0)), [dx[1], dx[0]]) + assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]]) # test axis=None which means all axes assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]]) @@ -618,7 +617,7 @@ def test_specific_axes(self): assert_almost_equal(gradient(x, axis=None), gradient(x)) # test vararg order - assert_array_equal(gradient(x, 2, 3, axis=(1,0)), [dx[1]/2.0, dx[0]/3.0]) + assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0]) # test maximal number of varargs assert_raises(SyntaxError, gradient, x, 1, 2, axis=1) @@ -1018,8 +1017,8 @@ def test_ndim(self): q = x[:, None, None] + y[None,:, None] + z[None, None,:] qx = (q * wx[:, None, None]).sum(axis=0) - qy = (q * wy[None,:, None]).sum(axis=1) - qz = (q * wz[None, None,:]).sum(axis=2) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) # n-d `x` r = trapz(q, x=x[:, None, None], axis=0) @@ -1501,14 +1500,12 @@ def test_rightmost_binedge(self): assert_(hist[1] == 0.0) def test_finite_range(self): - vals = np.random.random((100,3)) - histogramdd(vals, range=[[0.0,1.0],[0.25,0.75],[0.25,0.5]]) + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) assert_raises(ValueError, histogramdd, vals, - range=[[0.0,1.0],[0.25,0.75],[0.25,np.inf]]) + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) assert_raises(ValueError, histogramdd, vals, - range=[[0.0,1.0],[np.nan,0.75],[0.25,0.5]]) - - + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) class TestUnique(TestCase): From d426ed91b41293f75c90e1795b72c017e8d435ef Mon Sep 17 00:00:00 2001 From: Tushar Gautam Date: Tue, 19 Jan 2016 
01:11:13 +0530
Subject: [PATCH 415/496] BUG: Should fix astype cast bug in numpy.fft module

Closes #6693
---
 numpy/fft/fftpack.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index c3bb732b2615..275be0d77ae8 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -183,7 +183,7 @@ def fft(a, n=None, axis=-1, norm=None):

     """

-    a = asarray(a).astype(complex)
+    a = asarray(a).astype(complex, copy=False)
     if n is None:
         n = a.shape[axis]
     output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)

From ae85a33e8f9be721361c8d5cb3f18eee8af30c44 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 18 Jan 2016 15:30:37 -0700
Subject: [PATCH 416/496] DEP: Emit FutureWarning for NAT comparisons.

In Numpy 1.13 the plan is for NAT comparisons to behave like NaN
comparisons, e.g., False except for 'NAT != NAT', which will be True.
See the discussion at gh-7019 for details.
---
 numpy/core/src/umath/loops.c.src | 44 +++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 2261a80dbeab..7b8dcdbafd6e 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1117,8 +1117,8 @@ NPY_NO_EXPORT void
 }

 /**begin repeat1
- * #kind = equal, not_equal, greater, greater_equal, less, less_equal#
- * #OP = ==, !=, >, >=, <, <=#
+ * #kind = equal, greater, greater_equal, less, less_equal#
+ * #OP = ==, >, >=, <, <=#
 */
 NPY_NO_EXPORT void
 @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
 {
     BINARY_LOOP {
         const @type@ in1 = *(@type@ *)ip1;
         const @type@ in2 = *(@type@ *)ip2;
-        *((npy_bool *)op1) = in1 @OP@ in2;
+        const npy_bool res = in1 @OP@ in2;
+        *((npy_bool *)op1) = res;
+
+        if ((in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) && res) {
+            NPY_ALLOW_C_API_DEF
+            NPY_ALLOW_C_API;
+            /* 2016-01-18, 1.11 */
+            if (DEPRECATE_FUTUREWARNING(
+                    "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
+                    "will always be False.") < 0) {
+                NPY_DISABLE_C_API;
+                return;
+            }
+            NPY_DISABLE_C_API;
+        }
     }
 }
 /**end repeat1**/

+NPY_NO_EXPORT void
+@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+    BINARY_LOOP {
+        const @type@ in1 = *(@type@ *)ip1;
+        const @type@ in2 = *(@type@ *)ip2;
+        *((npy_bool *)op1) = in1 != in2;
+
+        if (in1 == NPY_DATETIME_NAT && in2 == NPY_DATETIME_NAT) {
+            NPY_ALLOW_C_API_DEF
+            NPY_ALLOW_C_API;
+            /* 2016-01-18, 1.11 */
+            if (DEPRECATE_FUTUREWARNING(
+                    "In the future, NAT != NAT will be True "
+                    "rather than False.") < 0) {
+                NPY_DISABLE_C_API;
+                return;
+            }
+            NPY_DISABLE_C_API;
+        }
+    }
+}
+
+
 /**begin repeat1
 * #kind = maximum, minimum#
 * #OP = >, <#

From 31dbc05d86c4018c76562466de0ea569eb1f8a62 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 18 Jan 2016 19:07:12 -0700
Subject: [PATCH 417/496] TST: Add tests for NAT comparison FutureWarning.

The behavior of NAT comparisons will change in Numpy 1.13. Make sure
that a FutureWarning is emitted when the results will change.
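Concretely, the deprecated comparisons look like this at the Python level (1.11-era semantics, per the commit message above and the tests that follow):

    import numpy as np

    nat = np.datetime64('NaT')
    day = np.datetime64('2000-01-01')
    nat == nat   # True, emits FutureWarning; planned to become False in 1.13
    nat != day   # True, no warning; already the planned behavior
    nat != nat   # False, emits FutureWarning; planned to become True in 1.13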
--- numpy/core/tests/test_datetime.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 8aeaf51acb38..601f09c09192 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1086,6 +1086,34 @@ def test_datetime_compare(self): assert_equal(np.greater(a, b), [0, 1, 0, 1, 0]) assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0]) + def test_datetime_compare_nat(self): + dt_nat = np.datetime64('NaT', 'D') + dt_other = np.datetime64('2000-01-01') + td_nat = np.timedelta64('NaT', 'h') + td_other = np.timedelta64(1, 'h') + + for op in [np.equal, np.less, np.less_equal, + np.greater, np.greater_equal]: + if op(dt_nat, dt_nat): + assert_warns(FutureWarning, op, dt_nat, dt_nat) + if op(dt_nat, dt_other): + assert_warns(FutureWarning, op, dt_nat, dt_other) + if op(dt_other, dt_nat): + assert_warns(FutureWarning, op, dt_other, dt_nat) + if op(td_nat, td_nat): + assert_warns(FutureWarning, op, td_nat, td_nat) + if op(td_nat, td_other): + assert_warns(FutureWarning, op, td_nat, td_other) + if op(td_other, td_nat): + assert_warns(FutureWarning, op, td_other, td_nat) + + assert_warns(FutureWarning, np.not_equal, dt_nat, dt_nat) + assert_(np.not_equal(dt_nat, dt_other)) + assert_(np.not_equal(dt_other, dt_nat)) + assert_warns(FutureWarning, np.not_equal, td_nat, td_nat) + assert_(np.not_equal(td_nat, td_other)) + assert_(np.not_equal(td_other, td_nat)) + def test_datetime_minmax(self): # The metadata of the result should become the GCD # of the operand metadata From 48596210faa134149f7bd4a1a82a665e02bb8104 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 18 Jan 2016 12:09:41 -0700 Subject: [PATCH 418/496] MAINT: Refactor polynomial leastsquares fits. * Rewrite the documentation for `deg`. * Check that the passed degrees are int and raise if not. * Do not accept `deg` arguments that do not convert as 0-D or 1-D. * Sort passed degree terms for repeatability. --- numpy/polynomial/_polybase.py | 13 +++++------ numpy/polynomial/chebyshev.py | 41 +++++++++++++++------------------- numpy/polynomial/hermite.py | 37 +++++++++++++----------------- numpy/polynomial/hermite_e.py | 37 +++++++++++++----------------- numpy/polynomial/laguerre.py | 37 +++++++++++++----------------- numpy/polynomial/legendre.py | 37 +++++++++++++----------------- numpy/polynomial/polynomial.py | 41 +++++++++++++++------------------- 7 files changed, 105 insertions(+), 138 deletions(-) diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 37eb59f039aa..6fa72b6f925e 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -739,14 +739,11 @@ def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. 
For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. domain : {None, [beg, end], []}, optional Domain to use for the returned series. If ``None``, then a minimal domain that covers the points `x` is chosen. If diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 9db613b78b23..2537bea32d43 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1617,14 +1617,11 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int or array_like - Degree of the fitting series. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1718,11 +1715,11 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. - if deg.size < 1: - raise TypeError("expected deg to be one or more integers") + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: @@ -1734,19 +1731,17 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") - if deg.size == 1: - restricted_fit = False - lmax = deg[0] + if deg.ndim == 0: + lmax = deg order = lmax + 1 + van = chebvander(x, lmax) else: - restricted_fit = True - lmax = deg.max() - order = deg.size + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = chebvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form - van = chebvander(x, lmax) - if restricted_fit: - van = van[:, deg] lhs = van.T rhs = y.T if w is not None: @@ -1776,11 +1771,11 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero - if restricted_fit: + if deg.ndim > 0: if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) else: - cc = np.zeros(lmax+1, dtype=c.dtype) + cc = np.zeros(lmax + 1, dtype=c.dtype) cc[deg] = c c = cc diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 5d4b357fed7e..e234c8e2319c 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1388,14 +1388,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. 
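A restricted fit of the kind this documentation describes, sketched against the 1.11 API with chebfit: fitting only the odd Chebyshev terms of an odd function leaves the even coefficient slots at zero.

    import numpy as np
    from numpy.polynomial import chebyshev as cheb

    x = np.linspace(-1, 1, 51)
    y = 2*x**3 - x                    # odd function
    cheb.chebfit(x, y, deg=[1, 3])    # approx. array([ 0. ,  0.5,  0. ,  0.5])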
- deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1494,11 +1491,11 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. - if deg.size < 1: - raise TypeError("expected deg to be one or more integers") + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: @@ -1510,19 +1507,17 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") - if deg.size == 1: - restricted_fit = False - lmax = deg[0] + if deg.ndim == 0: + lmax = deg order = lmax + 1 + van = hermvander(x, lmax) else: - restricted_fit = True - lmax = deg.max() - order = deg.size + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = hermvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form - van = hermvander(x, lmax) - if restricted_fit: - van = van[:, deg] lhs = van.T rhs = y.T if w is not None: @@ -1552,7 +1547,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero - if restricted_fit: + if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index da441af8316b..08e83899aa70 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1385,14 +1385,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. 
The @@ -1491,11 +1488,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. - if deg.size < 1: - raise TypeError("expected deg to be one or more integers") + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: @@ -1507,19 +1504,17 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") - if deg.size == 1: - restricted_fit = False - lmax = deg[0] + if deg.ndim == 0: + lmax = deg order = lmax + 1 + van = hermevander(x, lmax) else: - restricted_fit = True - lmax = deg.max() - order = deg.size + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = hermevander(x, lmax)[:, deg] # set up the least squares matrices in transposed form - van = hermevander(x, lmax) - if restricted_fit: - van = van[:, deg] lhs = van.T rhs = y.T if w is not None: @@ -1549,7 +1544,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero - if restricted_fit: + if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 280e28159451..d459551ae894 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1387,14 +1387,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1493,11 +1490,11 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. 
- if deg.size < 1: - raise TypeError("expected deg to be one or more integers") + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: @@ -1509,19 +1506,17 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") - if deg.size == 1: - restricted_fit = False - lmax = deg[0] + if deg.ndim == 0: + lmax = deg order = lmax + 1 + van = lagvander(x, lmax) else: - restricted_fit = True - lmax = deg.max() - order = deg.size + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = lagvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form - van = lagvander(x, lmax) - if restricted_fit: - van = van[:, deg] lhs = van.T rhs = y.T if w is not None: @@ -1551,7 +1546,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero - if restricted_fit: + if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 2035ba6e930a..54e9895db95f 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1418,14 +1418,11 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. - deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The @@ -1521,11 +1518,11 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. 
- if deg.size < 1: - raise TypeError("expected deg to be one or more integers") + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") if deg.min() < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: @@ -1537,19 +1534,17 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): if len(x) != len(y): raise TypeError("expected x and y to have same length") - if deg.size == 1: - restricted_fit = False - lmax = deg[0] + if deg.ndim == 0: + lmax = deg order = lmax + 1 + van = legvander(x, lmax) else: - restricted_fit = True - lmax = deg.max() - order = deg.size + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = legvander(x, lmax)[:, deg] # set up the least squares matrices in transposed form - van = legvander(x, lmax) - if restricted_fit: - van = van[:, deg] lhs = van.T rhs = y.T if w is not None: @@ -1579,7 +1574,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): c = (c.T/scl).T # Expand c to include non-fitted coefficients which are set to zero - if restricted_fit: + if deg.ndim > 0: if c.ndim == 2: cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) else: diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 7c922c11bf7f..5d05f599191e 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1217,14 +1217,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): sharing the same x-coordinates can be (independently) fit with one call to `polyfit` by passing in for `y` a 2-D array that contains one data set per column. - deg : int or array_like - Degree of the fitting polynomial. If `deg` is a single integer - all terms up to and including the `deg`'th term are included. - `deg` may alternatively be a list or array specifying which - terms in the Legendre expansion to include in the fit. - - .. versionchanged:: 1.11.0 - `deg` may be a list specifying which terms to fit + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For Numpy versions >= 1.11 a list of integers specifying the + degrees of the terms to include may be used instead. rcond : float, optional Relative condition number of the fit. Singular values smaller than `rcond`, relative to the largest singular value, will be @@ -1340,11 +1337,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): """ x = np.asarray(x) + 0.0 y = np.asarray(y) + 0.0 - deg = np.asarray([deg,], dtype=int).flatten() + deg = np.asarray(deg) # check arguments. 
-    if deg.size < 1:
-        raise TypeError("expected deg to be one or more integers")
+    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int or non-empty 1-D array of int")
     if deg.min() < 0:
         raise ValueError("expected deg >= 0")
     if x.ndim != 1:
@@ -1356,19 +1353,17 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
     if len(x) != len(y):
         raise TypeError("expected x and y to have same length")
 
-    if deg.size == 1:
-        restricted_fit = False
-        lmax = deg[0]
+    if deg.ndim == 0:
+        lmax = deg
         order = lmax + 1
+        van = polyvander(x, lmax)
     else:
-        restricted_fit = True
-        lmax = deg.max()
-        order = deg.size
+        deg = np.sort(deg)
+        lmax = deg[-1]
+        order = len(deg)
+        van = polyvander(x, lmax)[:, deg]
 
     # set up the least squares matrices in transposed form
-    van = polyvander(x, lmax)
-    if restricted_fit:
-        van = van[:, deg]
     lhs = van.T
     rhs = y.T
     if w is not None:
@@ -1398,11 +1393,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
         c = (c.T/scl).T
 
     # Expand c to include non-fitted coefficients which are set to zero
-    if restricted_fit:
+    if deg.ndim == 1:
        if c.ndim == 2:
-            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
+            cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
        else:
-            cc = np.zeros(lmax+1, dtype=c.dtype)
+            cc = np.zeros(lmax + 1, dtype=c.dtype)
        cc[deg] = c
        c = cc

From d99acf9d561fbe535eccf344160c6e698d9ab6bb Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Mon, 18 Jan 2016 12:23:55 -0700
Subject: [PATCH 419/496] DOC: Update the 1.11.0 release notes.

Rewrite the documentation of added functionality of the deg parameter
of the polynomial fitting functions in the numpy.polynomial package and
put it in the 'New Features' section.
---
 doc/release/1.11.0-notes.rst | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index ea0e41694bbb..3ce63b116278 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -27,7 +27,7 @@ Future Changes
 
 * Relaxed stride checking will become the default in 1.12.0.
 * Support for Python 2.6, 3.2, and 3.3 will be dropped in 1.12.0.
-* ``MaskedArray``s take views of data **and** masks when slicing in 1.12.0.
+* ``MaskedArray`` takes view of data **and** mask when slicing in 1.12.0.
 
 Compatibility notes
 ===================
@@ -59,7 +59,8 @@ it handles by converting to UTC. However, the resulting datetime is
 timezone naive::
 
   >>> np.datetime64('2000-01-01T00:00:00-08')
-  DeprecationWarning: parsing timezone aware datetimes is deprecated; this will raise an error in the future
+  DeprecationWarning: parsing timezone aware datetimes is deprecated;
+  this will raise an error in the future
   numpy.datetime64('2000-01-01T08:00:00')
 
 As a corollary to this change, we no longer prohibit casting between datetimes
@@ -68,6 +69,11 @@ the rule for casting from dates to times is no longer ambiguous.
 
 pandas_: http://pandas.pydata.org
 
+polynomial fit changes
+~~~~~~~~~~~~~~~~~~~~~~
+The various fit functions in the numpy polynomial package no longer accept
+non-integers for degree specification.
+
 DeprecationWarning to error
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -156,6 +162,14 @@ New Features
 
 * ``np.moveaxis`` allows for moving one or more array axes to a new position
   by explicitly providing source and destination axes.
 
+* numpy.polynomial fits now support degree selection. The ``deg``
+  parameter was extended to allow fitting using only specified terms in the
+  polynomial expansion for all polynomial types. The change is backward
+  compatible and it is still possible to specify ``deg`` as before, but it
+  is now possible to pass ``deg`` as a list specifying which terms in the
+  series to use in the fit.
+
+
 Improvements
 ============
 
@@ -208,6 +222,7 @@ Speed improvement for np.random.shuffle
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ``np.random.shuffle`` is now much faster for 1d ndarrays.
 
+
 Changes
 =======
 Pyrex support was removed from ``numpy.distutils``. The method
@@ -240,14 +255,6 @@ arguments cannot be cast to a common type, it could have raised a
 ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot``
 will now always raise a ``TypeError``.
 
-numpy.polynomial.*fit now supports restricted fitting
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``deg`` parameter was extended to allow restricted fitting of
-specified terms in the polynomial expansion for all polynomial
-types. This change is backward compatible and it is still possible to
-specify ``deg`` as a single integer to specify the maximum
-order/degree of polynomial used in the fit, but it is now possible for
-``deg`` to be a list specifying which terms in the series to fit.
 
 Deprecations
 ============

From 70350f4e2ca3afe5362a8b565f1d101221c4e6a8 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 19 Jan 2016 18:42:23 -0700
Subject: [PATCH 420/496] REL: Prepare for 1.11.x branch.

* Add comment to cversions.txt (no change)
* Add comment to setup_common.py (no change)
* Nothing done for numpy/core/include/numpy/numpyconfig.h
* Update log start to 1.10.0 in pavement.py.
---
 numpy/core/code_generators/cversions.txt | 1 +
 numpy/core/setup_common.py               | 1 +
 pavement.py                              | 2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index dea6d0ecf792..c66947fafe7c 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -31,4 +31,5 @@
 0x00000009 = 982c4ebb6e7e4c194bf46b1535b4ef1b
 
 # Version 10 (NumPy 1.10) Added PyArray_CheckAnyScalarExact
+# Version 10 (NumPy 1.11) No change.
 0x0000000a = 9b8bce614655d3eb02acddcb508203cb

diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 57ddf3396c2e..ba7521e3043b 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -36,6 +36,7 @@
 # 0x00000009 - 1.8.x
 # 0x00000009 - 1.9.x
 # 0x0000000a - 1.10.x
+# 0x0000000a - 1.11.x
 C_API_VERSION = 0x0000000a
 
 class MismatchCAPIWarning(Warning):

diff --git a/pavement.py b/pavement.py
index ace0a5c022fc..01328442e0cd 100644
--- a/pavement.py
+++ b/pavement.py
@@ -102,7 +102,7 @@
 RELEASE_NOTES = 'doc/release/1.11.0-notes.rst'
 
 # Start/end of the log (from git)
-LOG_START = 'v1.10.0b1'
+LOG_START = 'v1.10.0'
 LOG_END = 'master'

From 11a9b710481f5932835c1c0cb1e5e2747be8cde0 Mon Sep 17 00:00:00 2001
From: Charles Harris
Date: Tue, 19 Jan 2016 19:53:20 -0700
Subject: [PATCH 421/496] REL: Update master branch after 1.11.x branch has
 been made.

* Drop testing of Python 2.6, 3.2, and 3.3
* Create 1.12.0-notes.rst and add to source/documentation.
* Update pavement.py to use 1.11.x as LOG_START
* Update version number in setup.py
---
 .travis.yml                  |  3 --
 doc/release/1.12.0-notes.rst | 66 ++++++++++++++++++++++++++++++++++++
 doc/source/release.rst       |  1 +
 pavement.py                  |  4 +--
 setup.py                     |  2 +-
 5 files changed, 70 insertions(+), 6 deletions(-)
 create mode 100644 doc/release/1.12.0-notes.rst

diff --git a/.travis.yml b/.travis.yml
index 3066cbbaa207..c14994d0d81e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,10 +31,7 @@ env:
            ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
 
 python:
-  - 2.6
   - 2.7
-  - 3.2
-  - 3.3
   - 3.4
   - 3.5
 matrix:

diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
new file mode 100644
index 000000000000..ee4e2d24a2cd
--- /dev/null
+++ b/doc/release/1.12.0-notes.rst
@@ -0,0 +1,66 @@
+NumPy 1.12.0 Release Notes
+**************************
+
+This release supports Python 2.7 and 3.4 - 3.5.
+
+Highlights
+==========
+
+
+Dropped Support
+===============
+
+* Support for Python 2.6, 3.2, and 3.3 has been dropped.
+
+
+Future Changes
+==============
+
+* In 1.13 NAT will always compare False except for ``NAT != NAT``,
+  which will be True. In short, NAT will behave like NaN
+
+
+Compatibility notes
+===================
+
+Relaxed stride checking is the default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This will have some impact on code that assumed that ``F_CONTIGUOUS`` and
+``C_CONTIGUOUS`` were mutually exclusive and could be set to determine the
+default order for arrays that are now both.
+
+``MaskedArray`` takes view of data **and** mask when slicing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+XXX
+
+
+DeprecationWarning to error
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+FutureWarning to changed behavior
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+C API
+~~~~~
+
+
+New Features
+============
+
+
+
+Improvements
+============
+
+
+
+Changes
+=======
+
+Deprecations
+============
+

diff --git a/doc/source/release.rst b/doc/source/release.rst
index 6da61763f832..1801b2067513 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,6 +2,7 @@
 Release Notes
 *************
 
+.. include:: ../release/1.12.0-notes.rst
 .. include:: ../release/1.11.0-notes.rst
 .. include:: ../release/1.10.4-notes.rst
 .. include:: ../release/1.10.3-notes.rst

diff --git a/pavement.py b/pavement.py
index 01328442e0cd..d02cc48c5437 100644
--- a/pavement.py
+++ b/pavement.py
@@ -99,10 +99,10 @@
 #-----------------------------------
 
 # Source of the release notes
-RELEASE_NOTES = 'doc/release/1.11.0-notes.rst'
+RELEASE_NOTES = 'doc/release/1.12.0-notes.rst'
 
 # Start/end of the log (from git)
-LOG_START = 'v1.10.0'
+LOG_START = 'maintenance/1.11.x'
 LOG_END = 'master'

diff --git a/setup.py b/setup.py
index ded914b11dc4..a7f66a03d851 100755
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
 """
 MAJOR = 1
-MINOR = 11
+MINOR = 12
 MICRO = 0
 ISRELEASED = False
 VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)

From 0b150b8cfafa4d8c75e47e6e9c8b3b23d7c0a2b6 Mon Sep 17 00:00:00 2001
From: gfyoung
Date: Tue, 12 Jan 2016 02:14:38 +0000
Subject: [PATCH 422/496] MAINT: Simplified mtrand.pyx helpers

Refactored methods that broadcast arguments together by finding
additional common ground between code in the if...else branches that
involved a size parameter being passed in.
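In pure Python, the pattern the helpers now share looks roughly like the
sketch below. This is an illustration only, not code from the diff:
`cont2_like` and `func` are hypothetical stand-ins for the Cython helpers
and the rk_* sampler pointers, and the real fill loop runs in C with the
GIL released.

    import numpy as np

    def cont2_like(func, oa, ob, size=None):
        # Broadcast the two parameter arrays against each other and,
        # when a size is requested, against the output shape as well.
        oa = np.asarray(oa, dtype=np.float64)
        ob = np.asarray(ob, dtype=np.float64)
        if size is None:
            multi = np.broadcast(oa, ob)
            out = np.empty(multi.shape, dtype=np.float64)
        else:
            out = np.empty(size, dtype=np.float64)
            multi = np.broadcast(oa, ob, out)
            if multi.shape != out.shape:
                raise ValueError("size is not compatible with inputs")
        # nditer broadcasts oa and ob over out, mimicking the flat
        # multi-iterator walk done in C by the refactored helpers.
        it = np.nditer([oa, ob, out],
                       op_flags=[['readonly'], ['readonly'],
                                 ['writeonly']])
        for a, b, o in it:
            o[...] = func(float(a), float(b))
        return out

For example, cont2_like(lambda a, b: a + b, [1.0, 2.0, 3.0], 10.0)
returns array([11., 12., 13.]).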
--- numpy/random/mtrand/mtrand.pyx | 178 ++++++++++++++------------------- 1 file changed, 74 insertions(+), 104 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index c8738cf6f5f3..2f315c5d37fd 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -250,28 +250,23 @@ cdef object cont2_array(rk_state *state, rk_cont2 func, object size, cdef broadcast multi if size is None: - multi = PyArray_MultiIterNew(2, oa, ob) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) - array_data = PyArray_DATA(array) - with lock, nogil: - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 0) - ob_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, oa_data[0], ob_data[0]) - PyArray_MultiIter_NEXT(multi) + multi = np.broadcast(oa, ob) + array = np.empty(multi.shape, dtype=np.float64) else: - array = np.empty(size, np.float64) - array_data = PyArray_DATA(array) - multi = PyArray_MultiIterNew(3, array, oa, ob) - if (multi.size != PyArray_SIZE(array)): + array = np.empty(size, dtype=np.float64) + multi = np.broadcast(oa, ob, array) + if multi.shape != array.shape: raise ValueError("size is not compatible with inputs") - with lock, nogil: - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - ob_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, oa_data[0], ob_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) + + array_data = PyArray_DATA(array) + + with lock, nogil: + for i in range(multi.size): + oa_data = PyArray_MultiIter_DATA(multi, 0) + ob_data = PyArray_MultiIter_DATA(multi, 1) + array_data[i] = func(state, oa_data[0], ob_data[0]) + PyArray_MultiIter_NEXT(multi) + return array cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a, @@ -305,30 +300,24 @@ cdef object cont3_array(rk_state *state, rk_cont3 func, object size, cdef broadcast multi if size is None: - multi = PyArray_MultiIterNew(3, oa, ob, oc) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) - array_data = PyArray_DATA(array) - with lock, nogil: - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 0) - ob_data = PyArray_MultiIter_DATA(multi, 1) - oc_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) - PyArray_MultiIter_NEXT(multi) + multi = np.broadcast(oa, ob, oc) + array = np.empty(multi.shape, dtype=np.float64) else: - array = np.empty(size, np.float64) - array_data = PyArray_DATA(array) - multi = PyArray_MultiIterNew(4, array, oa, - ob, oc) - if (multi.size != PyArray_SIZE(array)): + array = np.empty(size, dtype=np.float64) + multi = np.broadcast(oa, ob, oc, array) + if multi.shape != array.shape: raise ValueError("size is not compatible with inputs") - with lock, nogil: - for i from 0 <= i < multi.size: - oa_data = PyArray_MultiIter_DATA(multi, 1) - ob_data = PyArray_MultiIter_DATA(multi, 2) - oc_data = PyArray_MultiIter_DATA(multi, 3) - array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) - PyArray_MultiIter_NEXT(multi) + + array_data = PyArray_DATA(array) + + with lock, nogil: + for i in range(multi.size): + oa_data = PyArray_MultiIter_DATA(multi, 0) + ob_data = PyArray_MultiIter_DATA(multi, 1) + oc_data = PyArray_MultiIter_DATA(multi, 2) + array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) + PyArray_MultiIter_NEXT(multi) + return array cdef object disc0_array(rk_state *state, rk_disc0 func, 
object size, object lock): @@ -376,28 +365,22 @@ cdef object discnp_array(rk_state *state, rk_discnp func, object size, cdef broadcast multi if size is None: - multi = PyArray_MultiIterNew(2, on, op) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = PyArray_DATA(array) - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - op_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXT(multi) + multi = np.broadcast(on, op) + array = np.empty(multi.shape, dtype=int) else: - array = np.empty(size, int) - array_data = PyArray_DATA(array) - multi = PyArray_MultiIterNew(3, array, on, op) - if (multi.size != PyArray_SIZE(array)): + array = np.empty(size, dtype=int) + multi = np.broadcast(on, op, array) + if multi.shape != array.shape: raise ValueError("size is not compatible with inputs") - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - op_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) + + array_data = PyArray_DATA(array) + + with lock, nogil: + for i in range(multi.size): + on_data = PyArray_MultiIter_DATA(multi, 0) + op_data = PyArray_MultiIter_DATA(multi, 1) + array_data[i] = func(state, on_data[0], op_data[0]) + PyArray_MultiIter_NEXT(multi) return array @@ -429,28 +412,22 @@ cdef object discdd_array(rk_state *state, rk_discdd func, object size, cdef broadcast multi if size is None: - multi = PyArray_MultiIterNew(2, on, op) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = PyArray_DATA(array) - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - op_data = PyArray_MultiIter_DATA(multi, 1) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXT(multi) + multi = np.broadcast(on, op) + array = np.empty(multi.shape, dtype=int) else: - array = np.empty(size, int) - array_data = PyArray_DATA(array) - multi = PyArray_MultiIterNew(3, array, on, op) - if (multi.size != PyArray_SIZE(array)): + array = np.empty(size, dtype=int) + multi = np.broadcast(on, op, array) + if multi.shape != array.shape: raise ValueError("size is not compatible with inputs") - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - op_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], op_data[0]) - PyArray_MultiIter_NEXTi(multi, 1) - PyArray_MultiIter_NEXTi(multi, 2) + + array_data = PyArray_DATA(array) + + with lock, nogil: + for i in range(multi.size): + on_data = PyArray_MultiIter_DATA(multi, 0) + op_data = PyArray_MultiIter_DATA(multi, 1) + array_data[i] = func(state, on_data[0], op_data[0]) + PyArray_MultiIter_NEXT(multi) return array @@ -483,30 +460,23 @@ cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, cdef broadcast multi if size is None: - multi = PyArray_MultiIterNew(3, on, om, oN) - array = PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) - array_data = PyArray_DATA(array) - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 0) - om_data = PyArray_MultiIter_DATA(multi, 1) - oN_data = PyArray_MultiIter_DATA(multi, 2) - array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) - PyArray_MultiIter_NEXT(multi) + multi = np.broadcast(on, om, oN) + array = 
np.empty(multi.shape, dtype=int) else: - array = np.empty(size, int) - array_data = PyArray_DATA(array) - multi = PyArray_MultiIterNew(4, array, on, om, - oN) - if (multi.size != PyArray_SIZE(array)): + array = np.empty(size, dtype=int) + multi = np.broadcast(on, om, oN, array) + if multi.shape != array.shape: raise ValueError("size is not compatible with inputs") - with lock, nogil: - for i from 0 <= i < multi.size: - on_data = PyArray_MultiIter_DATA(multi, 1) - om_data = PyArray_MultiIter_DATA(multi, 2) - oN_data = PyArray_MultiIter_DATA(multi, 3) - array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) - PyArray_MultiIter_NEXT(multi) + + array_data = PyArray_DATA(array) + + with lock, nogil: + for i in range(multi.size): + on_data = PyArray_MultiIter_DATA(multi, 0) + om_data = PyArray_MultiIter_DATA(multi, 1) + oN_data = PyArray_MultiIter_DATA(multi, 2) + array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) + PyArray_MultiIter_NEXT(multi) return array From 7e4baa91982f1f0901f4133a389438a0ca048544 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 20 Jan 2016 13:46:15 -0700 Subject: [PATCH 423/496] MAINT: Update the git .mailmap file. [ci skip] --- .mailmap | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.mailmap b/.mailmap index 4850a114319d..d1714279769d 100644 --- a/.mailmap +++ b/.mailmap @@ -10,16 +10,23 @@ Aaron Baecker abaecker Abdul Muneer abdulmuneer +Adam Ginsburg Adam Ginsburg +Adam Ginsburg Adam Ginsburg Albert Jornet Puig jurnix Alex Griffing alex Alex Griffing argriffing Alex Griffing argriffing Alex Griffing argriffing Alexander Belopolsky Alexander Belopolsky +Alok Singhal Alok Singhal +Alok Singhal Alok Singhal Amir Sarabadani amir Anne Archibald aarchiba +Anne Archibald Anne Archibald Anže Starič astaric Aron Ahmadia ahmadia +Arun Persaud Arun Persaud +Arun Persaud Arun Persaud Behzad Nouri behzad nouri Benjamin Root Ben Root Benjamin Root weathergod @@ -28,10 +35,13 @@ Bryan Van de Ven Bryan Van de Ven Bryan Van de Ven Bryan Van de Ven Carl Kleffner carlkl Chris Burns chris.burns +Chris Kerr Chris Kerr +Chris Kerr Chris Kerr Christoph Gohlke Christolph Gohlke Christoph Gohlke cgholke Christoph Gohlke cgohlke Christopher Hanley chanley +Daniel Daniel Daniel J Farrell danieljfarrell Daniel Müllner Daniel Daniel Müllner dmuellner @@ -44,11 +54,25 @@ Derek Homeier Derek Homeier Derek Homeir Derek Homeier Derek Homier Egor Zindy zindy +Eric Fode Eric Fode +Eric Fode Eric Fode Ernest N. 
Mamikonyan mamikony +Evgeni Burovski Evgeni Burovski +Evgeni Burovski Evgeni Burovski Evgeny Toder eltjpm Fernando Perez Fernando Perez Gael Varoquaux GaelVaroquaux +Gerrit Holl Gerrit Holl +Gerrit Holl Gerrit Holl Giuseppe Venturini ggventurini +Greg Young gfyoung +Greg Young gfyoung +Jason Grout Jason Grout +Jason Grout Jason Grout +Joseph Martinot-Lagarde Joseph Martinot-Lagarde +Joseph Martinot-Lagarde Joseph Martinot-Lagarde +Julien Lhermitte Julien Lhermitte +Julien Lhermitte Julien Lhermitte Han Genuit 87 Han Genuit Han Han Genuit hangenuit@gmail.com @@ -90,6 +114,8 @@ Ralf Gommers rgommers Ritta Narita RittaNarita Robert Kern Robert Kern Robert LU RobberPhex +Ronan Lamy Ronan Lamy +Ronan Lamy Ronan Lamy Russell Hewett rhewett Ryan Blakemore ryanblak Sam Preston jspreston @@ -97,6 +123,8 @@ Saullo Giovani saullogiovani Sebastian Berg seberg Stefan van der Walt Stefan van der Walt Stefan van der Walt Stefan van der Walt +Stephan Hoyer Stephan Hoyer +Stephan Hoyer Stephan Hoyer Thomas A Caswell Thomas A Caswell Thomas A Caswell Thomas A Caswell Tim Cera tim cera From 8c84718365c073fb5ea84e7d3c05fa72bc5c5698 Mon Sep 17 00:00:00 2001 From: Dmitry Odzerikho Date: Thu, 21 Jan 2016 11:35:52 -0500 Subject: [PATCH 424/496] BLD: fix compilation on non glibc-Linuxes Non-glibc Linuxes dont have the __GLIBC_PREREQ function and compilation of numpy fails on such platforms. To avoid this the TRIG_OK check should be done only in the glibc environment The patch is taken from AlpineLinux repository http://git.alpinelinux.org/cgit/aports/tree/testing/py-numpy/numpy-1.10.0-musl.patch?id=2e5c4bfcf1c9746edd58a8e684d01403f234e71d --- numpy/core/src/private/npy_config.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index eb9c1e19d342..4268f2982d77 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -70,8 +70,8 @@ #endif /* defined(_MSC_VER) && defined(__INTEL_COMPILER) */ -/* Disable broken gnu trig functions on linux */ -#if defined(__linux__) && defined(__GNUC__) +/* Disable broken glibc trig functions on linux */ +#if defined(__linux__) && defined(__GLIBC__) #if defined(HAVE_FEATURES_H) #include @@ -102,6 +102,6 @@ #endif #undef TRIG_OK -#endif /* defined(__linux__) && defined(__GNUC__) */ +#endif /* defined(__linux__) && defined(__GLIBC__) */ #endif From 8ff96f67a0b36079dd211d8a7cddb0262f489b33 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 21 Jan 2016 14:07:01 -0700 Subject: [PATCH 425/496] Revert "BLD: fix compilation on non glibc-Linuxes" --- numpy/core/src/private/npy_config.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index 4268f2982d77..eb9c1e19d342 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -70,8 +70,8 @@ #endif /* defined(_MSC_VER) && defined(__INTEL_COMPILER) */ -/* Disable broken glibc trig functions on linux */ -#if defined(__linux__) && defined(__GLIBC__) +/* Disable broken gnu trig functions on linux */ +#if defined(__linux__) && defined(__GNUC__) #if defined(HAVE_FEATURES_H) #include @@ -102,6 +102,6 @@ #endif #undef TRIG_OK -#endif /* defined(__linux__) && defined(__GLIBC__) */ +#endif /* defined(__linux__) && defined(__GNUC__) */ #endif From f189e2adcdd05596a6f65b4097e2f12f9c0d9ce9 Mon Sep 17 00:00:00 2001 From: Dmitry Odzerikho Date: Fri, 22 Jan 2016 00:46:34 +0300 Subject: [PATCH 
426/496] BLD: fix compilation on non glibc-Linuxes Non-glibc Linuxes dont have the __GLIBC_PREREQ function and compilation of numpy fails on such platforms. --- numpy/core/src/private/npy_config.h | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h index eb9c1e19d342..3d1470609650 100644 --- a/numpy/core/src/private/npy_config.h +++ b/numpy/core/src/private/npy_config.h @@ -70,17 +70,13 @@ #endif /* defined(_MSC_VER) && defined(__INTEL_COMPILER) */ -/* Disable broken gnu trig functions on linux */ -#if defined(__linux__) && defined(__GNUC__) - +/* Disable broken gnu trig functions */ #if defined(HAVE_FEATURES_H) #include -#define TRIG_OK __GLIBC_PREREQ(2, 16) -#else -#define TRIG_OK 0 -#endif -#if !TRIG_OK +#if defined(__GLIBC__) +#if !__GLIBC_PREREQ(2, 16) + #undef HAVE_CASIN #undef HAVE_CASINF #undef HAVE_CASINL @@ -99,9 +95,10 @@ #undef HAVE_CACOSH #undef HAVE_CACOSHF #undef HAVE_CACOSHL -#endif -#undef TRIG_OK -#endif /* defined(__linux__) && defined(__GNUC__) */ +#endif /* __GLIBC_PREREQ(2, 16) */ +#endif /* defined(__GLIBC_PREREQ) */ + +#endif /* defined(HAVE_FEATURES_H) */ #endif From e77b7b98df233f72a9d50934a4bf5b93c163b482 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Thu, 21 Jan 2016 13:24:59 -0500 Subject: [PATCH 427/496] BUG: In `norm`, always cast non-floating point arrays to 64-bit floats. Otherwise, weird integer roundoff errors give faulty results in some cases. --- numpy/linalg/linalg.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 9dc879d31045..fe2031efbcdf 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -2112,6 +2112,9 @@ def norm(x, ord=None, axis=None, keepdims=False): """ x = asarray(x) + if not issubclass(x.dtype.type, inexact): + x = x.astype(float) + # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim From bc4a17ed89004ed63558a3e7f0bd035580777aa7 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 22 Jan 2016 13:10:57 -0500 Subject: [PATCH 428/496] TST: Verify that `norm` is properly casting values to floats as needed. 
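Roughly, the failure mode the cast in the previous commit guards against
looks like the sketch below. The values are made up for illustration and
assume a platform where the squares wrap in 32-bit arithmetic; they are
not taken from a bug report.

    import numpy as np

    x = np.array([100000, 100000], dtype=np.int32)

    # 100000**2 == 10**10 does not fit in int32, so the squares wrap:
    print(x * x)                          # [1410065408 1410065408]
    print(np.sqrt(float((x * x).sum())))  # ~53104.9, a faulty result
    print(np.linalg.norm(x))              # ~141421.36 with the cast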
--- numpy/linalg/tests/test_linalg.py | 96 +++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index fc139be1923c..5c6142b7b55a 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -7,6 +7,7 @@ import sys import itertools import traceback +import warnings import numpy as np from numpy import array, single, double, csingle, cdouble, dot, identity @@ -845,6 +846,101 @@ def test_empty(self): assert_equal(norm(array([], dtype=self.dt)), 0.0) assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + # Trying to assert equality to `np.dtype(int).type` fails on + # 32-bit platforms as it still becomes `np.int64` instead of + # `np.int32`. So, this is our workaround. + assert_(an.dtype.type in [np.int32, np.int64]) + assert_almost_equal(an, 2) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0**(1.0/2.0)) + + an = norm(at, 4) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0**(1.0/4.0)) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + def test_matrix_return_type(self): + a = np.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + assert_(issubclass(an.dtype.type, np.floating)) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
+ old_assert_almost_equal(an, 2.7320508075688772, decimal=6) + def test_vector(self): a = [1, 2, 3, 4] b = [-1, -2, -3, -4] From 43c6a89128347928c5fe26f67ba2a0a022f00822 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 22 Jan 2016 17:31:13 -0500 Subject: [PATCH 429/496] BUG: Make sure that the `ord=0` case returns a float. --- numpy/linalg/linalg.py | 2 +- numpy/linalg/tests/test_linalg.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index fe2031efbcdf..f333bde47d15 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -2150,7 +2150,7 @@ def norm(x, ord=None, axis=None, keepdims=False): return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm - return (x != 0).sum(axis=axis, keepdims=keepdims) + return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 5c6142b7b55a..60486d4cec19 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -868,10 +868,7 @@ def test_vector_return_type(self): assert_almost_equal(an, 0.0) an = norm(at, 0) - # Trying to assert equality to `np.dtype(int).type` fails on - # 32-bit platforms as it still becomes `np.int64` instead of - # `np.int32`. So, this is our workaround. - assert_(an.dtype.type in [np.int32, np.int64]) + assert_(issubclass(an.dtype.type, np.floating)) assert_almost_equal(an, 2) an = norm(at, 1) From 75d5b59bca181ee7e5ba872999014006c4b6c3f3 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 22 Jan 2016 17:35:42 -0500 Subject: [PATCH 430/496] DOC: Update `norm` docstring to include examples that reflect that all computations are done with floating point numbers. --- numpy/linalg/linalg.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index f333bde47d15..9d486d2a5d0f 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -2060,22 +2060,22 @@ def norm(x, ord=None, axis=None, keepdims=False): >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) - 4 + 4.0 >>> LA.norm(b, np.inf) - 9 + 9.0 >>> LA.norm(a, -np.inf) - 0 + 0.0 >>> LA.norm(b, -np.inf) - 2 + 2.0 >>> LA.norm(a, 1) - 20 + 20.0 >>> LA.norm(b, 1) - 7 + 7.0 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) - 6 + 6.0 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) @@ -2099,7 +2099,7 @@ def norm(x, ord=None, axis=None, keepdims=False): >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) - array([6, 6]) + array([ 6., 6.]) Using the `axis` argument to compute matrix norms: From 091db7d35249935913c84bfff1bd78da3cb4f556 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Thu, 21 Jan 2016 03:30:28 +0000 Subject: [PATCH 431/496] TST: Added broadcasting tests in test_random.py Added a whole new suite of tests to ensure that functions in mtrand.pyx which are broadcastable actually broadcast their arguments properly. 
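Concretely, the contract the new TestBroadcast suite pins down looks like
this (a minimal sketch; the drawn values depend on the global seed and
are omitted here):

    import numpy as np

    np.random.seed(123456789)

    # A length-3 loc against a scalar scale broadcasts to three draws.
    assert np.random.normal(loc=[0, 0, 0], scale=1).shape == (3,)

    # Bad parameter values are rejected after broadcasting ...
    try:
        np.random.normal(loc=[0, 0, 0], scale=[-1])
    except ValueError:
        pass

    # ... and so is a size that cannot hold the broadcast result.
    try:
        np.random.beta([1, 2], [3, 4], size=(5,))
    except ValueError:
        pass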
--- numpy/random/mtrand/mtrand.pyx | 15 +- numpy/random/tests/test_random.py | 716 ++++++++++++++++++++++++++---- 2 files changed, 644 insertions(+), 87 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 2f315c5d37fd..07b7c622b790 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -2296,9 +2296,9 @@ cdef class RandomState: fdfden = PyFloat_AsDouble(dfden) if not PyErr_Occurred(): if fdfnum <= 0: - raise ValueError("shape <= 0") + raise ValueError("dfnum <= 0") if fdfden <= 0: - raise ValueError("scale <= 0") + raise ValueError("dfden <= 0") return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden, self.lock) @@ -3426,7 +3426,8 @@ cdef class RandomState: Returns ------- samples : ndarray or scalar - where the values are all integers in [0, n]. + Samples from logistic distribution, shaped according to + `size`. Otherwise, a single value is returned. See Also -------- @@ -4284,7 +4285,7 @@ cdef class RandomState: ------- out : ndarray Samples from the geometric distribution, shaped according to - `size`. + `size`. Otherwise, a single value is returned. Examples -------- @@ -4350,7 +4351,8 @@ cdef class RandomState: Returns ------- samples : ndarray or scalar - The values are all integers in [0, n]. + Samples from the hypergeometric distribution, shaped + according to `size`. Otherwise, a single value is returned. See Also -------- @@ -4466,7 +4468,8 @@ cdef class RandomState: Returns ------- samples : ndarray or scalar - where the values are all integers in [0, n]. + Samples from the logseries distribution, shaped according to + `size`. Otherwise, a single value is returned. See Also -------- diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 96aa3790f4d4..7ec71e2e5342 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -3,14 +3,13 @@ import numpy as np from numpy.testing import ( TestCase, run_module_suite, assert_, assert_raises, assert_equal, - assert_warns) + assert_warns, assert_array_equal, assert_array_almost_equal) from numpy import random from numpy.compat import asbytes import sys import warnings - class TestSeed(TestCase): def test_scalar(self): s = np.random.RandomState(0) @@ -50,7 +49,7 @@ def test_n_zero(self): zeros = np.zeros(2, dtype='int') for p in [0, .5, 1]: assert_(random.binomial(0, p) == 0) - np.testing.assert_array_equal(random.binomial(zeros, p), zeros) + assert_array_equal(random.binomial(zeros, p), zeros) def test_p_is_nan(self): # Issue #4571. 
@@ -148,10 +147,10 @@ def test_bounds_checking(self): for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1 , ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd , ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd , lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1 , 0, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): for dt in self.itype: @@ -223,7 +222,7 @@ def test_rand(self): desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): np.random.seed(self.seed) @@ -231,7 +230,7 @@ def test_randn(self): desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): np.random.seed(self.seed) @@ -239,7 +238,7 @@ def test_randint(self): desired = np.array([[31, 3], [-52, 41], [-48, -66]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_random_integers(self): np.random.seed(self.seed) @@ -247,7 +246,7 @@ def test_random_integers(self): desired = np.array([[31, 3], [-52, 41], [-48, -66]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_random_integers_max_int(self): # Tests whether random_integers can generate the @@ -258,7 +257,7 @@ def test_random_integers_max_int(self): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) desired = np.iinfo('l').max - np.testing.assert_equal(actual, desired) + assert_equal(actual, desired) def test_random_integers_deprecated(self): with warnings.catch_warnings(): @@ -280,38 +279,38 @@ def test_random_sample(self): desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): np.random.seed(self.seed) actual = np.random.choice(4, 4) desired = np.array([2, 3, 2, 3]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): np.random.seed(self.seed) actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): np.random.seed(self.seed) actual = np.random.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): np.random.seed(self.seed) actual = np.random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) - np.testing.assert_array_equal(actual, desired) + 
assert_array_equal(actual, desired) def test_choice_noninteger(self): np.random.seed(self.seed) actual = np.random.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_choice_exceptions(self): sample = np.random.choice @@ -320,13 +319,13 @@ def test_choice_exceptions(self): assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) assert_raises(ValueError, sample, [], 3) assert_raises(ValueError, sample, [1, 2, 3, 4], 3, - p=[[0.25, 0.25], [0.25, 0.25]]) + p=[[0.25, 0.25], [0.25, 0.25]]) assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) - assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, - p=[1, 0, 0]) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) def test_choice_return_shape(self): p = [0.1, 0.9] @@ -368,7 +367,7 @@ def test_bytes(self): np.random.seed(self.seed) actual = np.random.bytes(10) desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5') - np.testing.assert_equal(actual, desired) + assert_equal(actual, desired) def test_shuffle(self): # Test lists, arrays (of various dtypes), and multidimensional versions @@ -391,11 +390,11 @@ def test_shuffle(self): np.random.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_shuffle_masked(self): # gh-3263 - a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1) + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) a_orig = a.copy() b_orig = b.copy() @@ -414,15 +413,15 @@ def test_beta(self): [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], [1.58405155108498093e-04, 1.26252891949397652e-04]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): np.random.seed(self.seed) actual = np.random.binomial(100.123, .456, size=(3, 2)) desired = np.array([[37, 43], - [42, 48], - [46, 45]]) - np.testing.assert_array_equal(actual, desired) + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) def test_chisquare(self): np.random.seed(self.seed) @@ -430,7 +429,7 @@ def test_chisquare(self): desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) + assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): np.random.seed(self.seed) @@ -442,7 +441,7 @@ def test_dirichlet(self): [0.58964023305154301, 0.41035976694845688]], [[0.59266909280647828, 0.40733090719352177], [0.56974431743975207, 0.43025568256024799]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_dirichlet_size(self): # gh-3173 @@ -462,7 +461,7 @@ def test_exponential(self): desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, 
desired, decimal=15) def test_f(self): np.random.seed(self.seed) @@ -470,7 +469,7 @@ def test_f(self): desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): np.random.seed(self.seed) @@ -478,7 +477,7 @@ def test_gamma(self): desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_geometric(self): np.random.seed(self.seed) @@ -486,7 +485,7 @@ def test_geometric(self): desired = np.array([[8, 7], [17, 17], [5, 12]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_gumbel(self): np.random.seed(self.seed) @@ -494,7 +493,7 @@ def test_gumbel(self): desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_hypergeometric(self): np.random.seed(self.seed) @@ -502,25 +501,25 @@ def test_hypergeometric(self): desired = np.array([[10, 10], [10, 10], [9, 9]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) # Test nbad = 0 actual = np.random.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) actual = np.random.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) # Test ngood = 0 actual = np.random.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) actual = np.random.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_laplace(self): np.random.seed(self.seed) @@ -528,7 +527,7 @@ def test_laplace(self): desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_logistic(self): np.random.seed(self.seed) @@ -536,7 +535,7 @@ def test_logistic(self): desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): np.random.seed(self.seed) @@ -544,7 +543,7 @@ def test_lognormal(self): desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=13) + assert_array_almost_equal(actual, desired, decimal=13) def test_logseries(self): np.random.seed(self.seed) @@ -552,7 +551,7 @@ def 
test_logseries(self): desired = np.array([[2, 2], [6, 17], [3, 6]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_multinomial(self): np.random.seed(self.seed) @@ -563,7 +562,7 @@ def test_multinomial(self): [2, 1, 4, 3, 6, 4]], [[4, 4, 2, 5, 2, 3], [4, 3, 4, 2, 3, 4]]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_multivariate_normal(self): np.random.seed(self.seed) @@ -578,12 +577,12 @@ def test_multivariate_normal(self): [-1.77505606019580053, 10.]], [[-0.54970369430044119, 10.], [0.29768848031692957, 10.]]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning actual = np.random.multivariate_normal(mean, cov) desired = np.array([-0.79441224511977482, 10.]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) # Check that non positive-semidefinite covariance raises warning mean = [0, 0] @@ -596,7 +595,7 @@ def test_negative_binomial(self): desired = np.array([[848, 841], [892, 611], [779, 647]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) def test_noncentral_chisquare(self): np.random.seed(self.seed) @@ -604,20 +603,20 @@ def test_noncentral_chisquare(self): desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) - desired = np.array([[ 1.47145377828516666, 0.15052899268012659], - [ 0.00943803056963588, 1.02647251615666169], - [ 0.332334982684171 , 0.15451287602753125]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) np.random.seed(self.seed) actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): np.random.seed(self.seed) @@ -626,7 +625,7 @@ def test_noncentral_f(self): desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], [0.43741599463544162, 1.1774208752428319]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): np.random.seed(self.seed) @@ -634,7 +633,7 @@ def test_normal(self): desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_pareto(self): np.random.seed(self.seed) @@ -655,9 +654,9 @@ def test_poisson(self): np.random.seed(self.seed) actual = np.random.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], - [1, 0], - [0, 0]]) 
- np.testing.assert_array_equal(actual, desired) + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) def test_poisson_exceptions(self): lambig = np.iinfo('l').max @@ -673,7 +672,7 @@ def test_power(self): desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): np.random.seed(self.seed) @@ -681,7 +680,7 @@ def test_rayleigh(self): desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_standard_cauchy(self): np.random.seed(self.seed) @@ -689,7 +688,7 @@ def test_standard_cauchy(self): desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): np.random.seed(self.seed) @@ -697,7 +696,7 @@ def test_standard_exponential(self): desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): np.random.seed(self.seed) @@ -705,7 +704,7 @@ def test_standard_gamma(self): desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_standard_normal(self): np.random.seed(self.seed) @@ -713,7 +712,7 @@ def test_standard_normal(self): desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): np.random.seed(self.seed) @@ -721,7 +720,7 @@ def test_standard_t(self): desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): np.random.seed(self.seed) @@ -730,7 +729,7 @@ def test_triangular(self): desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], [11.20400690911820263, 14.4978144835829923]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): np.random.seed(self.seed) @@ -738,16 +737,16 @@ def test_uniform(self): desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, 
decimal=15) def test_uniform_range_bounds(self): fmin = np.finfo('float').min fmax = np.finfo('float').max func = np.random.uniform - np.testing.assert_raises(OverflowError, func, -np.inf, 0) - np.testing.assert_raises(OverflowError, func, 0, np.inf) - np.testing.assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) # (fmax / 1e17) - fmin is within range, so this should not throw np.random.uniform(low=fmin, high=fmax / 1e17) @@ -758,7 +757,7 @@ def test_vonmises(self): desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_vonmises_small(self): # check infinite loop, gh-4720 @@ -772,7 +771,7 @@ def test_wald(self): desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=14) + assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): np.random.seed(self.seed) @@ -780,7 +779,7 @@ def test_weibull(self): desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) - np.testing.assert_array_almost_equal(actual, desired, decimal=15) + assert_array_almost_equal(actual, desired, decimal=15) def test_zipf(self): np.random.seed(self.seed) @@ -788,10 +787,566 @@ def test_zipf(self): desired = np.array([[66, 29], [1, 1], [3, 13]]) - np.testing.assert_array_equal(actual, desired) + assert_array_equal(actual, desired) + + +class TestBroadcast(TestCase): + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setUp(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + 
assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + 
assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, 
desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom 
= np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState().poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) -class TestThread(object): + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, 
hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +class TestThread(TestCase): # make sure each state produces the same sequence even in threads def setUp(self): self.seeds = range(4) @@ -813,10 +1368,10 @@ def check_function(self, function, sz): function(np.random.RandomState(s), o) # these platforms change x87 fpu precision mode in threads - if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"): - np.testing.assert_array_almost_equal(out1, out2) + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) else: - np.testing.assert_array_equal(out1, out2) + assert_array_equal(out1, out2) def test_normal(self): def gen_random(state, out): @@ -831,8 +1386,7 @@ def gen_random(state, out): def test_multinomial(self): def gen_random(state, out): out[...] = state.multinomial(10, [1/6.]*6, size=10000) - self.check_function(gen_random, sz=(10000,6)) - + self.check_function(gen_random, sz=(10000, 6)) if __name__ == "__main__": run_module_suite() From 5d1d6142ba2b6c9640c4a5b71a7ee7392d5ce176 Mon Sep 17 00:00:00 2001 From: Ka Wo Chen Date: Sat, 23 Jan 2016 08:40:35 -0500 Subject: [PATCH 432/496] MAINT: Removed conditionals that are always true in datetime_strings.c --- numpy/core/src/multiarray/datetime_strings.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c index 09ddc46d4120..4114acae24e0 100644 --- a/numpy/core/src/multiarray/datetime_strings.c +++ b/numpy/core/src/multiarray/datetime_strings.c @@ -484,7 +484,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) { out->hour = 10 * (substr[0] - '0') + (substr[1] - '0'); - if (out->hour < 0 || out->hour >= 24) { + if (out->hour >= 24) { PyErr_Format(PyExc_ValueError, "Hours out of range in datetime string \"%s\"", str); goto error; @@ -515,7 +515,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) { out->min = 10 * (substr[0] - '0') + (substr[1] - '0'); - if (out->hour < 0 || out->min >= 60) { + if (out->min >= 60) { PyErr_Format(PyExc_ValueError, "Minutes out of range in datetime string \"%s\"", str); goto error; @@ -546,7 +546,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len, if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) { out->sec = 10 * (substr[0] - '0') + (substr[1] - '0'); - if (out->sec < 0 || out->sec >= 60) { + if (out->sec >= 60) { PyErr_Format(PyExc_ValueError, "Seconds out of range in datetime string \"%s\"", str); goto error; From c0980ff9d32e690b13b8d3c6b0a797771ee33b57 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Thu, 21 Jan 2016 14:38:34 +0000 Subject: [PATCH 433/496] DOC: Clarified output size for broadcastable mtrand.pyx functions Clarified the output size depending on whether scalar or non-scalar inputs are passed to functions in mtrand.pyx that can broadcast their arguments. 
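The size/broadcasting convention documented by this patch is easiest to see with a small,
runnable sketch (a hedged illustration, not part of the patch itself; it assumes a NumPy
build that already has the parameter broadcasting these docstrings describe, and only the
output shapes are asserted, never the drawn values):

    import numpy as np

    np.random.seed(123456789)

    # Scalar parameters with size=None: a single scalar comes back.
    s = np.random.normal(loc=0.0, scale=1.0)
    assert np.isscalar(s)

    # Array-like parameters broadcast against each other; with size=None,
    # np.broadcast(loc, scale).size samples are drawn.
    v = np.random.normal(loc=[0.0, 5.0, 10.0], scale=1.0)
    assert v.shape == (3,)

    # An explicit size must be compatible with the broadcast parameter shape.
    m = np.random.normal(loc=[0.0, 5.0, 10.0], scale=1.0, size=(2, 3))
    assert m.shape == (2, 3)

    # Invalid parameter values raise ValueError, checked over the broadcast.
    try:
        np.random.normal(loc=[0.0, 5.0, 10.0], scale=-1.0)
    except ValueError:
        pass
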
--- numpy/random/mtrand/mtrand.pyx | 425 +++++++++++++++++++-------------- 1 file changed, 243 insertions(+), 182 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 07b7c622b790..b4335d72da8b 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1460,21 +1460,22 @@ cdef class RandomState: Parameters ---------- - low : float, optional + low : float or array_like of floats, optional Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. - high : float + high : float or array_like of floats Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``low`` and ``high`` are both scalars. + Otherwise, ``np.broadcast(low, high).size`` samples are drawn. Returns ------- - out : ndarray - Drawn samples, with shape `size`. + out : ndarray or scalar + Drawn samples from the parameterized uniform distribution. See Also -------- @@ -1791,14 +1792,20 @@ cdef class RandomState: Parameters ---------- - loc : float + loc : float or array_like of floats Mean ("centre") of the distribution. - scale : float + scale : float or array_like of floats Standard deviation (spread or "width") of the distribution. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``loc`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn. + + Returns + ------- + out : ndarray or scalar + Drawn samples from the parameterized normal distribution. See Also -------- @@ -1898,20 +1905,20 @@ cdef class RandomState: Parameters ---------- - a : float + a : float or array_like of floats Alpha, non-negative. - b : float + b : float or array_like of floats Beta, non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``a`` and ``b`` are both scalars. + Otherwise, ``np.broadcast(a, b).size`` samples are drawn. Returns ------- - out : ndarray - Array of the given shape, containing values drawn from a - Beta distribution. + out : ndarray or scalar + Drawn samples from the parameterized beta distribution. """ cdef ndarray oa, ob @@ -1960,12 +1967,18 @@ cdef class RandomState: Parameters ---------- - scale : float + scale : float or array_like of floats The scale parameter, :math:`\\beta = 1/\\lambda`. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``scale`` is a scalar. Otherwise, + ``np.array(scale).size`` samples are drawn. 
+ + Returns + ------- + out : ndarray or scalar + Drawn samples from the parameterized exponential distribution. References ---------- @@ -2038,17 +2051,18 @@ cdef class RandomState: Parameters ---------- - shape : float + shape : float or array_like of floats Parameter, should be > 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``shape`` is a scalar. Otherwise, + ``np.array(shape).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - The drawn samples. + out : ndarray or scalar + Drawn samples from the parameterized standard gamma distribution. See Also -------- @@ -2125,19 +2139,21 @@ cdef class RandomState: Parameters ---------- - shape : scalar > 0 - The shape of the gamma distribution. - scale : scalar > 0, optional - The scale of the gamma distribution. Default is equal to 1. + shape : float or array_like of floats + The shape of the gamma distribution. Should be greater than zero. + scale : float or array_like of floats, optional + The scale of the gamma distribution. Should be greater than zero. + Default is equal to 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``shape`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn. Returns ------- - out : ndarray, float - Returns one sample unless `size` parameter is specified. + out : ndarray or scalar + Drawn samples from the parameterized gamma distribution. See Also -------- @@ -2225,19 +2241,20 @@ cdef class RandomState: Parameters ---------- - dfnum : float + dfnum : int or array_like of ints Degrees of freedom in numerator. Should be greater than zero. - dfden : float + dfden : int or array_like of ints Degrees of freedom in denominator. Should be greater than zero. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``dfnum`` and ``dfden`` are both scalars. + Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - Samples from the Fisher distribution. + out : ndarray or scalar + Drawn samples from the parameterized Fisher distribution. See Also -------- @@ -2326,21 +2343,23 @@ cdef class RandomState: Parameters ---------- - dfnum : int + dfnum : int or array_like of ints Parameter, should be > 1. - dfden : int + dfden : int or array_like of ints Parameter, should be > 1. - nonc : float + nonc : float or array_like of floats Parameter, should be >= 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``dfnum``, ``dfden``, and ``nonc`` + are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size`` + samples are drawn. 
Returns ------- - samples : scalar or ndarray - Drawn samples. + out : ndarray or scalar + Drawn samples from the parameterized noncentral Fisher distribution. Notes ----- @@ -2422,18 +2441,18 @@ cdef class RandomState: Parameters ---------- - df : int + df : int or array_like of ints Number of degrees of freedom. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``df`` is a scalar. Otherwise, + ``np.array(df).size`` samples are drawn. Returns ------- - output : ndarray - Samples drawn from the distribution, packed in a `size`-shaped - array. + out : ndarray or scalar + Drawn samples from the parameterized chi-square distribution. Raises ------ @@ -2501,15 +2520,21 @@ cdef class RandomState: Parameters ---------- - df : int + df : int or array_like of ints Degrees of freedom, should be > 0 as of Numpy 1.10, should be > 1 for earlier versions. - nonc : float + nonc : float or array_like of floats Non-centrality, should be non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``df`` and ``nonc`` are both scalars. + Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn. + + Returns + ------- + out : ndarray or scalar + Drawn samples from the parameterized noncentral chi-square distribution. Notes ----- @@ -2664,17 +2689,18 @@ cdef class RandomState: Parameters ---------- - df : int + df : int or array_like of ints Degrees of freedom, should be > 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``df`` is a scalar. Otherwise, + ``np.array(df).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - Drawn samples. + out : ndarray or scalar + Drawn samples from the parameterized standard Student's t distribution. Notes ----- @@ -2772,19 +2798,20 @@ cdef class RandomState: Parameters ---------- - mu : float + mu : float or array_like of floats Mode ("center") of the distribution. - kappa : float + kappa : float or array_like of floats Dispersion of the distribution, has to be >=0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``mu`` and ``kappa`` are both scalars. + Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn. Returns ------- - samples : scalar or ndarray - The returned samples, which are in the interval [-pi, pi]. + out : ndarray or scalar + Drawn samples from the parameterized von Mises distribution. See Also -------- @@ -2880,12 +2907,18 @@ cdef class RandomState: Parameters ---------- - shape : float, > 0. - Shape of the distribution. + a : float or array_like of floats + Shape of the distribution. Should be greater than zero. 
size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``a`` is a scalar. Otherwise, + ``np.array(a).size`` samples are drawn. + + Returns + ------- + out : ndarray or scalar + Drawn samples from the parameterized Pareto distribution. See Also -------- @@ -2976,16 +3009,18 @@ cdef class RandomState: Parameters ---------- - a : float - Shape of the distribution. + a : float or array_like of floats + Shape of the distribution. Should be greater than zero. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``a`` is a scalar. Otherwise, + ``np.array(a).size`` samples are drawn. Returns ------- - samples : ndarray + out : ndarray or scalar + Drawn samples from the parameterized Weibull distribution. See Also -------- @@ -3078,17 +3113,18 @@ cdef class RandomState: Parameters ---------- - a : float - parameter, > 0 + a : float or array_like of floats + Parameter of the distribution. Should be greater than zero. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``a`` is a scalar. Otherwise, + ``np.array(a).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - The returned samples lie in [0, 1]. + out : ndarray or scalar + Drawn samples from the parameterized power distribution. Raises ------ @@ -3192,18 +3228,20 @@ cdef class RandomState: Parameters ---------- - loc : float, optional - The position, :math:`\\mu`, of the distribution peak. - scale : float, optional - :math:`\\lambda`, the exponential decay. + loc : float or array_like of floats, optional + The position, :math:`\\mu`, of the distribution peak. Default is 0. + scale : float or array_like of floats, optional + :math:`\\lambda`, the exponential decay. Default is 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``loc`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn. Returns ------- - samples : ndarray or float + out : ndarray or scalar + Drawn samples from the parameterized Laplace distribution. Notes ----- @@ -3286,18 +3324,20 @@ cdef class RandomState: Parameters ---------- - loc : float - The location of the mode of the distribution. - scale : float - The scale parameter of the distribution. + loc : float or array_like of floats, optional + The location of the mode of the distribution. Default is 0. + scale : float or array_like of floats, optional + The scale parameter of the distribution. Default is 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. 
+ ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``loc`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn. Returns ------- - samples : ndarray or scalar + out : ndarray or scalar + Drawn samples from the parameterized Gumbel distribution. See Also -------- @@ -3414,20 +3454,21 @@ cdef class RandomState: Parameters ---------- - loc : float - - scale : float > 0. - + loc : float or array_like of floats, optional + Parameter of the distribution. Default is 0. + scale : float or array_like of floats, optional + Parameter of the distribution. Should be greater than zero. + Default is 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``loc`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - Samples from logistic distribution, shaped according to - `size`. Otherwise, a single value is returned. + out : ndarray or scalar + Drawn samples from the parameterized logistic distribution. See Also -------- @@ -3508,20 +3549,21 @@ cdef class RandomState: Parameters ---------- - mean : float - Mean value of the underlying normal distribution - sigma : float, > 0. - Standard deviation of the underlying normal distribution + mean : float or array_like of floats, optional + Mean value of the underlying normal distribution. Default is 0. + sigma : float or array_like of floats, optional + Standard deviation of the underlying normal distribution. Should + be greater than zero. Default is 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``mean`` and ``sigma`` are both scalars. + Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn. Returns ------- - samples : ndarray or float - The desired samples. An array of the same shape as `size` if given, - if `size` is None a float is returned. + out : ndarray or scalar + Drawn samples from the parameterized log-normal distribution. See Also -------- @@ -3631,12 +3673,18 @@ cdef class RandomState: Parameters ---------- - scale : scalar - Scale, also equals the mode. Should be >= 0. + scale : float or array_like of floats, optional + Scale, also equals the mode. Should be >= 0. Default is 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``scale`` is a scalar. Otherwise, + ``np.array(scale).size`` samples are drawn. + + Returns + ------- + out : ndarray or scalar + Drawn samples from the parameterized Rayleigh distribution. Notes ----- @@ -3712,19 +3760,20 @@ cdef class RandomState: Parameters ---------- - mean : scalar + mean : float or array_like of floats Distribution mean, should be > 0. - scale : scalar + scale : float or array_like of floats Scale parameter, should be >= 0. size : int or tuple of ints, optional Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``mean`` and ``scale`` are both scalars. + Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - Drawn sample, all greater than zero. + out : ndarray or scalar + Drawn samples from the parameterized Wald distribution. Notes ----- @@ -3784,7 +3833,8 @@ cdef class RandomState: """ triangular(left, mode, right, size=None) - Draw samples from the triangular distribution. + Draw samples from the triangular distribution over the + interval ``[left, right]``. The triangular distribution is a continuous probability distribution with lower limit left, peak at mode, and upper @@ -3793,22 +3843,24 @@ cdef class RandomState: Parameters ---------- - left : scalar + left : float or array_like of floats Lower limit. - mode : scalar + mode : float or array_like of floats The value where the peak of the distribution occurs. The value should fulfill the condition ``left <= mode <= right``. - right : scalar + right : float or array_like of floats Upper limit, should be larger than `left`. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``left``, ``mode``, and ``right`` + are all scalars. Otherwise, ``np.broadcast(left, mode, right).size`` + samples are drawn. Returns ------- - samples : ndarray or scalar - The returned samples all lie in the interval [left, right]. + out : ndarray or scalar + Drawn samples from the parameterized triangular distribution. Notes ----- @@ -3884,19 +3936,22 @@ cdef class RandomState: Parameters ---------- - n : float (but truncated to an integer) - parameter, >= 0. - p : float - parameter, >= 0 and <=1. + n : int or array_like of ints + Parameter of the distribution, >= 0. Floats are also accepted, + but they will be truncated to integers. + p : float or array_like of floats + Parameter of the distribution, >= 0 and <=1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``n`` and ``p`` are both scalars. + Otherwise, ``np.broadcast(n, p).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - where the values are all integers in [0, n]. + out : ndarray or scalar + Drawn samples from the parameterized binomial distribution, where + each sample is equal to the number of successes over the n trials. See Also -------- @@ -3996,19 +4051,24 @@ cdef class RandomState: Parameters ---------- - n : int - Parameter, > 0. - p : float - Parameter, >= 0 and <=1. + n : int or array_like of ints + Parameter of the distribution, > 0. Floats are also accepted, + but they will be truncated to integers. + p : float or array_like of floats + Parameter of the distribution, >= 0 and <=1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. 
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.

 Returns
 -------
- samples : int or ndarray of ints
- Drawn samples.
+ out : ndarray or scalar
+ Drawn samples from the parameterized negative binomial distribution,
+ where each sample is equal to N, the number of trials it took to
+ achieve n - 1 successes, N - (n - 1) failures, and a success on the
+ (N + n)th trial.

 Notes
 -----
@@ -4091,18 +4151,19 @@

 Parameters
 ----------
- lam : float or sequence of float
+ lam : float or array_like of floats
 Expectation of interval, should be >= 0. A sequence of expectation
 intervals must be broadcastable over the requested size.
 size : int or tuple of ints, optional
 Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``lam`` is a scalar. Otherwise,
+ ``np.array(lam).size`` samples are drawn.

 Returns
 -------
- samples : ndarray or scalar
- The drawn samples, of shape *size*, if it was provided.
+ out : ndarray or scalar
+ Drawn samples from the parameterized Poisson distribution.

 Notes
 -----
@@ -4182,17 +4243,18 @@

 Parameters
 ----------
- a : float > 1
- Distribution parameter.
+ a : float or array_like of floats
+ Distribution parameter. Should be greater than 1.
 size : int or tuple of ints, optional
 Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.

 Returns
 -------
- samples : scalar or ndarray
- The returned samples are greater than or equal to one.
+ out : ndarray or scalar
+ Drawn samples from the parameterized Zipf distribution.

 See Also
 --------
@@ -4274,18 +4336,18 @@

 Parameters
 ----------
- p : float
+ p : float or array_like of floats
 The probability of success of an individual trial.
 size : int or tuple of ints, optional
 Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.

 Returns
 -------
- out : ndarray
- Samples from the geometric distribution, shaped according to
- `size`. Otherwise, a single value is returned.
+ out : ndarray or scalar
+ Drawn samples from the parameterized geometric distribution.

 Examples
 --------
@@ -4336,23 +4398,24 @@

 Parameters
 ----------
- ngood : int or array_like
+ ngood : int or array_like of ints
 Number of ways to make a good selection. Must be nonnegative.
- nbad : int or array_like
+ nbad : int or array_like of ints
 Number of ways to make a bad selection. Must be nonnegative.
- nsample : int or array_like
+ nsample : int or array_like of ints
 Number of items sampled. Must be at least 1 and at most
 ``ngood + nbad``.
 size : int or tuple of ints, optional
 Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn.
Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``ngood``, ``nbad``, and ``nsample`` + are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size`` + samples are drawn. Returns ------- - samples : ndarray or scalar - Samples from the hypergeometric distribution, shaped - according to `size`. Otherwise, a single value is returned. + out : ndarray or scalar + Drawn samples from the parameterized hypergeometric distribution. See Also -------- @@ -4456,20 +4519,18 @@ cdef class RandomState: Parameters ---------- - loc : float - - scale : float > 0. - + p : float or array_like of floats + Shape parameter for the distribution. Must be in the range (0, 1). size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then - ``m * n * k`` samples are drawn. Default is None, in which case a - single value is returned. + ``m * n * k`` samples are drawn. If size is ``None`` (default), + a single value is returned if ``p`` is a scalar. Otherwise, + ``np.array(p).size`` samples are drawn. Returns ------- - samples : ndarray or scalar - Samples from the logseries distribution, shaped according to - `size`. Otherwise, a single value is returned. + out : ndarray or scalar + Drawn samples from the parameterized logarithmic series distribution. See Also -------- From b8d61d4348b6907fed545d020c9f5631dfafc6e0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 20 Jan 2016 11:27:20 -0700 Subject: [PATCH 434/496] DOC: Update the 1.11.0 release notes. [ci skip] --- doc/release/1.11.0-notes.rst | 226 +++++++++++++++++++++-------------- 1 file changed, 136 insertions(+), 90 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index 3ce63b116278..c9287ed3f18b 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -1,14 +1,27 @@ NumPy 1.11.0 Release Notes ************************** -This release supports Python 2.6 - 2.7 and 3.2 - 3.5. +This release supports Python 2.6 - 2.7 and 3.2 - 3.5 and contains a number +of enhancements and improvements. Note also the build system changes listed +below as they may have subtle effects. + +No Windows (TM) binaries are provided for this release due to a broken +toolchain. One of the providers of Python packages for Windows (TM) is your +best bet. Highlights ========== -* The datetime64 type is now timezone naive. See "datetime64 changes" below - for more details. +Details of these improvements can be found below. + +* The datetime64 type is now timezone naive. +* A dtype parameter has been added to ``randint``. +* Improved detection of two arrays possibly sharing memory. +* Automatic bin size estimation for ``np.histogram``. +* Speed optimization of A @ A.T and dot(A, A.T). +* New function ``np.moveaxis`` for reordering array axes. + Build System Changes ==================== @@ -25,9 +38,29 @@ Build System Changes Future Changes ============== -* Relaxed stride checking will become the default in 1.12.0. -* Support for Python 2.6, 3.2, and 3.3 will be dropped in 1.12.0. -* ``MaskedArray`` takes view of data **and** mask when slicing in 1.12.0. +The following changes are scheduled for Numpy 1.12.0. + +* Support for Python 2.6, 3.2, and 3.3 will be dropped. +* Slicing a ``MaskedArray`` will return views of both data **and** mask. + Currently the mask is returned as a copy. +* Relaxed stride checking will become the default. 
See the 1.8.0 release
+ notes for a more extended discussion of what this change implies.
+* The behavior of the datetime64 "not a time" (NaT) value will be changed
+ to match that of floating point "not a number" (NaN) values: all
+ comparisons involving NaT will return False, except for NaT != NaT which
+ will return True.
+
+In a future release the following changes will be made.
+
+* The ``rand`` function exposed in ``numpy.testing`` will be removed. That
+ function is left over from early Numpy and was implemented using the
+ Python random module. The random number generators from ``numpy.random``
+ should be used instead.
+* The ``ndarray.view`` method will only allow c_contiguous arrays to be
+ viewed using a dtype of different size causing the last dimension to
+ change. That differs from the current behavior where arrays that are
+ f_contiguous but not c_contiguous can be viewed as a dtype of
+ different size causing the first dimension to change.


 Compatibility notes
@@ -35,7 +68,6 @@ Compatibility notes

 datetime64 changes
 ~~~~~~~~~~~~~~~~~~
-
 In prior versions of NumPy the experimental datetime64 type always stored
 times in UTC. By default, creating a datetime64 object from a string or
 printing it would convert from or to local time::

@@ -44,7 +76,7 @@ printing it would convert from or to local time::

 >>> np.datetime64('2000-01-01T00:00:00')
 numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00

-A concensus of datetime64 users agreed that this behavior is undesirable
+A consensus of datetime64 users agreed that this behavior is undesirable
 and at odds with how datetime64 is usually used (e.g., by pandas_). For
 most use cases, a timezone naive datetime type is preferred, similar to the
 ``datetime.datetime`` type in the Python standard library. Accordingly,
@@ -64,29 +96,41 @@ naive::

 numpy.datetime64('2000-01-01T08:00:00')

 As a corollary to this change, we no longer prohibit casting between datetimes
-with date units and datetimes with timeunits. With timezone naive datetimes,
+with date units and datetimes with time units. With timezone naive datetimes,
 the rule for casting from dates to times is no longer ambiguous.

 .. _pandas: http://pandas.pydata.org

+``linalg.norm`` return type changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The return type of the ``linalg.norm`` function is now floating point without
+exception. Some of the norm types previously returned integers.
+
 polynomial fit changes
 ~~~~~~~~~~~~~~~~~~~~~~
 The various fit functions in the numpy polynomial package no longer accept
 non-integers for degree specification.

+*np.dot* now raises ``TypeError`` instead of ``ValueError``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This behaviour mimics that of other functions such as ``np.inner``. If the two
+arguments cannot be cast to a common type, it could have raised a ``TypeError``
+or ``ValueError`` depending on their order. Now, ``np.dot`` will always
+raise a ``TypeError``.
+
 DeprecationWarning to error
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Indexing with floats raises IndexError, e.g., a[0, 0.0].
-* Indexing with non-integer array_like raises IndexError,
- e.g., a['1', '2']
-* Indexing with multiple ellipsis raises IndexError,
- e.g., a[..., ...].
-* Indexing with boolean where integer expected raises IndexError,
- e.g., a[False:True:True].
-* Non-integers used as index values raise TypeError,
- e.g., in reshape, take, and specifying reduce axis.
+* Indexing with non-integer array_like raises ``IndexError``,
+ e.g., ``a['1', '2']``
+* Indexing with multiple ellipsis raises ``IndexError``,
+ e.g., ``a[..., ...]``.
+* Indexing with boolean where integer expected raises ``IndexError``,
+ e.g., ``a[False:True:True]``.
+* Non-integers used as index values raise ``TypeError``,
+ e.g., in ``reshape``, ``take``, and specifying reduce axis.


 FutureWarning to changed behavior
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -108,13 +152,6 @@ to preserve struct layout). These were never used for anything, so it's
 unlikely that any third-party code is using them either, but we mention it
 here for completeness.

-*np.dot* now raises ``TypeError`` instead of ``ValueError``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This behaviour mimics that of other functions such as ``np.inner``. If the two
-arguments cannot be cast to a common type, it could have raised a ``TypeError``
-or ``ValueError`` depending on their order. Now, ``np.dot`` will now always
-raise a ``TypeError``.
-
 New Features
 ============
@@ -145,65 +182,64 @@ New Features

 * A ``dtype`` parameter has been added to ``np.random.randint``
 Random ndarrays of the following types can now be generated:

- - np.bool,
- - np.int8, np.uint8,
- - np.int16, np.uint16,
- - np.int32, np.uint32,
- - np.int64, np.uint64,
- - np.int_ (long), np.intp
+ - ``np.bool``,
+ - ``np.int8``, ``np.uint8``,
+ - ``np.int16``, ``np.uint16``,
+ - ``np.int32``, ``np.uint32``,
+ - ``np.int64``, ``np.uint64``,
+ - ``np.int_``, ``np.intp``

 The specification is by precision rather than by C type. Hence, on some
- platforms np.int64 may be a ``long`` instead of ``long long`` even if the
- specified dtype is ``long long`` because the two may have the same
+ platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if
+ the specified dtype is ``long long`` because the two may have the same
 precision. The resulting type depends on which C type numpy uses for the
 given precision. The byteorder specification is also ignored, the
 generated arrays are always in native byte order.

-* ``np.moveaxis`` allows for moving one or more array axes to a new position
- by explicitly providing source and destination axes.
+* A new ``np.moveaxis`` function allows for moving one or more array axes
+ to a new position by explicitly providing source and destination axes.
+ This function should be easier to use than the current ``rollaxis``
+ function as well as providing more functionality.

-* numpy.polynomial fits now support degree selection. The ``deg``
- parameter was extended to allow fitting using only specified terms in the
- polynomial expansion for all polynomial types. The change is backward
- compatible and it is still possible to specify ``deg`` as before, but it
- is now possible pass ``deg`` as a list specifying which terms in the
- series to use in the fit.
+* The ``deg`` parameter of the various ``numpy.polynomial`` fits has been
+ extended to accept a list of the degrees of the terms to be included in
+ the fit, the coefficients of all other terms being constrained to zero.
+ The change is backward compatible, passing a scalar ``deg`` will behave
+ as before.


 Improvements
 ============

-*np.gradient* now supports an ``axis`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The ``axis`` parameter was added to *np.gradient* for consistency.
-It allows to specify over which axes the gradient is calculated.
-
-*np.lexsort* now supports arrays with object data-type
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The function now internally calls the generic ``npy_amergesort``
-when the type does not implement a merge-sort kind of ``argsort``
-method.
-
-*np.ma.core.MaskedArray* now supports an ``order`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When constructing a new ``MaskedArray`` instance, it can be
-configured with an ``order`` argument analogous to the one
-when calling ``np.ndarray``. The addition of this argument
-allows for the proper processing of an ``order`` argument
-in several MaskedArray-related utility functions such as
+``np.gradient`` now supports an ``axis`` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``axis`` parameter was added to ``np.gradient`` for consistency. It
+allows specifying over which axes the gradient is calculated.
+
+``np.lexsort`` now supports arrays with object data-type
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The function now internally calls the generic ``npy_amergesort`` when the
+type does not implement a merge-sort kind of ``argsort`` method.
+
+``np.ma.core.MaskedArray`` now supports an ``order`` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When constructing a new ``MaskedArray`` instance, it can be configured with
+an ``order`` argument analogous to the one when calling ``np.ndarray``. The
+addition of this argument allows for the proper processing of an ``order``
+argument in several MaskedArray-related utility functions such as
 ``np.ma.core.array`` and ``np.ma.core.asarray``.
 
 Memory and speed improvements for masked arrays
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
-``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and avoid
-a big memory peak. Another optimization was done to avoid a memory peak and
-useless computations when printing a masked array.
+``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and
+avoids a big memory peak. Another optimization was done to avoid a memory
+peak and useless computations when printing a masked array.
 
-*ndarray.tofile* now uses fallocate on linux
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``ndarray.tofile`` now uses fallocate on linux
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 The function now uses the fallocate system call to reserve sufficient
-diskspace on filesystems that support it.
+disk space on file systems that support it.
 
 Optimizations for operations of the form ``A.T @ A`` and ``A @ A.T``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -214,8 +250,8 @@ extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``.
 
 **Note:** Requires the transposed and non-transposed matrices to share data.
 
-*np.testing.assert_warns* can now be used as a context manager
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.testing.assert_warns`` can now be used as a context manager
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 This matches the behavior of ``assert_raises``.
 
 Speed improvement for np.random.shuffle
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -225,51 +261,61 @@ Speed improvement for np.random.shuffle
 Changes
 =======
 
-Pyrex support was removed from ``numpy.distutils``. The method
-``build_src.generate_a_pyrex_source`` will remain available; it has been
-monkeypatched by users to support Cython instead of Pyrex.
It's recommended to
-switch to a better supported method of build Cython extensions though.
-*np.broadcast* can now be called with a single argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Pyrex support was removed from ``numpy.distutils``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The method ``build_src.generate_a_pyrex_source`` will remain available; it
+has been monkeypatched by users to support Cython instead of Pyrex. It's
+recommended to switch to a better supported method of building Cython
+extensions, though.
+
+``np.broadcast`` can now be called with a single argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 The resulting object in that case will simply mimic iteration over
 a single array. This change obsoletes distinctions like
 
     if len(x) == 1:
         shape = x[0].shape
     else:
-        shape = np.broadcast(*x).shape
+        shape = np.broadcast(\*x).shape
 
 Instead, ``np.broadcast`` can be used in all cases.
 
-*np.trace* now respects array subclasses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.trace`` now respects array subclasses
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 This behaviour mimics that of other functions such as ``np.diagonal`` and
 ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give
 the same result.
 
-*np.dot* now raises ``TypeError`` instead of ``ValueError``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``np.dot`` now raises ``TypeError`` instead of ``ValueError``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 This behaviour mimics that of other functions such as ``np.inner``. If the two
 arguments cannot be cast to a common type, it could have raised a ``TypeError``
 or ``ValueError`` depending on their order. Now, ``np.dot`` will always
 raise a ``TypeError``.
 
+``linalg.norm`` return type changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``linalg.norm`` function now does all its computations in floating point
+and returns floating results. This change fixes bugs due to integer overflow
+and the failure of abs with signed integers of minimum value, e.g., int8(-128).
+For consistency, floats are used even where an integer might work.
+
 
 Deprecations
 ============
 
 Views of arrays in Fortran order
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The f_contiguous flag was used to signal that views as a dtypes that
-changed the element size would change the first index. This was always a
-bit problematical for arrays that were both f_contiguous and c_contiguous
-because c_contiguous took precedence. Relaxed stride checking results in
+The F_CONTIGUOUS flag was used to signal that views using a dtype that
+changed the element size would change the first index. This was always
+problematical for arrays that were both F_CONTIGUOUS and C_CONTIGUOUS
+because C_CONTIGUOUS took precedence. Relaxed stride checking results in
 more such dual contiguous arrays and breaks some existing code as a result.
 Note that this also affects changing the dtype by assigning to the dtype
 attribute of an array. The aim of this deprecation is to restrict views to
-c_contiguous arrays at some future time. A work around that is backward
-compatible is to use ``a.T.view(...).T`` instead. A parameter will also be
+C_CONTIGUOUS arrays at some future time. A workaround that is backward
+compatible is to use ``a.T.view(...).T`` instead. A parameter may also be
 added to the view method to explicitly ask for Fortran order views, but
 that will not be backward compatible.
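For illustration, a minimal sketch of the deprecated pattern and the
backward compatible workaround described above (the shape and dtypes below
are arbitrary examples, not taken from the patch)::

    import numpy as np

    a = np.zeros((4, 4), order='F')    # F_CONTIGUOUS but not C_CONTIGUOUS

    # Deprecated: viewing with a wider dtype relies on the *first* axis
    # changing, which is the behavior this deprecation targets.
    # v = a.view(np.complex128)

    # Backward compatible workaround: transpose, view, transpose back.
    v = a.T.view(np.complex128).T      # shape (2, 4): first axis halved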
@@ -278,15 +324,15 @@ Invalid arguments for array ordering It is currently possible to pass in arguments for the ``order`` parameter in methods like ``array.flatten`` or ``array.ravel`` that were not one of the following: 'C', 'F', 'A', 'K' (note that -all of these possible values are unicode- and case-insensitive). -Such behaviour will not be allowed in future releases. +all of these possible values are both unicode and case insensitive). +Such behavior will not be allowed in future releases. Random number generator in the ``testing`` namespace ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Python standard library random number generator was previously exposed in the -``testing`` namespace as ``testing.rand``. Using this generator is not -recommended and it will be removed in a future release. Use generators from -``numpy.random`` namespace instead. +The Python standard library random number generator was previously exposed +in the ``testing`` namespace as ``testing.rand``. Using this generator is +not recommended and it will be removed in a future release. Use generators +from ``numpy.random`` namespace instead. Random integer generation on a closed interval ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 61f872265b67b313058a07533eaed88f4170ff2c Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 18 Jan 2016 20:29:34 +0000 Subject: [PATCH 435/496] BUG: One element array inputs get one element arrays returned in np.random Fixes bug in np.random methods that would return scalars when passed one-element array inputs. This is because one-element ndarrays can be cast to integers / floats, which is what functions like PyFloat_AsDouble do before converting to the intended data type. This commit changes the check used to determine whether the inputs are purely scalar by converting all inputs to arrays and checking if the resulting shape is an empty tuple (scalar) or not (array). Closes gh-4263. 
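For illustration, a hedged sketch of the behavior this patch establishes
(the distribution and argument values below are arbitrary examples):

    import numpy as np

    # One-element array arguments now produce one-element array outputs ...
    out = np.random.normal(np.array([0.0]), np.array([1.0]))
    assert out.shape == (1,)

    # ... while plain scalar arguments still take the scalar fast path.
    assert np.isscalar(np.random.normal(0.0, 1.0))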
--- numpy/random/mtrand/mtrand.pyx | 375 +++++++++++++++--------------- numpy/random/tests/test_random.py | 87 ++++++- 2 files changed, 274 insertions(+), 188 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index b4335d72da8b..cf8d28cb0dd7 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1522,20 +1522,20 @@ cdef class RandomState: cdef double flow, fhigh, fscale cdef object temp - flow = PyFloat_AsDouble(low) - fhigh = PyFloat_AsDouble(high) + olow = PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if olow.shape == ohigh.shape == (): + flow = PyFloat_AsDouble(low) + fhigh = PyFloat_AsDouble(high) + fscale = fhigh - flow - fscale = fhigh - flow - if not npy_isfinite(fscale): - raise OverflowError('Range exceeds valid bounds') + if not npy_isfinite(fscale): + raise OverflowError('Range exceeds valid bounds') - if not PyErr_Occurred(): return cont2_array_sc(self.internal_state, rk_uniform, size, flow, fscale, self.lock) - PyErr_Clear() - olow = PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) temp = np.subtract(ohigh, olow) Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting # rules because EnsureArray steals a reference @@ -1866,18 +1866,19 @@ cdef class RandomState: cdef ndarray oloc, oscale cdef double floc, fscale - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oloc.shape == oscale.shape == (): + floc = PyFloat_AsDouble(loc) + fscale = PyFloat_AsDouble(scale) + if fscale <= 0: raise ValueError("scale <= 0") + return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale, self.lock) - PyErr_Clear() - - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0)): raise ValueError("scale <= 0") return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, @@ -1924,9 +1925,13 @@ cdef class RandomState: cdef ndarray oa, ob cdef double fa, fb - fa = PyFloat_AsDouble(a) - fb = PyFloat_AsDouble(b) - if not PyErr_Occurred(): + oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oa.shape == ob.shape == (): + fa = PyFloat_AsDouble(a) + fb = PyFloat_AsDouble(b) + if fa <= 0: raise ValueError("a <= 0") if fb <= 0: @@ -1934,10 +1939,6 @@ cdef class RandomState: return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, self.lock) - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oa, 0)): raise ValueError("a <= 0") if np.any(np.less_equal(ob, 0)): @@ -1993,17 +1994,16 @@ cdef class RandomState: cdef ndarray oscale cdef double fscale - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oscale.shape == (): + fscale = PyFloat_AsDouble(scale) + if fscale <= 0: raise ValueError("scale <= 0") return cont1_array_sc(self.internal_state, rk_exponential, size, fscale, self.lock) - PyErr_Clear() - - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, - NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0.0)): raise 
ValueError("scale <= 0") return cont1_array(self.internal_state, rk_exponential, size, oscale, @@ -2112,16 +2112,16 @@ cdef class RandomState: cdef ndarray oshape cdef double fshape - fshape = PyFloat_AsDouble(shape) - if not PyErr_Occurred(): + oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oshape.shape == (): + fshape = PyFloat_AsDouble(shape) + if fshape <= 0: raise ValueError("shape <= 0") return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape, self.lock) - PyErr_Clear() - oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, - NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oshape, 0.0)): raise ValueError("shape <= 0") return cont1_array(self.internal_state, rk_standard_gamma, size, @@ -2203,9 +2203,13 @@ cdef class RandomState: cdef ndarray oshape, oscale cdef double fshape, fscale - fshape = PyFloat_AsDouble(shape) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oshape.shape == oscale.shape == (): + fshape = PyFloat_AsDouble(shape) + fscale = PyFloat_AsDouble(scale) + if fshape <= 0: raise ValueError("shape <= 0") if fscale <= 0: @@ -2213,9 +2217,6 @@ cdef class RandomState: return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale, self.lock) - PyErr_Clear() - oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oshape, 0.0)): raise ValueError("shape <= 0") if np.any(np.less_equal(oscale, 0.0)): @@ -2309,9 +2310,13 @@ cdef class RandomState: cdef ndarray odfnum, odfden cdef double fdfnum, fdfden - fdfnum = PyFloat_AsDouble(dfnum) - fdfden = PyFloat_AsDouble(dfden) - if not PyErr_Occurred(): + odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if odfnum.shape == odfden.shape == (): + fdfnum = PyFloat_AsDouble(dfnum) + fdfden = PyFloat_AsDouble(dfden) + if fdfnum <= 0: raise ValueError("dfnum <= 0") if fdfden <= 0: @@ -2319,10 +2324,6 @@ cdef class RandomState: return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden, self.lock) - PyErr_Clear() - - odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(odfnum, 0.0)): raise ValueError("dfnum <= 0") if np.any(np.less_equal(odfden, 0.0)): @@ -2400,10 +2401,15 @@ cdef class RandomState: cdef ndarray odfnum, odfden, ononc cdef double fdfnum, fdfden, fnonc - fdfnum = PyFloat_AsDouble(dfnum) - fdfden = PyFloat_AsDouble(dfden) - fnonc = PyFloat_AsDouble(nonc) - if not PyErr_Occurred(): + odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if odfnum.shape == odfden.shape == ononc.shape == (): + fdfnum = PyFloat_AsDouble(dfnum) + fdfden = PyFloat_AsDouble(dfden) + fnonc = PyFloat_AsDouble(nonc) + if fdfnum <= 1: raise ValueError("dfnum <= 1") if fdfden <= 0: @@ -2413,12 +2419,6 @@ cdef class RandomState: return cont3_array_sc(self.internal_state, rk_noncentral_f, size, fdfnum, fdfden, fnonc, self.lock) - PyErr_Clear() - - odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - 
if np.any(np.less_equal(odfnum, 1.0)): raise ValueError("dfnum <= 1") if np.any(np.less_equal(odfden, 0.0)): @@ -2494,16 +2494,16 @@ cdef class RandomState: cdef ndarray odf cdef double fdf - fdf = PyFloat_AsDouble(df) - if not PyErr_Occurred(): + odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if odf.shape == (): + fdf = PyFloat_AsDouble(df) + if fdf <= 0: raise ValueError("df <= 0") return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, self.lock) - PyErr_Clear() - - odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(odf, 0.0)): raise ValueError("df <= 0") return cont1_array(self.internal_state, rk_chisquare, size, odf, @@ -2591,9 +2591,14 @@ cdef class RandomState: """ cdef ndarray odf, ononc cdef double fdf, fnonc - fdf = PyFloat_AsDouble(df) - fnonc = PyFloat_AsDouble(nonc) - if not PyErr_Occurred(): + + odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if odf.shape == ononc.shape == (): + fdf = PyFloat_AsDouble(df) + fnonc = PyFloat_AsDouble(nonc) + if fdf <= 0: raise ValueError("df <= 0") if fnonc < 0: @@ -2601,10 +2606,6 @@ cdef class RandomState: return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, size, fdf, fnonc, self.lock) - PyErr_Clear() - - odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(odf, 0.0)): raise ValueError("df <= 0") if np.any(np.less(ononc, 0.0)): @@ -2767,16 +2768,16 @@ cdef class RandomState: cdef ndarray odf cdef double fdf - fdf = PyFloat_AsDouble(df) - if not PyErr_Occurred(): + odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if odf.shape == (): + fdf = PyFloat_AsDouble(df) + if fdf <= 0: raise ValueError("df <= 0") return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf, self.lock) - PyErr_Clear() - - odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(odf, 0.0)): raise ValueError("df <= 0") return cont1_array(self.internal_state, rk_standard_t, size, odf, @@ -2863,19 +2864,18 @@ cdef class RandomState: cdef ndarray omu, okappa cdef double fmu, fkappa - fmu = PyFloat_AsDouble(mu) - fkappa = PyFloat_AsDouble(kappa) - if not PyErr_Occurred(): + omu = PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if omu.shape == okappa.shape == (): + fmu = PyFloat_AsDouble(mu) + fkappa = PyFloat_AsDouble(kappa) + if fkappa < 0: raise ValueError("kappa < 0") return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa, self.lock) - PyErr_Clear() - - omu = PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, - NPY_ARRAY_ALIGNED) if np.any(np.less(okappa, 0.0)): raise ValueError("kappa < 0") return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, @@ -2977,16 +2977,16 @@ cdef class RandomState: cdef ndarray oa cdef double fa - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): + oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oa.shape == (): + fa = PyFloat_AsDouble(a) + if fa <= 0: raise ValueError("a <= 0") return cont1_array_sc(self.internal_state, rk_pareto, size, fa, self.lock) - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oa, 0.0)): raise ValueError("a <= 0") return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) @@ -3087,16 +3087,16 @@ cdef 
class RandomState: cdef ndarray oa cdef double fa - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): + oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oa.shape == (): + fa = PyFloat_AsDouble(a) + if fa <= 0: raise ValueError("a <= 0") return cont1_array_sc(self.internal_state, rk_weibull, size, fa, self.lock) - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oa, 0.0)): raise ValueError("a <= 0") return cont1_array(self.internal_state, rk_weibull, size, oa, @@ -3200,16 +3200,16 @@ cdef class RandomState: cdef ndarray oa cdef double fa - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): + oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oa.shape == (): + fa = PyFloat_AsDouble(a) + if fa <= 0: raise ValueError("a <= 0") return cont1_array_sc(self.internal_state, rk_power, size, fa, self.lock) - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oa, 0.0)): raise ValueError("a <= 0") return cont1_array(self.internal_state, rk_power, size, oa, self.lock) @@ -3296,17 +3296,18 @@ cdef class RandomState: cdef ndarray oloc, oscale cdef double floc, fscale - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oloc.shape == oscale.shape == (): + floc = PyFloat_AsDouble(loc) + fscale = PyFloat_AsDouble(scale) + if fscale <= 0: raise ValueError("scale <= 0") return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale, self.lock) - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0.0)): raise ValueError("scale <= 0") return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, @@ -3427,17 +3428,18 @@ cdef class RandomState: cdef ndarray oloc, oscale cdef double floc, fscale - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oloc.shape == oscale.shape == (): + floc = PyFloat_AsDouble(loc) + fscale = PyFloat_AsDouble(scale) + if fscale <= 0: raise ValueError("scale <= 0") return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale, self.lock) - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0.0)): raise ValueError("scale <= 0") return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, @@ -3520,17 +3522,18 @@ cdef class RandomState: cdef ndarray oloc, oscale cdef double floc, fscale - floc = PyFloat_AsDouble(loc) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oloc.shape == oscale.shape == (): + floc = PyFloat_AsDouble(loc) + fscale = PyFloat_AsDouble(scale) + if fscale <= 0: raise ValueError("scale <= 0") return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale, self.lock) - PyErr_Clear() - oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0.0)): raise 
ValueError("scale <= 0") return cont2_array(self.internal_state, rk_logistic, size, oloc, @@ -3644,19 +3647,18 @@ cdef class RandomState: cdef ndarray omean, osigma cdef double fmean, fsigma - fmean = PyFloat_AsDouble(mean) - fsigma = PyFloat_AsDouble(sigma) + omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if omean.shape == osigma.shape == (): + fmean = PyFloat_AsDouble(mean) + fsigma = PyFloat_AsDouble(sigma) - if not PyErr_Occurred(): if fsigma <= 0: raise ValueError("sigma <= 0") return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma, self.lock) - PyErr_Clear() - - omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(osigma, 0.0)): raise ValueError("sigma <= 0.0") return cont2_array(self.internal_state, rk_lognormal, size, omean, @@ -3727,17 +3729,16 @@ cdef class RandomState: cdef ndarray oscale cdef double fscale - fscale = PyFloat_AsDouble(scale) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oscale.shape == (): + fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): if fscale <= 0: raise ValueError("scale <= 0") return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale, self.lock) - PyErr_Clear() - - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oscale, 0.0)): raise ValueError("scale <= 0.0") return cont1_array(self.internal_state, rk_rayleigh, size, oscale, @@ -3809,9 +3810,13 @@ cdef class RandomState: cdef ndarray omean, oscale cdef double fmean, fscale - fmean = PyFloat_AsDouble(mean) - fscale = PyFloat_AsDouble(scale) - if not PyErr_Occurred(): + omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if omean.shape == oscale.shape == (): + fmean = PyFloat_AsDouble(mean) + fscale = PyFloat_AsDouble(scale) + if fmean <= 0: raise ValueError("mean <= 0") if fscale <= 0: @@ -3819,9 +3824,6 @@ cdef class RandomState: return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale, self.lock) - PyErr_Clear() - omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(omean,0.0)): raise ValueError("mean <= 0.0") elif np.any(np.less_equal(oscale,0.0)): @@ -3895,10 +3897,15 @@ cdef class RandomState: cdef ndarray oleft, omode, oright cdef double fleft, fmode, fright - fleft = PyFloat_AsDouble(left) - fright = PyFloat_AsDouble(right) - fmode = PyFloat_AsDouble(mode) - if not PyErr_Occurred(): + oleft = PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oleft.shape == omode.shape == oright.shape == (): + fleft = PyFloat_AsDouble(left) + fright = PyFloat_AsDouble(right) + fmode = PyFloat_AsDouble(mode) + if fleft > fmode: raise ValueError("left > mode") if fmode > fright: @@ -3908,11 +3915,6 @@ cdef class RandomState: return cont3_array_sc(self.internal_state, rk_triangular, size, fleft, fmode, fright, self.lock) - PyErr_Clear() - oleft = PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - if np.any(np.greater(oleft, omode)): raise ValueError("left > mode") if 
np.any(np.greater(omode, oright)): @@ -4012,9 +4014,13 @@ cdef class RandomState: cdef long ln cdef double fp - fp = PyFloat_AsDouble(p) - ln = PyInt_AsLong(n) - if not PyErr_Occurred(): + on = PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) + op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if on.shape == op.shape == (): + fp = PyFloat_AsDouble(p) + ln = PyInt_AsLong(n) + if ln < 0: raise ValueError("n < 0") if fp < 0: @@ -4026,10 +4032,6 @@ cdef class RandomState: return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp, self.lock) - PyErr_Clear() - - on = PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less(n, 0)): raise ValueError("n < 0") if np.any(np.less(p, 0)): @@ -4115,9 +4117,13 @@ cdef class RandomState: cdef double fn cdef double fp - fp = PyFloat_AsDouble(p) - fn = PyFloat_AsDouble(n) - if not PyErr_Occurred(): + on = PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if on.shape == op.shape == (): + fp = PyFloat_AsDouble(p) + fn = PyFloat_AsDouble(n) + if fn <= 0: raise ValueError("n <= 0") if fp < 0: @@ -4127,10 +4133,6 @@ cdef class RandomState: return discdd_array_sc(self.internal_state, rk_negative_binomial, size, fn, fp, self.lock) - PyErr_Clear() - - on = PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(n, 0)): raise ValueError("n <= 0") if np.any(np.less(p, 0)): @@ -4208,8 +4210,12 @@ cdef class RandomState: """ cdef ndarray olam cdef double flam - flam = PyFloat_AsDouble(lam) - if not PyErr_Occurred(): + + olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if olam.shape == (): + flam = PyFloat_AsDouble(lam) + if lam < 0: raise ValueError("lam < 0") if lam > self.poisson_lam_max: @@ -4217,9 +4223,6 @@ cdef class RandomState: return discd_array_sc(self.internal_state, rk_poisson, size, flam, self.lock) - PyErr_Clear() - - olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less(olam, 0)): raise ValueError("lam < 0") if np.any(np.greater(olam, self.poisson_lam_max)): @@ -4302,16 +4305,16 @@ cdef class RandomState: cdef ndarray oa cdef double fa - fa = PyFloat_AsDouble(a) - if not PyErr_Occurred(): + oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if oa.shape == (): + fa = PyFloat_AsDouble(a) + if fa <= 1.0: raise ValueError("a <= 1.0") return discd_array_sc(self.internal_state, rk_zipf, size, fa, self.lock) - PyErr_Clear() - - oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(oa, 1.0)): raise ValueError("a <= 1.0") return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) @@ -4365,8 +4368,11 @@ cdef class RandomState: cdef ndarray op cdef double fp - fp = PyFloat_AsDouble(p) - if not PyErr_Occurred(): + op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if op.shape == (): + fp = PyFloat_AsDouble(p) + if fp < 0.0: raise ValueError("p < 0.0") if fp > 1.0: @@ -4374,10 +4380,6 @@ cdef class RandomState: return discd_array_sc(self.internal_state, rk_geometric, size, fp, self.lock) - PyErr_Clear() - - - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less(op, 0.0)): raise ValueError("p < 0.0") if np.any(np.greater(op, 1.0)): @@ -4476,10 +4478,15 @@ cdef class RandomState: cdef ndarray ongood, onbad, onsample cdef long lngood, lnbad, lnsample - lngood = PyInt_AsLong(ngood) - lnbad = PyInt_AsLong(nbad) - lnsample = 
PyInt_AsLong(nsample) - if not PyErr_Occurred(): + ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) + onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) + onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) + + if ongood.shape == onbad.shape == onsample.shape == (): + lngood = PyInt_AsLong(ngood) + lnbad = PyInt_AsLong(nbad) + lnsample = PyInt_AsLong(nsample) + if lngood < 0: raise ValueError("ngood < 0") if lnbad < 0: @@ -4491,12 +4498,6 @@ cdef class RandomState: return discnmN_array_sc(self.internal_state, rk_hypergeometric, size, lngood, lnbad, lnsample, self.lock) - PyErr_Clear() - - ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) - onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) - onsample = PyArray_FROM_OTF(nsample, NPY_LONG, - NPY_ARRAY_ALIGNED) if np.any(np.less(ongood, 0)): raise ValueError("ngood < 0") if np.any(np.less(onbad, 0)): @@ -4585,8 +4586,11 @@ cdef class RandomState: cdef ndarray op cdef double fp - fp = PyFloat_AsDouble(p) - if not PyErr_Occurred(): + op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) + + if op.shape == (): + fp = PyFloat_AsDouble(p) + if fp <= 0.0: raise ValueError("p <= 0.0") if fp >= 1.0: @@ -4594,9 +4598,6 @@ cdef class RandomState: return discd_array_sc(self.internal_state, rk_logseries, size, fp, self.lock) - PyErr_Clear() - - op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) if np.any(np.less_equal(op, 0.0)): raise ValueError("p <= 0.0") if np.any(np.greater_equal(op, 1.0)): diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 7ec71e2e5342..19950936197d 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -1345,7 +1345,6 @@ def test_logseries(self): assert_raises(ValueError, logseries, bad_p_one * 3) assert_raises(ValueError, logseries, bad_p_two * 3) - class TestThread(TestCase): # make sure each state produces the same sequence even in threads def setUp(self): @@ -1388,5 +1387,91 @@ def gen_random(state, out): out[...] 
= state.multinomial(10, [1/6.]*6, size=10000) self.check_function(gen_random, sz=(10000, 6)) +# See Issue #4263 +class TestSingleEltArrayInput(TestCase): + def setUp(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + self.assertEqual(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + self.assertEqual(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + self.assertEqual(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + self.assertEqual(out.shape, self.tgtShape) + +# TODO: Uncomment once randint can broadcast arguments +# def test_randint(self): +# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, +# np.int32, np.uint32, np.int64, np.uint64] +# func = np.random.randint +# high = np.array([1]) +# low = np.array([0]) +# +# for dt in itype: +# out = func(low, high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low[0], high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low, high[0], dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + self.assertEqual(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + self.assertEqual(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + self.assertEqual(out.shape, self.tgtShape) + if __name__ == "__main__": run_module_suite() From 8f890ac6dede6f8da6ea83228f830f50757f7aec Mon Sep 17 00:00:00 2001 From: alex Date: Sun, 24 Jan 2016 13:47:30 -0500 Subject: [PATCH 436/496] DOC: update Python versions requirements in the install docs --- INSTALL.rst.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt index 0b778d9174b0..f4e96fc1f05a 100644 --- a/INSTALL.rst.txt +++ b/INSTALL.rst.txt @@ -14,8 +14,8 @@ Prerequisites Building NumPy requires the following software installed: -1) For Python 2, Python__ 2.6.x or newer. - For Python 3, Python__ 3.2.x or newer. +1) For Python 2, Python__ 2.7.x or newer. + For Python 3, Python__ 3.4.x or newer. 
On Debian and derivative (Ubuntu): python python-dev From 637ad965ae7555157e859a747f276c20c5f91b9a Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sat, 16 Jan 2016 00:40:37 +0000 Subject: [PATCH 437/496] DOC: Clarify behavior in np.random.uniform Although the arguments are specified as 'high' and 'low', it is possible to pass in values for 'low' and 'high' where 'low' >= 'high' and still obtain well-defined behavior. The documentation has been expanded to reflect this fact, with a note to discourage passing in arguments satisfying 'low' > 'high'. --- numpy/random/mtrand/mtrand.pyx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index cf8d28cb0dd7..a419e51a864e 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1496,6 +1496,12 @@ cdef class RandomState: anywhere within the interval ``[a, b)``, and zero elsewhere. + When ``high`` == ``low``, values of ``low`` will be returned. + If ``high`` < ``low``, the results are officially undefined + and may eventually raise an error, i.e. do not rely on this + function to behave when passed arguments satisfying that + inequality condition. + Examples -------- Draw samples from the distribution: From df9df7f7e8340f9389543a4389022c07dbaf2e0d Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Mon, 25 Jan 2016 00:33:14 -0800 Subject: [PATCH 438/496] MAINT: Fix typos in docs --- doc/CAPI.rst.txt | 2 +- doc/DISTUTILS.rst.txt | 8 ++++---- doc/f2py/index.html | 2 +- doc/neps/return-of-revenge-of-matmul-pep.rst | 8 ++++---- doc/source/reference/arrays.ndarray.rst | 2 +- doc/source/reference/c-api.array.rst | 2 +- doc/source/reference/c-api.iterator.rst | 2 +- doc/source/reference/c-api.types-and-structures.rst | 2 +- doc/source/reference/distutils.rst | 2 +- doc/source/reference/swig.interface-file.rst | 2 +- doc/source/user/building.rst | 2 +- doc/source/user/c-info.ufunc-tutorial.rst | 2 +- 12 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/CAPI.rst.txt b/doc/CAPI.rst.txt index 474e37c026b0..7c9f10b5b981 100644 --- a/doc/CAPI.rst.txt +++ b/doc/CAPI.rst.txt @@ -120,7 +120,7 @@ This is a very flexible function. array of the type to be created (so the ``__array_finalize__`` method must handle an array argument. But, it can be anything...) -Note: The returned array object will be unitialized unless the type is +Note: The returned array object will be uninitialized unless the type is ``PyArray_OBJECT`` in which case the memory will be set to ``NULL``. ``PyArray_SimpleNew(nd, dims, typenum)`` is a drop-in replacement for diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt index 01bc9cc43c79..f28a4298a6ba 100644 --- a/doc/DISTUTILS.rst.txt +++ b/doc/DISTUTILS.rst.txt @@ -70,7 +70,7 @@ Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: #setup(**configuration(top_path='').todict()) setup(configuration=configuration) -The arguments of the ``configuration`` function specifiy the name of +The arguments of the ``configuration`` function specify the name of parent SciPy package (``parent_package``) and the directory location of the main ``setup.py`` script (``top_path``). These arguments, along with the name of the current package, should be passed to the @@ -269,10 +269,10 @@ in writing setup scripts: more information on arguments. + ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled succesfully). 
+  available (read: a simple Fortran 77 code compiled successfully).
 
   + ``config.have_f90c()`` --- return True if Fortran 90 compiler is
-    available (read: a simple Fortran 90 code compiled succesfully).
+    available (read: a simple Fortran 90 code compiled successfully).
 
   + ``config.get_version()`` --- return version string of the current package,
     ``None`` if version information could not be detected. This methods
@@ -426,7 +426,7 @@ at some point.
 Extra features in NumPy Distutils
 '''''''''''''''''''''''''''''''''
 
-Specifing config_fc options for libraries in setup.py script
+Specifying config_fc options for libraries in setup.py script
 ------------------------------------------------------------
 
 It is possible to specify config_fc options in setup.py scripts.
diff --git a/doc/f2py/index.html b/doc/f2py/index.html
index e162ed41a262..9f3720e6836f 100644
--- a/doc/f2py/index.html
+++ b/doc/f2py/index.html
@@ -2,7 +2,7 @@
 
 
-
+
 
diff --git a/doc/neps/return-of-revenge-of-matmul-pep.rst b/doc/neps/return-of-revenge-of-matmul-pep.rst
index b19f07d851df..ae75d9d18f9d 100644
--- a/doc/neps/return-of-revenge-of-matmul-pep.rst
+++ b/doc/neps/return-of-revenge-of-matmul-pep.rst
@@ -208,7 +208,7 @@ numeric operators also apply in an elementwise manner to arrays; the
 reverse convention would lead to more special cases.)
 
 So that's why matrix multiplication doesn't and can't just use ``*``.
-Now, in the the rest of this section, we'll explain why it nonetheless
+Now, in the rest of this section, we'll explain why it nonetheless
 meets the high bar for adding a new operator.
 
@@ -451,7 +451,7 @@ appear in many important applications, and that numerical libraries like
 numpy are used by a substantial proportion of Python's user base. But
 numerical libraries aren't just about matrix formulas, and being
 important doesn't necessarily mean taking up a lot of code: if matrix
-formulas only occured in one or two places in the average
+formulas only occurred in one or two places in the average
 numerically-oriented project, then it still wouldn't be worth adding a
 new operator. So how common is matrix multiplication, really?
 
@@ -1107,7 +1107,7 @@ by other means, and that causes painful reverberations through the
 larger ecosystem. Defining a new language (presumably with its own
 parser which would have to be kept in sync with Python's, etc.), just
 to support a single binary operator, is neither practical nor
-desireable. In the numerical context, Python's competition is
+desirable. In the numerical context, Python's competition is
 special-purpose numerical languages (Matlab, R, IDL, etc.). Compared
 to these, Python's killer feature is exactly that one can mix
 specialized numerical code with code for XML parsing, web page
@@ -1195,7 +1195,7 @@ References
    test the null hypothesis that :math:`H\beta = r`; a large :math:`S`
    then indicates that this hypothesis is unlikely to be true.
For example, in an analysis of human height, the vector :math:`\beta` - might contain one value which was the the average height of the + might contain one value which was the average height of the measured men, and another value which was the average height of the measured women, and then setting :math:`H = [1, -1], r = 0` would let us test whether men and women are the same height on diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 0f5fb92d750d..201d01277056 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -467,7 +467,7 @@ Truth value of an array (:func:`bool()`): Truth-value testing of an array invokes :meth:`ndarray.__nonzero__`, which raises an error if the number of - elements in the the array is larger than 1, because the truth value + elements in the array is larger than 1, because the truth value of such arrays is ambiguous. Use :meth:`.any() ` and :meth:`.all() ` instead to be clear about what is meant in such cases. (If the number of elements is 0, the array evaluates diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst index 32dace29f247..aba5f9caa86f 100644 --- a/doc/source/reference/c-api.array.rst +++ b/doc/source/reference/c-api.array.rst @@ -268,7 +268,7 @@ From scratch .. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - Create a new unitialized array of type, *typenum*, whose size in + Create a new uninitialized array of type, *typenum*, whose size in each of *nd* dimensions is given by the integer array, *dims*. This function cannot be used to create a flexible-type array (no itemsize given). diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api.iterator.rst index 16beabcf02f0..b38c21390b44 100644 --- a/doc/source/reference/c-api.iterator.rst +++ b/doc/source/reference/c-api.iterator.rst @@ -487,7 +487,7 @@ Construction and Destruction If the operand is flagged as write-only and a copy is needed, an uninitialized temporary array will be created and then copied to back to ``op[i]`` on destruction, instead of doing - the unecessary copy operation. + the unnecessary copy operation. .. c:var:: NPY_ITER_NBO .. c:var:: NPY_ITER_ALIGNED diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst index d7342bd9e7a8..04c9bee3586e 100644 --- a/doc/source/reference/c-api.types-and-structures.rst +++ b/doc/source/reference/c-api.types-and-structures.rst @@ -219,7 +219,7 @@ PyArrayDescr_Type represents signed integer, a 'u' represents unsigned integer, 'f' represents floating point, 'c' represents complex floating point, 'S' represents 8-bit character string, 'U' represents 32-bit/character - unicode string, and 'V' repesents arbitrary. + unicode string, and 'V' represents arbitrary. .. c:member:: char PyArray_Descr.type diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index 5d11a6d4ce72..7aed4e90d78a 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -305,7 +305,7 @@ for these files are: equivalent to #name=item1, item2, item1, item2, item1, item2, item1, item2# -4. "\*/ "on a line by itself marks the end of the the variable expansion +4. "\*/ "on a line by itself marks the end of the variable expansion naming. The next line is the first line that will be repeated using the named rules. 
diff --git a/doc/source/reference/swig.interface-file.rst b/doc/source/reference/swig.interface-file.rst index e5d369d0e261..1d6fbe04dba7 100644 --- a/doc/source/reference/swig.interface-file.rst +++ b/doc/source/reference/swig.interface-file.rst @@ -905,7 +905,7 @@ Routines * ``PyArrayObject* ary``, a NumPy array. Require the given ``PyArrayObject`` to to be Fortran ordered. If - the the ``PyArrayObject`` is already Fortran ordered, do nothing. + the ``PyArrayObject`` is already Fortran ordered, do nothing. Else, set the Fortran ordering flag and recompute the strides. diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst index 78dbc9fa27ac..5cb143f381d9 100644 --- a/doc/source/user/building.rst +++ b/doc/source/user/building.rst @@ -4,7 +4,7 @@ Building from source ==================== A general overview of building NumPy from source is given here, with detailed -instructions for specific platforms given seperately. +instructions for specific platforms given separately. Prerequisites ------------- diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 109e6adb5cc4..ab97846a85f5 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -1172,7 +1172,7 @@ automatically generates a ufunc from a C function with the correct signature. *unused* - Unused; kept for compatiblity. Just set it to zero. + Unused; kept for compatibility. Just set it to zero. .. index:: pair: ufunc; adding new From d3d2f8e92cd08bb64cc520cf714bc70fb31909ce Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 25 Jan 2016 18:37:23 +0000 Subject: [PATCH 439/496] TST: Fixed f2py test for win32 virtualenv Fixed test_scripts.test_f2py test so that it can pass correctly on win32 virtualenvs, in which the Python executable and the f2py.py file are both in the Scripts directory. --- numpy/tests/test_scripts.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 94587e80795d..2aed75ebaba0 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -62,7 +62,13 @@ def run_command(cmd, check_code=True): def test_f2py(): # test that we can run f2py script if sys.platform == 'win32': - f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable) + exe_dir = dirname(sys.executable) + + if exe_dir.endswith('Scripts'): # virtualenv + f2py_cmd = r"%s\f2py.py" % exe_dir + else: + f2py_cmd = r"%s\Scripts\f2py.py" % exe_dir + code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v']) success = stdout.strip() == asbytes('2') assert_(success, "Warning: f2py not found in path") From 588a1c3e42302418de80eb5f7da4ad375228396e Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 25 Jan 2016 21:07:12 +0000 Subject: [PATCH 440/496] TST: Fixed f2py test for non-versioned python executables The 'sys.executable' can come in various names, but the three main ones are "python", "python{major_version}", and "python{major_version.minor_version}". The current version of the f2py test assumes that only the latter two are used. Since "f2py" is generally versioned, using the executable basename "python" will make it impossible to find. This commit fixes that issue by using a sure-fire way of getting the Python version. 
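For context, a self-contained sketch of the version lookup described above
(this uses ``subprocess`` directly, whereas the test goes through its own
``run_command`` helper):

    import sys
    import subprocess

    # 'python -V' prints its output to stderr on Python 2.x but to
    # stdout on 3.x, so check both streams before parsing.
    proc = subprocess.Popen([sys.executable, '-V'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    version = (stdout or stderr)[7:].strip()   # drop the leading 'Python '
    major, minor, revision = version.decode('utf-8').split('.')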
--- numpy/tests/test_scripts.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 2aed75ebaba0..0fc7f879f191 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -16,6 +16,7 @@ is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) + def run_command(cmd, check_code=True): """ Run command sequence `cmd` returning exit code, stdout, stderr @@ -73,10 +74,18 @@ def test_f2py(): success = stdout.strip() == asbytes('2') assert_(success, "Warning: f2py not found in path") else: - # unclear what f2py cmd was installed as, check plain (f2py) and - # current python version specific one (f2py3.4) - f2py_cmds = ('f2py', 'f2py' + basename(sys.executable)[6:]) + # unclear what f2py cmd was installed as, check plain (f2py), + # with major version (f2py3), or major/minor version (f2py3.4) + code, stdout, stderr = run_command([sys.executable, '-V']) + + # for some reason, 'python -V' returns version in 'stderr' for + # Python 2.x but in 'stdout' for Python 3.x + version = (stdout or stderr)[7:].strip() + major, minor, revision = version.decode('utf-8').split('.') + + f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor) success = False + for f2py_cmd in f2py_cmds: try: code, stdout, stderr = run_command([f2py_cmd, '-v']) @@ -85,5 +94,5 @@ def test_f2py(): break except: pass - msg = "Warning: neither %s nor %s found in path" % f2py_cmds + msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds assert_(success, msg) From bce84bb6fbfc7d26b209fb38247b64f7faeb739f Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 25 Jan 2016 22:05:04 +0000 Subject: [PATCH 441/496] BUG: Fixed mingw.lib error Closes gh-647. --- numpy/distutils/fcompiler/gnu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 37be0800d487..9ba5759df113 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -313,7 +313,7 @@ def get_library_dirs(self): if target: d = os.path.normpath(self.get_libgcc_dir()) root = os.path.join(d, *((os.pardir,)*4)) - path = os.path.join(root, target, "lib") + path = os.path.join(root, "lib") mingwdir = os.path.normpath(path) if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): opt.append(mingwdir) From 3690e967324fe41050755d6b8e9d8bbf83b999d1 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Tue, 26 Jan 2016 22:37:03 -0500 Subject: [PATCH 442/496] DOC: Updated documentation wording and examples for np.percentile. Examples had some factual errors. Wording updated in a couple of places. --- numpy/lib/function_base.py | 79 +++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 844c069c0bfc..098fba4f5250 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3383,14 +3383,14 @@ def percentile(a, q, axis=None, out=None, """ Compute the qth percentile of the data along the specified axis. - Returns the qth percentile of the array elements. + Returns the qth percentile (or percentiles) of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) - Percentile to compute which must be between 0 and 100 inclusive. + Percentile to compute, which must be between 0 and 100 inclusive. 
    axis : int or sequence of int, optional
        Axis along which the percentiles are computed. The default (None)
        is to compute the percentiles along a flattened version of the array.
@@ -3400,43 +3400,42 @@ def percentile(a, q, axis=None, out=None,
         have the same shape and buffer length as the expected output,
         but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-        If True, then allow use of memory of input array `a` for
-        calculations. The input array will be modified by the call to
-        percentile. This will save memory when you do not need to preserve
-        the contents of the input array. In this case you should not make
-        any assumptions about the content of the passed in array `a` after
-        this function completes -- treat it as undefined. Default is False.
-        Note that, if the `a` input is not already an array this parameter
-        will have no effect, `a` will be converted to an array internally
-        regardless of the value of this parameter.
+        If True, then allow use of memory of input array `a` for calculations.
+        The input array will be modified by the call to `percentile`. This will
+        save memory when you do not need to preserve the contents of the input
+        array. In this case you should not make any assumptions about the
+        contents of the input `a` after this function completes -- treat it as
+        undefined. Default is False. If `a` is not already an array, this
+        parameter will have no effect as `a` will be converted to an array
+        internally regardless of the value of this parameter.
     interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
-        This optional parameter specifies the interpolation method to use,
-        when the desired quantile lies between two data points `i` and `j`:
-            * linear: `i + (j - i) * fraction`, where `fraction` is the
-              fractional part of the index surrounded by `i` and `j`.
-            * lower: `i`.
-            * higher: `j`.
-            * nearest: `i` or `j` whichever is nearest.
-            * midpoint: (`i` + `j`) / 2.
+        This optional parameter specifies the interpolation method to use
+        when the desired quantile lies between two data points ``i < j``:
+            * linear: ``i + (j - i) * fraction``, where ``fraction`` is the
+              fractional part of the index surrounded by ``i`` and ``j``.
+            * lower: ``i``.
+            * higher: ``j``.
+            * nearest: ``i`` or ``j``, whichever is nearest.
+            * midpoint: ``(i + j) / 2``.
 
         .. versionadded:: 1.9.0
     keepdims : bool, optional
-        If this is set to True, the axes which are reduced are left
-        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original array `a`.
+        If this is set to True, the axes which are reduced are left in the
+        result as dimensions with size one. With this option, the result will
+        broadcast correctly against the original array `a`.
 
         .. versionadded:: 1.9.0
 
     Returns
     -------
     percentile : scalar or ndarray
-        If a single percentile `q` is given and axis=None a scalar is
-        returned. If multiple percentiles `q` are given an array holding
-        the result is returned. The results are listed in the first axis.
-        (If `out` is specified, in which case that array is returned
-        instead). If the input contains integers, or floats of smaller
-        precision than 64, then the output data-type is float64. Otherwise,
-        the output data-type is the same as that of the input.
+        If `q` is a single percentile and `axis=None`, then the return is a
+        scalar. If multiple percentiles are given, the result is an array.
+        The percentiles are listed in the first axis. The remaining axes are the
+        reduced axes of the input `a`. If the input contains integers or floats
+        of smaller precision than 64, then the output data-type is float64.
+        Otherwise, the output data-type is the same as that of the input. If
+        `out` is specified, that array is returned instead.
 
     See Also
     --------
@@ -3445,11 +3444,11 @@ def percentile(a, q, axis=None, out=None,
     Notes
     -----
     Given a vector V of length N, the q-th percentile of V is the q-th ranked
-    value in a sorted copy of V. The values and distances of the two
-    nearest neighbors as well as the `interpolation` parameter will
-    determine the percentile if the normalized ranking does not match q
-    exactly. This function is the same as the median if ``q=50``, the same
-    as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
+    value in a sorted copy of V. The values and distances of the two nearest
+    neighbors as well as the `interpolation` parameter will determine the
+    percentile if the normalized ranking does not match q exactly. This function
+    is the same as the median if ``q=50``, the same as the minimum if ``q=0``
+    and the same as the maximum if ``q=100``.
 
     Examples
     --------
@@ -3458,28 +3457,30 @@ def percentile(a, q, axis=None, out=None,
     array([[10,  7,  4],
            [ 3,  2,  1]])
     >>> np.percentile(a, 50)
-    array([ 3.5])
+    3.5
     >>> np.percentile(a, 50, axis=0)
     array([[ 6.5,  4.5,  2.5]])
     >>> np.percentile(a, 50, axis=1)
+    array([ 7.,  2.])
+    >>> np.percentile(a, 50, axis=1, keepdims=True)
     array([[ 7.],
            [ 2.]])
 
     >>> m = np.percentile(a, 50, axis=0)
     >>> out = np.zeros_like(m)
-    >>> np.percentile(a, 50, axis=0, out=m)
+    >>> np.percentile(a, 50, axis=0, out=out)
     array([[ 6.5,  4.5,  2.5]])
     >>> m
     array([[ 6.5,  4.5,  2.5]])
 
     >>> b = a.copy()
     >>> np.percentile(b, 50, axis=1, overwrite_input=True)
-    array([[ 7.],
-           [ 2.]])
+    array([ 7.,  2.])
     >>> assert not np.all(a==b)
 
     >>> b = a.copy()
     >>> np.percentile(b, 50, axis=None, overwrite_input=True)
-    array([ 3.5])
+    3.5
+
+    >>> assert not np.all(a==b)
 
     """
     q = array(q, dtype=np.float64, copy=True)

From d7c847b0cb6bdfe46862de9013b3bf7d1e909eb7 Mon Sep 17 00:00:00 2001
From: Joseph Fox-Rabinovitz
Date: Wed, 27 Jan 2016 00:13:00 -0500
Subject: [PATCH 443/496] MAINT: Accepted all review comments for PR#7125

---
 numpy/lib/function_base.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 098fba4f5250..4f1de30ec9db 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3383,7 +3383,7 @@ def percentile(a, q, axis=None, out=None,
     """
     Compute the qth percentile of the data along the specified axis.
 
-    Returns the qth percentile (or percentiles) of the array elements.
+    Returns the qth percentile(s) of the array elements.
 
     Parameters
     ----------
@@ -3429,7 +3429,7 @@ def percentile(a, q, axis=None, out=None,
     Returns
     -------
     percentile : scalar or ndarray
-        If `q` is a single percentile and `axis=None`, then the return is a
+        If `q` is a single percentile and `axis=None`, then the result is a
         scalar. If multiple percentiles are given, the result is an array.
         The percentiles are listed in the first axis. The remaining axes are the
         reduced axes of the input `a`.
If the input contains integers or floats @@ -3476,11 +3476,7 @@ def percentile(a, q, axis=None, out=None, >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) - >>> assert not np.all(a==b) - >>> b = a.copy() - >>> np.percentile(b, 50, axis=None, overwrite_input=True) - 3.5 - >>> assert not np.all(a==b) + >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) From 2a079b2d39ea71938912528cbe9d679649bf77b9 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 27 Jan 2016 21:33:12 +0100 Subject: [PATCH 444/496] TST: test installing from sdist on TravisCI. --- .travis.yml | 2 ++ tools/travis-test.sh | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index c14994d0d81e..ccb182816e99 100644 --- a/.travis.yml +++ b/.travis.yml @@ -62,6 +62,8 @@ matrix: env: USE_WHEEL=1 - python: 3.5 env: USE_WHEEL=1 + - python: 3.5 + env: USE_SDIST=1 - python: 2.7 env: - PYTHONOPTIMIZE=2 diff --git a/tools/travis-test.sh b/tools/travis-test.sh index 3de1ca78de2b..6ed51ee910e9 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -71,7 +71,7 @@ setup_chroot() # linux32 python setup.py build # when travis updates to ubuntu 14.04 # - # Numpy may not distinquish between 64 and 32 bit atlas in the + # Numpy may not distinguish between 64 and 32 bit ATLAS in the # configuration stage. DIR=$1 set -u @@ -149,6 +149,19 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then pip install nose popd run_test +elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then + # use an up-to-date pip / setuptools inside the venv + $PIP install -U virtualenv + $PYTHON setup.py sdist + # Make another virtualenv to install into + virtualenv --python=`which $PYTHON` venv-for-wheel + . venv-for-wheel/bin/activate + # Move out of source directory to avoid finding local numpy + pushd dist + pip install numpy* + pip install nose + popd + run_test elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then DIR=/chroot setup_chroot $DIR From 6770f985b3663338d023dd707d43a6a53f530668 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 27 Jan 2016 21:34:28 +0100 Subject: [PATCH 445/496] BLD/BUG: revert usage of setuptools when building an sdist. This was seriously broken. Setuptools does unwanted 'smart' things and ignores MANIFEST.in. Closes gh-7127. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a7f66a03d851..c68f5a399951 100755 --- a/setup.py +++ b/setup.py @@ -188,7 +188,7 @@ def check_submodules(): raise ValueError('Submodule not clean: %s' % line) -from setuptools.command.sdist import sdist +from distutils.command.sdist import sdist class sdist_checked(sdist): """ check submodules on sdist to prevent incomplete tarballs """ def run(self): From 6ae1ff280b3a6c115fe468c79c5cf4f16f998f89 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 27 Jan 2016 21:59:57 +0100 Subject: [PATCH 446/496] MAINT: warn in egg_info command if using setuptools.sdist. Warn in numpy/distutils/command/egg_info.py if using setuptools.sdist. See gh-7127 for details. 
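As a rough sketch of the intended effect (the real check, added in the diff below, lives inside numpy.distutils' ``egg_info.run``; the helper here is hypothetical)::

    import sys
    import warnings

    def warn_if_sdist(argv):
        # same guard the patch adds: setuptools' sdist path can silently
        # drop files from the tarball, so point users loudly at gh-7127
        if 'sdist' in argv:
            warnings.warn("setuptools sdist may produce incomplete tarballs, "
                          "see numpy issue gh-7127", UserWarning)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        warn_if_sdist(['setup.py', 'sdist'])
    assert len(caught) == 1 and caught[0].category is UserWarning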
--- numpy/distutils/command/egg_info.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py index b7104de5be40..972a27df3846 100644 --- a/numpy/distutils/command/egg_info.py +++ b/numpy/distutils/command/egg_info.py @@ -1,9 +1,17 @@ from __future__ import division, absolute_import, print_function +import sys + from setuptools.command.egg_info import egg_info as _egg_info class egg_info(_egg_info): def run(self): + if 'sdist' in sys.argv: + import warnings + warnings.warn("`build_src` is being run, this may lead to missing " + "files in your sdist! See numpy issue gh-7127 for " + "details", UserWarning) + # We need to ensure that build_src has been executed in order to give # setuptools' egg_info command real filenames instead of functions which # generate files. From 33777c64fefbd0919d3cf8f832c302205b4ede23 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Wed, 27 Jan 2016 22:08:03 -0500 Subject: [PATCH 447/496] ENH: for savez create temporary file alongside the target file Closes: gh-5336 --- numpy/lib/npyio.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 640f4fa32d04..dd15db4b893f 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -627,7 +627,11 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): zipf = zipfile_factory(file, mode="w", compression=compression) # Stage arrays in a temporary file on disk, before writing to zip. - fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') + + # Since target file might be big enough to exceed capacity of a global + # temporary directory, create temp file side-by-side with the target file. + file_path, file_name = os.path.split(file) + fd, tmpfile = tempfile.mkstemp(prefix=file_name, dir=file_path, suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): From 8a22dd42c2644c05500e2cea6974a6988a9dd01d Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Wed, 27 Jan 2016 22:12:51 -0500 Subject: [PATCH 448/496] ENH: catch and rethrow exception in _savez upon IOError with filename info --- numpy/lib/npyio.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index dd15db4b893f..a800537504b5 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -644,6 +644,8 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): fid.close() fid = None zipf.write(tmpfile, arcname=fname) + except IOError as exc: + raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) finally: if fid: fid.close() From 73a2fd11a74eedb97201fc8d54ea193e810ea9d3 Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Wed, 27 Jan 2016 15:42:42 -0800 Subject: [PATCH 449/496] MAINT: Fix some typos in a code string and comments --- numpy/_build_utils/src/apple_sgemv_fix.c | 2 +- numpy/core/include/numpy/ndarraytypes.h | 2 +- numpy/core/info.py | 2 +- numpy/core/src/multiarray/arrayobject.c | 2 +- numpy/core/src/multiarray/ctors.c | 2 +- numpy/core/src/multiarray/datetime_busdaycal.c | 2 +- numpy/core/src/multiarray/datetime_busdaycal.h | 2 +- numpy/core/src/multiarray/dtype_transfer.c | 2 +- numpy/core/src/multiarray/einsum.c.src | 2 +- numpy/core/src/multiarray/item_selection.c | 4 ++-- numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 2 +- numpy/core/src/multiarray/multiarraymodule.c | 2 +- numpy/core/src/multiarray/nditer_api.c | 4 ++-- numpy/core/src/multiarray/nditer_constr.c | 2 +-
numpy/core/src/multiarray/nditer_pywrap.c | 2 +- numpy/core/src/multiarray/nditer_templ.c.src | 2 +- numpy/core/src/multiarray/usertypes.c | 2 +- numpy/core/src/umath/ufunc_object.c | 2 +- numpy/core/tests/test_einsum.py | 2 +- numpy/distutils/exec_command.py | 2 +- numpy/distutils/from_template.py | 2 +- numpy/doc/byteswapping.py | 2 +- numpy/doc/internals.py | 2 +- numpy/f2py/capi_maps.py | 2 +- numpy/f2py/crackfortran.py | 2 +- numpy/f2py/rules.py | 8 ++++---- numpy/lib/function_base.py | 4 ++-- numpy/linalg/lapack_lite/dlapack_lite.c | 6 +++--- numpy/linalg/lapack_lite/zlapack_lite.c | 2 +- numpy/ma/core.py | 2 +- 30 files changed, 38 insertions(+), 38 deletions(-) diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c index ffdfb81f705e..4c9c82ece6d7 100644 --- a/numpy/_build_utils/src/apple_sgemv_fix.c +++ b/numpy/_build_utils/src/apple_sgemv_fix.c @@ -155,7 +155,7 @@ void sgemv_( const char* trans, const int* m, const int* n, * * Because Fortran uses column major order and X.T and Y.T are row vectors, * the leading dimensions of X.T and Y.T in SGEMM become equal to the - * strides of the the column vectors X and Y in SGEMV. */ + * strides of the column vectors X and Y in SGEMV. */ switch (*trans) { case 'T': diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h index f1fe89f1a158..34f7b4e21efa 100644 --- a/numpy/core/include/numpy/ndarraytypes.h +++ b/numpy/core/include/numpy/ndarraytypes.h @@ -781,7 +781,7 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); /* * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions + * flags to the various FromAny functions * * This flag may be requested in constructor functions. 
*/ diff --git a/numpy/core/info.py b/numpy/core/info.py index 241f209b556e..c6f7bbcf2ac0 100644 --- a/numpy/core/info.py +++ b/numpy/core/info.py @@ -4,7 +4,7 @@ - array - NumPy Array construction - zeros - Return an array of all zeros -- empty - Return an unitialized array +- empty - Return an uninitialized array - shape - Return shape of sequence or array - rank - Return number of dimensions - size - Return number of elements in entire array or a diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index fd5b15a0a133..eb952836c918 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -11,7 +11,7 @@ by Travis Oliphant, oliphant@ee.byu.edu - Brigham Young Univeristy + Brigham Young University maintainer email: oliphant.travis@ieee.org diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 2b8c35234304..785b3073a38b 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1049,7 +1049,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd, sd = descr->elsize; } /* - * It is bad to have unitialized OBJECT pointers + * It is bad to have uninitialized OBJECT pointers * which could also be sub-fields of a VOID array */ if (zeroed || PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c index 91ba24c97bde..b0c53b362047 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.c +++ b/numpy/core/src/multiarray/datetime_busdaycal.c @@ -214,7 +214,7 @@ qsort_datetime_compare(const void *elem1, const void *elem2) } /* - * Sorts the the array of dates provided in place and removes + * Sorts the array of dates provided in place and removes * NaT, duplicates and any date which is already excluded on account * of the weekmask. * diff --git a/numpy/core/src/multiarray/datetime_busdaycal.h b/numpy/core/src/multiarray/datetime_busdaycal.h index cd79d0bb5ed4..02903e3d20a0 100644 --- a/numpy/core/src/multiarray/datetime_busdaycal.h +++ b/numpy/core/src/multiarray/datetime_busdaycal.h @@ -37,7 +37,7 @@ NPY_NO_EXPORT int PyArray_WeekMaskConverter(PyObject *weekmask_in, npy_bool *weekmask); /* - * Sorts the the array of dates provided in place and removes + * Sorts the array of dates provided in place and removes * NaT, duplicates and any date which is already excluded on account * of the weekmask. * diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index bfb22ac30f0d..fd371a1f69d0 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -4,7 +4,7 @@ * implemented here. * * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * See LICENSE.txt for the license. diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src index bde543703a34..ee9ee1abde73 100644 --- a/numpy/core/src/multiarray/einsum.c.src +++ b/numpy/core/src/multiarray/einsum.c.src @@ -3,7 +3,7 @@ * which provides an einstein-summation operation. * * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * See LICENSE.txt for the license. 
*/ diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 64fa70b6da07..9789235c2e76 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -842,7 +842,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, /* * For dtype's with objects, copyswapn Py_XINCREF's src * and Py_XDECREF's dst. This would crash if called on - * an unitialized buffer, or leak a reference to each + * an uninitialized buffer, or leak a reference to each * object if initialized. * * So, first do the copy with no refcounting... @@ -1003,7 +1003,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, /* * For dtype's with objects, copyswapn Py_XINCREF's src * and Py_XDECREF's dst. This would crash if called on - * an unitialized valbuffer, or leak a reference to + * an uninitialized valbuffer, or leak a reference to * each object item if initialized. * * So, first do the copy with no refcounting... diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 0fe63c13b367..b8381ab685ec 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -3,7 +3,7 @@ * strided data. * * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * See LICENSE.txt for the license. */ diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index 1df3d653da49..bf25130bbffa 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -3869,7 +3869,7 @@ _PyArray_SigintHandler(int signum) { PyOS_setsig(signum, SIG_IGN); /* - * jump buffer may be unitialized as SIGINT allowing functions are usually + * jump buffer may be uninitialized as SIGINT allowing functions are usually * run in other threads than the master thread that receives the signal */ if (sigint_buf_init > 0) { diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c index c00360bfbf7c..21bbbaad4b9a 100644 --- a/numpy/core/src/multiarray/nditer_api.c +++ b/numpy/core/src/multiarray/nditer_api.c @@ -3,7 +3,7 @@ * This excludes functions specialized using the templating system. * * Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * Copyright (c) 2011 Enthought, Inc * @@ -1847,7 +1847,7 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex) } /* - * This gets called after the the buffers have been exhausted, and + * This gets called after the buffers have been exhausted, and * their data needs to be written back to the arrays. The multi-index * must be positioned for the beginning of the buffer. */ diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index 9c5afedf6f7a..3cbbb2b27605 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -3,7 +3,7 @@ * aspects of NumPy's nditer. 
* * Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * Copyright (c) 2011 Enthought, Inc * diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c index 25e48ba058ce..67f5ab99f4a7 100644 --- a/numpy/core/src/multiarray/nditer_pywrap.c +++ b/numpy/core/src/multiarray/nditer_pywrap.c @@ -2,7 +2,7 @@ * This file implements the CPython wrapper of the new NumPy iterator. * * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * See LICENSE.txt for the license. */ diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src index 8976b132e75d..0f0d59972305 100644 --- a/numpy/core/src/multiarray/nditer_templ.c.src +++ b/numpy/core/src/multiarray/nditer_templ.c.src @@ -3,7 +3,7 @@ * are specialized using the templating system. * * Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com) - * The Univerity of British Columbia + * The University of British Columbia * * See LICENSE.txt for the license. */ diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c index f69abcc6b0e9..c32a710de5f2 100644 --- a/numpy/core/src/multiarray/usertypes.c +++ b/numpy/core/src/multiarray/usertypes.c @@ -11,7 +11,7 @@ by Travis Oliphant, oliphant@ee.byu.edu - Brigham Young Univeristy + Brigham Young University maintainer email: oliphant.travis@ieee.org diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 63ed4f492d2d..9e8c3c9851c6 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -582,7 +582,7 @@ _is_same_name(const char* s1, const char* s2) /* * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, * and core_signature in PyUFuncObject "ufunc". Returns 0 unless an - * error occured. + * error occurred. */ static int _parse_signature(PyUFuncObject *ufunc, const char *signature) diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index 1f863a7db925..77fb75f10b54 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -581,7 +581,7 @@ def test_einsum_fixedstridebug(self): def test_einsum_fixed_collapsingbug(self): # Issue #5147. - # The bug only occured when output argument of einssum was used. + # The bug only occurred when output argument of einssum was used. x = np.random.normal(0, 1, (5, 5, 5, 5)) y1 = np.zeros((5, 5)) np.einsum('aabb->ab', x, out=y1) diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index 9fa09cd51ce0..50f03feeb427 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -21,7 +21,7 @@ Requires: Python 2.x -Succesfully tested on: +Successfully tested on: ======== ============ ================================================= os.name sys.platform comments diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py index d10b50218d2a..e38e4d60893e 100644 --- a/numpy/distutils/from_template.py +++ b/numpy/distutils/from_template.py @@ -11,7 +11,7 @@ All function and subroutine blocks in a source file with names that contain '<..>' will be replicated according to the rules in '<..>'. - The number of comma-separeted words in '<..>' will determine the number of + The number of comma-separated words in '<..>' will determine the number of replicates. 
'<..>' may have two different forms, named and short. For example, diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py index 59c0498789a1..22eb71e6d563 100644 --- a/numpy/doc/byteswapping.py +++ b/numpy/doc/byteswapping.py @@ -108,7 +108,7 @@ >>> fixed_end_dtype_arr[0] 1 -Note the the array has not changed in memory: +Note the array has not changed in memory: >>> fixed_end_dtype_arr.tobytes() == big_end_str True diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py index 6bd6b1ae9474..c25872bc02ea 100644 --- a/numpy/doc/internals.py +++ b/numpy/doc/internals.py @@ -49,7 +49,7 @@ use of the .copy() method if one really wants to make a new and independent copy of the data buffer. -New views into arrays mean the the object reference counts for the data buffer +New views into arrays mean the object reference counts for the data buffer increase. Simply doing away with the original array object will not remove the data buffer if other views of it still exist. diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 5270cabb5be5..441629faa0c9 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -211,7 +211,7 @@ else: errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) - outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') + outmess('Successfully applied user defined changes from .f2py_f2cmap\n') except Exception as msg: errmess( 'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg)) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 9f8c8962a2f3..a51eb5d38a58 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -635,7 +635,7 @@ def crackline(line, reset=0): """ reset=-1 --- initialize reset=0 --- crack the line - reset=1 --- final check if mismatch of blocks occured + reset=1 --- final check if mismatch of blocks occurred Cracked data is saved in grouplist[0]. """ diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 2ea8351a236f..37cc76ec2f3c 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -15,14 +15,14 @@ get_b_from_python if (successful) { - callfortran - if (succesful) { + call_fortran + if (successful) { put_a_to_python - if (succesful) { + if (successful) { put_b_to_python - if (succesful) { + if (successful) { buildvalue = ... diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 844c069c0bfc..a1048002cbb4 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1022,7 +1022,7 @@ def select(condlist, choicelist, default=0): dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices - # as the shape is needed for the result. Doing it seperatly optimizes + # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) @@ -1244,7 +1244,7 @@ def gradient(f, *varargs, **kwargs): # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise - # just set y equal to the the array `f`. + # just set y equal to the array `f`. 
if f.dtype.char in ["M", "m"]: y = f.view('int64') else: diff --git a/numpy/linalg/lapack_lite/dlapack_lite.c b/numpy/linalg/lapack_lite/dlapack_lite.c index 6b65397bd089..9f864d7ceccd 100644 --- a/numpy/linalg/lapack_lite/dlapack_lite.c +++ b/numpy/linalg/lapack_lite/dlapack_lite.c @@ -22678,7 +22678,7 @@ doublereal clanhs_(char *norm, integer *n, complex *a, integer *lda, real * =============== The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against + substitution, with scaling to make the code robust against possible overflow. Each eigenvector is normalized so that the element of largest @@ -61663,7 +61663,7 @@ doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__) =============== The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against + substitution, with scaling to make the code robust against possible overflow. Each eigenvector is normalized so that the element of largest @@ -99417,7 +99417,7 @@ doublereal slapy3_(real *x, real *y, real *z__) =============== The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against + substitution, with scaling to make the code robust against possible overflow. Each eigenvector is normalized so that the element of largest diff --git a/numpy/linalg/lapack_lite/zlapack_lite.c b/numpy/linalg/lapack_lite/zlapack_lite.c index e6b03429b77a..9ce05ec1fd3c 100644 --- a/numpy/linalg/lapack_lite/zlapack_lite.c +++ b/numpy/linalg/lapack_lite/zlapack_lite.c @@ -22760,7 +22760,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda, =============== The algorithm used in this program is basically backward (forward) - substitution, with scaling to make the the code robust against + substitution, with scaling to make the code robust against possible overflow. Each eigenvector is normalized so that the element of largest diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 690655b361b8..2b4ef98e7b9b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5583,7 +5583,7 @@ def max(self, axis=None, out=None, fill_value=None): def ptp(self, axis=None, out=None, fill_value=None): """ - Return (maximum - minimum) along the the given dimension + Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). Parameters From dc61b5eb3e2c1f0ee61a48a3fa9f7a290f5d1c84 Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Thu, 28 Jan 2016 09:46:25 -0500 Subject: [PATCH 450/496] BF: Since savez could be used with BytesIO - check if file is a string --- numpy/lib/npyio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index a800537504b5..ebf43f3e4ac0 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -630,8 +630,8 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): # Since target file might be big enough to exceed capacity of a global # temporary directory, create temp file side-by-side with the target file. 
- file_path, file_name = os.path.split(file) - fd, tmpfile = tempfile.mkstemp(prefix=file_name, dir=file_path, suffix='-numpy.npy') + file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') + fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): From 38fdd765536d868101167474d87e307167e49fb9 Mon Sep 17 00:00:00 2001 From: Graham Markall Date: Fri, 29 Jan 2016 11:53:30 +0000 Subject: [PATCH 451/496] BUG: Unpickled void scalars should be contiguous Void scalars are both C- and Fortran-contiguous, so pickling and unpickling them should result in a new void scalar that also has these flags set. Fixes #7140. --- numpy/core/src/multiarray/scalarapi.c | 2 +- numpy/core/tests/test_records.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c index 71a82d7a0c7f..85824f2ce64f 100644 --- a/numpy/core/src/multiarray/scalarapi.c +++ b/numpy/core/src/multiarray/scalarapi.c @@ -799,7 +799,7 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) Py_INCREF(descr); vobj->obval = NULL; Py_SIZE(vobj) = itemsize; - vobj->flags = NPY_ARRAY_BEHAVED | NPY_ARRAY_OWNDATA; + vobj->flags = NPY_ARRAY_CARRAY | NPY_ARRAY_F_CONTIGUOUS | NPY_ARRAY_OWNDATA; swap = 0; if (PyDataType_HASFIELDS(descr)) { if (base) { diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index 9fbdf51d60aa..e46cf90e1821 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -299,6 +299,13 @@ def test_pickle_2(self): assert_equal(a, pickle.loads(pickle.dumps(a))) assert_equal(a[0], pickle.loads(pickle.dumps(a[0]))) + def test_pickle_3(self): + # Issue #7140 + a = self.data + pa = pickle.loads(pickle.dumps(a[0])) + assert_(pa.flags.c_contiguous) + assert_(pa.flags.f_contiguous) + def test_objview_record(self): # https://github.com/numpy/numpy/issues/2599 dt = np.dtype([('foo', 'i8'), ('bar', 'O')]) From 7c03f21a133aae86f661c1dc57d71fa06eca9979 Mon Sep 17 00:00:00 2001 From: Graham Markall Date: Fri, 29 Jan 2016 15:55:09 +0000 Subject: [PATCH 452/496] TST: Unpickled void scalars must be behaved Unpickled void scalars must be both aligned and writeable. --- numpy/core/tests/test_records.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index e46cf90e1821..2c85546a762f 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -305,6 +305,8 @@ def test_pickle_3(self): pa = pickle.loads(pickle.dumps(a[0])) assert_(pa.flags.c_contiguous) assert_(pa.flags.f_contiguous) + assert_(pa.flags.writeable) + assert_(pa.flags.aligned) def test_objview_record(self): # https://github.com/numpy/numpy/issues/2599 From 99977a530076433b3f8399453228518e526fd67c Mon Sep 17 00:00:00 2001 From: Dongjoon Hyun Date: Fri, 29 Jan 2016 13:22:42 -0800 Subject: [PATCH 453/496] MAINT: Change `call_fortran` into `callfortran` in comments. It was committed mistakenly in #7134. `callfortran` is used a lot really. 
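Looking back at the ``_savez`` staging changes a few patches above (temp file created beside the target, the ``IOError`` re-raise with the temp-file name, and the string check so ``BytesIO`` targets still work), they boil down to roughly the following logic. This is a simplified sketch, not the verbatim numpy code; ``isinstance(file, str)`` stands in here for numpy's internal ``_is_string_like`` helper::

    import os
    import tempfile

    def choose_staging_file(file):
        # String target: stage the .npy data beside it, so a cramped global
        # temp dir cannot break large saves. File-like target (e.g. an
        # io.BytesIO): the default temporary directory is fine.
        if isinstance(file, str):
            file_dir, file_prefix = os.path.split(file)
        else:
            file_dir, file_prefix = None, 'tmp'
        fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir,
                                       suffix='-numpy.npy')
        os.close(fd)
        return tmpfile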
--- numpy/f2py/rules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 37cc76ec2f3c..6a1f5ae6e5ec 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -15,7 +15,7 @@ get_b_from_python if (successful) { - call_fortran + callfortran if (successful) { put_a_to_python From 0e65b7166a6265a2047cb3ca47f487f3de19f0a6 Mon Sep 17 00:00:00 2001 From: Aditya Panchal Date: Fri, 29 Jan 2016 18:44:59 -0600 Subject: [PATCH 454/496] BUG: Fixed regressions in np.piecewise in ref to #5737 and #5729. Added unit tests for these conditions. --- numpy/lib/function_base.py | 12 ++++++++---- numpy/lib/tests/test_function_base.py | 11 +++++++++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a1048002cbb4..6eff945b0f48 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -944,11 +944,15 @@ def piecewise(x, condlist, funclist, *args, **kw): condlist = condlist.T if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) - condlist = np.vstack([condlist, ~totlist]) + try: + condlist = np.vstack([condlist, ~totlist]) + except: + condlist = [asarray(c, dtype=bool) for c in condlist] + totlist = condlist[0] + for k in range(1, n): + totlist |= condlist[k] + condlist.append(~totlist) n += 1 - if (n != n2): - raise ValueError( - "function list and condition list must be the same") y = zeros(x.shape, x.dtype) for k in range(n): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index d6a838f3a9a5..878d00bdf9e9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1862,6 +1862,10 @@ def test_two_conditions(self): x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) assert_array_equal(x, [3, 4]) + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + def test_default(self): # No value specified for x[1], should be 0 x = piecewise([1, 2], [True, False], [2]) @@ -1886,6 +1890,13 @@ def test_0d_comparison(self): x = 3 piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + class TestBincount(TestCase): From a9e8c7f05d63f9aa1fa47d7eff92c4303f41de59 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Fri, 29 Jan 2016 18:59:12 -0800 Subject: [PATCH 455/496] TST: remove duplicate test There are two identical copies of test_dot_override in test_multiarray.py. This seems surplus to requirements. 
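The two call patterns restored by the ``np.piecewise`` fix just above (see gh-5737 and gh-5729) can be exercised directly; a short sketch of the expected behavior on a NumPy that includes that patch::

    import numpy as np

    # a scalar domain with several conditions works again
    assert np.piecewise(3, [True, False, False], [4, 2, 0]) == 4

    # so does a multidimensional input with an extra "otherwise" entry:
    # points matching no condition fall through to the third function
    x = np.array([[-2.5, -1.5, -0.5],
                  [ 0.5,  1.5,  2.5]])
    y = np.piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
    assert (y == [[-1., -1., -1.],
                  [ 3.,  3.,  1.]]).all()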
--- numpy/core/tests/test_multiarray.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index f432aa975885..5aed34bc8c59 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -4615,24 +4615,6 @@ def test_dot_scalar_and_matrix_of_objects(self): assert_equal(np.dot(arr, 3), desired) assert_equal(np.dot(3, arr), desired) - def test_dot_override(self): - class A(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return "A" - - class B(object): - def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): - return NotImplemented - - a = A() - b = B() - c = np.array([[1]]) - - assert_equal(np.dot(a, b), "A") - assert_equal(c.dot(a), "A") - assert_raises(TypeError, np.dot, b, c) - assert_raises(TypeError, c.dot, b) - def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): From bac094caf14e420a801cf952080aa443a3865d97 Mon Sep 17 00:00:00 2001 From: "Nathaniel J. Smith" Date: Fri, 29 Jan 2016 19:00:08 -0800 Subject: [PATCH 456/496] Temporarily disable __numpy_ufunc__ Given that we accidentally released 1.11b2 with `__numpy_ufunc__` still enabled, we should probably just disable it in master for now. When ready to re-enable, grep for NUMPY_UFUNC_DISABLED. Or just revert this commit. --- numpy/core/src/private/ufunc_override.h | 6 ++++++ numpy/core/tests/test_multiarray.py | 20 ++++++++++++++++++++ numpy/core/tests/test_umath.py | 25 +++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h index 4042eae2fde2..59a90c770542 100644 --- a/numpy/core/src/private/ufunc_override.h +++ b/numpy/core/src/private/ufunc_override.h @@ -198,6 +198,12 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* Pos of each override in args */ int with_override_pos[NPY_MAXARGS]; + /* 2016-01-29: Disable for now in master -- can re-enable once details are + * sorted out. All commented bits are tagged NUMPY_UFUNC_DISABLED. -njs + */ + result = NULL; + return 0; + /* * Check inputs */ diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 5aed34bc8c59..3498b8a51079 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -2136,6 +2136,9 @@ def test_dot(self): assert_equal(c, np.dot(a, b)) def test_dot_override(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): return "A" @@ -2543,6 +2546,9 @@ def test_extension_incref_elide_stack(self): assert_array_equal(res, l[4] + l[4]) def test_ufunc_override_rop_precedence(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + # Check that __rmul__ and other right-hand operations have # precedence over __numpy_ufunc__ @@ -2661,6 +2667,9 @@ def __rop__(self, *other): yield check, op_name, False def test_ufunc_override_rop_simple(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + # Check parts of the binary op overriding behavior in an # explicit test case that is easier to understand. 
class SomeClass(object): @@ -2765,6 +2774,9 @@ def __rsub__(self, other): assert_(isinstance(res, SomeClass3)) def test_ufunc_override_normalize_signature(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + # gh-5674 class SomeClass(object): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): @@ -2781,6 +2793,9 @@ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): assert_equal(kw['signature'], 'ii->i') def test_numpy_ufunc_index(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + # Check that index is set appropriately, also if only an output # is passed on (latter is another regression tests for github bug 4753) class CheckIndex(object): @@ -2818,6 +2833,9 @@ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): assert_equal(np.add(a, dummy, out=a), 0) def test_out_override(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return + # regression test for github bug 4753 class OutClass(np.ndarray): def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw): @@ -4875,6 +4893,8 @@ def test_matrix_matrix_values(self): assert_equal(res, tgt12_21) def test_numpy_ufunc_override(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return class A(np.ndarray): def __new__(cls, *args, **kwargs): diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index 2ba988b87880..917e05e6a3c4 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -1215,7 +1215,24 @@ def __array__(self): assert_equal(ncu.maximum(a, B()), 0) assert_equal(ncu.maximum(a, C()), 0) + def test_ufunc_override_disabled(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + # This test should be removed when __numpy_ufunc__ is re-enabled. + + class MyArray(object): + def __numpy_ufunc__(self, *args, **kwargs): + self._numpy_ufunc_called = True + + my_array = MyArray() + real_array = np.ones(10) + assert_raises(TypeError, lambda: real_array + my_array) + assert_raises(TypeError, np.add, real_array, my_array) + assert not hasattr(my_array, "_numpy_ufunc_called") + + def test_ufunc_override(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return class A(object): def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): @@ -1241,6 +1258,8 @@ def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): assert_equal(res1[5], {}) def test_ufunc_override_mro(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return # Some multi arg functions for testing. 
def tres_mul(a, b, c): @@ -1332,6 +1351,8 @@ def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c) def test_ufunc_override_methods(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): @@ -1436,6 +1457,8 @@ def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): assert_equal(res[4], (a, [4, 2], 'b0')) def test_ufunc_override_out(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return class A(object): def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): @@ -1470,6 +1493,8 @@ def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs): assert_equal(res7['out'][1], 'out1') def test_ufunc_override_exception(self): + # 2016-01-29: NUMPY_UFUNC_DISABLED + return class A(object): def __numpy_ufunc__(self, *a, **kwargs): From 94b361b024faada5e74e2a4063d2837a22cd1eeb Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 30 Jan 2016 13:21:39 +0100 Subject: [PATCH 457/496] TST: Add missing suffix to temppath manager Without the suffix, np.save creates a new file and the file does not get cleaned up. --- numpy/lib/tests/test_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 32e0c32ded03..226dc88faa36 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -194,7 +194,7 @@ def roundtrip(self, *args, **kwargs): def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) - with temppath(prefix="numpy_test_big_arrays_") as tmp: + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: np.savez(tmp, a=a) del a npfile = np.load(tmp) From 32ae04a43ccd7a3413deb1711796280f8fa69ba0 Mon Sep 17 00:00:00 2001 From: bertrand Date: Sat, 30 Jan 2016 21:00:55 -0500 Subject: [PATCH 458/496] BUG: mode kwargs passed as unicode to np.pad raises an exception isinstance(mode, str) is False in python2.7 when mode is of unicode type, and mode is then mistakenly assumed to be a callable. See #7112 --- numpy/lib/arraypad.py | 2 +- numpy/lib/tests/test_arraypad.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index dad1f47649d0..c30ef6bf5828 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -1337,7 +1337,7 @@ def pad(array, pad_width, mode, **kwargs): 'reflect_type': 'even', } - if isinstance(mode, str): + if isinstance(mode, np.compat.basestring): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 30ea35d55bc1..f19a0b13abad 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -953,6 +953,17 @@ def test_check_simple(self): assert_array_equal(a, b) +class TestUnicodeInput(TestCase): + def test_unicode_mode(self): + try: + constant_mode = unicode('constant') + except NameError: + constant_mode = 'constant' + a = np.pad([1], 2, mode=constant_mode) + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + class ValueError1(TestCase): def test_check_simple(self): arr = np.arange(30) From 0aa03bef711e57220ad5286f68363e6aca7cdfad Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sat, 30 Jan 2016 15:56:11 -0500 Subject: [PATCH 459/496] Reascertain that linspace respects ndarray subclasses in start, stop. 
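The distinction this fix leans on is that an in-place multiply keeps the left operand's class, while a binary multiply lets a higher-priority subclass decide the result type. A minimal sketch (``Q`` is a hypothetical stand-in for a quantity-like subclass, cf. gh-7142)::

    import numpy as np

    class Q(np.ndarray):
        # high priority so that ndarray-with-Q operations return a Q
        __array_priority__ = 10

    step = np.array(0.5).view(Q)
    y = np.arange(3, dtype=float)

    print(type(y * step))   # <class '__main__.Q'>: the subclass wins
    y *= step
    print(type(y))          # <class 'numpy.ndarray'>: in-place keeps ndarray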
--- numpy/core/function_base.py | 11 ++++++++--- numpy/core/tests/test_function_base.py | 15 ++++++++++++++- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index c82c9bb6b571..21ca1af01a3a 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -96,18 +96,23 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None): y = _nx.arange(0, num, dtype=dt) + delta = stop - start if num > 1: - delta = stop - start step = delta / div if step == 0: # Special handling for denormal numbers, gh-5437 y /= div - y *= delta + y = y * delta else: - y *= step + # One might be tempted to use faster, in-place multiplication here, + # but this prevents step from overriding what class is produced, + # and thus prevents, e.g., use of Quantities; see gh-7142. + y = y * step else: # 0 and 1 item long sequences have an undefined step step = NaN + # Multiply with delta to allow possible override of output class. + y = y * delta y += start diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index 2df7ba3ead4d..6b5430611a5d 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -1,7 +1,7 @@ from __future__ import division, absolute_import, print_function from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange, - isnan) + isnan, ndarray) from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, assert_array_equal @@ -115,6 +115,19 @@ def __rdiv__(self, x): b = PhysicalQuantity(1.0) assert_equal(linspace(a, b), linspace(0.0, 1.0)) + def test_subclass(self): + class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + a = array(0).view(PhysicalQuantity2) + b = array(1).view(PhysicalQuantity2) + ls = linspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0)) + ls = linspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0, 1)) + def test_denormal_numbers(self): # Regression test for gh-5437. Will probably fail when compiled # with ICC, which flushes denormals to zero From 46cb649aecc2d5e1a256f78fb3f7879666a7c879 Mon Sep 17 00:00:00 2001 From: Aditya Panchal Date: Sun, 31 Jan 2016 16:22:02 -0600 Subject: [PATCH 460/496] MAINT: Addressed comments in PR #7145 --- numpy/lib/function_base.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 6eff945b0f48..185e7ddc90d0 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -944,9 +944,10 @@ def piecewise(x, condlist, funclist, *args, **kw): condlist = condlist.T if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) - try: + # Only able to stack vertically if the array is 1d or less + if x.ndim <= 1: condlist = np.vstack([condlist, ~totlist]) - except: + else: condlist = [asarray(c, dtype=bool) for c in condlist] totlist = condlist[0] for k in range(1, n): From 9ec694b69a231a8de43032711c657d253edbed9d Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Wed, 27 Jan 2016 10:21:46 -0500 Subject: [PATCH 461/496] BUG: Fixed 'midpoint' interpolation of np.percentile in odd cases. 'midpoint' must return the same as 'higher' and 'lower' when the two are the same, not 'lower' + 0.5 as it was doing. 
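The fixed invariant is easy to check: when the normalized rank lands exactly on a data point, all three interpolators must agree (a sketch against a NumPy of this vintage; much later releases renamed the ``interpolation`` keyword to ``method``)::

    import numpy as np

    # 0..10: the 50th percentile falls exactly on element 5
    assert np.percentile(range(11), 50, interpolation='lower') == 5
    assert np.percentile(range(11), 50, interpolation='higher') == 5
    # 'midpoint' now agrees; before the fix it returned 5.5 ('lower' + 0.5)
    assert np.percentile(range(11), 50, interpolation='midpoint') == 5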
--- doc/release/1.12.0-notes.rst | 7 ++++++- numpy/lib/function_base.py | 2 +- numpy/lib/tests/test_function_base.py | 6 +++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst index ee4e2d24a2cd..d6089c3118ec 100644 --- a/doc/release/1.12.0-notes.rst +++ b/doc/release/1.12.0-notes.rst @@ -32,10 +32,15 @@ default order for arrays that are now both. ``MaskedArray`` takes view of data **and** mask when slicing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - XXX +``np.percentile`` 'midpoint' interpolation method fixed for exact indices +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'midpoint' interpolator now gives the same result as 'lower' and 'higher' when +the two coincide. Previous behavior of 'lower' + 0.5 is fixed. + + DeprecationWarning to error ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 844c069c0bfc..fbe41442b406 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -3541,7 +3541,7 @@ def _percentile(a, q, axis=None, out=None, elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': - indices = floor(indices) + 0.5 + indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index d6a838f3a9a5..56466c1ea587 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2055,7 +2055,7 @@ def compare_results(res, desired): assert_array_equal(res[i], desired[i]) -class TestScoreatpercentile(TestCase): +class TestPercentile(TestCase): def test_basic(self): x = np.arange(8) * 0.5 @@ -2104,6 +2104,10 @@ def test_lower_higher(self): def test_midpoint(self): assert_equal(np.percentile(range(10), 51, interpolation='midpoint'), 4.5) + assert_equal(np.percentile(range(11), 51, + interpolation='midpoint'), 5.5) + assert_equal(np.percentile(range(11), 50, + interpolation='midpoint'), 5) def test_nearest(self): assert_equal(np.percentile(range(10), 51, From 849b81804fb7a11dc80821dbd166562225c8450f Mon Sep 17 00:00:00 2001 From: I--P Date: Mon, 9 Nov 2015 10:58:32 +0100 Subject: [PATCH 462/496] ENH: usecols now accepts an int when only one column has to be read --- doc/release/1.12.0-notes.rst | 5 +++++ numpy/lib/npyio.py | 35 ++++++++++++++++++++++++++++----- numpy/lib/tests/test_io.py | 38 ++++++++++++++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 5 deletions(-) diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst index d6089c3118ec..d093e0542c86 100644 --- a/doc/release/1.12.0-notes.rst +++ b/doc/release/1.12.0-notes.rst @@ -61,6 +61,11 @@ New Features Improvements ============ +*np.loadtxt* now supports a single integer as ``usecols`` argument +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Instead of using ``usecols=(n,)`` to read the nth column of a file +it is now allowed to use ``usecols=n``. Also the error message is +more user friendly when a non-integer is passed as a column index.
Changes diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 640f4fa32d04..4db542b55bf7 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -6,7 +6,7 @@ import itertools import warnings import weakref -from operator import itemgetter +from operator import itemgetter, index as opindex import numpy as np from . import format @@ -714,10 +714,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. + + .. versionadded:: 1.11.0 + + Also, when a single column has to be read it is possible to use + an integer instead of a tuple. E.g. ``usecols = 3`` reads the + third column the same way as ``usecols = (3,)`` would. + unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a structured @@ -786,8 +794,25 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, user_converters = converters if delimiter is not None: delimiter = asbytes(delimiter) + if usecols is not None: - usecols = list(usecols) + # Allow usecols to be a single int or a sequence of ints + try: + usecols_as_list = list(usecols) + except TypeError: + usecols_as_list = [usecols] + for col_idx in usecols_as_list: + try: + opindex(col_idx) + except TypeError as e: + e.args = ( + "usecols must be an int or a sequence of ints but " + "it contains at least one element of type %s" % + type(col_idx), + ) + raise + # Fall back to existing code + usecols = usecols_as_list fown = False try: diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 226dc88faa36..c0f8c1953829 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -608,6 +608,29 @@ def test_usecols(self): x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) assert_array_equal(x, a[:, 1:]) + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt(object): + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + # Checking with dtypes defined converters.
data = '''JOE 70.1 25.3 BOB 60.5 27.9 @@ -619,6 +642,21 @@ def test_usecols(self): assert_equal(arr['stid'], [b"JOE", b"BOB"]) assert_equal(arr['temp'], [25.3, 27.9]) + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + def test_fancy_dtype(self): c = TextIO() c.write('1,2,3.0\n4,5,6.0\n') From 857c3a8ec160a618a54edd9407f77fbfc2a07678 Mon Sep 17 00:00:00 2001 From: Nathaniel Beaver Date: Thu, 10 Sep 2015 03:47:44 -0500 Subject: [PATCH 463/496] BUG: check lower limit of base in base_repr. --- numpy/core/numeric.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 0b728f8043f2..756c426f7390 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2232,6 +2232,8 @@ def base_repr(number, base=2, padding=0): digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' if base > len(digits): raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") num = abs(number) res = [] From da10a1cd5eff56af9b83585bf93bbccc21fe2a1d Mon Sep 17 00:00:00 2001 From: Nathaniel Beaver Date: Mon, 1 Feb 2016 12:24:46 -0600 Subject: [PATCH 464/496] DOC: base_repr *can* handle negative integers. --- numpy/core/numeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 756c426f7390..a672fdc530f2 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -2198,7 +2198,7 @@ def base_repr(number, base=2, padding=0): Parameters ---------- number : int - The value to convert. Only positive values are handled. + The value to convert. Positive and negative values are handled. base : int, optional Convert `number` to the `base` number system. The valid range is 2-36, the default value is 2. From 7fa6aeaa145d01dd63037d51643218784071805c Mon Sep 17 00:00:00 2001 From: Nathaniel Beaver Date: Mon, 1 Feb 2016 12:52:53 -0600 Subject: [PATCH 465/496] TST: out of bounds bases in base_repr --- numpy/core/tests/test_numeric.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 34be84135a9f..7309cf2249c0 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -1044,6 +1044,12 @@ def test_negative(self): assert_equal(np.base_repr(-12, 10, 4), '-000012') assert_equal(np.base_repr(-12, 4), '-30') + def test_base_range(self): + with self.assertRaises(ValueError): + np.base_repr(1, 1) + with self.assertRaises(ValueError): + np.base_repr(1, 37) + class TestArrayComparisons(TestCase): def test_array_equal(self): From f6a1c68ceb0c4a7494d0ce7b4fb1aed8303f70f1 Mon Sep 17 00:00:00 2001 From: Arne de Laat Date: Tue, 2 Feb 2016 10:23:25 +0100 Subject: [PATCH 466/496] Update Wikipedia references for mtrand.pyx Specifically the 'Logarithmic distribution' link is fixed. For others the links or article names are updated. 
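Before the series moves on, the ``np.base_repr`` contract pinned down by the three patches just above is worth one consolidated check (a sketch of the documented behavior)::

    import numpy as np

    assert np.base_repr(-12, 4) == '-30'   # negative values are handled
    for bad_base in (1, 37):               # only bases 2..36 are accepted
        try:
            np.base_repr(1, bad_base)
        except ValueError:
            pass
        else:
            raise AssertionError('expected ValueError for base %d' % bad_base)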
--- numpy/random/mtrand/mtrand.pyx | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index a419e51a864e..e5998c001224 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1991,9 +1991,9 @@ cdef class RandomState: ---------- .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and Random Signal Principles", 4th ed, 2001, p. 57. - .. [2] "Poisson Process", Wikipedia, + .. [2] Wikipedia, "Poisson process", http://en.wikipedia.org/wiki/Poisson_process - .. [3] "Exponential Distribution, Wikipedia, + .. [3] Wikipedia, "Exponential distribution", http://en.wikipedia.org/wiki/Exponential_distribution """ @@ -2093,8 +2093,8 @@ cdef class RandomState: .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/GammaDistribution.html - .. [2] Wikipedia, "Gamma-distribution", - http://en.wikipedia.org/wiki/Gamma-distribution + .. [2] Wikipedia, "Gamma distribution", + http://en.wikipedia.org/wiki/Gamma_distribution Examples -------- @@ -2184,8 +2184,8 @@ cdef class RandomState: .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/GammaDistribution.html - .. [2] Wikipedia, "Gamma-distribution", - http://en.wikipedia.org/wiki/Gamma-distribution + .. [2] Wikipedia, "Gamma distribution", + http://en.wikipedia.org/wiki/Gamma_distribution Examples -------- @@ -2381,7 +2381,7 @@ cdef class RandomState: .. [1] Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/NoncentralF-Distribution.html - .. [2] Wikipedia, "Noncentral F distribution", + .. [2] Wikipedia, "Noncentral F-distribution", http://en.wikipedia.org/wiki/Noncentral_F-distribution Examples @@ -3273,7 +3273,7 @@ cdef class RandomState: .. [3] Weisstein, Eric W. "Laplace Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/LaplaceDistribution.html - .. [4] Wikipedia, "Laplace Distribution", + .. [4] Wikipedia, "Laplace distribution", http://en.wikipedia.org/wiki/Laplace_distribution Examples @@ -3994,7 +3994,7 @@ cdef class RandomState: .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/BinomialDistribution.html - .. [5] Wikipedia, "Binomial-distribution", + .. [5] Wikipedia, "Binomial distribution", http://en.wikipedia.org/wiki/Binomial_distribution Examples @@ -4459,7 +4459,7 @@ cdef class RandomState: .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/HypergeometricDistribution.html - .. [3] Wikipedia, "Hypergeometric-distribution", + .. [3] Wikipedia, "Hypergeometric distribution", http://en.wikipedia.org/wiki/Hypergeometric_distribution Examples @@ -4569,8 +4569,8 @@ cdef class RandomState: Journal of Animal Ecology, 12:42-58. .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small Data Sets, CRC Press, 1994. - .. [4] Wikipedia, "Logarithmic-distribution", - http://en.wikipedia.org/wiki/Logarithmic-distribution + .. 
[4] Wikipedia, "Logarithmic distribution", + http://en.wikipedia.org/wiki/Logarithmic_distribution Examples -------- From 69a2ca41e2316e0eaa11ac2b9a618926815dad6d Mon Sep 17 00:00:00 2001 From: gfyoung Date: Tue, 2 Feb 2016 19:34:38 +0000 Subject: [PATCH 467/496] TST: Fixed f2py test for Anaconda non-win32 When you run 'python -V' under Anaconda, it returns for example, 'Python 3.4.3 :: Continuum Analytics, Inc.' However, the original parsing of the version in 'test_f2py' assumed there was nothing following the version number, causing a ValueError because you can't assign three variables to four components that you get from splitting on the '.' --- numpy/tests/test_scripts.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 0fc7f879f191..1c108ddb1a36 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -74,14 +74,9 @@ def test_f2py(): success = stdout.strip() == asbytes('2') assert_(success, "Warning: f2py not found in path") else: - # unclear what f2py cmd was installed as, check plain (f2py), - # with major version (f2py3), or major/minor version (f2py3.4) - code, stdout, stderr = run_command([sys.executable, '-V']) - - # for some reason, 'python -V' returns version in 'stderr' for - # Python 2.x but in 'stdout' for Python 3.x - version = (stdout or stderr)[7:].strip() - major, minor, revision = version.decode('utf-8').split('.') + version = sys.version_info + major = str(version.major) + minor = str(version.minor) f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor) success = False From 6b3d477bfd6404b7f5eea7b91a547df0c61c1239 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Tue, 2 Feb 2016 22:26:15 +0000 Subject: [PATCH 468/496] DOC: Fix broken pandas link in 1.11.0 release notes [ci skip] --- doc/release/1.11.0-notes.rst | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst index c9287ed3f18b..e95225a7c0d5 100644 --- a/doc/release/1.11.0-notes.rst +++ b/doc/release/1.11.0-notes.rst @@ -76,12 +76,13 @@ printing it would convert from or to local time:: >>>> np.datetime64('2000-01-01T00:00:00') numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00 + A consensus of datetime64 users agreed that this behavior is undesirable -and at odds with how datetime64 is usually used (e.g., by pandas_). For -most use cases, a timezone naive datetime type is preferred, similar to the -``datetime.datetime`` type in the Python standard library. Accordingly, -datetime64 no longer assumes that input is in local time, nor does it print -local times:: +and at odds with how datetime64 is usually used (e.g., by `pandas +`__). For most use cases, a timezone naive datetime +type is preferred, similar to the ``datetime.datetime`` type in the Python +standard library. Accordingly, datetime64 no longer assumes that input is in +local time, nor does it print local times:: >>>> np.datetime64('2000-01-01T00:00:00') numpy.datetime64('2000-01-01T00:00:00') @@ -99,14 +100,12 @@ As a corollary to this change, we no longer prohibit casting between datetimes with date units and datetimes with time units. With timezone naive datetimes, the rule for casting from dates to times is no longer ambiguous. 
-pandas_: http://pandas.pydata.org - ``linalg.norm`` return type changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The return type of the ``linalg.norm`` function is now floating point without exception. Some of the norm types previously returned integers. -and returns floating results.polynomial fit changes +polynomial fit changes ~~~~~~~~~~~~~~~~~~~~~~ The various fit functions in the numpy polynomial package no longer accept non-integers for degree specification. From 512a820c6e02dfecb757bc5b01397ff8168847dc Mon Sep 17 00:00:00 2001 From: auke Date: Fri, 5 Feb 2016 09:31:13 +0100 Subject: [PATCH 469/496] DOC: add vstack, hstack, dstack reference to stack documentation. --- numpy/core/shape_base.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 599b48d82b50..04a34ed8fa58 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -304,6 +304,11 @@ def stack(arrays, axis=0): See Also -------- concatenate : Join a sequence of arrays along an existing axis. + vstack : Stack arrays in sequence vertically, along the existing row axis. + hstack : Stack arrays in sequence horizontally, along the existing + column axis. + dstack : Stack arrays in sequence depth wise, along the existing + third axis. split : Split array into a list of multiple sub-arrays of equal size. Examples From 735182ca4a48ae9ade7b4514e31e50069a0c2b32 Mon Sep 17 00:00:00 2001 From: auke Date: Fri, 5 Feb 2016 14:48:54 +0100 Subject: [PATCH 470/496] DOC: changed 'depth wise' to 'depthwise', 'the existing' to 'an existing' --- numpy/core/shape_base.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 04a34ed8fa58..646ee0e47eac 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -199,7 +199,7 @@ def vstack(tup): -------- stack : Join a sequence of arrays along a new axis. hstack : Stack arrays in sequence horizontally (column wise). - dstack : Stack arrays in sequence depth wise (along third dimension). + dstack : Stack arrays in sequence depthwise (along third dimension). concatenate : Join a sequence of arrays along an existing axis. vsplit : Split array into a list of multiple sub-arrays vertically. @@ -250,7 +250,7 @@ def hstack(tup): -------- stack : Join a sequence of arrays along a new axis. vstack : Stack arrays in sequence vertically (row wise). - dstack : Stack arrays in sequence depth wise (along third axis). + dstack : Stack arrays in sequence depthwise (along third axis). concatenate : Join a sequence of arrays along an existing axis. hsplit : Split array along second axis. @@ -304,11 +304,9 @@ def stack(arrays, axis=0): See Also -------- concatenate : Join a sequence of arrays along an existing axis. - vstack : Stack arrays in sequence vertically, along the existing row axis. - hstack : Stack arrays in sequence horizontally, along the existing - column axis. - dstack : Stack arrays in sequence depth wise, along the existing - third axis. + vstack : Stack arrays vertically, along an existing row axis. + hstack : Stack arrays horizontally, along an existing column axis. + dstack : Stack arrays depthwise, along an existing third axis. split : Split array into a list of multiple sub-arrays of equal size. 
Examples
From 8b81a642c82875ada61dce17dd7c3c59493943ed Mon Sep 17 00:00:00 2001
From: Joseph Fox-Rabinovitz
Date: Fri, 5 Feb 2016 09:04:52 -0500
Subject: [PATCH 471/496] MAINT: Removed spurious assert in histogram estimators

---
 numpy/lib/function_base.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index a49d02a1ae05..088c4c2cddc2 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -85,9 +85,6 @@ def _hist_optim_numbins_estimator(a, estimator):
 will choose the appropriate estimator and return it's estimate for the
 optimal number of bins.
 """
- assert isinstance(estimator, basestring)
- # private function should not be called otherwise
-
 if a.size == 0:
 return 1
From b29a997b8a4a999577d9fd575bda20a1d6ecd3be Mon Sep 17 00:00:00 2001
From: John Kirkham
Date: Fri, 5 Feb 2016 11:36:14 -0500
Subject: [PATCH 472/496] STY: Drop some trailing spaces in `numpy.ma.core`.

---
 numpy/ma/core.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 2b4ef98e7b9b..24d41bcaa704 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3146,11 +3146,11 @@ def __getitem__(self, indx):
 if self._fill_value is not None:
 dout._fill_value = self._fill_value[indx]
- # If we're indexing a multidimensional field in a
+ # If we're indexing a multidimensional field in a
 # structured array (such as dtype("(2,)i2,(2,)i1")),
 # dimensionality goes up (M[field].ndim == M.ndim +
- # len(M.dtype[field].shape)). That's fine for
- # M[field] but problematic for M[field].fill_value
+ # len(M.dtype[field].shape)). That's fine for
+ # M[field] but problematic for M[field].fill_value
 # which should have shape () to avoid breaking several
 # methods. There is no great way out, so set to
 # first element. See issue #6723.
From d7538c5d8aaaacd1da616e77e3005df8a15b94b4 Mon Sep 17 00:00:00 2001
From: Stephan Hoyer
Date: Fri, 5 Feb 2016 11:20:20 -0800
Subject: [PATCH 473/496] Revert "DOC: add vstack, hstack, dstack reference to stack documentation."

---
 numpy/core/shape_base.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 646ee0e47eac..599b48d82b50 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -199,7 +199,7 @@ def vstack(tup):
 --------
 stack : Join a sequence of arrays along a new axis.
 hstack : Stack arrays in sequence horizontally (column wise).
- dstack : Stack arrays in sequence depthwise (along third dimension).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
 concatenate : Join a sequence of arrays along an existing axis.
 vsplit : Split array into a list of multiple sub-arrays vertically.
@@ -250,7 +250,7 @@ def hstack(tup):
 --------
 stack : Join a sequence of arrays along a new axis.
 vstack : Stack arrays in sequence vertically (row wise).
- dstack : Stack arrays in sequence depthwise (along third axis).
+ dstack : Stack arrays in sequence depth wise (along third axis).
 concatenate : Join a sequence of arrays along an existing axis.
 hsplit : Split array along second axis.
@@ -304,9 +304,6 @@ def stack(arrays, axis=0):
 See Also
 --------
 concatenate : Join a sequence of arrays along an existing axis.
- vstack : Stack arrays vertically, along an existing row axis.
- hstack : Stack arrays horizontally, along an existing column axis.
- dstack : Stack arrays depthwise, along an existing third axis.
split : Split array into a list of multiple sub-arrays of equal size.

 Examples
From e30d80266d476841e51d36432d1640f278a5fa05 Mon Sep 17 00:00:00 2001
From: Joseph Fox-Rabinovitz
Date: Wed, 3 Feb 2016 15:48:07 -0500
Subject: [PATCH 474/496] BUG: Fixed previous attempt to fix dimension mismatch in nanpercentile

nanpercentile was conforming to the dimension convention of percentile
incorrectly. percentile outputs results for the different percentiles
along the first dimension of the output. nanpercentile was moving the
reduction axis to the front using swapaxes, which would move the first
axis out of place if the array had more than two axes. Added a test
with more than two axes to demonstrate, and used rollaxis instead of
swapaxes to do the interchange.
---
 numpy/lib/nanfunctions.py | 3 ++-
 numpy/lib/tests/test_nanfunctions.py | 5 ++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 6b28b4a35874..491a28d224a8 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -979,7 +979,8 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
 # Move that axis to the beginning to match percentile's
 # convention.
 if q.ndim != 0:
- result = np.swapaxes(result, 0, axis)
+ result = np.rollaxis(result, axis)
+
 if out is not None:
 out[...] = result
 return result
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index ac88c4ea506b..989c563d994c 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -711,7 +711,7 @@ def test_multiple_percentiles(self):
 # For checking consistency in higher dimensional case
 large_mat = np.ones((3, 4, 5))
 large_mat[:, 0:2:4, :] = 0
- large_mat[:, :, 3:] = 2*large_mat[:, :, 3:]
+ large_mat[:, :, 3:] *= 2
 for axis in [None, 0, 1]:
 for keepdim in [False, True]:
 with warnings.catch_warnings(record=True) as w:
@@ -727,6 +727,9 @@ def test_multiple_percentiles(self):
 keepdims=keepdim)
 assert_equal(nan_val, val)
+ megamat = np.ones((3, 4, 5, 6))
+ assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
+
 if __name__ == "__main__":
 run_module_suite()
From 4db3e19312483351d545f64bcb45fdcda278490c Mon Sep 17 00:00:00 2001
From: John Kirkham
Date: Fri, 5 Feb 2016 14:44:54 -0500
Subject: [PATCH 475/496] TST: Pin virtualenv used on Travis CI.

[skip appveyor]
---
 .travis.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index ccb182816e99..540d05c56f96 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -77,6 +77,10 @@ before_install:
 - pushd builds
 # Build into own virtualenv
 # We therefore control our own environment, avoid travis' numpy
+ #
+ # Some change in virtualenv 14.0.5 caused `test_f2py` to fail. So, we have
+ # pinned `virtualenv` to the last known working version to avoid this failure.
+ - pip install -U 'virtualenv==14.0.4' - virtualenv --python=python venv - source venv/bin/activate - python -V From 917e530ac952cd3398e8ce19b49adef0ab297df9 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Tue, 2 Feb 2016 10:28:53 -0500 Subject: [PATCH 476/496] DOC: Updated minor typos in function_base.py and test_function_base.py --- numpy/lib/function_base.py | 121 +++++++++++--------- numpy/lib/nanfunctions.py | 159 +++++++++++++------------- numpy/lib/tests/test_function_base.py | 3 +- 3 files changed, 147 insertions(+), 136 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a49d02a1ae05..6a64ebe85402 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -124,7 +124,7 @@ def scott(x): def fd(x): """ - Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth + Freedman Diaconis rule using interquartile range (IQR) for binwidth Considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However the IQR depends on fewer points than the sd so it is less accurate, especially for long tailed distributions. @@ -3233,22 +3233,22 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ---------- a : array_like Input array or object that can be converted to an array. - axis : int or sequence of int, optional - Axis along which the medians are computed. The default (axis=None) + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape and buffer length as the expected output, but the - type (of the output) will be cast if necessary. + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for + If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve the - contents of the input array. Treat the input as undefined, but it - will probably be fully or partially sorted. Default is False. Note - that, if `overwrite_input` is True and the input is not already an - ndarray, an error will be raised. + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, @@ -3256,15 +3256,14 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): .. versionadded:: 1.9.0 - Returns ------- median : ndarray - A new array holding the result (unless `out` is specified, in which - case that array is returned instead). If the input contains - integers, or floats of smaller precision than 64, then the output - data-type is float64. Otherwise, the output data-type is the same - as that of the input. + A new array holding the result. 
If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. See Also -------- @@ -3272,10 +3271,10 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Notes ----- - Given a vector V of length N, the median of V is the middle value of - a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is - odd. When N is even, it is the average of the two middle values of - ``V_sorted``. + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i + e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the + two middle values of ``V_sorted`` when ``N`` is even. Examples -------- @@ -3396,28 +3395,32 @@ def percentile(a, q, axis=None, out=None, Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. - axis : int or sequence of int, optional - Axis along which the percentiles are computed. The default (None) - is to compute the percentiles along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. + axis : {int, sequence of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. A sequence of axes is supported since + version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional - If True, then allow use of memory of input array `a` for calculations. - The input array will be modified by the call to `percentile`. This will - save memory when you do not need to preserve the contents of the input - array. In this case you should not make any assumptions about the - contents of the input `a` after this function completes -- treat it as - undefined. Default is False. If `a` is not already an array, this - parameter will have no effect as `a` will be converted to an array + If True, then allow use of memory of input array `a` + calculations. The input array will be modified by the call to + `percentile`. This will save memory when you do not need to + preserve the contents of the input array. In this case you + should not make any assumptions about the contents of the input + `a` after this function completes -- treat it as undefined. + Default is False. If `a` is not already an array, this parameter + will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to use - when the desired quantile lies between two data points ``i < j``: - * linear: ``i + (j - i) * fraction``, where ``fraction`` is the - fractional part of the index surrounded by ``i`` and ``j``. + This optional parameter specifies the interpolation method to + use when the desired quantile lies between two data points + ``i < j``: + * linear: ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. * lower: ``i``. * higher: ``j``. 
* nearest: ``i`` or ``j``, whichever is nearest. @@ -3425,35 +3428,38 @@ def percentile(a, q, axis=None, out=None, .. versionadded:: 1.9.0 keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result will - broadcast correctly against the original array `a`. + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray - If `q` is a single percentile and `axis=None`, then the result is a - scalar. If multiple percentiles are given, the result is an an array. - The percentiles are listed in the first axis. The remaining axes are the - reduced axes of the input `a`. If the input contains integers or floats - of smaller precision than 64, then the output data-type is float64. - Otherwise, the output data-type is the same as that of the input. If - `out` is specified, that array is returned instead. + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. See Also -------- - mean, median + mean, median, nanpercentile Notes ----- - Given a vector V of length N, the q-th percentile of V is the q-th ranked - value in a sorted copy of V. The values and distances of the two nearest - neighbors as well as the `interpolation` parameter will determine the - percentile if the normalized ranking does not match q exactly. This function - is the same as the median if ``q=50``, the same as the minimum if ``q=0`` - and the same as the maximum if ``q=100``. + Given a vector ``V`` of length ``N``, the ``q``-th percentile of + ``V`` is the value ``q/100`` of the way from the mimumum to the + maximum in in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the percentile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the + same as the maximum if ``q=100``. Examples -------- @@ -3621,7 +3627,7 @@ def _percentile(a, q, axis=None, out=None, r = add(x1, x2) if np.any(n): - warnings.warn("Invalid value encountered in median", + warnings.warn("Invalid value encountered in percentile", RuntimeWarning) if zerod: if ap.ndim == 1: @@ -3733,7 +3739,8 @@ def trapz(y, x=None, dx=1.0, axis=-1): #always succeed def add_newdoc(place, obj, doc): - """Adds documentation to obj which is in module place. + """ + Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring @@ -3751,7 +3758,7 @@ def add_newdoc(place, obj, doc): in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
- """ + """ try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 6b28b4a35874..b05a73859978 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -729,8 +729,9 @@ def _nanmedian(a, axis=None, out=None, overwrite_input=False): def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): """ - sort + indexing median, faster for small medians along multiple dimensions - due to the high overhead of apply_along_axis + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + see nanmedian for parameter usage """ a = np.ma.masked_array(a, np.isnan(a)) @@ -754,36 +755,35 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): ---------- a : array_like Input array or object that can be converted to an array. - axis : int, optional - Axis along which the medians are computed. The default (axis=None) + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional - Alternative output array in which to place the result. It must have - the same shape and buffer length as the expected output, but the - type (of the output) will be cast if necessary. + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. overwrite_input : bool, optional - If True, then allow use of memory of input array (a) for + If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to - median. This will save memory when you do not need to preserve + `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is - False. Note that, if `overwrite_input` is True and the input - is not already an ndarray, an error will be raised. + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - - + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original `arr`. Returns ------- median : ndarray - A new array holding the result. If the input contains integers, or - floats of smaller precision than 64, then the output data-type is - float64. Otherwise, the output data-type is the same as that of the - input. + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. 
See Also -------- @@ -791,10 +791,10 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): Notes ----- - Given a vector V of length N, the median of V is the middle value of - a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is - odd. When N is even, it is the average of the two middle values of - ``V_sorted``. + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. Examples -------- @@ -838,10 +838,10 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ - Compute the qth percentile of the data along the specified axis, while - ignoring nan values. + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. - Returns the qth percentile of the array elements. + Returns the qth percentile(s) of the array elements. .. versionadded:: 1.9.0 @@ -850,11 +850,13 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) - Percentile to compute which must be between 0 and 100 inclusive. - axis : int or sequence of int, optional - Axis along which the percentiles are computed. The default (None) - is to compute the percentiles along a flattened version of the array. - A sequence of axes is supported since version 1.9.0. + Percentile to compute, which must be between 0 and 100 + inclusive. + axis : {int, sequence of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. A sequence of axes is supported since + version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, @@ -862,39 +864,40 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to - percentile. This will save memory when you do not need to preserve - the contents of the input array. In this case you should not make - any assumptions about the content of the passed in array `a` after - this function completes -- treat it as undefined. Default is False. - Note that, if the `a` input is not already an array this parameter - will have no effect, `a` will be converted to an array internally - regardless of the value of this parameter. + `percentile`. This will save memory when you do not need to + preserve the contents of the input array. In this case you + should not make any assumptions about the contents of the input + `a` after this function completes -- treat it as undefined. + Default is False. If `a` is not already an array, this parameter + will have no effect as `a` will be converted to an array + internally regardless of the value of this parameter. 
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - This optional parameter specifies the interpolation method to use, - when the desired quantile lies between two data points `i` and `j`: - * linear: `i + (j - i) * fraction`, where `fraction` is the - fractional part of the index surrounded by `i` and `j`. - * lower: `i`. - * higher: `j`. - * nearest: `i` or `j` whichever is nearest. - * midpoint: (`i` + `j`) / 2. - + This optional parameter specifies the interpolation method to + use when the desired quantile lies between two data points + ``i < j``: + * linear: ``i + (j - i) * fraction``, where ``fraction`` is + the fractional part of the index surrounded by ``i`` and + ``j``. + * lower: ``i``. + * higher: ``j``. + * nearest: ``i`` or ``j``, whichever is nearest. + * midpoint: ``(i + j) / 2``. keepdims : bool, optional - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `arr`. - + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. Returns ------- - nanpercentile : scalar or ndarray - If a single percentile `q` is given and axis=None a scalar is - returned. If multiple percentiles `q` are given an array holding - the result is returned. The results are listed in the first axis. - (If `out` is specified, in which case that array is returned - instead). If the input contains integers, or floats of smaller - precision than 64, then the output data-type is float64. Otherwise, - the output data-type is the same as that of the input. + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. See Also -------- @@ -902,12 +905,14 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, Notes ----- - Given a vector V of length N, the q-th percentile of V is the q-th ranked - value in a sorted copy of V. The values and distances of the two - nearest neighbors as well as the `interpolation` parameter will - determine the percentile if the normalized ranking does not match q - exactly. This function is the same as the median if ``q=50``, the same - as the minimum if ``q=0``and the same as the maximum if ``q=100``. + Given a vector ``V`` of length ``N``, the ``q``-th percentile of + ``V`` is the value ``q/100`` of the way from the mimumum to the + maximum in in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the percentile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the + same as the maximum if ``q=100``. 
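To give the 'linear' interpolation rule above some intuition, a small self-contained sketch (not the library implementation; the helper name percentile_linear is hypothetical, and it assumes a 1-D input and a scalar q):

    import numpy as np

    def percentile_linear(v, q):
        # The q-th percentile sits q/100 of the way from the first to the
        # last element of the sorted data, measured in index units.
        v = np.sort(np.asarray(v, dtype=float))
        pos = (len(v) - 1) * q / 100.0
        lo = int(pos)                     # index of neighbor i
        hi = min(lo + 1, len(v) - 1)      # index of neighbor j
        return v[lo] + (v[hi] - v[lo]) * (pos - lo)   # i + (j - i) * fraction

    # percentile_linear([1, 2, 3, 4], 50) -> 2.5, matching
    # np.percentile([1, 2, 3, 4], 50); q=0 and q=100 give min and max.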
Examples
 --------
 >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
 >>> a[0][1] = np.nan
 >>> a
 array([[ 10., nan, 4.],
 [ 3., 2., 1.]])
 >>> np.nanpercentile(a, 50)
 3.5
 >>> np.nanpercentile(a, 50, axis=0)
- array([[ 6.5, 4.5, 2.5]])
+ array([ 6.5, 2., 2.5])
- >>> np.nanpercentile(a, 50, axis=1)
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
 array([[ 7.],
 [ 2.]])
 >>> m = np.nanpercentile(a, 50, axis=0)
 >>> out = np.zeros_like(m)
- >>> np.nanpercentile(a, 50, axis=0, out=m)
- array([[ 6.5, 4.5, 2.5]])
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
 >>> m
- array([[ 6.5, 4.5, 2.5]])
+ array([ 6.5, 2. , 2.5])
+ >>> b = a.copy()
 >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
- array([[ 7.],
- [ 2.]])
+ array([ 7., 2.])
 >>> assert not np.all(a==b)
- >>> b = a.copy()
- >>> np.nanpercentile(b, 50, axis=None, overwrite_input=True)
- array([ 3.5])
 """
@@ -987,9 +989,10 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
 def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
 """
- Private function for rank 1 arrays. Compute percentile ignoring NaNs.
- See nanpercentile for parameter usage
+ Private function for rank 1 arrays. Compute percentile ignoring
+ NaNs.
+
+ See nanpercentile for parameter usage
 """
 c = np.isnan(arr1d)
 s = np.where(c)[0]
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index c3483b032f80..ba24488154b1 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2100,7 +2100,7 @@ def test_linear(self):
 # Test defaults
 assert_equal(np.percentile(range(10), 50), 4.5)
- # explicitly specify interpolation_method 'fraction' (the default)
+ # explicitly specify interpolation_method 'linear' (the default)
 assert_equal(np.percentile(range(10), 50,
 interpolation='linear'), 4.5)
@@ -2421,6 +2421,7 @@ def test_nan_behavior(self):
 np.array([np.nan] * 2))
 assert_(w[0].category is RuntimeWarning)
 assert_(w[1].category is RuntimeWarning)
+ assert_(w[2].category is RuntimeWarning)
 a = np.arange(24, dtype=float).reshape(2, 3, 4)
 a[1, 2, 3] = np.nan
From 0ec441d60b6d10d68fd2cb86e51980160730707c Mon Sep 17 00:00:00 2001
From: Julian Taylor
Date: Sat, 6 Feb 2016 23:50:58 +0100
Subject: [PATCH 477/496] BUG: raise IOError on not a file in python2

The change in 5225e4c2007 did not account for the fact that
PyFile_AsFile does not raise an error on invalid input, so add the
handling to our wrapper function to equalize the behavior.
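Concretely, the behavior this fix enforces can be sketched as follows (an illustrative snippet, assuming a Python 2 build with this patch applied; it mirrors the test_nofile test added in the diff below):

    import io
    import numpy as np

    b = io.BytesIO()   # an in-memory buffer, not a real open file
    try:
        np.fromfile(b, dtype=np.uint8, count=80)
    except IOError:
        print('IOError raised instead of failing silently')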
closes gh-7200 --- numpy/core/include/numpy/npy_3kcompat.h | 8 +++++++- numpy/core/src/multiarray/methods.c | 2 -- numpy/core/tests/test_multiarray.py | 8 ++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h index 6a11cf960ab6..db60a312c326 100644 --- a/numpy/core/include/numpy/npy_3kcompat.h +++ b/numpy/core/include/numpy/npy_3kcompat.h @@ -320,7 +320,13 @@ static NPY_INLINE FILE * npy_PyFile_Dup2(PyObject *file, const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos)) { - return PyFile_AsFile(file); + FILE * fp = PyFile_AsFile(file); + if (fp == NULL) { + PyErr_SetString(PyExc_IOError, + "first argument must be an open file"); + return NULL; + } + return fp; } static NPY_INLINE int diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 84d4e2c9e2fa..56b6086ff3b2 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -583,8 +583,6 @@ array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds) fd = npy_PyFile_Dup2(file, "wb", &orig_pos); if (fd == NULL) { - PyErr_SetString(PyExc_IOError, - "first argument must be a string or open file"); goto fail; } if (PyArray_ToFile(self, fd, sep, format) < 0) { diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 3498b8a51079..d57e7c106751 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -3582,6 +3582,14 @@ def setUp(self): def tearDown(self): shutil.rmtree(self.tempdir) + def test_nofile(self): + # this should probably be supported as a file + # but for now test for proper errors + b = io.BytesIO() + assert_raises(IOError, np.fromfile, b, np.uint8, 80) + d = np.ones(7); + assert_raises(IOError, lambda x: x.tofile(b), d) + def test_bool_fromstring(self): v = np.array([True, False, True, False], dtype=np.bool_) y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) From 75ae820c74e927679af82ef634a8f3ef0f97503f Mon Sep 17 00:00:00 2001 From: Thomas A Caswell Date: Mon, 14 Apr 2014 08:43:20 -0400 Subject: [PATCH 478/496] BUG: many functions silently drop `keepdims` kwarg change test from `type(a) is not mu.ndarray` to `not isinstance(a, mu.ndarray)` Because every sub-class of ndarray is not guaranteed to implement `keepdims` as a kwarg, when wrapping these methods care must be taken. The previous behavior was to silently eat the kwarg when dealing with a sub-class of ndarray. Now, if `keepdims=np._NoValue` (the new default) it is not passed through to the underlying function call (so the default value of `keepdims` is now controlled by the sub-class). If `keepdims` is not `np._NoValue` then it is passed through and will raise an exception if the sub-class does not support the kwarg. A special case in nanvar was required to deal with `matrix` that previously relied on `fromnumeric` silently dropping `keepdims`. 
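The dispatch pattern this message describes boils down to the following sketch (simplified for illustration; the real wrappers in the diff below also forward dtype and out, and keep a fast path for plain ndarrays):

    import numpy as np

    def sum_wrapper(a, axis=None, keepdims=np._NoValue):
        # np._NoValue is the sentinel default introduced for this purpose.
        kwargs = {}
        if keepdims is not np._NoValue:
            # keepdims was passed explicitly: forward it, and let a
            # sub-class whose sum() lacks the kwarg raise TypeError
            # rather than have the request silently dropped.
            kwargs['keepdims'] = keepdims
        return np.asanyarray(a).sum(axis=axis, **kwargs)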
--- numpy/core/fromnumeric.py | 195 +++++++++++++++++++++---------- numpy/core/tests/test_numeric.py | 12 ++ numpy/lib/nanfunctions.py | 145 ++++++++++++++++------- 3 files changed, 254 insertions(+), 98 deletions(-) diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 4faeb557a6d3..bb89adbe1761 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -17,7 +17,6 @@ _dt_ = nt.sctype2char - # functions that are methods __all__ = [ 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', @@ -1380,6 +1379,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): return asanyarray(a).trace(offset, axis1, axis2, dtype, out) + def ravel(a, order='C'): """Return a contiguous flattened array. @@ -1740,7 +1740,7 @@ def clip(a, a_min, a_max, out=None): return clip(a_min, a_max, out) -def sum(a, axis=None, dtype=None, out=None, keepdims=False): +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Sum of array elements over a given axis. @@ -1770,9 +1770,15 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False): the same shape as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the input array. + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. Returns ------- @@ -1821,6 +1827,9 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False): -128 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if isinstance(a, _gentype): res = _sum_(a) if out is not None: @@ -1832,15 +1841,14 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False): sum = a.sum except AttributeError: return _methods._sum(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameters here... - return sum(axis=axis, dtype=dtype, out=out) + out=out, **kwargs) + return sum(axis=axis, dtype=dtype, out=out, **kwargs) else: return _methods._sum(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) + out=out, **kwargs) -def product(a, axis=None, dtype=None, out=None, keepdims=False): +def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of array elements over a given axis. @@ -1849,11 +1857,13 @@ def product(a, axis=None, dtype=None, out=None, keepdims=False): prod : equivalent function; see for details. """ - return um.multiply.reduce(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs) -def sometrue(a, axis=None, out=None, keepdims=False): +def sometrue(a, axis=None, out=None, keepdims=np._NoValue): """ Check whether some values are true. 
@@ -1865,14 +1875,13 @@ def sometrue(a, axis=None, out=None, keepdims=False): """ arr = asanyarray(a) - - try: - return arr.any(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.any(axis=axis, out=out) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return arr.any(axis=axis, out=out, **kwargs) -def alltrue(a, axis=None, out=None, keepdims=False): +def alltrue(a, axis=None, out=None, keepdims=np._NoValue): """ Check if all elements of input array are true. @@ -1882,14 +1891,13 @@ def alltrue(a, axis=None, out=None, keepdims=False): """ arr = asanyarray(a) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return arr.all(axis=axis, out=out, **kwargs) - try: - return arr.all(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.all(axis=axis, out=out) - -def any(a, axis=None, out=None, keepdims=False): +def any(a, axis=None, out=None, keepdims=np._NoValue): """ Test whether any array element along a given axis evaluates to True. @@ -1915,11 +1923,18 @@ def any(a, axis=None, out=None, keepdims=False): (e.g., if it is of type float, then it will remain so, returning 1.0 for True and 0.0 for False, regardless of the type of `a`). See `doc.ufuncs` (Section "Output arguments") for details. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + Returns ------- any : bool or ndarray @@ -1963,14 +1978,13 @@ def any(a, axis=None, out=None, keepdims=False): """ arr = asanyarray(a) - - try: - return arr.any(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.any(axis=axis, out=out) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return arr.any(axis=axis, out=out, **kwargs) -def all(a, axis=None, out=None, keepdims=False): +def all(a, axis=None, out=None, keepdims=np._NoValue): """ Test whether all array elements along a given axis evaluate to True. @@ -1994,11 +2008,18 @@ def all(a, axis=None, out=None, keepdims=False): type is preserved (e.g., if ``dtype(out)`` is float, the result will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. 
+ Returns ------- all : ndarray, bool @@ -2037,11 +2058,10 @@ def all(a, axis=None, out=None, keepdims=False): """ arr = asanyarray(a) - - try: - return arr.all(axis=axis, out=out, keepdims=keepdims) - except TypeError: - return arr.all(axis=axis, out=out) + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return arr.all(axis=axis, out=out, **kwargs) def cumsum(a, axis=None, dtype=None, out=None): @@ -2177,7 +2197,7 @@ def ptp(a, axis=None, out=None): return ptp(axis, out) -def amax(a, axis=None, out=None, keepdims=False): +def amax(a, axis=None, out=None, keepdims=np._NoValue): """ Return the maximum of an array or maximum along an axis. @@ -2197,11 +2217,18 @@ def amax(a, axis=None, out=None, keepdims=False): Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `amax` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + Returns ------- amax : ndarray or scalar @@ -2255,20 +2282,22 @@ def amax(a, axis=None, out=None, keepdims=False): 4.0 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if type(a) is not mu.ndarray: try: amax = a.max except AttributeError: return _methods._amax(a, axis=axis, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameter - return amax(axis=axis, out=out) + out=out, **kwargs) + return amax(axis=axis, out=out, **kwargs) else: return _methods._amax(a, axis=axis, - out=out, keepdims=keepdims) + out=out, **kwargs) -def amin(a, axis=None, out=None, keepdims=False): +def amin(a, axis=None, out=None, keepdims=np._NoValue): """ Return the minimum of an array or minimum along an axis. @@ -2288,11 +2317,18 @@ def amin(a, axis=None, out=None, keepdims=False): Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `amin` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. 
+ Returns ------- amin : ndarray or scalar @@ -2346,17 +2382,19 @@ def amin(a, axis=None, out=None, keepdims=False): 0.0 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if type(a) is not mu.ndarray: try: amin = a.min except AttributeError: return _methods._amin(a, axis=axis, - out=out, keepdims=keepdims) - # NOTE: Dropping the keepdims parameter - return amin(axis=axis, out=out) + out=out, **kwargs) + return amin(axis=axis, out=out, **kwargs) else: return _methods._amin(a, axis=axis, - out=out, keepdims=keepdims) + out=out, **kwargs) def alen(a): @@ -2392,7 +2430,7 @@ def alen(a): return len(array(a, ndmin=1)) -def prod(a, axis=None, dtype=None, out=None, keepdims=False): +def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of array elements over a given axis. @@ -2427,6 +2465,12 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False): result as dimensions with size one. With this option, the result will broadcast correctly against the input array. + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + Returns ------- product_along_axis : ndarray, see `dtype` parameter above. @@ -2484,16 +2528,19 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False): True """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if type(a) is not mu.ndarray: try: prod = a.prod except AttributeError: return _methods._prod(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) - return prod(axis=axis, dtype=dtype, out=out) + out=out, **kwargs) + return prod(axis=axis, dtype=dtype, out=out, **kwargs) else: return _methods._prod(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) + out=out, **kwargs) def cumprod(a, axis=None, dtype=None, out=None): @@ -2793,7 +2840,7 @@ def round_(a, decimals=0, out=None): return round(decimals, out) -def mean(a, axis=None, dtype=None, out=None, keepdims=False): +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Compute the arithmetic mean along the specified axis. @@ -2823,11 +2870,18 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. + keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `mean` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. 
+ Returns ------- m : ndarray, see dtype parameter above @@ -2874,18 +2928,21 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): 0.55000000074505806 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if type(a) is not mu.ndarray: try: mean = a.mean - return mean(axis=axis, dtype=dtype, out=out) + return mean(axis=axis, dtype=dtype, out=out, **kwargs) except AttributeError: pass return _methods._mean(a, axis=axis, dtype=dtype, - out=out, keepdims=keepdims) + out=out, **kwargs) -def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Compute the standard deviation along the specified axis. @@ -2922,6 +2979,12 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. + Returns ------- standard_deviation : ndarray, see dtype parameter above. @@ -2981,19 +3044,23 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): 0.44999999925494177 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: try: std = a.std - return std(axis=axis, dtype=dtype, out=out, ddof=ddof) + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) except AttributeError: pass return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) + **kwargs) def var(a, axis=None, dtype=None, out=None, ddof=0, - keepdims=False): + keepdims=np._NoValue): """ Compute the variance along the specified axis. @@ -3031,6 +3098,12 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. + If the default value is passed, then `keepdims` will not be + passed through to the `var` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-classes `sum` method does not implement `keepdims` any + exceptions will be raised. 
+ Returns ------- variance : ndarray, see dtype parameter above @@ -3089,12 +3162,16 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, 0.2025 """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: try: var = a.var - return var(axis=axis, dtype=dtype, out=out, ddof=ddof) + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) except AttributeError: pass return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, - keepdims=keepdims) + **kwargs) diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 7309cf2249c0..e22a5e193434 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -2472,5 +2472,17 @@ def test_number_of_arguments(self): assert_equal(mit.numiter, j) +class TestKeepdims(TestCase): + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 8fe7afd46fb5..56f0010afedb 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -23,6 +23,7 @@ import numpy as np from numpy.lib.function_base import _ureduce as _ureduce + __all__ = [ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', @@ -141,7 +142,7 @@ def _divide_by_count(a, b, out=None): return np.divide(a, b, out=out, casting='unsafe') -def nanmin(a, axis=None, out=None, keepdims=False): +def nanmin(a, axis=None, out=None, keepdims=np._NoValue): """ Return minimum of an array or minimum along an axis, ignoring any NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is raised and @@ -163,9 +164,14 @@ def nanmin(a, axis=None, out=None, keepdims=False): .. versionadded:: 1.8.0 keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the original `a`. + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `min` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. .. 
versionadded:: 1.8.0 @@ -220,27 +226,30 @@ def nanmin(a, axis=None, out=None, keepdims=False): -inf """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if not isinstance(a, np.ndarray) or type(a) is np.ndarray: # Fast, but not safe for subclasses of ndarray - res = np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) if np.isnan(res).any(): warnings.warn("All-NaN axis encountered", RuntimeWarning) else: # Slow, but safe for subclasses of ndarray a, mask = _replace_nan(a, +np.inf) - res = np.amin(a, axis=axis, out=out, keepdims=keepdims) + res = np.amin(a, axis=axis, out=out, **kwargs) if mask is None: return res # Check for all-NaN axis - mask = np.all(mask, axis=axis, keepdims=keepdims) + mask = np.all(mask, axis=axis, **kwargs) if np.any(mask): res = _copyto(res, np.nan, mask) warnings.warn("All-NaN axis encountered", RuntimeWarning) return res -def nanmax(a, axis=None, out=None, keepdims=False): +def nanmax(a, axis=None, out=None, keepdims=np._NoValue): """ Return the maximum of an array or maximum along an axis, ignoring any NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is @@ -262,9 +271,14 @@ def nanmax(a, axis=None, out=None, keepdims=False): .. versionadded:: 1.8.0 keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the original `a`. + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. .. versionadded:: 1.8.0 @@ -319,20 +333,23 @@ def nanmax(a, axis=None, out=None, keepdims=False): inf """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims if not isinstance(a, np.ndarray) or type(a) is np.ndarray: # Fast, but not safe for subclasses of ndarray - res = np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims) + res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) if np.isnan(res).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning) else: # Slow, but safe for subclasses of ndarray a, mask = _replace_nan(a, -np.inf) - res = np.amax(a, axis=axis, out=out, keepdims=keepdims) + res = np.amax(a, axis=axis, out=out, **kwargs) if mask is None: return res # Check for all-NaN axis - mask = np.all(mask, axis=axis, keepdims=keepdims) + mask = np.all(mask, axis=axis, **kwargs) if np.any(mask): res = _copyto(res, np.nan, mask) warnings.warn("All-NaN axis encountered", RuntimeWarning) @@ -428,7 +445,7 @@ def nanargmax(a, axis=None): return res -def nansum(a, axis=None, dtype=None, out=None, keepdims=0): +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero. @@ -462,9 +479,15 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=0): .. versionadded:: 1.8.0 keepdims : bool, optional - If True, the axes which are reduced are left in the result as - dimensions with size one. With this option, the result will - broadcast correctly against the original `arr`. 
+ If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. .. versionadded:: 1.8.0 @@ -513,7 +536,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=0): return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) -def nanprod(a, axis=None, dtype=None, out=None, keepdims=0): +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of array elements over a given axis treating Not a Numbers (NaNs) as zero. @@ -583,7 +606,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=0): return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) -def nanmean(a, axis=None, dtype=None, out=None, keepdims=False): +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Compute the arithmetic mean along the specified axis, ignoring NaNs. @@ -613,9 +636,14 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=False): expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the - result as dimensions with size one. With this option, the result - will broadcast correctly against the original `arr`. + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. Returns ------- @@ -727,6 +755,7 @@ def _nanmedian(a, axis=None, out=None, overwrite_input=False): out[...] = result return result + def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): """ sort + indexing median, faster for small medians along multiple @@ -743,7 +772,8 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): return out return m.filled(np.nan) -def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): + +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): """ Compute the median along the specified axis, while ignoring NaNs. @@ -772,9 +802,15 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional - If this is set to True, the axes which are reduced are left in - the result as dimensions with size one. With this option, the - result will broadcast correctly against the original `arr`. + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. 
Returns ------- @@ -829,14 +865,14 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False): r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out, overwrite_input=overwrite_input) - if keepdims: + if keepdims and keepdims is not np._NoValue: return r.reshape(k) else: return r def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): + interpolation='linear', keepdims=np._NoValue): """ Compute the qth percentile of the data along the specified axis, while ignoring nan values. @@ -883,9 +919,21 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keepdims : bool, optional +<<<<<<< 35b5f5be1ffffada84c8be207e7b8b196a58f786 If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. +======= + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. +>>>>>>> BUG: many functions silently drop `keepdims` kwarg Returns ------- @@ -893,7 +941,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are - the axes that remain after the reduction of `a`. If the input + the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is @@ -954,7 +1002,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) - if keepdims: + if keepdims and keepdims is not np._NoValue: if q.ndim == 0: return r.reshape(k) else: @@ -964,7 +1012,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False, - interpolation='linear', keepdims=False): + interpolation='linear'): """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce @@ -981,7 +1029,7 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False, # Move that axis to the beginning to match percentile's # convention. if q.ndim != 0: - result = np.rollaxis(result, axis) + result = np.rollaxis(result, axis) if out is not None: out[...] = result @@ -1020,7 +1068,7 @@ def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'): interpolation=interpolation) -def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Compute the variance along the specified axis, while ignoring NaNs. 
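For illustration, the keepdims pass-through documented in the hunks above amounts to the following (a minimal doctest-style sketch, not part of the patch, assuming the post-change semantics):

    >>> import numpy as np
    >>> a = np.array([[1.0, np.nan], [3.0, 4.0]])
    >>> np.nanmedian(a, axis=1).shape                  # default: axis dropped
    (2,)
    >>> np.nanmedian(a, axis=1, keepdims=True).shape   # axis kept, size one
    (2, 1)

The result with ``keepdims=True`` broadcasts against the original ``a``, which is exactly what the docstrings above promise.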
@@ -1056,7 +1104,8 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
     keepdims : bool, optional
         If this is set to True, the axes which are reduced are left
         in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
+        the result will broadcast correctly against the original `a`.
+

     Returns
     -------
@@ -1095,6 +1144,9 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
     below).  Specifying a higher-accuracy accumulator using the ``dtype``
     keyword can alleviate this issue.

+    For this function to work on sub-classes of ndarray, they must define
+    `sum` with the kwarg `keepdims`.
+
     Examples
     --------
     >>> a = np.array([[1, np.nan], [3, 4]])
@@ -1122,8 +1174,17 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
             warnings.simplefilter('ignore')

         # Compute mean
-        cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=True)
-        avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=True)
+        if type(arr) is np.matrix:
+            _keepdims = np._NoValue
+        else:
+            _keepdims = True
+        # np.matrix is special-cased here for backward compatibility:
+        # these sums need keepdims=True, but matrix now raises an error
+        # when keepdims is passed explicitly. Since matrix drops the kwarg
+        # precisely to force keepdims=True behaviour, omitting it gives
+        # the intended result; this used to work only by serendipity.
+        cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
+        avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
         avg = _divide_by_count(avg, cnt)

         # Compute squared deviation from mean.
@@ -1151,7 +1212,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
     return var


-def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
     """
     Compute the standard deviation along the specified axis, while
     ignoring NaNs.
@@ -1185,10 +1246,16 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
         Means Delta Degrees of Freedom.  The divisor used in calculations
         is ``N - ddof``, where ``N`` represents the number of non-NaN
         elements.  By default `ddof` is zero.
+
     keepdims : bool, optional
         If this is set to True, the axes which are reduced are left
         in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
+        the result will broadcast correctly against the original `a`.
+
+        If this value is anything but the default, it is passed through
+        as-is to the relevant functions of the sub-classes.  If these
+        functions do not have a `keepdims` kwarg, a RuntimeError will
+        be raised.

     Returns
     -------
From 010d17ee8167196ea90c24c57b4ea34badfc11ae Mon Sep 17 00:00:00 2001
From: Thomas A Caswell
Date: Fri, 5 Feb 2016 12:09:29 -0500
Subject: [PATCH 479/496] STY: pep8 only

---
 numpy/core/fromnumeric.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index bb89adbe1761..1c97b7c4ff1c 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -3059,8 +3059,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
                          **kwargs)


-def var(a, axis=None, dtype=None, out=None, ddof=0,
-        keepdims=np._NoValue):
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
     """
     Compute the variance along the specified axis.
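The fromnumeric.py commits that follow all converge on one dispatch idiom: look the method up, fall back if it is missing, and call it outside the ``try`` block. A self-contained sketch of that idiom (``var_like`` and ``_fallback`` are hypothetical stand-ins, not names from the patch; ``_fallback`` plays the role of the ``_methods._*`` implementations):

    import numpy as np

    def _fallback(a, axis=None, **kwargs):
        # Hypothetical stand-in for numpy.core._methods._var and friends.
        return np.asarray(a).var(axis=axis, **kwargs)

    def var_like(a, axis=None, keepdims=np._NoValue):
        kwargs = {}
        if keepdims is not np._NoValue:
            # Forward keepdims only when the caller supplied it, so that
            # sub-classes lacking the kwarg keep working for default calls.
            kwargs['keepdims'] = keepdims
        if type(a) is not np.ndarray:
            try:
                var = a.var        # only the attribute lookup is guarded
            except AttributeError:
                pass               # no such method: use the default path
            else:
                # Calling outside the try block means an AttributeError
                # raised *inside* a.var() is no longer swallowed.
                return var(axis=axis, **kwargs)
        return _fallback(a, axis=axis, **kwargs)

Guarding only the attribute lookup is the point of the ``try``/``except``/``else`` shape: the dispatch still falls back cleanly for objects without a ``var`` method, without hiding bugs inside user implementations.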
@@ -3174,4 +3173,4 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
         pass

     return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                            **kwargs)
+                         **kwargs)
From 08fb5807118423e314f324b9bcafdbaab9316f4d Mon Sep 17 00:00:00 2001
From: Thomas A Caswell
Date: Fri, 5 Feb 2016 12:10:26 -0500
Subject: [PATCH 480/496] MNT: move std, var, mean calls out of try block

Move the calls to user-provided versions of std, var, and mean on
non-mu.ndarray objects out of the `try` block, so that numpy no longer
masks an AttributeError raised during execution of the function itself,
as opposed to one raised because the object lacks the required method.

---
 numpy/core/fromnumeric.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 1c97b7c4ff1c..00b2dbae0eb4 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2934,12 +2934,13 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
     if type(a) is not mu.ndarray:
         try:
             mean = a.mean
-            return mean(axis=axis, dtype=dtype, out=out, **kwargs)
         except AttributeError:
             pass
+        else:
+            return mean(axis=axis, dtype=dtype, out=out, **kwargs)

     return _methods._mean(a, axis=axis, dtype=dtype,
-                            out=out, **kwargs)
+                          out=out, **kwargs)


 def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
@@ -3051,12 +3052,13 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
     if type(a) is not mu.ndarray:
         try:
             std = a.std
-            return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
         except AttributeError:
             pass
+        else:
+            return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)

     return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
-                            **kwargs)
+                         **kwargs)


 def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
@@ -3168,9 +3170,11 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
     if type(a) is not mu.ndarray:
         try:
             var = a.var
-            return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
         except AttributeError:
             pass
+        else:
+            return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)

     return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                          **kwargs)
From f473fe4418a98e2e0edb357f23b74964b09d6a7a Mon Sep 17 00:00:00 2001
From: Thomas A Caswell
Date: Fri, 5 Feb 2016 12:15:50 -0500
Subject: [PATCH 481/496] MNT: reduce number of return statements

In `sum`, `amax`, `amin`, and `prod`, simplify the logic to remove an
identical return statement / call to `_methods._xxx`.  This removes
several elif/else pairs and reduces the number of exit points from the
functions, but makes the code path a bit more complicated to trace.

---
 numpy/core/fromnumeric.py | 50 +++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 00b2dbae0eb4..52a15e30d7d1 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1836,16 +1836,15 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
             out[...] = res
            return out
         return res
-    elif type(a) is not mu.ndarray:
+    if type(a) is not mu.ndarray:
         try:
             sum = a.sum
         except AttributeError:
-            return _methods._sum(a, axis=axis, dtype=dtype,
-                                out=out, **kwargs)
-        return sum(axis=axis, dtype=dtype, out=out, **kwargs)
-    else:
-        return _methods._sum(a, axis=axis, dtype=dtype,
-                            out=out, **kwargs)
+            pass
+        else:
+            return sum(axis=axis, dtype=dtype, out=out, **kwargs)
+    return _methods._sum(a, axis=axis, dtype=dtype,
+                         out=out, **kwargs)


 def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
@@ -2285,16 +2284,17 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
     kwargs = {}
     if keepdims is not np._NoValue:
         kwargs['keepdims'] = keepdims
+
     if type(a) is not mu.ndarray:
         try:
             amax = a.max
         except AttributeError:
-            return _methods._amax(a, axis=axis,
-                                out=out, **kwargs)
-        return amax(axis=axis, out=out, **kwargs)
-    else:
-        return _methods._amax(a, axis=axis,
-                            out=out, **kwargs)
+            pass
+        else:
+            return amax(axis=axis, out=out, **kwargs)
+
+    return _methods._amax(a, axis=axis,
+                          out=out, **kwargs)


 def amin(a, axis=None, out=None, keepdims=np._NoValue):
@@ -2389,12 +2389,12 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
         try:
             amin = a.min
         except AttributeError:
-            return _methods._amin(a, axis=axis,
-                                out=out, **kwargs)
-        return amin(axis=axis, out=out, **kwargs)
-    else:
-        return _methods._amin(a, axis=axis,
-                            out=out, **kwargs)
+            pass
+        else:
+            return amin(axis=axis, out=out, **kwargs)
+
+    return _methods._amin(a, axis=axis,
+                          out=out, **kwargs)


 def alen(a):
@@ -2535,12 +2535,12 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
         try:
             prod = a.prod
         except AttributeError:
-            return _methods._prod(a, axis=axis, dtype=dtype,
-                                out=out, **kwargs)
-        return prod(axis=axis, dtype=dtype, out=out, **kwargs)
-    else:
-        return _methods._prod(a, axis=axis, dtype=dtype,
-                            out=out, **kwargs)
+            pass
+        else:
+            return prod(axis=axis, dtype=dtype, out=out, **kwargs)
+
+    return _methods._prod(a, axis=axis, dtype=dtype,
+                          out=out, **kwargs)


 def cumprod(a, axis=None, dtype=None, out=None):
From e1621a9d2a5acec84028a0dec2cea5c40d5a4067 Mon Sep 17 00:00:00 2001
From: Thomas A Caswell
Date: Fri, 5 Feb 2016 13:37:04 -0500
Subject: [PATCH 482/496] DOC: add release notes

---
 doc/release/1.11.0-notes.rst |  1 +
 doc/release/1.12.0-notes.rst | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index e95225a7c0d5..aa11cdf0789f 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -142,6 +142,7 @@ FutureWarning to changed behavior
 due to a bug, sometimes no warning was raised and the dimensions
 were already preserved.

+
 C API
 ~~~~~

diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
index d093e0542c86..ce606e5b5366 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/release/1.12.0-notes.rst
@@ -41,6 +41,30 @@ XXX
 the two coincide. Previous behavior of 'lower' + 0.5 is fixed.

+``keepdims`` kwarg is passed through to user-class methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+numpy functions that take a ``keepdims`` kwarg now pass the value
+through to the corresponding methods on ndarray sub-classes. Previously the
+``keepdims`` keyword would be silently dropped. These functions now have
+the following behavior:
+
+1. If the user does not provide ``keepdims``, no keyword is passed to the underlying
+   method.
+2.
Any user-provided value of ``keepdims`` is passed through as a keyword + argument to the method. + +This will raise in the case where the method does not support a +``keepdims`` kwarg and the user explicitly passes in ``keepdims``. + + +The following functions are changed: ``sum``, ``product``, +``sometrue``, ``alltrue``, ``any``, ``all``, ``amax``, ``amin``, +``prod``, ``mean``, ``std``, ``var``, ``nanmin``, ``nanmax``, +``nansum``, ``nanprod``, ``nanmean``, ``nanmedian``, ``nanvar``, +``nanstd`` + + DeprecationWarning to error ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -73,4 +97,3 @@ Changes Deprecations ============ - From 5c616fba2949714b2740f9d299c1f25de3554784 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Mon, 8 Feb 2016 01:42:12 -0500 Subject: [PATCH 483/496] TST: Bump `virtualenv` to 14.0.6. --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 540d05c56f96..040d7362e9e4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -80,7 +80,9 @@ before_install: # # Some change in virtualenv 14.0.5 caused `test_f2py` to fail. So, we have # pinned `virtualenv` to the last known working version to avoid this failure. - - pip install -U 'virtualenv==14.0.4' + # Appears we had some issues with certificates on Travis. It looks like + # bumping to 14.0.6 will help. + - pip install -U 'virtualenv==14.0.6' - virtualenv --python=python venv - source venv/bin/activate - python -V From e3bca2cf026ae16926ef58d71b33afa2981c33c6 Mon Sep 17 00:00:00 2001 From: Mads Ohm Larsen Date: Mon, 8 Feb 2016 15:43:53 +0100 Subject: [PATCH 484/496] DOC: Fix fmin examples This fixes #6904 (which has been closed already for some reason) --- numpy/core/code_generators/ufunc_docstrings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 34ac59984c20..e3600406c117 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2393,11 +2393,11 @@ def add_newdoc(place, name, doc): Examples -------- >>> np.fmin([2, 3, 4], [1, 5, 2]) - array([2, 5, 4]) + array([1, 3, 2]) >>> np.fmin(np.eye(2), [0.5, 2]) - array([[ 1. , 2. ], - [ 0.5, 2. ]]) + array([[ 0.5, 0. ], + [ 0. , 1. ]]) >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan]) array([ 0., 0., NaN]) From 594efedc183089ae28bca3044b3aaa8cee3adc65 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Fri, 5 Feb 2016 10:55:28 -0500 Subject: [PATCH 485/496] BUG: Raise a quieter `MaskedArrayFutureWarning` for mask changes. Drop the `__getitem__` warning. In `__setitem__` check to see if the masked array is shared. If it is shared, we know it will propagate upstream in the future. Also, use a more specific warning type instead of using `FutureWarning` so that this can be explicitly ignored. --- numpy/ma/core.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 24d41bcaa704..3dfe0c4e359c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -82,6 +82,9 @@ MaskType = np.bool_ nomask = MaskType(0) +class MaskedArrayFutureWarning(FutureWarning): + pass + def doc_note(initialdoc, note): """ @@ -3105,13 +3108,6 @@ def __getitem__(self, indx): Return the item described by i, as a masked array. """ - # 2016.01.15 -- v1.11.0 - warnings.warn( - "Currently, slicing will try to return a view of the data," + - " but will return a copy of the mask. 
In the future, it will try" + - " to return both as views.", - FutureWarning - ) dout = self.data[indx] # We could directly use ndarray.__getitem__ on self. @@ -3184,13 +3180,17 @@ def __setitem__(self, indx, value): """ # 2016.01.15 -- v1.11.0 - warnings.warn( - "Currently, slicing will try to return a view of the data," + - " but will return a copy of the mask. In the future, it will try" + - " to return both as views. This means that using `__setitem__`" + - " will propagate values back through all masks that are present.", - FutureWarning - ) + self._oldsharedmask = getattr(self, "_oldsharedmask", False) + self._oldsharedmask = self._oldsharedmask or self._sharedmask + if (self._mask is not nomask) and self._oldsharedmask: + warnings.warn( + "Currently, slicing will try to return a view of the data, but" + " will return a copy of the mask. In the future, it will try" + " to return both as views. This means that using" + " `__setitem__` will propagate values back through all masks" + " that are present.", + MaskedArrayFutureWarning + ) if self is masked: raise MaskError('Cannot alter the masked element.') @@ -3234,7 +3234,9 @@ def __setitem__(self, indx, value): elif not self._hardmask: # Unshare the mask if necessary to avoid propagation if not self._isfield: + _oldsharedmask = self._oldsharedmask self.unshare_mask() + self._oldsharedmask = _oldsharedmask _mask = self._mask # Set the data, then the mask _data[indx] = dval @@ -3440,6 +3442,7 @@ def unshare_mask(self): if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False + self._oldsharedmask = False return self sharedmask = property(fget=lambda self: self._sharedmask, From 8e46c6c86c66f422fa7b53714b81f872733c34d9 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Mon, 8 Feb 2016 18:50:36 -0500 Subject: [PATCH 486/496] TST: Drop `FutureWarning` filters from tests where they were added. 
--- numpy/ma/tests/test_core.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index d68e63358e57..b163d3b2642a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2223,7 +2223,6 @@ def test_inplace_addition_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) xm[2] = masked x += t(1) @@ -2238,7 +2237,6 @@ def test_inplace_addition_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2269,7 +2267,6 @@ def test_inplace_subtraction_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2300,7 +2297,6 @@ def test_inplace_multiplication_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2318,7 +2314,6 @@ def test_inplace_floor_division_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) @@ -2335,7 +2330,6 @@ def test_inplace_floor_division_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) @@ -2356,7 +2350,6 @@ def test_inplace_division_scalar_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) @@ -2392,7 +2385,6 @@ def test_inplace_division_array_type(self): for t in self.othertypes: with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always") - warnings.simplefilter('ignore', FutureWarning) (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) From 4576343702fc31ba27f6462597c63c0ce937cf9c Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Sun, 7 Feb 2016 21:50:25 -0500 Subject: [PATCH 487/496] MAINT: Made `iterable` return a boolean --- numpy/lib/function_base.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 788807086234..b8e017eabcee 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -56,24 +56,24 @@ def iterable(y): Returns ------- - b : {0, 1} - Return 1 if the object has an iterator method or is a sequence, - and 0 otherwise. + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. 
Examples -------- >>> np.iterable([1, 2, 3]) - 1 + True >>> np.iterable(2) - 0 + False """ try: iter(y) - except: - return 0 - return 1 + except TypeError: + return False + return True def _hist_optim_numbins_estimator(a, estimator): From b8b55614a3d3c2e3e2c653064719de6906c1be39 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Mon, 1 Feb 2016 16:29:48 -0500 Subject: [PATCH 488/496] Added 'doane' and 'sqrt' estimators to np.histogram in numpy.function_base --- doc/release/1.12.0-notes.rst | 3 + numpy/lib/function_base.py | 246 +++++++++++++++++--------- numpy/lib/nanfunctions.py | 6 - numpy/lib/tests/test_function_base.py | 72 +++++--- 4 files changed, 209 insertions(+), 118 deletions(-) diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst index ce606e5b5366..9167cdfc0d94 100644 --- a/doc/release/1.12.0-notes.rst +++ b/doc/release/1.12.0-notes.rst @@ -91,6 +91,9 @@ Instead of using ``usecol=(n,)`` to read the nth column of a file it is now allowed to use ``usecol=n``. Also the error message is more user friendly when a non-integer is passed as a column index. +Additional estimators for ``histogram`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Added 'doane' and 'sqrt' estimators to ``histogram`` via the ``bins`` argument. Changes ======= diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index b8e017eabcee..521694506bff 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -78,56 +78,100 @@ def iterable(y): def _hist_optim_numbins_estimator(a, estimator): """ - A helper function to be called from histogram to deal with estimating optimal number of bins + A helper function to be called from ``histogram`` to deal with + estimating optimal number of bins. + + A description of the estimators can be found at + https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width estimator: str - If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function - will choose the appropriate estimator and return it's estimate for the optimal - number of bins. + If ``estimator`` is one of ['auto', 'fd', 'scott', 'doane', + 'rice', 'sturges', 'sqrt'], this function will choose the + appropriate estimation method and return the optimal number of + bins it calculates. """ if a.size == 0: return 1 + def sqrt(x): + """ + Square Root Estimator + + Used by many programs for its simplicity. + """ + return np.ceil(np.sqrt(x.size)) + def sturges(x): """ Sturges Estimator - A very simplistic estimator based on the assumption of normality of the data - Poor performance for non-normal data, especially obvious for large X. - Depends only on size of the data. + + A very simplistic estimator based on the assumption of normality + of the data. Poor performance for non-normal data, especially + obvious for large ``x``. Depends only on size of the data. """ return np.ceil(np.log2(x.size)) + 1 def rice(x): """ Rice Estimator - Another simple estimator, with no normality assumption. - It has better performance for large data, but tends to overestimate number of bins. - The number of bins is proportional to the cube root of data size (asymptotically optimal) - Depends only on size of the data + + Another simple estimator, with no normality assumption. It has + better performance for large data, but tends to overestimate + number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). Depends only on size + of the data. 
""" return np.ceil(2 * x.size ** (1.0 / 3)) def scott(x): """ Scott Estimator - The binwidth is proportional to the standard deviation of the data and - inversely proportional to the cube root of data size (asymptotically optimal) + The binwidth is proportional to the standard deviation of the + data and inversely proportional to the cube root of data size + (asymptotically optimal). """ - h = 3.5 * x.std() * x.size ** (-1.0 / 3) + h = (24 * np.pi**0.5 / x.size)**(1.0 / 3) * np.std(x) if h > 0: return np.ceil(x.ptp() / h) return 1 + def doane(x): + """ + Doane's Estimator + + Improved version of Sturges' formula which works better for + non-normal data. See + http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + """ + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return np.ceil(1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 1 + def fd(x): """ - Freedman Diaconis rule using interquartile range (IQR) for binwidth - Considered a variation of the Scott rule with more robustness as the IQR - is less affected by outliers than the standard deviation. However the IQR depends on - fewer points than the sd so it is less accurate, especially for long tailed distributions. + Freedman Diaconis Estimator - If the IQR is 0, we return 1 for the number of bins. - Binwidth is inversely proportional to the cube root of data size (asymptotically optimal) + The interquartile range (IQR) is used for binwidth, making this + variation of the Scott rule more robust, as the IQR is less + affected by outliers than the standard deviation. However, the + IQR depends on fewer points than the standard deviation, so it + is less accurate, especially for long tailed distributions. + + If the IQR is 0, we return 1 for the number of bins. Binwidth is + inversely proportional to the cube root of data size + (asymptotically optimal). """ iqr = np.subtract(*np.percentile(x, [75, 25])) @@ -140,14 +184,15 @@ def fd(x): def auto(x): """ - The FD estimator is usually the most robust method, but it tends to be too small - for small X. The Sturges estimator is quite good for small (<1000) datasets and is - the default in R. - This method gives good off the shelf behaviour. + The FD estimator is usually the most robust method, but it tends + to be too small for small ``x``. The Sturges estimator is quite + good for small (<1000) datasets and is the default in R. This + method gives good off-the-shelf behaviour. """ return max(fd(x), sturges(x)) - optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott, + optimal_numbins_methods = {'sqrt': sqrt, 'sturges': sturges, + 'rice': rice, 'scott': scott, 'doane': doane, 'fd': fd, 'auto': auto} try: estimator_func = optimal_numbins_methods[estimator.lower()] @@ -169,34 +214,45 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a sequence, - it defines the bin edges, including the rightmost edge, allowing - for non-uniform bin widths. 
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines the bin edges, including the rightmost
+        edge, allowing for non-uniform bin widths.

         .. versionadded:: 1.11.0

-        If `bins` is a string from the list below, `histogram` will use the method
-        chosen to calculate the optimal number of bins (see Notes for more detail
-        on the estimators). For visualisation, we suggest using the 'auto' option.
+        If `bins` is a string from the list below, `histogram` will use
+        the method chosen to calculate the optimal number of bins (see
+        Notes for more detail on the estimators). For visualisation, we
+        suggest using the 'auto' option.

         'auto'
-            Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
+            Maximum of the 'sturges' and 'fd' estimators. Provides good
+            all-round performance.

         'fd' (Freedman Diaconis Estimator)
-            Robust (resilient to outliers) estimator that takes into account data
-            variability and data size .
+            Robust (resilient to outliers) estimator that takes into
+            account data variability and data size.
+
+        'doane'
+            An improved version of Sturges' estimator that works better
+            with non-normal datasets.

         'scott'
             Less robust estimator that takes into account data
             variability and data size.

         'rice'
-            Estimator does not take variability into account, only data size.
-            Commonly overestimates number of bins required.
+            Estimator does not take variability into account, only data
+            size. Commonly overestimates number of bins required.

         'sturges'
-            R's default method, only accounts for data size. Only optimal for
-            gaussian data and underestimates number of bins for large non-gaussian datasets.
+            R's default method, only accounts for data size. Only
+            optimal for gaussian data and underestimates number of bins
+            for large non-gaussian datasets.
+
+        'sqrt'
+            Square root (of data size) estimator, used by Excel and
+            other programs for its speed and simplicity.

     range : (float, float), optional
         The lower and upper range of the bins.  If not provided, range
         is simply ``(a.min(), a.max())``.  Values outside the range are
         ignored.
     normed : bool, optional
         This keyword is deprecated in Numpy 1.6 due to confusing/buggy
-        behavior. It will be removed in Numpy 2.0. Use the density keyword
-        instead.
-        If False, the result will contain the number of samples
-        in each bin. If True, the result is the value of the
-        probability *density* function at the bin, normalized such that
-        the *integral* over the range is 1. Note that this latter behavior is
-        known to be buggy with unequal bin widths; use `density` instead.
+        behavior. It will be removed in Numpy 2.0. Use the ``density``
+        keyword instead. If ``False``, the result will contain the
+        number of samples in each bin. If ``True``, the result is the
+        value of the probability *density* function at the bin,
+        normalized such that the *integral* over the range is 1. Note
+        that this latter behavior is known to be buggy with unequal bin
+        widths; use ``density`` instead.
     weights : array_like, optional
-        An array of weights, of the same shape as `a`. Each value in `a`
-        only contributes its associated weight towards the bin count
-        (instead of 1). If `normed` is True, the weights are normalized,
-        so that the integral of the density over the range remains 1
+        An array of weights, of the same shape as `a`.  Each value in
+        `a` only contributes its associated weight towards the bin count
+        (instead of 1).
If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. density : bool, optional - If False, the result will contain the number of samples - in each bin. If True, the result is the value of the + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. - Overrides the `normed` keyword if given. + + Overrides the ``normed`` keyword if given. Returns ------- hist : array - The values of the histogram. See `normed` and `weights` for a + The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. @@ -240,56 +298,77 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, Notes ----- - All but the last (righthand-most) bin is half-open. In other words, if - `bins` is:: + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: [1, 2, 3, 4] - then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the - second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* - 4. + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. .. versionadded:: 1.11.0 - The methods to estimate the optimal number of bins are well found in literature, - and are inspired by the choices R provides for histogram visualisation. - Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal, - which is why it appears in most estimators. - These are simply plug-in methods that give good starting points for number of bins. - In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins + The methods to estimate the optimal number of bins are well found in + literature, and are inspired by the choices R provides for histogram + visualisation. Note that having the number of bins proportional to + :math:`n^{1/3}` is asymptotically optimal, which is why it appears + in most estimators. These are simply plug-in methods that give good + starting points for number of bins. In the equations below, + :math:`h` is the binwidth and :math:`n_h` is the number of bins. 'Auto' (maximum of the 'Sturges' and 'FD' estimators) - A compromise to get a good value. For small datasets the sturges - value will usually be chosen, while larger datasets will usually default to FD. - Avoids the overly conservative behaviour of FD and Sturges for small and - large datasets respectively. Switchover point is usually x.size~1000. + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point usually happens when ``x.size`` is around 1000. 'FD' (Freedman Diaconis Estimator) .. math:: h = 2 \\frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too - conservative for small datasets, but is quite good - for large datasets. The IQR is very robust to outliers. 
+ conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. 'Scott' - .. math:: h = \\frac{3.5\\sigma}{n^{1/3}} - The binwidth is proportional to the standard deviation (sd) of the data - and inversely proportional to cube root of a.size. Can be too - conservative for small datasets, but is quite good - for large datasets. The sd is not very robust to outliers. Values - are very similar to the Freedman Diaconis Estimator in the absence of outliers. + .. math:: h = \\sigma \\sqrt[3]{\\frac{24 * \\sqrt{\\pi}}{n}} + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. 'Rice' .. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil - The number of bins is only proportional to cube root of a.size. - It tends to overestimate the number of bins - and it does not take into account data variability. + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. 'Sturges' .. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil - The number of bins is the base2 log of a.size. - This estimator assumes normality of data and is too conservative for larger, - non-normal datasets. This is the default method in R's `hist` method. + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'Doane' + .. math:: n_h = \\left\\lceil 1 + \\log_{2}(n) + + \\log_{2}(1 + \\frac{\\left g_1 \\right}{\\sigma_{g_1})} + \\right\\rceil + + g_1 = mean[(\\frac{x - \\mu}{\\sigma})^3] + + \\sigma_{g_1} = \\sqrt{\\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. + + 'Sqrt' + .. math:: n_h = \\left\\lceil \\sqrt n \\right\\rceil + The simplest and fastest estimator. Only takes into account the + data size. Examples -------- @@ -311,7 +390,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, .. versionadded:: 1.11.0 - Automated Bin Selection Methods example, using 2 peak random data with 2000 points + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 56f0010afedb..b963abb21381 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -919,21 +919,15 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keepdims : bool, optional -<<<<<<< 35b5f5be1ffffada84c8be207e7b8b196a58f786 If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. -======= - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. 
If this is anything but the default value it will be passed through (in the special case of an empty array) to the `mean` function of the underlying array. If the array is a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. ->>>>>>> BUG: many functions silently drop `keepdims` kwarg Returns ------- diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ba24488154b1..00d9f36c8ed9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1280,11 +1280,13 @@ def test_finite_range(self): class TestHistogramOptimBinNums(TestCase): """ - Provide test coverage when using provided estimators for optimal number of bins + Provide test coverage when using provided estimators for optimal number of + bins """ def test_empty(self): - estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + estimator_list = ['fd', 'scott', 'rice', 'sturges', + 'doane', 'sqrt', 'auto'] # check it can deal with empty data for estimator in estimator_list: a, b = histogram([], bins=estimator) @@ -1293,40 +1295,49 @@ def test_empty(self): def test_simple(self): """ - Straightforward testing with a mixture of linspace data (for consistency). - All test values have been precomputed and the values shouldn't change + Straightforward testing with a mixture of linspace data (for + consistency). All test values have been precomputed and the values + shouldn't change """ - # some basic sanity checking, with some fixed data. Checking for the correct number of bins - basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, 'auto': 7}, - 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, 'auto': 10}, - 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, 'auto': 17}} + # Some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, + 'doane': 8, 'sqrt': 8, 'auto': 7}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, + 'doane': 12, 'sqrt': 23, 'auto': 10}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, + 'doane': 17, 'sqrt': 71, 'auto': 17}} for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with (2 peak uniform mixture) + # Create some sort of non uniform data to test with + # (2 peak uniform mixture) x1 = np.linspace(-10, -1, testlen/5 * 2) x2 = np.linspace(1,10, testlen/5 * 3) x = np.hstack((x1, x2)) for estimator, numbins in expectedResults.items(): a, b = np.histogram(x, estimator) - assert_equal(len(a), numbins, - err_msg="For the {0} estimator with datasize of {1} ".format(estimator, testlen)) + assert_equal(len(a), numbins, err_msg="For the {0} estimator " + "with datasize of {1}".format(estimator, testlen)) def test_small(self): """ - Smaller datasets have the potential to cause issues with the data adaptive methods - Especially the FD methods - All bin numbers have been precalculated + Smaller datasets have the potential to cause issues with the data + adaptive methods, especially the FD method. All bin numbers have been + precalculated. 
""" - small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 2, 'sturges': 1}, - 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2}, - 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3}} + small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 2, 'sturges': 1, + 'doane': 1, 'sqrt': 1}, + 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, + 'doane': 1, 'sqrt': 2}, + 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, + 'doane': 3, 'sqrt': 2}} for testlen, expectedResults in small_dat.items(): testdat = np.arange(testlen) for estimator, expbins in expectedResults.items(): a, b = np.histogram(testdat, estimator) - assert_equal(len(a), expbins, - err_msg="For the {0} estimator with datasize of {1} ".format(estimator, testlen)) + assert_equal(len(a), expbins, err_msg="For the {0} estimator " + "with datasize of {1}".format(estimator, testlen)) def test_incorrect_methods(self): """ @@ -1342,26 +1353,29 @@ def test_novariance(self): Primarily for Scott and FD as the SD and IQR are both 0 in this case """ novar_dataset = np.ones(100) - novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 10, 'sturges': 8, 'auto': 8} + novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 10, 'sturges': 8, + 'doane': 1, 'sqrt': 10, 'auto': 8} for estimator, numbins in novar_resultdict.items(): a, b = np.histogram(novar_dataset, estimator) - assert_equal(len(a), numbins, - err_msg="{0} estimator, No Variance test".format(estimator)) + assert_equal(len(a), numbins, err_msg="{0} estimator, " + "No Variance test".format(estimator)) def test_outlier(self): """ - Check the fd and scott with outliers - The fd determines a smaller binwidth since it's less affected by outliers - since the range is so (artificially) large this means more bins - most of which will be empty, but the data of interest usually is unaffected. - The Scott estimator is more affected and returns fewer bins, despite most of - the variance being in one area of the data + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. """ xcenter = np.linspace(-10, 10, 50) outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) - outlier_resultdict = {'fd': 21, 'scott': 5} + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11} for estimator, numbins in outlier_resultdict.items(): a, b = np.histogram(outlier_dataset, estimator) From 92819d88c9bc1722bb7b0386b65073559bd30492 Mon Sep 17 00:00:00 2001 From: Mad Physicist Date: Sat, 13 Feb 2016 00:11:11 -0500 Subject: [PATCH 489/496] DOC: Removed residual merge markup from previous commit --- numpy/lib/nanfunctions.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index 56f0010afedb..b963abb21381 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -919,21 +919,15 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. keepdims : bool, optional -<<<<<<< 35b5f5be1ffffada84c8be207e7b8b196a58f786 If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the original array `a`. -======= - If this is set to True, the axes which are reduced are left - in the result as dimensions with size one. With this option, - the result will broadcast correctly against the original `a`. If this is anything but the default value it will be passed through (in the special case of an empty array) to the `mean` function of the underlying array. If the array is a sub-class and `mean` does not have the kwarg `keepdims` this will raise a RuntimeError. ->>>>>>> BUG: many functions silently drop `keepdims` kwarg Returns ------- From b740a69df969ec5f257af8b8a510d85af813785d Mon Sep 17 00:00:00 2001 From: mmartin Date: Sat, 13 Feb 2016 07:31:20 -0600 Subject: [PATCH 490/496] Change 'pubic' to 'public'. --- numpy/_build_utils/README | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_build_utils/README b/numpy/_build_utils/README index a817e9c5d3c3..6976e0233996 100644 --- a/numpy/_build_utils/README +++ b/numpy/_build_utils/README @@ -2,7 +2,7 @@ WARNING ======= -This directory (numpy/_build_utils) is *not* part of the pubic numpy API, +This directory (numpy/_build_utils) is *not* part of the public numpy API, - it is internal build support for numpy. - it is only present in source distributions or during an in place build - it is *not* installed with the rest of numpy From 8a3bbdea760329acb58f89e774f4c08acbac7e07 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 13 Feb 2016 16:08:05 +0100 Subject: [PATCH 491/496] DOC: update sphinxext to numpydoc 0.6.0 --- doc/sphinxext | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/sphinxext b/doc/sphinxext index 84cc897d266e..ef988a4a4658 160000 --- a/doc/sphinxext +++ b/doc/sphinxext @@ -1 +1 @@ -Subproject commit 84cc897d266e0afc28fc5296edf01afb08005472 +Subproject commit ef988a4a4658c991f4445f6241ab02d74710c6e3 From b9ae5a33b0438b0505d9ab1e2277992af3be78df Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 13 Feb 2016 16:43:37 +0100 Subject: [PATCH 492/496] DOC: fix a number of reST formatting issues in docstrings. --- numpy/fft/fftpack.py | 30 ++++++++++++++++++++++-------- numpy/lib/twodim_base.py | 4 ++-- numpy/random/mtrand/mtrand.pyx | 8 +++++--- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 275be0d77ae8..fe5b76e1ace4 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -119,6 +119,7 @@ def fft(a, n=None, axis=-1, norm=None): used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -169,6 +170,10 @@ def fft(a, n=None, axis=-1, norm=None): 1.14383329e-17 +1.22460635e-16j, -1.64863782e-15 +1.77635684e-15j]) + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + >>> import matplotlib.pyplot as plt >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) @@ -177,10 +182,6 @@ def fft(a, n=None, axis=-1, norm=None): [, ] >>> plt.show() - In this example, real input has an FFT which is Hermitian, i.e., symmetric - in the real part and anti-symmetric in the imaginary part, as described in - the `numpy.fft` documentation. - """ a = asarray(a).astype(complex, copy=False) @@ -229,6 +230,7 @@ def ifft(a, n=None, axis=-1, norm=None): axis is used. norm : {None, "ortho"}, optional .. 
versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -306,6 +308,7 @@ def rfft(a, n=None, axis=-1, norm=None): used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -400,6 +403,7 @@ def irfft(a, n=None, axis=-1, norm=None): axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -478,6 +482,7 @@ def hfft(a, n=None, axis=-1, norm=None): axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -555,6 +560,7 @@ def ihfft(a, n=None, axis=-1, norm=None): axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -638,8 +644,8 @@ def fftn(a, s=None, axes=None, norm=None): Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). - This corresponds to `n` for `fft(x, n)`. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified @@ -651,6 +657,7 @@ def fftn(a, s=None, axes=None, norm=None): performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -756,6 +763,7 @@ def ifftn(a, s=None, axes=None, norm=None): axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -831,8 +839,8 @@ def fft2(a, s=None, axes=(-2, -1), norm=None): Input array, can be complex s : sequence of ints, optional Shape (length of each transformed axis) of the output - (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). - This corresponds to `n` for `fft(x, n)`. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. Along each axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified @@ -844,6 +852,7 @@ def fft2(a, s=None, axes=(-2, -1), norm=None): that a one-dimensional FFT is performed. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -940,6 +949,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None): that a one-dimensional FFT is performed. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -1019,6 +1029,7 @@ def rfftn(a, s=None, axes=None, norm=None): axes are used, or all axes if `s` is also not specified. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -1096,6 +1107,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None): Axes over which to compute the FFT. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -1153,6 +1165,7 @@ def irfftn(a, s=None, axes=None, norm=None): axis is performed multiple times. 
norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns @@ -1226,6 +1239,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None): Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 + Normalization mode (see `numpy.fft`). Default is None. Returns diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index b2f350bb74b9..6728ab7ecb07 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -649,7 +649,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): >>> import matplotlib as mpl >>> import matplotlib.pyplot as plt - Construct a 2D-histogram with variable bin width. First define the bin + Construct a 2-D histogram with variable bin width. First define the bin edges: >>> xedges = [0, 1, 1.5, 3, 5] @@ -676,7 +676,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): >>> ax = fig.add_subplot(131) >>> ax.set_title('imshow: equidistant') >>> im = plt.imshow(H, interpolation='nearest', origin='low', - extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) pcolormesh can display exact bin edges: diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index e5998c001224..b168bf79c6dd 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -4299,11 +4299,13 @@ cdef class RandomState: the probability density function: >>> import matplotlib.pyplot as plt - >>> import scipy.special as sps - Truncate s values at 50 so plot is interesting + >>> from scipy import special + + Truncate s values at 50 so plot is interesting: + >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True) >>> x = np.arange(1., 50.) - >>> y = x**(-a)/sps.zetac(a) + >>> y = x**(-a) / special.zetac(a) >>> plt.plot(x, y/max(y), linewidth=2, color='r') >>> plt.show() From 06169f1ccefed7c869c2ba021c7df1428457893c Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 13 Feb 2016 17:41:58 +0100 Subject: [PATCH 493/496] DOC: fix up invalid LaTeX in histogram docstring. --- numpy/lib/function_base.py | 44 +++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 521694506bff..06d1ee4a7c7e 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -150,7 +150,7 @@ def doane(x): if sigma > 0: # These three operations add up to # g1 = np.mean(((x - np.mean(x)) / sigma)**3) - # but use only one temp array instead of three + # but use only one temp array instead of three temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) @@ -205,7 +205,7 @@ def auto(x): def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): - """ + r""" Compute the histogram of a set of data. Parameters @@ -325,14 +325,16 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, Switchover point usually happens when ``x.size`` is around 1000. 'FD' (Freedman Diaconis Estimator) - .. math:: h = 2 \\frac{IQR}{n^{1/3}} + .. math:: h = 2 \frac{IQR}{n^{1/3}} + The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' - .. math:: h = \\sigma \\sqrt[3]{\\frac{24 * \\sqrt{\\pi}}{n}} + .. 
math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
+
             The binwidth is proportional to the standard deviation of the
             data and inversely proportional to cube root of ``x.size``. Can
             be too conservative for small datasets, but is quite good for
@@ -341,32 +343,34 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
             estimator in the absence of outliers.
 
         'Rice'
-            .. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
+            .. math:: n_h = \left\lceil 2n^{1/3} \right\rceil
+
             The number of bins is only proportional to cube root of
             ``a.size``. It tends to overestimate the number of bins and it
             does not take into account data variability.
 
         'Sturges'
-            .. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
+            .. math:: n_h = \left\lceil \log _{2}n+1 \right\rceil
+
             The number of bins is the base 2 log of ``a.size``. This
             estimator assumes normality of data and is too conservative for
             larger, non-normal datasets. This is the default method in R's
             ``hist`` method.
 
         'Doane'
-            .. math:: n_h = \\left\\lceil 1 + \\log_{2}(n) +
-                            \\log_{2}(1 + \\frac{\\left g_1 \\right}{\\sigma_{g_1})}
-                            \\right\\rceil
+            .. math:: n_h = \left\lceil 1 + \log_{2}(n) +
+                            \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
+                            \right\rceil
 
-            g_1 = mean[(\\frac{x - \\mu}{\\sigma})^3]
+            g_1 = mean[(\frac{x - \mu}{\sigma})^3]
 
-            \\sigma_{g_1} = \\sqrt{\\frac{6(n - 2)}{(n + 1)(n + 3)}}
+            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
 
             An improved version of Sturges' formula that produces better
             estimates for non-normal datasets.
 
         'Sqrt'
-            .. math:: n_h = \\left\\lceil \\sqrt n \\right\\rceil
+            .. math:: n_h = \left\lceil \sqrt n \right\rceil
 
             The simplest and fastest estimator. Only takes into account the
             data size.
@@ -395,12 +399,13 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
 
     >>> import matplotlib.pyplot as plt
     >>> rng = np.random.RandomState(10)  # deterministic random data
-    >>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
-    >>> plt.hist(a, bins = 'auto')  # plt.hist passes it's arguments to np.histogram
+    >>> a = np.hstack((rng.normal(size=1000),
+    ...                rng.normal(loc=5, scale=2, size=1000)))
+    >>> plt.hist(a, bins='auto')  # plt.hist passes its arguments to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    >>> plt.show()
 
-    """
+    """
     a = asarray(a)
     if weights is not None:
         weights = asarray(weights)
@@ -1217,7 +1222,7 @@ def gradient(f, *varargs, **kwargs):
         single scalar specifies sample distance for all dimensions.
         if `axis` is given, the number of varargs must equal the number of axes.
     edge_order : {1, 2}, optional
-        Gradient is calculated using N\ :sup:`th` order accurate differences
+        Gradient is calculated using N-th order accurate differences
         at the boundaries. Default: 1.
 
         .. versionadded:: 1.9.1
@@ -1425,7 +1430,6 @@ def diff(a, n=1, axis=-1):
     diff : ndarray
         The n-th differences. The shape of the output is the same as `a`
         except along `axis` where the dimension is smaller by `n`.
-.
 
     See Also
     --------
@@ -2435,6 +2439,7 @@ def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
         for backwards compatibility with previous versions of this
         function.  These arguments had no effect on the return values of the
         function and can be safely ignored in this and previous versions of numpy.
+
     """
     if bias is not np._NoValue or ddof is not np._NoValue:
         # 2015-03-15, 1.10
@@ -2608,7 +2613,6 @@ def bartlett(M):
     .. [5] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
            "Numerical Recipes", Cambridge University Press, 1986, page 429.
 
-
 Examples
 --------
 >>> np.bartlett(12)
@@ -3482,7 +3486,7 @@ def percentile(a, q, axis=None, out=None,
         have the same shape and buffer length as the expected output,
         but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-        If True, then allow use of memory of input array `a` 
+        If True, then allow use of memory of input array `a` for
         calculations. The input array will be modified by the call to
         `percentile`. This will save memory when you do not need to
         preserve the contents of the input array. In this case you
@@ -3517,7 +3521,7 @@ def percentile(a, q, axis=None, out=None,
         If `q` is a single percentile and `axis=None`, then the result
         is a scalar. If multiple percentiles are given, first axis of
         the result corresponds to the percentiles. The other axes are
-        the axes that remain after the reduction of `a`. If the input 
+        the axes that remain after the reduction of `a`. If the input
         contains integers or floats smaller than ``float64``, the output
         data-type is ``float64``. Otherwise, the output data-type is the
         same as that of the input. If `out` is specified, that array is

From f7e64cc699567124114e41ad7f518d6825807cc3 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Thu, 21 Jan 2016 22:23:53 +0100
Subject: [PATCH 494/496] DOC: Add documentation for as_strided

---
 numpy/lib/stride_tricks.py            | 65 +++++++++++++++++++++++++--
 numpy/lib/tests/test_stride_tricks.py | 17 +++++++
 2 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 4c23ab355599..af09fd373182 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -35,8 +35,61 @@ def _maybe_view_as_subclass(original_array, new_array):
     return new_array
 
 
-def as_strided(x, shape=None, strides=None, subok=False):
-    """ Make an ndarray from the given array with the given shape and strides.
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+    """
+    Create a view into the array with the given shape and strides.
+
+    .. warning:: This function has to be used with extreme care, see notes.
+
+    Parameters
+    ----------
+    x : ndarray
+        Array from which to create the new view.
+    shape : sequence of int, optional
+        The shape of the new array. Defaults to ``x.shape``.
+    strides : sequence of int, optional
+        The strides of the new array. Defaults to ``x.strides``.
+    subok : bool, optional
+        .. versionadded:: 1.10
+
+        If True, subclasses are preserved.
+    writeable : bool, optional
+        .. versionadded:: 1.12
+
+        If set to False, the returned array will always be readonly.
+        Otherwise it will be writable if the original array was. It
+        is advisable to set this to False if possible (see Notes).
+
+    Returns
+    -------
+    view : ndarray
+
+    See also
+    --------
+    broadcast_to : broadcast an array to a given shape.
+    reshape : reshape an array.
+
+    Notes
+    -----
+    ``as_strided`` creates a view into the array given the exact strides
+    and shape. This means it manipulates the internal data structure of
+    ndarray and, if done incorrectly, the array elements can point to
+    invalid memory and can corrupt results or crash your program.
+    It is advisable to always use the original ``x.strides`` when
+    calculating new strides to avoid reliance on a contiguous memory
+    layout.
+
+    Furthermore, arrays created with this function often contain self
+    overlapping memory, so that two elements are identical.
+    Vectorized write operations on such arrays will typically be
+    unpredictable. They may even give different results for small, large,
+    or transposed arrays.
+
+    Since writing to these arrays has to be tested and done with great
+    care, you may want to use ``writeable=False`` to avoid accidental write
+    operations.
+
+    For these reasons it is advisable to avoid ``as_strided`` when
+    possible.
     """
     # first convert input to array, possibly keeping subclass
     x = np.array(x, copy=False, subok=subok)
@@ -45,13 +98,19 @@ def as_strided(x, shape=None, strides=None, subok=False):
         interface['shape'] = tuple(shape)
     if strides is not None:
         interface['strides'] = tuple(strides)
+
     array = np.asarray(DummyArray(interface, base=x))
 
     if array.dtype.fields is None and x.dtype.fields is not None:
         # This should only happen if x.dtype is [('', 'Vx')]
         array.dtype = x.dtype
 
-    return _maybe_view_as_subclass(x, array)
+    view = _maybe_view_as_subclass(x, array)
+
+    if view.flags.writeable and not writeable:
+        view.flags.writeable = False
+
+    return view
 
 
 def _broadcast_to(array, shape, subok, readonly):
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 06e659002322..95df135cf8d5 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -317,6 +317,23 @@ def test_as_strided():
     a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
     assert_equal(a.dtype, a_view.dtype)
 
+def test_as_strided_writeable():
+    arr = np.ones(10)
+    view = as_strided(arr, writeable=False)
+    assert_(not view.flags.writeable)
+
+    # Check that writeable also is fine:
+    view = as_strided(arr, writeable=True)
+    assert_(view.flags.writeable)
+    view[...] = 3
+    assert_array_equal(arr, np.full_like(arr, 3))
+
+    # Test that things do not break down for readonly:
+    arr.flags.writeable = False
+    view = as_strided(arr, writeable=False)
+    view = as_strided(arr, writeable=True)
+    assert_(not view.flags.writeable)
+
 
 class VerySimpleSubClass(np.ndarray):
     def __new__(cls, *args, **kwargs):

From c4414c4f1ece81bfb657cffe756efe4c6fa5d668 Mon Sep 17 00:00:00 2001
From: Varun Nayyar
Date: Tue, 22 Sep 2015 19:38:34 +1000
Subject: [PATCH 495/496] ENH: Add support for the range keyword in the
 estimation of the optimal number of bins, with associated tests

---
 numpy/lib/function_base.py            | 29 +++++++++++++++++++++---
 numpy/lib/tests/test_function_base.py | 30 +++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 06d1ee4a7c7e..f2bb7122f9d8 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -76,7 +76,7 @@ def iterable(y):
     return True
 
 
-def _hist_optim_numbins_estimator(a, estimator):
+def _hist_optim_numbins_estimator(a, estimator, data_range=None, data_weights=None):
     """
     A helper function to be called from ``histogram`` to deal with
     estimating optimal number of bins.
 
     A description of the estimators can be found at
     https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width
 
+    a: np.array
+        The data with which to estimate the number of bins
+
     estimator: str
         If ``estimator`` is one of ['auto', 'fd', 'scott', 'doane',
         'rice', 'sturges', 'sqrt'], this function will choose the
         appropriate estimation method and return the optimal number of
         bins it calculates.
+
+    data_range: tuple (min, max)
+        The range to which the data to be binned should be restricted
+
+    data_weights:
+        Weights are not supported; must be left blank or None
     """
+    assert isinstance(estimator, basestring)
+    # private function should not be called otherwise
+
     if a.size == 0:
         return 1
 
+    if data_weights is not None:
+        raise TypeError("Automated estimation of the number of "
+                        "bins is not supported for weighted data")
+
+    if data_range is not None:
+        mn, mx = data_range
+        keep = (a >= mn)
+        keep &= (a <= mx)
+        if not np.logical_and.reduce(keep):
+            a = a[keep]
+
     def sqrt(x):
         """
         Square Root Estimator
 
         Used by many programs for its simplicity.
         """
-        return np.ceil(np.sqrt(x.size))
+        return np.ceil(np.sqrt(x.size)
 
     def sturges(x):
         """
@@ -426,7 +449,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
 
 
     if isinstance(bins, basestring):
-        bins = _hist_optim_numbins_estimator(a, bins)
+        bins = _hist_optim_numbins_estimator(a, bins, range, weights)
         # if `bins` is a string for an automatic method,
         # this will replace it with the number of bins calculated
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 00d9f36c8ed9..a43230ff908e 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1381,6 +1381,36 @@ def test_outlier(self):
         a, b = np.histogram(outlier_dataset, estimator)
         assert_equal(len(a), numbins)
 
+    def test_simple_range(self):
+        """
+        Straightforward testing with a mixture of linspace data (for consistency).
+        Adding in a 3rd mixture that will then be completely ignored.
+        All test values have been precomputed and the values shouldn't change.
+        """
+        # basic sanity check with fixed data: each estimator should return the expected number of bins
+        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, 'auto': 7},
+                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, 'auto': 10},
+                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, 'auto': 17}}
+
+        for testlen, expectedResults in basic_test.items():
+            # create some non-uniform data to test with (two-peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x3 = np.linspace(-100, -50, testlen)
+            x = np.hstack((x1, x2, x3))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator, range=(-20, 20))
+                assert_equal(len(a), numbins,
+                             err_msg="For the {0} estimator with datasize of {1}".format(estimator, testlen))
+
+    def test_simple_weighted(self):
+        """
+        Check that weighted data raises a TypeError
+        """
+        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
+        for estimator in estimator_list:
+            assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
+
 
 class TestHistogramdd(TestCase):

From 473c852d32e016960812b1f0969b12baa9b62b8a Mon Sep 17 00:00:00 2001
From: Varun Nayyar
Date: Sun, 14 Feb 2016 12:24:13 +0800
Subject: [PATCH 496/496] BUG: Change TypeErrors raised by optimbins for
 weighted data to RuntimeWarnings instead.
--- numpy/lib/function_base.py | 7 ++++--- numpy/lib/tests/test_function_base.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index f2bb7122f9d8..0f1fefee8bb5 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -106,8 +106,9 @@ def _hist_optim_numbins_estimator(a, estimator, data_range=None, data_weights=No return 1 if data_weights is not None: - raise TypeError("Automated estimation of the number of " - "bins is not supported for weighted data") + raise RuntimeWarning("Automated estimation of the number of " + "bins is not supported for weighted data. " + "Will treat data as unweighted") if data_range is not None: mn, mx = data_range @@ -122,7 +123,7 @@ def sqrt(x): Used by many programs for its simplicity. """ - return np.ceil(np.sqrt(x.size) + return np.ceil(np.sqrt(x.size)) def sturges(x): """ diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a43230ff908e..1991eeddb9c1 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1409,7 +1409,7 @@ def test_simple_weighted(self): """ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) + assert_raises(RuntimeWarning, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) class TestHistogramdd(TestCase):
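
Patch 494's Notes section boils down to a usage pattern: derive the new strides
from ``x.strides`` rather than hard-coding item sizes, and pass
``writeable=False`` whenever the view is meant to be read-only. A minimal
sliding-window sketch of that pattern, assuming a NumPy build with this series
applied; the array and window size are illustrative, not taken from the
patches themselves:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    a = np.arange(10)
    window = 4
    # Build the window strides from the original a.strides, as the new
    # Notes section advises, rather than hard-coding an item size.
    view = as_strided(a,
                      shape=(a.size - window + 1, window),
                      strides=(a.strides[0], a.strides[0]),
                      writeable=False)
    print(view[0])  # [0 1 2 3]
    print(view[2])  # [2 3 4 5]
    # The rows overlap in memory, so a single write would silently alter
    # several windows at once; with writeable=False it raises instead:
    # view[0, 3] = 99  # ValueError: assignment destination is read-only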
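
Patches 495 and 496 together change how ``np.histogram``'s string bin
estimators interact with ``range`` and ``weights``. A sketch of the resulting
behaviour, again assuming a build with both patches applied; the data and the
printed bin count are illustrative, not the precomputed values from the tests:

    import numpy as np

    # Two clusters inside the range of interest, one far outside it.
    x = np.hstack((np.linspace(-10, -1, 200),
                   np.linspace(1, 10, 300),
                   np.linspace(-100, -50, 500)))

    # Patch 495: the estimator only sees data inside range=(-20, 20), so
    # the outlier cluster at [-100, -50] no longer stretches the bins.
    hist, edges = np.histogram(x, bins='fd', range=(-20, 20))
    print(len(hist), edges[0], edges[-1])  # bin count from restricted data

    # Patch 496: a string estimator combined with weights now raises
    # RuntimeWarning (previously TypeError); histogram does not catch it.
    try:
        np.histogram([1, 2, 3], bins='auto', weights=[1, 2, 3])
    except RuntimeWarning as exc:
        print(exc)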