From ed7bda9f745cc53aec808739023c895194b25418 Mon Sep 17 00:00:00 2001 From: John Kirkham Date: Thu, 21 Feb 2019 20:50:11 -0500 Subject: [PATCH] MNT: Use GEMV in enet_coordinate_descent Make use of the BLAS GEMV operation in `enet_coordinate_descent` instead of using DOT in a `for`-loop. Go ahead and use GEMV with both non-transposed and transposed arrays. Previously we have had issues with the vendored BLAS and GEMV on transposed arrays, but this attempts to use GEMV on transposed arrays anyway. Hopefully we can make them work as well. As GEMV and DOT in a `for`-loop are both semantically equivalent, this is a reasonable change to make, though GEMV likely uses a multithreaded approach unlike our application of DOT in a serial loop here. In BLAS implementations that do use threads for DOT, we can expect that GEMV will make better usage of those threads and avoid unnecessary setup and teardown costs that DOT in a `for`-loop is likely to incur (possibly in each iteration of the `for`-loop). --- sklearn/linear_model/cd_fast.pyx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/cd_fast.pyx b/sklearn/linear_model/cd_fast.pyx index c512c02f6576a..bc39754cb2368 100644 --- a/sklearn/linear_model/cd_fast.pyx +++ b/sklearn/linear_model/cd_fast.pyx @@ -211,9 +211,11 @@ def enet_coordinate_descent(floating[::1] w, # stopping criterion # XtA = np.dot(X.T, R) - beta * w - for i in range(n_features): - XtA[i] = (_dot(n_samples, &X[0, i], 1, &R[0], 1) - - beta * w[i]) + _copy(n_features, &w[0], 1, &XtA[0], 1) + _gemv(ColMajor, Trans, + n_samples, n_features, 1.0, &X[0, 0], n_samples, + &R[0], 1, + -beta, &XtA[0], 1) if positive: dual_norm_XtA = max(n_features, &XtA[0])