diff --git a/sklearn/neural_network/multilayer_perceptron.py b/sklearn/neural_network/multilayer_perceptron.py
index de559dc67e18f..19bd4ab5cd0bc 100644
--- a/sklearn/neural_network/multilayer_perceptron.py
+++ b/sklearn/neural_network/multilayer_perceptron.py
@@ -460,7 +460,7 @@ def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
         optimal_parameters, self.loss_, d = fmin_l_bfgs_b(
             x0=packed_coef_inter,
             func=self._loss_grad_lbfgs,
-            maxfun=self.max_iter,
+            maxiter=self.max_iter,
             iprint=iprint,
             pgtol=self.tol,
             args=(X, y, activations, deltas, coef_grads, intercept_grads))
diff --git a/sklearn/neural_network/tests/test_mlp.py b/sklearn/neural_network/tests/test_mlp.py
index b0d5ab587a087..4b1909606b4ac 100644
--- a/sklearn/neural_network/tests/test_mlp.py
+++ b/sklearn/neural_network/tests/test_mlp.py
@@ -633,3 +633,16 @@ def test_n_iter_no_change_inf():
 
     # validate _update_no_improvement_count() was always triggered
     assert_equal(clf._no_improvement_count, clf.n_iter_ - 1)
+
+
+@ignore_warnings(category=ConvergenceWarning)
+def test_lbfgs_max_iter():
+    # test the lbfgs solver's iteration cap using the binary digits data set:
+    # the fitting process should go to max_iter iterations
+    X = X_digits_binary[:100]
+    y = y_digits_binary[:100]
+
+    max_iter = 3000
+    clf = MLPClassifier(max_iter=max_iter, solver='lbfgs')
+    clf.fit(X, y)
+    assert clf.n_iter_ == max_iter