Thanks to visit codestin.com
Credit goes to github.com

Skip to content

[MRG+1] raising warning for class_weight in fit method #3931

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Dec 12, 2014
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion sklearn/linear_model/stochastic_gradient.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

import numpy as np
import scipy.sparse as sp
import warnings

from abc import ABCMeta, abstractmethod

Expand Down Expand Up @@ -543,12 +544,19 @@ def fit(self, X, y, coef_init=None, intercept_init=None,

sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
                contructor) if class_weight is specified

Returns
-------
self : returns an instance of self.
"""
if class_weight is not None:
warnings.warn("You are trying to set class_weight through the fit "
"method, which will be deprecated in version "
"v0.17 of scikit-learn. Pass the class_weight into "
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't this have been 0.18?

"the constructor instead.", DeprecationWarning)
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
Expand Down
32 changes: 32 additions & 0 deletions sklearn/linear_model/tests/test_sgd.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_warns_message

from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
Expand Down Expand Up @@ -597,6 +598,37 @@ def test_wrong_class_weight_format(self):
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)

def test_class_weight_warning(self):
"""Tests that class_weight passed through fit raises warning.
This test should be removed after deprecating support for this"""

clf = self.factory()
warning_message = ("You are trying to set class_weight through the "
"fit "
"method, which will be deprecated in version "
"v0.17 of scikit-learn. Pass the class_weight into "
"the constructor instead.")
assert_warns_message(DeprecationWarning,
warning_message,
clf.fit, X4, Y4,
class_weight=1)

def test_weights_multiplied(self):
"""Tests that class_weight and sample_weight are multiplicative"""
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]

clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)

clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)

assert_array_equal(clf1.coef_, clf2.coef_)

def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
Expand Down