diff --git a/maint_tools/test_docstrings.py b/maint_tools/test_docstrings.py
index e1a4dd7ca5dd6..687184d6f6500 100644
--- a/maint_tools/test_docstrings.py
+++ b/maint_tools/test_docstrings.py
@@ -34,7 +34,6 @@
     "QuadraticDiscriminantAnalysis",
     "RandomizedSearchCV",
     "RobustScaler",
-    "SGDOneClassSVM",
     "SGDRegressor",
     "SelfTrainingClassifier",
     "SparseRandomProjection",
diff --git a/sklearn/linear_model/_stochastic_gradient.py b/sklearn/linear_model/_stochastic_gradient.py
index b429d8227553a..dd86c9536abd4 100644
--- a/sklearn/linear_model/_stochastic_gradient.py
+++ b/sklearn/linear_model/_stochastic_gradient.py
@@ -1975,8 +1975,8 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         Whether or not the training data should be shuffled after each epoch.
         Defaults to True.
 
-    verbose : integer, optional
-        The verbosity level
+    verbose : int, optional
+        The verbosity level.
 
     random_state : int, RandomState instance or None, optional (default=None)
         The seed of the pseudo random number generator to use when shuffling
@@ -1985,7 +1985,7 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         generator; If None, the random number generator is the RandomState
         instance used by `np.random`.
 
-    learning_rate : string, optional
+    learning_rate : str, optional
         The learning rate schedule to use with `fit`. (If using `partial_fit`,
         learning rate must be controlled directly).
 
@@ -2059,6 +2059,17 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
 
         .. versionadded:: 1.0
 
+    See Also
+    --------
+    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
+
+    Notes
+    -----
+    This estimator has a linear complexity in the number of training samples
+    and is thus better suited than the `sklearn.svm.OneClassSVM`
+    implementation for datasets with a large number of training samples (say
+    > 10,000).
+
     Examples
     --------
     >>> import numpy as np
@@ -2070,17 +2081,6 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
     >>> print(clf.predict([[4, 4]]))
     [1]
-
-    See also
-    --------
-    sklearn.svm.OneClassSVM
-
-    Notes
-    -----
-    This estimator has a linear complexity in the number of training samples
-    and is thus better suited than the `sklearn.svm.OneClassSVM`
-    implementation for datasets with a large number of training samples (say
-    > 10,000).
     """
 
     loss_functions = {"hinge": (Hinge, 1.0)}
 
@@ -2295,6 +2295,8 @@ def partial_fit(self, X, y=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Subset of the training data.
+        y : Ignored
+            Not used, present for API consistency by convention.
 
         sample_weight : array-like, shape (n_samples,), optional
             Weights applied to individual samples.
@@ -2302,7 +2304,8 @@ def partial_fit(self, X, y=None, sample_weight=None):
 
         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """
 
         alpha = self.nu / 2
@@ -2383,6 +2386,8 @@ def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Training data.
+        y : Ignored
+            Not used, present for API consistency by convention.
 
         coef_init : array, shape (n_classes, n_features)
             The initial coefficients to warm-start the optimization.
@@ -2398,7 +2403,8 @@ def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
 
         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """
 
         alpha = self.nu / 2
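
Not part of the patch, just a brief sketch of the API that the new `y : Ignored` entries document: both `fit` and `partial_fit` accept `y` purely for API consistency, so it can be omitted, and `partial_fit` supports the incremental training that makes the linear-complexity note in the docstring relevant for large datasets. The data, chunk count, and `nu` value below are illustrative assumptions, not taken from the scikit-learn docs.

import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(42)
X = rng.randn(10_000, 2)                        # illustrative "large" training set

clf = SGDOneClassSVM(nu=0.05, random_state=42)  # nu chosen arbitrarily for the sketch
for chunk in np.array_split(X, 20):             # stream the data in chunks
    clf.partial_fit(chunk)                      # y is ignored, so it is simply omitted

preds = clf.predict(X[:5])                      # +1 for inliers, -1 for outliers
scores = clf.score_samples(X[:5])               # higher score means more "normal"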