1 change: 0 additions & 1 deletion maint_tools/test_docstrings.py
@@ -34,7 +34,6 @@
     "QuadraticDiscriminantAnalysis",
     "RandomizedSearchCV",
     "RobustScaler",
-    "SGDOneClassSVM",
     "SGDRegressor",
     "SelfTrainingClassifier",
     "SparseRandomProjection",
38 changes: 22 additions & 16 deletions sklearn/linear_model/_stochastic_gradient.py
@@ -1975,8 +1975,8 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         Whether or not the training data should be shuffled after each epoch.
         Defaults to True.

-    verbose : integer, optional
-        The verbosity level
+    verbose : int, optional
+        The verbosity level.

     random_state : int, RandomState instance or None, optional (default=None)
         The seed of the pseudo random number generator to use when shuffling
@@ -1985,7 +1985,7 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):
         generator; If None, the random number generator is the RandomState
         instance used by `np.random`.

-    learning_rate : string, optional
+    learning_rate : str, optional
         The learning rate schedule to use with `fit`. (If using `partial_fit`,
         learning rate must be controlled directly).

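The `learning_rate` entry above is a constructor parameter, and its schedule only applies to `fit`; with `partial_fit` the step size has to be chosen directly. A small illustrative sketch, with values assumed rather than taken from this diff:

from sklearn.linear_model import SGDOneClassSVM

# A constant step size with an explicit eta0 is one way to keep the learning
# rate fixed when the estimator is driven through partial_fit.
clf = SGDOneClassSVM(
    nu=0.1,
    shuffle=True,
    learning_rate="constant",
    eta0=0.01,
    random_state=42,
)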
@@ -2059,6 +2059,17 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):

         .. versionadded:: 1.0

+    See Also
+    --------
+    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
+
+    Notes
+    -----
+    This estimator has a linear complexity in the number of training samples
+    and is thus better suited than the `sklearn.svm.OneClassSVM`
+    implementation for datasets with a large number of training samples (say
+    > 10,000).
+
     Examples
     --------
     >>> import numpy as np
@@ -2070,17 +2081,6 @@ class SGDOneClassSVM(BaseSGD, OutlierMixin):

     >>> print(clf.predict([[4, 4]]))
     [1]
-
-    See also
-    --------
-    sklearn.svm.OneClassSVM
-
-    Notes
-    -----
-    This estimator has a linear complexity in the number of training samples
-    and is thus better suited than the `sklearn.svm.OneClassSVM`
-    implementation for datasets with a large number of training samples (say
-    > 10,000).
     """

     loss_functions = {"hinge": (Hinge, 1.0)}
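
The Notes block moved above the Examples motivates this estimator for large sample counts, since its cost grows linearly with the number of training samples. A hedged sketch of that use case; the synthetic data and hyperparameters are illustrative only:

import numpy as np
from sklearn.linear_model import SGDOneClassSVM

# A sample count well past 10,000, the regime where the linear-time SGD
# solver is meant to pay off over the kernelized sklearn.svm.OneClassSVM.
rng = np.random.RandomState(0)
X = rng.randn(100_000, 10)

clf = SGDOneClassSVM(nu=0.05, random_state=0)
clf.fit(X)

# predict() returns +1 for inliers and -1 for outliers.
print(clf.predict(X[:5]))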
@@ -2295,14 +2295,17 @@ def partial_fit(self, X, y=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Subset of the training data.
+        y : Ignored
+            Not used, present for API consistency by convention.

         sample_weight : array-like, shape (n_samples,), optional
             Weights applied to individual samples.
             If not provided, uniform weights are assumed.

         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """

         alpha = self.nu / 2
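
Because `y` is accepted only for API consistency, incremental training passes batches of X alone. A minimal sketch under that assumption; the batch size and data are made up for illustration:

import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(0)
clf = SGDOneClassSVM(nu=0.1, random_state=0)

# Feed one mini-batch at a time; y is never supplied.
for _ in range(20):
    X_batch = rng.randn(512, 10)
    clf.partial_fit(X_batch)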
@@ -2383,6 +2386,8 @@ def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
         ----------
         X : {array-like, sparse matrix}, shape (n_samples, n_features)
             Training data.
+        y : Ignored
+            Not used, present for API consistency by convention.

         coef_init : array, shape (n_classes, n_features)
             The initial coefficients to warm-start the optimization.
@@ -2398,7 +2403,8 @@

         Returns
         -------
-        self : returns an instance of self.
+        self : object
+            Returns a fitted instance of self.
         """

         alpha = self.nu / 2
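
`coef_init` and `offset_init` let `fit` warm-start the optimization from an earlier solution. A hedged sketch of that hand-off; the two datasets and their shapes are assumed, not taken from this diff:

import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(0)
X_old = rng.randn(1_000, 10)
X_new = rng.randn(1_000, 10)

clf = SGDOneClassSVM(nu=0.1, random_state=0).fit(X_old)

# Re-fit on fresh data, seeding the solver with the previous coefficients
# and offset instead of starting from zero.
clf.fit(X_new, coef_init=clf.coef_, offset_init=clf.offset_)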