diff --git a/sklearn/cluster/_kmeans.py b/sklearn/cluster/_kmeans.py
index 21a604bed3eb5..8d24ed497aef3 100644
--- a/sklearn/cluster/_kmeans.py
+++ b/sklearn/cluster/_kmeans.py
@@ -834,6 +834,9 @@ class KMeans(TransformerMixin, ClusterMixin, BaseEstimator):
         For now "auto" (kept for backward compatibiliy) chooses "elkan" but
         it might change in the future for a better heuristic.
 
+        .. versionchanged:: 0.18
+            Added Elkan algorithm
+
     Attributes
     ----------
     cluster_centers_ : ndarray of shape (n_clusters, n_features)
diff --git a/sklearn/ensemble/_voting.py b/sklearn/ensemble/_voting.py
index b044cb68e5151..0ac42407f5998 100644
--- a/sklearn/ensemble/_voting.py
+++ b/sklearn/ensemble/_voting.py
@@ -141,6 +141,8 @@ class VotingClassifier(ClassifierMixin, _BaseVoting):
         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
         for more details.
 
+        .. versionadded:: 0.18
+
     flatten_transform : bool, default=True
         Affects shape of transform output only when voting='soft'
         If voting='soft' and flatten_transform=True, transform method returns
@@ -232,6 +234,8 @@ def fit(self, X, y, sample_weight=None):
             Note that this is supported only if all underlying estimators
             support sample weights.
 
+            .. versionadded:: 0.18
+
         Returns
         -------
         self : object
diff --git a/sklearn/feature_selection/_rfe.py b/sklearn/feature_selection/_rfe.py
index aedcd94943bc4..8dc7aecb7dc3e 100644
--- a/sklearn/feature_selection/_rfe.py
+++ b/sklearn/feature_selection/_rfe.py
@@ -410,6 +410,8 @@ class RFECV(RFE):
         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
         for more details.
 
+        .. versionadded:: 0.18
+
     Attributes
     ----------
     n_features_ : int
diff --git a/sklearn/feature_selection/_univariate_selection.py b/sklearn/feature_selection/_univariate_selection.py
index 7e873b3a2b65c..6911830099844 100644
--- a/sklearn/feature_selection/_univariate_selection.py
+++ b/sklearn/feature_selection/_univariate_selection.py
@@ -384,6 +384,8 @@ class SelectPercentile(_BaseFilter):
         Default is f_classif (see below "See also"). The default function only
         works with classification tasks.
 
+        .. versionadded:: 0.18
+
     percentile : int, optional, default=10
         Percent of features to keep.
 
@@ -467,6 +469,8 @@ class SelectKBest(_BaseFilter):
         Default is f_classif (see below "See also"). The default function only
         works with classification tasks.
 
+        .. versionadded:: 0.18
+
     k : int or "all", optional, default=10
         Number of top features to select.
         The "all" option bypasses selection, for use in a parameter search.
diff --git a/sklearn/linear_model/_ransac.py b/sklearn/linear_model/_ransac.py
index 5eac651c76383..fffa29d47d91c 100644
--- a/sklearn/linear_model/_ransac.py
+++ b/sklearn/linear_model/_ransac.py
@@ -150,6 +150,8 @@ class RANSACRegressor(MetaEstimatorMixin, RegressorMixin,
         If the loss on a sample is greater than the ``residual_threshold``,
         then this sample is classified as an outlier.
 
+        .. versionadded:: 0.18
+
     random_state : int, RandomState instance, default=None
         The generator used to initialize the centers.
         Pass an int for reproducible output across multiple function calls.
@@ -239,6 +241,8 @@ def fit(self, X, y, sample_weight=None):
             raises error if sample_weight is passed and base_estimator
             fit method does not support it.
 
+            .. versionadded:: 0.18
+
         Raises
         ------
         ValueError
diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index b8a1a8e5e22b4..2ceccca65203e 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -227,6 +227,8 @@ def confusion_matrix(y_true, y_pred, *, labels=None, sample_weight=None,
     sample_weight : array-like of shape (n_samples,), default=None
         Sample weights.
 
+        .. versionadded:: 0.18
+
     normalize : {'true', 'pred', 'all'}, default=None
         Normalizes confusion matrix over the true (rows), predicted (columns)
         conditions or all the population. If None, confusion matrix will not be
@@ -789,6 +791,8 @@ def matthews_corrcoef(y_true, y_pred, *, sample_weight=None):
     sample_weight : array-like of shape (n_samples,), default=None
         Sample weights.
 
+        .. versionadded:: 0.18
+
     Returns
     -------
     mcc : float
@@ -2156,6 +2160,7 @@ def log_loss(y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None,
         If not provided, labels will be inferred from y_true. If ``labels``
         is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
         assumed to be binary and are inferred from ``y_true``.
+        .. versionadded:: 0.18
 
     Returns
diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py
index 8a0fdcacb67f1..d652737bd23c0 100644
--- a/sklearn/metrics/cluster/_supervised.py
+++ b/sklearn/metrics/cluster/_supervised.py
@@ -881,6 +881,8 @@ def normalized_mutual_info_score(labels_true, labels_pred, *,
 def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
     """Measure the similarity of two clusterings of a set of points.
 
+    .. versionadded:: 0.18
+
     The Fowlkes-Mallows index (FMI) is defined as the geometric mean between
     of the precision and recall::
diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py
index edcb9b375ae79..9b2087e039f40 100644
--- a/sklearn/model_selection/_split.py
+++ b/sklearn/model_selection/_split.py
@@ -739,6 +739,8 @@ def split(self, X, y, groups=None):
 class TimeSeriesSplit(_BaseKFold):
     """Time Series cross-validator
 
+    .. versionadded:: 0.18
+
     Provides train/test indices to split time series data samples
     that are observed at fixed time intervals, in train/test sets.
     In each split, test indices must be higher than before, and thus shuffling
diff --git a/sklearn/multioutput.py b/sklearn/multioutput.py
index 815c1cbd67757..a5ede43f0fe8c 100644
--- a/sklearn/multioutput.py
+++ b/sklearn/multioutput.py
@@ -215,6 +215,8 @@ class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
     simple strategy for extending regressors that do not natively support
     multi-target regression.
 
+    .. versionadded:: 0.18
+
     Parameters
     ----------
     estimator : estimator object
diff --git a/sklearn/preprocessing/_function_transformer.py b/sklearn/preprocessing/_function_transformer.py
index 21dd40365f5a0..c4e6782b7cb19 100644
--- a/sklearn/preprocessing/_function_transformer.py
+++ b/sklearn/preprocessing/_function_transformer.py
@@ -66,9 +66,13 @@ class FunctionTransformer(TransformerMixin, BaseEstimator):
     kw_args : dict, optional
         Dictionary of additional keyword arguments to pass to func.
 
+        .. versionadded:: 0.18
+
     inv_kw_args : dict, optional
         Dictionary of additional keyword arguments to pass to inverse_func.
 
+        .. versionadded:: 0.18
+
     Examples
     --------
     >>> import numpy as np
diff --git a/sklearn/svm/_classes.py b/sklearn/svm/_classes.py
index 5ff6e74825e50..d082c22d0a3bc 100644
--- a/sklearn/svm/_classes.py
+++ b/sklearn/svm/_classes.py
@@ -213,6 +213,8 @@ def fit(self, X, y, sample_weight=None):
             samples. If not provided,
             then each sample is given unit weight.
 
+            .. versionadded:: 0.18
+
         Returns
        -------
         self : object
@@ -398,6 +400,8 @@ def fit(self, X, y, sample_weight=None):
             samples. If not provided,
             then each sample is given unit weight.
 
+            .. versionadded:: 0.18
+
         Returns
         -------
         self : object
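
All of the hunks above apply the same numpydoc convention: a Sphinx ``versionadded`` (or ``versionchanged``) directive indented one level under the parameter, method, or class description it annotates, set off by blank lines. A minimal sketch of that placement, using a hypothetical ``Example`` class and ``alpha`` parameter that are not part of this patch:

class Example:
    """Toy estimator showing where the directive sits in a numpydoc docstring.

    Parameters
    ----------
    alpha : float, default=1.0
        Regularization strength.

        .. versionadded:: 0.18
            Sphinx renders this as a "New in version 0.18" note under the
            ``alpha`` entry in the built API reference.
    """

    def __init__(self, alpha=1.0):
        self.alpha = alpha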