diff --git a/doc/modules/ensemble.rst b/doc/modules/ensemble.rst
index c2897ed518509..8a414e5371511 100644
--- a/doc/modules/ensemble.rst
+++ b/doc/modules/ensemble.rst
@@ -1323,7 +1323,7 @@ computationally expensive.
     StackingRegressor(...)
     >>> print('R2 score: {:.2f}'
     ...       .format(multi_layer_regressor.score(X_test, y_test)))
-    R2 score: 0.82
+    R2 score: 0.83

 .. topic:: References
diff --git a/sklearn/decomposition/_nmf.py b/sklearn/decomposition/_nmf.py
index 9d335eb775d8b..6d5509611cefd 100644
--- a/sklearn/decomposition/_nmf.py
+++ b/sklearn/decomposition/_nmf.py
@@ -842,7 +842,7 @@ def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',


 def non_negative_factorization(X, W=None, H=None, n_components=None,
-                               init='warn', update_H=True, solver='cd',
+                               init=None, update_H=True, solver='cd',
                                beta_loss='frobenius', tol=1e-4,
                                max_iter=200, alpha=0., l1_ratio=0.,
                                regularization=None, random_state=None,
@@ -891,10 +891,7 @@ def non_negative_factorization(X, W=None, H=None, n_components=None,
     init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
         Method used to initialize the procedure.
-        Default: 'random'.
-
-        The default value will change from 'random' to None in version 0.23
-        to make it consistent with decomposition.NMF.
+        Default: None.

         Valid options:
@@ -915,6 +912,9 @@ def non_negative_factorization(X, W=None, H=None, n_components=None,

         - 'custom': use custom matrices W and H

+        .. versionchanged:: 0.23
+           The default value of `init` changed from 'random' to None in 0.23.
+
     update_H : boolean, default: True
         Set to True, both W and H will be estimated from initial guesses.
         Set to False, only W will be estimated.
@@ -1028,13 +1028,6 @@ def non_negative_factorization(X, W=None, H=None, n_components=None,
         raise ValueError("Tolerance for stopping criteria must be "
                          "positive; got (tol=%r)" % tol)

-    if init == "warn":
-        if n_components < n_features:
-            warnings.warn("The default value of init will change from "
-                          "random to None in 0.23 to make it consistent "
-                          "with decomposition.NMF.", FutureWarning)
-        init = "random"
-
     # check W and H, or initialize them
     if init == 'custom' and update_H:
         _check_init(H, (n_components, n_features), "NMF (input H)")
diff --git a/sklearn/decomposition/tests/test_nmf.py b/sklearn/decomposition/tests/test_nmf.py
index d98ad551513e7..4fd21ffbf5b1d 100644
--- a/sklearn/decomposition/tests/test_nmf.py
+++ b/sklearn/decomposition/tests/test_nmf.py
@@ -224,10 +224,6 @@ def test_non_negative_factorization_checking():
     A = np.ones((2, 2))
     # Test parameters checking is public function
     nnmf = non_negative_factorization
-    msg = ("The default value of init will change from "
-           "random to None in 0.23 to make it consistent "
-           "with decomposition.NMF.")
-    assert_warns_message(FutureWarning, msg, nnmf, A, A, A, np.int64(1))
     msg = ("Number of components must be a positive integer; "
            "got (n_components=1.5)")
     assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5, 'random')
diff --git a/sklearn/discriminant_analysis.py b/sklearn/discriminant_analysis.py
index 4492d0868994d..1495d00620911 100644
--- a/sklearn/discriminant_analysis.py
+++ b/sklearn/discriminant_analysis.py
@@ -423,7 +423,6 @@ def fit(self, X, y):
         y : array, shape (n_samples,)
             Target values.
""" - # FIXME: Future warning to be removed in 0.23 X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self, dtype=[np.float64, np.float32]) self.classes_ = unique_labels(y) @@ -455,21 +454,11 @@ def fit(self, X, y): self._max_components = max_components else: if self.n_components > max_components: - warnings.warn( + raise ValueError( "n_components cannot be larger than min(n_features, " - "n_classes - 1). Using min(n_features, " - "n_classes - 1) = min(%d, %d - 1) = %d components." - % (X.shape[1], len(self.classes_), max_components), - ChangedBehaviorWarning) - future_msg = ("In version 0.23, setting n_components > min(" - "n_features, n_classes - 1) will raise a " - "ValueError. You should set n_components to None" - " (default), or a value smaller or equal to " - "min(n_features, n_classes - 1).") - warnings.warn(future_msg, FutureWarning) - self._max_components = max_components - else: - self._max_components = self.n_components + "n_classes - 1)." + ) + self._max_components = self.n_components if self.solver == 'svd': if self.shrinkage is not None: diff --git a/sklearn/ensemble/_forest.py b/sklearn/ensemble/_forest.py index eba59c232531b..7e88f0c2f189a 100644 --- a/sklearn/ensemble/_forest.py +++ b/sklearn/ensemble/_forest.py @@ -935,14 +935,14 @@ class RandomForestClassifier(ForestClassifier): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. @@ -1253,14 +1253,14 @@ class RandomForestRegressor(ForestRegressor): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. bootstrap : boolean, optional (default=True) @@ -1530,14 +1530,14 @@ class ExtraTreesClassifier(ForestClassifier): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. bootstrap : boolean, optional (default=False) @@ -1840,14 +1840,14 @@ class ExtraTreesRegressor(ForestRegressor): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. 
A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. bootstrap : boolean, optional (default=False) @@ -2078,14 +2078,14 @@ class RandomTreesEmbedding(BaseForest): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. sparse_output : bool, optional (default=True) diff --git a/sklearn/ensemble/_gb.py b/sklearn/ensemble/_gb.py index c3971e019a088..667a526e486a9 100644 --- a/sklearn/ensemble/_gb.py +++ b/sklearn/ensemble/_gb.py @@ -868,14 +868,14 @@ class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. init : estimator or 'zero', optional (default=None) @@ -1340,14 +1340,14 @@ class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting): .. versionadded:: 0.19 - min_impurity_split : float, (default=1e-7) + min_impurity_split : float, (default=0) Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of - ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it + ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. 
     init : estimator or 'zero', optional (default=None)
diff --git a/sklearn/ensemble/tests/test_gradient_boosting.py b/sklearn/ensemble/tests/test_gradient_boosting.py
index 5fe9dee573d1d..a28c69d0f7cc5 100644
--- a/sklearn/ensemble/tests/test_gradient_boosting.py
+++ b/sklearn/ensemble/tests/test_gradient_boosting.py
@@ -1170,9 +1170,10 @@ def test_non_uniform_weights_toy_edge_case_clf():

 def check_sparse_input(EstimatorClass, X, X_sparse, y):
     dense = EstimatorClass(n_estimators=10, random_state=0,
-                           max_depth=2).fit(X, y)
+                           max_depth=2, min_impurity_decrease=1e-7).fit(X, y)
     sparse = EstimatorClass(n_estimators=10, random_state=0,
-                            max_depth=2).fit(X_sparse, y)
+                            max_depth=2,
+                            min_impurity_decrease=1e-7).fit(X_sparse, y)

     assert_array_almost_equal(sparse.apply(X), dense.apply(X))
     assert_array_almost_equal(sparse.predict(X), dense.predict(X))
diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index c7101f7a38eeb..322ac3409722f 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -1986,7 +1986,7 @@ class 2 1.00 0.67 0.80 3
     return report


-def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
+def hamming_loss(y_true, y_pred, sample_weight=None):
     """Compute the average Hamming loss.

     The Hamming loss is the fraction of labels that are incorrectly predicted.
@@ -2001,17 +2001,6 @@ def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
     y_pred : 1d array-like, or label indicator array / sparse matrix
         Predicted labels, as returned by a classifier.

-    labels : array, shape = [n_labels], optional (default='deprecated')
-        Integer array of labels. If not provided, labels will be inferred
-        from y_true and y_pred.
-
-        .. versionadded:: 0.18
-        .. deprecated:: 0.21
-           This parameter ``labels`` is deprecated in version 0.21 and will
-           be removed in version 0.23. Hamming loss uses ``y_true.shape[1]``
-           for the number of labels when y_true is binary label indicators,
-           so it is unnecessary for the user to specify.
-
     sample_weight : array-like of shape (n_samples,), default=None
         Sample weights.
@@ -2071,12 +2060,6 @@ def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
     y_type, y_true, y_pred = _check_targets(y_true, y_pred)
     check_consistent_length(y_true, y_pred, sample_weight)

-    if labels is not None:
-        warnings.warn("The labels parameter is unused. It was"
-                      " deprecated in version 0.21 and"
-                      " will be removed in version 0.23",
-                      FutureWarning)
-
     if sample_weight is None:
         weight_average = 1.
     else:
diff --git a/sklearn/metrics/tests/test_classification.py b/sklearn/metrics/tests/test_classification.py
index 66ea486f955b7..4c1db4b55bb16 100644
--- a/sklearn/metrics/tests/test_classification.py
+++ b/sklearn/metrics/tests/test_classification.py
@@ -1176,11 +1176,6 @@ def test_multilabel_hamming_loss():
     assert hamming_loss(y1, np.zeros_like(y1), sample_weight=w) == 2. / 3
     # sp_hamming only works with 1-D arrays
     assert hamming_loss(y1[0], y2[0]) == sp_hamming(y1[0], y2[0])
-    assert_warns_message(FutureWarning,
-                         "The labels parameter is unused. It was"
-                         " deprecated in version 0.21 and"
-                         " will be removed in version 0.23",
-                         hamming_loss, y1, y2, labels=[0, 1])


 def test_jaccard_score_validation():
diff --git a/sklearn/metrics/tests/test_common.py b/sklearn/metrics/tests/test_common.py
index 991af61537012..331bcf197dccb 100644
--- a/sklearn/metrics/tests/test_common.py
+++ b/sklearn/metrics/tests/test_common.py
@@ -351,8 +351,6 @@ def precision_recall_curve_padded_thresholds(*args, **kwargs):
     "roc_curve",
     "precision_recall_curve",

-    "hamming_loss",
-
     "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
     "jaccard_score",
diff --git a/sklearn/preprocessing/_data.py b/sklearn/preprocessing/_data.py
index 19e21c0862cf7..cae75be2e591c 100644
--- a/sklearn/preprocessing/_data.py
+++ b/sklearn/preprocessing/_data.py
@@ -2606,8 +2606,8 @@ def quantile_transform(X, axis=0, n_quantiles=1000,
         input is already a numpy array). If True, a copy of `X` is transformed,
         leaving the original `X` unchanged

-        ..versionchnanged:: 0.22
-            The default value of `copy` changed from False to True in 0.22.
+        .. versionchanged:: 0.23
+            The default value of `copy` changed from False to True in 0.23.

     Returns
     -------
@@ -3008,7 +3008,7 @@ def _more_tags(self):
         return {'allow_nan': True}


-def power_transform(X, method='warn', standardize=True, copy=True):
+def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
     """
     Power transforms are a family of parametric, monotonic transformations
     that are applied to make data more Gaussian-like. This is useful for
@@ -3032,15 +3032,15 @@ def power_transform(X, method='warn', standardize=True, copy=True):
     X : array-like, shape (n_samples, n_features)
         The data to be transformed using a power transformation.

-    method : str
+    method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
         The power transform method. Available methods are:

         - 'yeo-johnson' [1]_, works with positive and negative values
        - 'box-cox' [2]_, only works with strictly positive values

-        The default method will be changed from 'box-cox' to 'yeo-johnson'
-        in version 0.23. To suppress the FutureWarning, explicitly set the
-        parameter.
+        .. versionchanged:: 0.23
+            The default value of the `method` parameter changed from
+            'box-cox' to 'yeo-johnson' in 0.23.

     standardize : boolean, default=True
         Set to True to apply zero-mean, unit-variance normalization to the
@@ -3092,12 +3092,5 @@ def power_transform(X, method='warn', standardize=True, copy=True):
     .. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
            of the Royal Statistical Society B, 26, 211-252 (1964).
     """
-    if method == 'warn':
-        warnings.warn("The default value of 'method' will change from "
-                      "'box-cox' to 'yeo-johnson' in version 0.23. Set "
-                      "the 'method' argument explicitly to silence this "
-                      "warning in the meantime.",
-                      FutureWarning)
-        method = 'box-cox'
     pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
     return pt.fit_transform(X)
diff --git a/sklearn/preprocessing/tests/test_data.py b/sklearn/preprocessing/tests/test_data.py
index a67c101dec499..9a8e31d468f1c 100644
--- a/sklearn/preprocessing/tests/test_data.py
+++ b/sklearn/preprocessing/tests/test_data.py
@@ -2452,21 +2452,3 @@ def test_power_transformer_copy_False(method, standardize):
     X_inv_trans = pt.inverse_transform(X_trans)

     assert X_trans is X_inv_trans
-
-
-def test_power_transform_default_method():
-    X = np.abs(X_2d)
-
-    future_warning_message = (
-        "The default value of 'method' "
-        "will change from 'box-cox'"
-    )
-    assert_warns_message(FutureWarning, future_warning_message,
-                         power_transform, X)
-
-    with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
-        X_trans_default = power_transform(X)
-
-    X_trans_boxcox = power_transform(X, method='box-cox')
-    assert_array_equal(X_trans_boxcox, X_trans_default)
diff --git a/sklearn/tests/test_discriminant_analysis.py b/sklearn/tests/test_discriminant_analysis.py
index 7b3e94bea793c..dcd4009a47a2d 100644
--- a/sklearn/tests/test_discriminant_analysis.py
+++ b/sklearn/tests/test_discriminant_analysis.py
@@ -332,7 +332,6 @@ def test_lda_store_covariance():
 @pytest.mark.parametrize('n_features', [3, 5])
 @pytest.mark.parametrize('n_classes', [5, 3])
 def test_lda_dimension_warning(n_classes, n_features):
-    # FIXME: Future warning to be removed in 0.23
     rng = check_random_state(0)
     n_samples = 10
     X = rng.randn(n_samples, n_features)
@@ -348,22 +347,14 @@ def test_lda_dimension_warning(n_classes, n_features):

     for n_components in [max_components + 1,
                          max(n_features, n_classes - 1) + 1]:
-        # if n_components > min(n_classes - 1, n_features), raise warning
+        # if n_components > min(n_classes - 1, n_features), raise error.
         # We test one unit higher than max_components, and then something
         # larger than both n_features and n_classes - 1 to ensure the test
         # works for any value of n_component
         lda = LinearDiscriminantAnalysis(n_components=n_components)
-        msg = ("n_components cannot be larger than min(n_features, "
-               "n_classes - 1). Using min(n_features, "
-               "n_classes - 1) = min(%d, %d - 1) = %d components." %
-               (n_features, n_classes, max_components))
-        assert_warns_message(ChangedBehaviorWarning, msg, lda.fit, X, y)
-        future_msg = ("In version 0.23, setting n_components > min("
-                      "n_features, n_classes - 1) will raise a "
-                      "ValueError. You should set n_components to None"
-                      " (default), or a value smaller or equal to "
-                      "min(n_features, n_classes - 1).")
-        assert_warns_message(FutureWarning, future_msg, lda.fit, X, y)
+        msg = "n_components cannot be larger than "
+        with pytest.raises(ValueError, match=msg):
+            lda.fit(X, y)


 @pytest.mark.parametrize("data_type, expected_type", [
diff --git a/sklearn/tree/_classes.py b/sklearn/tree/_classes.py
index ea43716e20ae6..4eb02464e786f 100644
--- a/sklearn/tree/_classes.py
+++ b/sklearn/tree/_classes.py
@@ -293,19 +293,19 @@ def fit(self, X, y, sample_weight=None, check_input=True,
             min_weight_leaf = (self.min_weight_fraction_leaf *
                                np.sum(sample_weight))

-        if self.min_impurity_split is not None:
+        min_impurity_split = self.min_impurity_split
+        if min_impurity_split is not None:
             warnings.warn("The min_impurity_split parameter is deprecated. "
-                          "Its default value will change from 1e-7 to 0 in "
+                          "Its default value has changed from 1e-7 to 0 in "
                           "version 0.23, and it will be removed in 0.25. "
                           "Use the min_impurity_decrease parameter instead.",
                           FutureWarning)
-            min_impurity_split = self.min_impurity_split
-        else:
-            min_impurity_split = 1e-7

-        if min_impurity_split < 0.:
-            raise ValueError("min_impurity_split must be greater than "
-                             "or equal to 0")
+            if min_impurity_split < 0.:
+                raise ValueError("min_impurity_split must be greater than "
+                                 "or equal to 0")
+        else:
+            min_impurity_split = 0

         if self.min_impurity_decrease < 0.:
             raise ValueError("min_impurity_decrease must be greater than "
@@ -679,14 +679,14 @@ class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):

         .. versionadded:: 0.19

-    min_impurity_split : float, default=1e-7
+    min_impurity_split : float, default=0
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
-          ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
+          ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

     class_weight : dict, list of dicts, "balanced" or None, default=None
@@ -1061,14 +1061,14 @@ class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):

         .. versionadded:: 0.19

-    min_impurity_split : float, (default=1e-7)
+    min_impurity_split : float, (default=0)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
-          ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
+          ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

     presort : deprecated, default='deprecated'
@@ -1349,14 +1349,14 @@ class ExtraTreeClassifier(DecisionTreeClassifier):

         .. versionadded:: 0.19

-    min_impurity_split : float, (default=1e-7)
+    min_impurity_split : float, (default=0)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
-          ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
+          ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.

     class_weight : dict, list of dicts, "balanced" or None, default=None
@@ -1573,14 +1573,14 @@ class ExtraTreeRegressor(DecisionTreeRegressor):

         .. versionadded:: 0.19

-    min_impurity_split : float, (default=1e-7)
+    min_impurity_split : float, (default=0)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19. The default value of
-          ``min_impurity_split`` will change from 1e-7 to 0 in 0.23 and it
+          ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
           will be removed in 0.25. Use ``min_impurity_decrease`` instead.
     max_leaf_nodes : int or None, optional (default=None)
@@ -1645,7 +1645,7 @@ class ExtraTreeRegressor(DecisionTreeRegressor):
     >>> reg = BaggingRegressor(extra_tree, random_state=0).fit(
     ...     X_train, y_train)
     >>> reg.score(X_test, y_test)
-    0.7823...
+    0.7788...
     """
     def __init__(self,
                  criterion="mse",
diff --git a/sklearn/tree/tests/test_tree.py b/sklearn/tree/tests/test_tree.py
index dcd9d4c01a8ec..9f65ad7f68e83 100644
--- a/sklearn/tree/tests/test_tree.py
+++ b/sklearn/tree/tests/test_tree.py
@@ -803,7 +803,7 @@ def test_min_impurity_split():
         est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
                             random_state=0)
         assert est.min_impurity_split is None, (
-            "Failed, min_impurity_split = {0} > 1e-7".format(
+            "Failed, min_impurity_split = {0} != None".format(
                 est.min_impurity_split))
         try:
             assert_warns(FutureWarning, est.fit, X, y)
diff --git a/sklearn/utils/_testing.py b/sklearn/utils/_testing.py
index 4e4e6043eae3d..c40e2bc84d8f9 100644
--- a/sklearn/utils/_testing.py
+++ b/sklearn/utils/_testing.py
@@ -53,8 +53,8 @@
 __all__ = ["assert_equal", "assert_not_equal", "assert_raises",
-           "assert_raises_regexp", "assert_true",
-           "assert_false", "assert_almost_equal", "assert_array_equal",
+           "assert_raises_regexp",
+           "assert_almost_equal", "assert_array_equal",
            "assert_array_almost_equal", "assert_array_less",
            "assert_less", "assert_less_equal",
            "assert_greater", "assert_greater_equal",
@@ -85,16 +85,6 @@
 # the old name for now
 assert_raises_regexp = assert_raises_regex

-deprecation_message = "'assert_true' is deprecated in version 0.21 " \
-                      "and will be removed in version 0.23. " \
-                      "Please use 'assert' instead."
-assert_true = deprecated(deprecation_message)(_dummy.assertTrue)
-
-deprecation_message = "'assert_false' is deprecated in version 0.21 " \
-                      "and will be removed in version 0.23. " \
-                      "Please use 'assert' instead."
-assert_false = deprecated(deprecation_message)(_dummy.assertFalse)
-

 def assert_warns(warning_class, func, *args, **kw):
     """Test that a certain warning occurs.
diff --git a/sklearn/utils/estimator_checks.py b/sklearn/utils/estimator_checks.py
index 30c668237b371..b8471daf5deab 100644
--- a/sklearn/utils/estimator_checks.py
+++ b/sklearn/utils/estimator_checks.py
@@ -479,9 +479,6 @@ def _set_checking_parameters(estimator):
         # K-Means
         estimator.set_params(n_init=2)

-    if hasattr(estimator, "n_components"):
-        estimator.n_components = 2
-
     if name == 'TruncatedSVD':
         # TruncatedSVD doesn't run with n_components = n_features
         # This is ugly :-/
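Note (not part of the patch): a minimal sketch of the 0.23 behaviors this diff enforces, assuming a scikit-learn build with these changes applied. The data and parameter values below are illustrative only.

    import numpy as np
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.preprocessing import power_transform

    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 0])  # 3 classes -> max 2 components

    # LDA now raises instead of warning when n_components is too large:
    # min(n_features, n_classes - 1) = min(5, 2) = 2, so 3 is rejected.
    try:
        LinearDiscriminantAnalysis(n_components=3).fit(X, y)
    except ValueError as exc:
        print(exc)  # "n_components cannot be larger than min(n_features, n_classes - 1)."

    # power_transform now defaults to 'yeo-johnson', which accepts the negative
    # values in X; the old 'box-cox' default required strictly positive data.
    X_trans = power_transform(X)  # no FutureWarning under the new default
    print(X_trans.shape)  # (10, 5)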