diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py
index 028e161634a82..256115b8b1e31 100644
--- a/sklearn/metrics/cluster/_supervised.py
+++ b/sklearn/metrics/cluster/_supervised.py
@@ -476,7 +476,7 @@ def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):

     Parameters
     ----------
-    labels_true : int array, shape = [n_samples]
+    labels_true : array-like of shape (n_samples,)
         Ground truth class labels to be used as a reference.

     labels_pred : array-like of shape (n_samples,)
@@ -532,6 +532,12 @@ def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
     return homogeneity, completeness, v_measure_score


+@validate_params(
+    {
+        "labels_true": ["array-like"],
+        "labels_pred": ["array-like"],
+    }
+)
 def homogeneity_score(labels_true, labels_pred):
     """Homogeneity metric of a cluster labeling given a ground truth.

@@ -550,7 +556,7 @@ def homogeneity_score(labels_true, labels_pred):

     Parameters
     ----------
-    labels_true : int array, shape = [n_samples]
+    labels_true : array-like of shape (n_samples,)
         Ground truth class labels to be used as a reference.

     labels_pred : array-like of shape (n_samples,)
@@ -601,6 +607,12 @@ def homogeneity_score(labels_true, labels_pred):
     return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]


+@validate_params(
+    {
+        "labels_true": ["array-like"],
+        "labels_pred": ["array-like"],
+    }
+)
 def completeness_score(labels_true, labels_pred):
     """Compute completeness metric of a cluster labeling given a ground truth.

@@ -619,7 +631,7 @@ def completeness_score(labels_true, labels_pred):

     Parameters
     ----------
-    labels_true : int array, shape = [n_samples]
+    labels_true : array-like of shape (n_samples,)
         Ground truth class labels to be used as a reference.

     labels_pred : array-like of shape (n_samples,)
@@ -670,6 +682,13 @@ def completeness_score(labels_true, labels_pred):
     return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]


+@validate_params(
+    {
+        "labels_true": ["array-like"],
+        "labels_pred": ["array-like"],
+        "beta": [Interval(Real, 0, None, closed="left")],
+    }
+)
 def v_measure_score(labels_true, labels_pred, *, beta=1.0):
     """V-measure cluster labeling given a ground truth.

@@ -694,7 +713,7 @@ def v_measure_score(labels_true, labels_pred, *, beta=1.0):

     Parameters
     ----------
-    labels_true : int array, shape = [n_samples]
+    labels_true : array-like of shape (n_samples,)
         Ground truth class labels to be used as a reference.

     labels_pred : array-like of shape (n_samples,)
diff --git a/sklearn/tests/test_public_functions.py b/sklearn/tests/test_public_functions.py
index 5288fffed09b3..8600f9da6a067 100644
--- a/sklearn/tests/test_public_functions.py
+++ b/sklearn/tests/test_public_functions.py
@@ -180,6 +180,7 @@ def _check_function_param_validation(
     "sklearn.metrics.balanced_accuracy_score",
     "sklearn.metrics.brier_score_loss",
     "sklearn.metrics.calinski_harabasz_score",
+    "sklearn.metrics.completeness_score",
     "sklearn.metrics.class_likelihood_ratios",
     "sklearn.metrics.classification_report",
     "sklearn.metrics.cluster.adjusted_mutual_info_score",
@@ -205,6 +206,7 @@ def _check_function_param_validation(
     "sklearn.metrics.get_scorer",
     "sklearn.metrics.hamming_loss",
     "sklearn.metrics.hinge_loss",
+    "sklearn.metrics.homogeneity_score",
     "sklearn.metrics.jaccard_score",
     "sklearn.metrics.label_ranking_average_precision_score",
     "sklearn.metrics.label_ranking_loss",
@@ -248,6 +250,7 @@ def _check_function_param_validation(
     "sklearn.metrics.roc_auc_score",
     "sklearn.metrics.roc_curve",
     "sklearn.metrics.top_k_accuracy_score",
+    "sklearn.metrics.v_measure_score",
     "sklearn.metrics.zero_one_loss",
     "sklearn.model_selection.cross_validate",
     "sklearn.model_selection.permutation_test_score",
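
For reviewers, a minimal usage sketch (not part of the diff) of the behavior the new decorators enable: once `v_measure_score` validates `beta` against `Interval(Real, 0, None, closed="left")`, a negative value is rejected before the metric is computed. The `InvalidParameterError` raised by `validate_params` subclasses `ValueError`, so the generic catch below suffices; the exact message wording depends on the scikit-learn version.

```python
# Sketch of the validation added by this patch; assumes a scikit-learn build
# that includes these @validate_params decorators.
from sklearn.metrics import completeness_score, homogeneity_score, v_measure_score

labels_true = [0, 0, 1, 1]
labels_pred = [0, 0, 1, 2]

print(homogeneity_score(labels_true, labels_pred))    # 1.0: every cluster is pure
print(completeness_score(labels_true, labels_pred))   # < 1.0: class 1 is split
print(v_measure_score(labels_true, labels_pred, beta=0.5))

# beta is constrained to [0, inf), so a negative value now fails parameter
# validation up front instead of silently producing a meaningless score.
try:
    v_measure_score(labels_true, labels_pred, beta=-1.0)
except ValueError as exc:  # InvalidParameterError subclasses ValueError
    print(f"rejected: {exc}")
```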