@@ -2544,8 +2544,8 @@ class PowerTransformer(BaseEstimator, TransformerMixin):
2544
2544
2545
2545
Notes
2546
2546
-----
2547
- NaNs are treated as missing values: disregarded in fit, and maintained in
2548
- transform.
2547
+ NaNs are treated as missing values: disregarded in ``fit``, and maintained
2548
+ in ``transform``.
2549
2549
2550
2550
For a comparison of the different scalers, transformers, and normalizers,
2551
2551
see :ref:`examples/preprocessing/plot_all_scaling.py
@@ -2844,7 +2844,7 @@ def _check_input(self, X, check_positive=False, check_shape=False,
2844
2844
return X
2845
2845
2846
2846
2847
- def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
2847
+ def power_transform(X, method='warn', standardize=True, copy=True):
2848
2848
"""
2849
2849
Power transforms are a family of parametric, monotonic transformations
2850
2850
that are applied to make data more Gaussian-like. This is useful for
@@ -2866,9 +2866,9 @@ def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
2866
2866
Parameters
2867
2867
----------
2868
2868
X : array-like, shape (n_samples, n_features)
2869
- The data used to estimate the optimal transformation parameters.
2869
+ The data to be transformed using a power transformation.
2870
2870
2871
- method : str, (default='yeo-johnson')
2871
+ method : str, (default='warn')
2872
2872
The power transform method. Available methods are:
2873
2873
2874
2874
- 'yeo-johnson' [1]_, works with positive and negative values
@@ -2881,15 +2881,20 @@ def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
2881
2881
copy : boolean, optional, default=True
2882
2882
Set to False to perform inplace computation during transformation.
2883
2883
2884
+ Returns
2885
+ -------
2886
+ X_trans : array-like, shape (n_samples, n_features)
2887
+ The transformed data.
2888
+
2884
2889
Examples
2885
2890
--------
2886
2891
>>> import numpy as np
2887
2892
>>> from sklearn.preprocessing import power_transform
2888
2893
>>> data = [[1, 2], [3, 2], [4, 5]]
2889
- >>> print(power_transform(data))
2890
- [[-1.31616039 -0.70710678]
2891
- [ 0.20998268 -0.70710678]
2892
- [ 1.1061777   1.41421356]]
2894
+ >>> print(power_transform(data, method='box-cox')) # doctest: +ELLIPSIS
2895
+ [[-1.332... -0.707...]
2896
+ [ 0.256... -0.707...]
2897
+ [ 1.076...  1.414...]]
2893
2898
2894
2899
See also
2895
2900
--------
@@ -2902,8 +2907,8 @@ def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
2902
2907
2903
2908
Notes
2904
2909
-----
2905
- NaNs are treated as missing values: disregarded in fit, and maintained in
2906
- transform.
2910
+ NaNs are treated as missing values: disregarded in ``fit``, and maintained
2911
+ in ``transform``.
2907
2912
2908
2913
For a comparison of the different scalers, transformers, and normalizers,
2909
2914
see :ref:`examples/preprocessing/plot_all_scaling.py
@@ -2919,7 +2924,13 @@ def power_transform(X, method='yeo-johnson', standardize=True, copy=True):
2919
2924
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
2920
2925
of the Royal Statistical Society B, 26, 211-252 (1964).
2921
2926
"""
2922
-
2927
+ if method == 'warn':
2928
+ warnings.warn("The default value of 'method' will change from "
2929
+ "'box-cox' to 'yeo-johnson' in version 0.21. Set "
2930
+ "the 'method' argument explicitly to silence this "
2931
+ "warning in the meantime." ,
2932
+ FutureWarning)
2933
+ method = 'box-cox'
2923
2934
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
2924
2935
return pt.fit_transform(X)
2925
2936
0 commit comments