Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 859627d

Browse files
committed
DOC insert spaces before colons in parameter lists
Complies with numpydoc to improve rendering and automatic quality assurance such as #7793. Affects listings of Parameters Attributes, Returns. Performed with the help of: grep -nE '^( )+[a-zA-Z][a-zA-Z0-9_]*: ' sklearn -R | grep -v -e externals -e tests | grep -v -e default: -e else: -e Warning: -e Note: -e TRAIN: -e Default: -e True: -e False: -e DOI: -e In: | gsed 's|\([^:]*\):\([0-9]*\):\([^:]*\):\(.*\)|--- a/\1\n+++ b/\1\n@@ -\2,1 +\2,1 @@\n-\3:\4\n+\3 :\4|' | git apply --unidiff-zero -
1 parent 9dfe018 commit 859627d

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

73 files changed

+353
-353
lines changed

sklearn/base.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -43,10 +43,10 @@ def clone(estimator, safe=True):
4343
4444
Parameters
4545
----------
46-
estimator: estimator object, or list, tuple or set of objects
46+
estimator : estimator object, or list, tuple or set of objects
4747
The estimator or group of estimators to be cloned
4848
49-
safe: boolean, optional
49+
safe : boolean, optional
5050
If safe is false, clone will fall back to a deepcopy on objects
5151
that are not estimators.
5252
@@ -134,10 +134,10 @@ def _pprint(params, offset=0, printer=repr):
134134
135135
Parameters
136136
----------
137-
params: dict
137+
params : dict
138138
The dictionary to pretty print
139139
140-
offset: int
140+
offset : int
141141
The offset in characters to add at the begin of each line.
142142
143143
printer:
@@ -510,7 +510,7 @@ def score(self, X, y=None):
510510
511511
Returns
512512
-------
513-
score: float
513+
score : float
514514
"""
515515
pass
516516

sklearn/calibration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
8080
classes_ : array, shape (n_classes)
8181
The class labels.
8282
83-
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
83+
calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit")
8484
The list of calibrated classifiers, one for each crossvalidation fold,
8585
which has been fitted on all but the validation fold and calibrated
8686
on the validation fold.

sklearn/cluster/_k_means.pyx

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -180,24 +180,24 @@ def _mini_batch_update_csr(X, np.ndarray[DOUBLE, ndim=1] x_squared_norms,
180180
Parameters
181181
----------
182182
183-
X: CSR matrix, dtype float
183+
X : CSR matrix, dtype float
184184
The complete (pre allocated) training set as a CSR matrix.
185185
186-
centers: array, shape (n_clusters, n_features)
186+
centers : array, shape (n_clusters, n_features)
187187
The cluster centers
188188
189-
counts: array, shape (n_clusters,)
189+
counts : array, shape (n_clusters,)
190190
The vector in which we keep track of the numbers of elements in a
191191
cluster
192192
193193
Returns
194194
-------
195-
inertia: float
195+
inertia : float
196196
The inertia of the batch prior to centers update, i.e. the sum
197197
distances to the closest center for each sample. This is the objective
198198
function being minimized by the k-means algorithm.
199199
200-
squared_diff: float
200+
squared_diff : float
201201
The sum of squared update (squared norm of the centers position
202202
change). If compute_squared_diff is 0, this computation is skipped and
203203
0.0 is returned instead.
@@ -281,20 +281,20 @@ def _centers_dense(np.ndarray[floating, ndim=2] X,
281281
282282
Parameters
283283
----------
284-
X: array-like, shape (n_samples, n_features)
284+
X : array-like, shape (n_samples, n_features)
285285
286-
labels: array of integers, shape (n_samples)
286+
labels : array of integers, shape (n_samples)
287287
Current label assignment
288288
289-
n_clusters: int
289+
n_clusters : int
290290
Number of desired clusters
291291
292-
distances: array-like, shape (n_samples)
292+
distances : array-like, shape (n_samples)
293293
Distance to closest cluster for each sample.
294294
295295
Returns
296296
-------
297-
centers: array, shape (n_clusters, n_features)
297+
centers : array, shape (n_clusters, n_features)
298298
The resulting centers
299299
"""
300300
## TODO: add support for CSR input
@@ -342,20 +342,20 @@ def _centers_sparse(X, np.ndarray[INT, ndim=1] labels, n_clusters,
342342
343343
Parameters
344344
----------
345-
X: scipy.sparse.csr_matrix, shape (n_samples, n_features)
345+
X : scipy.sparse.csr_matrix, shape (n_samples, n_features)
346346
347-
labels: array of integers, shape (n_samples)
347+
labels : array of integers, shape (n_samples)
348348
Current label assignment
349349
350-
n_clusters: int
350+
n_clusters : int
351351
Number of desired clusters
352352
353-
distances: array-like, shape (n_samples)
353+
distances : array-like, shape (n_samples)
354354
Distance to closest cluster for each sample.
355355
356356
Returns
357357
-------
358-
centers: array, shape (n_clusters, n_features)
358+
centers : array, shape (n_clusters, n_features)
359359
The resulting centers
360360
"""
361361
cdef int n_features = X.shape[1]

sklearn/cluster/affinity_propagation_.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -278,7 +278,7 @@ def fit(self, X, y=None):
278278
Parameters
279279
----------
280280
281-
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
281+
X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
282282
Data matrix or, if affinity is ``precomputed``, matrix of
283283
similarities / affinities.
284284
"""

sklearn/cluster/birch.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -481,7 +481,7 @@ def _get_leaves(self):
481481
482482
Returns
483483
-------
484-
leaves: array-like
484+
leaves : array-like
485485
List of the leaf nodes.
486486
"""
487487
leaf_ptr = self.dummy_leaf_.next_leaf_
@@ -538,7 +538,7 @@ def predict(self, X):
538538
539539
Returns
540540
-------
541-
labels: ndarray, shape(n_samples)
541+
labels : ndarray, shape(n_samples)
542542
Labelled data.
543543
"""
544544
X = check_array(X, accept_sparse='csr')

sklearn/cluster/hierarchical.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ def ward_tree(X, connectivity=None, n_clusters=None, return_distance=False):
116116
limited use, and the 'parents' output should rather be used.
117117
This option is valid only when specifying a connectivity matrix.
118118
119-
return_distance: bool (optional)
119+
return_distance : bool (optional)
120120
If True, return the distance between the clusters.
121121
122122
Returns

sklearn/cluster/k_means_.py

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -47,20 +47,20 @@ def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
4747
4848
Parameters
4949
-----------
50-
X: array or sparse matrix, shape (n_samples, n_features)
50+
X : array or sparse matrix, shape (n_samples, n_features)
5151
The data to pick seeds for. To avoid memory copy, the input data
5252
should be double precision (dtype=np.float64).
5353
54-
n_clusters: integer
54+
n_clusters : integer
5555
The number of seeds to choose
5656
57-
x_squared_norms: array, shape (n_samples,)
57+
x_squared_norms : array, shape (n_samples,)
5858
Squared Euclidean norm of each data point.
5959
60-
random_state: numpy.RandomState
60+
random_state : numpy.RandomState
6161
The generator used to initialize the centers.
6262
63-
n_local_trials: integer, optional
63+
n_local_trials : integer, optional
6464
The number of seeding trials for each center (except the first),
6565
of which the one reducing inertia the most is greedily chosen.
6666
Set to None to make the number of trials depend logarithmically
@@ -267,7 +267,7 @@ def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
267267
The final value of the inertia criterion (sum of squared distances to
268268
the closest centroid for all observations in the training set).
269269
270-
best_n_iter: int
270+
best_n_iter : int
271271
Number of iterations corresponding to the best results.
272272
Returned only if `return_n_iter` is set to True.
273273
@@ -409,17 +409,17 @@ def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
409409
410410
Parameters
411411
----------
412-
X: array-like of floats, shape (n_samples, n_features)
412+
X : array-like of floats, shape (n_samples, n_features)
413413
The observations to cluster.
414414
415-
n_clusters: int
415+
n_clusters : int
416416
The number of clusters to form as well as the number of
417417
centroids to generate.
418418
419-
max_iter: int, optional, default 300
419+
max_iter : int, optional, default 300
420420
Maximum number of iterations of the k-means algorithm to run.
421421
422-
init: {'k-means++', 'random', or ndarray, or a callable}, optional
422+
init : {'k-means++', 'random', or ndarray, or a callable}, optional
423423
Method for initialization, default to 'k-means++':
424424
425425
'k-means++' : selects initial cluster centers for k-mean
@@ -435,33 +435,33 @@ def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
435435
If a callable is passed, it should take arguments X, k and
436436
and a random state and return an initialization.
437437
438-
tol: float, optional
438+
tol : float, optional
439439
The relative increment in the results before declaring convergence.
440440
441-
verbose: boolean, optional
441+
verbose : boolean, optional
442442
Verbosity mode
443443
444-
x_squared_norms: array
444+
x_squared_norms : array
445445
Precomputed x_squared_norms.
446446
447447
precompute_distances : boolean, default: True
448448
Precompute distances (faster but takes more memory).
449449
450-
random_state: integer or numpy.RandomState, optional
450+
random_state : integer or numpy.RandomState, optional
451451
The generator used to initialize the centers. If an integer is
452452
given, it fixes the seed. Defaults to the global numpy random
453453
number generator.
454454
455455
Returns
456456
-------
457-
centroid: float ndarray with shape (k, n_features)
457+
centroid : float ndarray with shape (k, n_features)
458458
Centroids found at the last iteration of k-means.
459459
460-
label: integer ndarray with shape (n_samples,)
460+
label : integer ndarray with shape (n_samples,)
461461
label[i] is the code or index of the centroid the
462462
i'th observation is closest to.
463463
464-
inertia: float
464+
inertia : float
465465
The final value of the inertia criterion (sum of squared distances to
466466
the closest centroid for all observations in the training set).
467467
@@ -577,26 +577,26 @@ def _labels_inertia(X, x_squared_norms, centers,
577577
578578
Parameters
579579
----------
580-
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
580+
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
581581
The input samples to assign to the labels.
582582
583-
x_squared_norms: array, shape (n_samples,)
583+
x_squared_norms : array, shape (n_samples,)
584584
Precomputed squared euclidean norm of each data point, to speed up
585585
computations.
586586
587-
centers: float array, shape (k, n_features)
587+
centers : float array, shape (k, n_features)
588588
The cluster centers.
589589
590590
precompute_distances : boolean, default: True
591591
Precompute distances (faster but takes more memory).
592592
593-
distances: float array, shape (n_samples,)
593+
distances : float array, shape (n_samples,)
594594
Pre-allocated array to be filled in with each sample's distance
595595
to the closest center.
596596
597597
Returns
598598
-------
599-
labels: int array of shape(n)
599+
labels : int array of shape(n)
600600
The resulting assignment
601601
602602
inertia : float
@@ -628,20 +628,20 @@ def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
628628
Parameters
629629
----------
630630
631-
X: array, shape (n_samples, n_features)
631+
X : array, shape (n_samples, n_features)
632632
633-
k: int
633+
k : int
634634
number of centroids
635635
636-
init: {'k-means++', 'random' or ndarray or callable} optional
636+
init : {'k-means++', 'random' or ndarray or callable} optional
637637
Method for initialization
638638
639-
random_state: integer or numpy.RandomState, optional
639+
random_state : integer or numpy.RandomState, optional
640640
The generator used to initialize the centers. If an integer is
641641
given, it fixes the seed. Defaults to the global numpy random
642642
number generator.
643643
644-
x_squared_norms: array, shape (n_samples,), optional
644+
x_squared_norms : array, shape (n_samples,), optional
645645
Squared euclidean norm of each data point. Pass it if you have it at
646646
hands already to avoid it being recomputed here. Default: None
647647
@@ -653,7 +653,7 @@ def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
653653
654654
Returns
655655
-------
656-
centers: array, shape(k, n_features)
656+
centers : array, shape(k, n_features)
657657
"""
658658
random_state = check_random_state(random_state)
659659
n_samples = X.shape[0]

sklearn/cluster/spectral.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
3939
Maximum number of iterations to attempt in rotation and partition
4040
matrix search if machine precision convergence is not reached
4141
42-
random_state: int seed, RandomState instance, or None (default)
42+
random_state : int seed, RandomState instance, or None (default)
4343
A pseudo random number generator used for the initialization of the
4444
of the rotation matrix
4545

sklearn/covariance/graph_lasso_.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -461,7 +461,7 @@ class GraphLassoCV(GraphLasso):
461461
grid to be used. See the notes in the class docstring for
462462
more details.
463463
464-
n_refinements: strictly positive integer
464+
n_refinements : strictly positive integer
465465
The number of times the grid is refined. Not used if explicit
466466
values of alphas are passed.
467467
@@ -492,7 +492,7 @@ class GraphLassoCV(GraphLasso):
492492
max_iter : integer, optional
493493
Maximum number of iterations.
494494
495-
mode: {'cd', 'lars'}
495+
mode : {'cd', 'lars'}
496496
The Lasso solver to use: coordinate descent or LARS. Use LARS for
497497
very sparse underlying graphs, where number of features is greater
498498
than number of samples. Elsewhere prefer cd which is more numerically

sklearn/covariance/shrunk_covariance_.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
168168
169169
Returns
170170
-------
171-
shrinkage: float
171+
shrinkage : float
172172
Coefficient in the convex combination used for the computation
173173
of the shrunk estimate.
174174
@@ -496,7 +496,7 @@ class OAS(EmpiricalCovariance):
496496
store_precision : bool, default=True
497497
Specify if the estimated precision is stored.
498498
499-
assume_centered: bool, default=False
499+
assume_centered : bool, default=False
500500
If True, data are not centered before computation.
501501
Useful when working with data whose mean is almost, but not exactly
502502
zero.
@@ -545,7 +545,7 @@ def fit(self, X, y=None):
545545
546546
Returns
547547
-------
548-
self: object
548+
self : object
549549
Returns self.
550550
551551
"""

0 commit comments

Comments
 (0)