
Commit 31d1de0

FIX precision to float64 across the codebase

1 parent 8b1d37d · commit 31d1de0

File tree

24 files changed: +63 −63 lines changed
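The diff below is mechanical: each occurrence of the np.float alias (and, in a few test files, np.float32) is replaced with the explicit np.float64 scalar type. For context (background, not part of the commit): np.float was never a distinct NumPy type, only a backwards-compatibility alias for Python's builtin float; the alias was later deprecated in NumPy 1.20 and removed in 1.24. A minimal sketch of the distinction:

    import numpy as np

    # np.float64 is NumPy's explicit IEEE 754 double-precision scalar type.
    x = np.zeros(3, dtype=np.float64)
    print(x.dtype)                                  # float64

    # The builtin float maps to the same 64-bit dtype on virtually all
    # platforms, so dtype=float builds an identical array; spelling out
    # np.float64 simply makes the intended precision unambiguous.
    print(np.dtype(float) == np.dtype(np.float64))  # True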

doc/modules/preprocessing.rst

Lines changed: 1 addition & 1 deletion
@@ -398,7 +398,7 @@ Continuing the example above::
 
     >>> enc = preprocessing.OneHotEncoder()
     >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])  # doctest: +ELLIPSIS
-    OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
+    OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
            handle_unknown='error', n_values='auto', sparse=True)
     >>> enc.transform([[0, 1, 3]]).toarray()
     array([[ 1., 0., 0., 1., 0., 0., 0., 0., 1.]])

sklearn/cluster/hierarchical.py

Lines changed: 2 additions & 2 deletions
@@ -234,7 +234,7 @@ def ward_tree(X, connectivity=None, n_components=None, n_clusters=None,
     moments_1[:n_samples] = 1
     moments_2 = np.zeros((n_nodes, n_features), order='C')
     moments_2[:n_samples] = X
-    inertia = np.empty(len(coord_row), dtype=np.float, order='C')
+    inertia = np.empty(len(coord_row), dtype=np.float64, order='C')
     _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col,
                                     inertia)
     inertia = list(six.moves.zip(inertia, coord_row, coord_col))
@@ -279,7 +279,7 @@ def ward_tree(X, connectivity=None, n_components=None, n_clusters=None,
         coord_row = np.empty(coord_col.shape, dtype=np.intp, order='C')
         coord_row.fill(k)
         n_additions = len(coord_row)
-        ini = np.empty(n_additions, dtype=np.float, order='C')
+        ini = np.empty(n_additions, dtype=np.float64, order='C')
 
         _hierarchical.compute_ward_dist(moments_1, moments_2,
                                         coord_row, coord_col, ini)

sklearn/datasets/base.py

Lines changed: 4 additions & 4 deletions
@@ -279,7 +279,7 @@ def load_iris():
     target = np.empty((n_samples,), dtype=np.int)
 
     for i, ir in enumerate(data_file):
-        data[i] = np.asarray(ir[:-1], dtype=np.float)
+        data[i] = np.asarray(ir[:-1], dtype=np.float64)
         target[i] = np.asarray(ir[-1], dtype=np.int)
 
     with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
@@ -342,7 +342,7 @@ def load_breast_cancer():
     target = np.empty((n_samples,), dtype=np.int)
 
     for count, value in enumerate(data_file):
-        data[count] = np.asarray(value[:-1], dtype=np.float)
+        data[count] = np.asarray(value[:-1], dtype=np.float64)
         target[count] = np.asarray(value[-1], dtype=np.int)
 
     with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
@@ -536,8 +536,8 @@ def load_boston():
     feature_names = np.array(temp)
 
     for i, d in enumerate(data_file):
-        data[i] = np.asarray(d[:-1], dtype=np.float)
-        target[i] = np.asarray(d[-1], dtype=np.float)
+        data[i] = np.asarray(d[:-1], dtype=np.float64)
+        target[i] = np.asarray(d[-1], dtype=np.float64)
 
     return Bunch(data=data,
                  target=target,

sklearn/decomposition/factor_analysis.py

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ def fit(self, X, y=None):
         -------
         self
         """
-        X = check_array(X, copy=self.copy, dtype=np.float)
+        X = check_array(X, copy=self.copy, dtype=np.float64)
 
         n_samples, n_features = X.shape
         n_components = self.n_components

sklearn/decomposition/incremental_pca.py

Lines changed: 2 additions & 2 deletions
@@ -164,7 +164,7 @@ def fit(self, X, y=None):
         self.noise_variance_ = None
         self.var_ = None
         self.n_samples_seen_ = 0
-        X = check_array(X, dtype=np.float)
+        X = check_array(X, dtype=np.float64)
         n_samples, n_features = X.shape
 
         if self.batch_size is None:
@@ -190,7 +190,7 @@ def partial_fit(self, X, y=None):
         self: object
             Returns the instance itself.
         """
-        X = check_array(X, copy=self.copy, dtype=np.float)
+        X = check_array(X, copy=self.copy, dtype=np.float64)
         n_samples, n_features = X.shape
         if not hasattr(self, 'components_'):
             self.components_ = None

sklearn/ensemble/tests/test_gradient_boosting.py

Lines changed: 4 additions & 4 deletions
@@ -286,8 +286,8 @@ def test_regression_synthetic():
 
 
 def test_feature_importances():
-    X = np.array(boston.data, dtype=np.float32)
-    y = np.array(boston.target, dtype=np.float32)
+    X = np.array(boston.data, dtype=np.float64)
+    y = np.array(boston.target, dtype=np.float64)
 
     for presort in True, False:
         clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
@@ -558,11 +558,11 @@ def test_float_class_labels():
     # Test with float class labels.
     clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
 
-    float_y = np.asarray(y, dtype=np.float32)
+    float_y = np.asarray(y, dtype=np.float64)
 
     clf.fit(X, float_y)
     assert_array_equal(clf.predict(T),
-                       np.asarray(true_result, dtype=np.float32))
+                       np.asarray(true_result, dtype=np.float64))
     assert_equal(100, len(clf.estimators_))
 
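Unlike the np.float renames elsewhere in the commit, the changes in this test file are not pure aliasing fixes: np.float32 is a genuinely distinct single-precision type, so moving these arrays to np.float64 doubles their precision. A small illustration (plain NumPy, not from the commit):

    import numpy as np

    # 0.1 is not exactly representable in binary; float32 and float64
    # round it differently, so the widened values compare unequal.
    print(np.float32(0.1) == np.float64(0.1))  # False

    # Machine epsilon: ~7 significant decimal digits vs ~16.
    print(np.finfo(np.float32).eps)  # ~1.19e-07
    print(np.finfo(np.float64).eps)  # ~2.22e-16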

sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py

Lines changed: 7 additions & 7 deletions
@@ -71,7 +71,7 @@ def test_sample_weight_smoke():
     # least squares
     loss = LeastSquaresError(1)
     loss_wo_sw = loss(y, pred)
-    loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
+    loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float64))
     assert_almost_equal(loss_wo_sw, loss_w_sw)
 
 
@@ -113,30 +113,30 @@ def test_sample_weight_init_estimators():
 
 
 def test_weighted_percentile():
-    y = np.empty(102, dtype=np.float)
+    y = np.empty(102, dtype=np.float64)
     y[:50] = 0
     y[-51:] = 2
     y[-1] = 100000
     y[50] = 1
-    sw = np.ones(102, dtype=np.float)
+    sw = np.ones(102, dtype=np.float64)
     sw[-1] = 0.0
     score = _weighted_percentile(y, sw, 50)
     assert score == 1
 
 
 def test_weighted_percentile_equal():
-    y = np.empty(102, dtype=np.float)
+    y = np.empty(102, dtype=np.float64)
     y.fill(0.0)
-    sw = np.ones(102, dtype=np.float)
+    sw = np.ones(102, dtype=np.float64)
     sw[-1] = 0.0
     score = _weighted_percentile(y, sw, 50)
     assert score == 0
 
 
 def test_weighted_percentile_zero_weight():
-    y = np.empty(102, dtype=np.float)
+    y = np.empty(102, dtype=np.float64)
     y.fill(1.0)
-    sw = np.ones(102, dtype=np.float)
+    sw = np.ones(102, dtype=np.float64)
     sw.fill(0.0)
     score = _weighted_percentile(y, sw, 50)
     assert score == 1.0

sklearn/ensemble/weight_boosting.py

Lines changed: 3 additions & 3 deletions
@@ -111,7 +111,7 @@ def fit(self, X, y, sample_weight=None):
 
         if sample_weight is None:
             # Initialize weights to 1 / n_samples
-            sample_weight = np.empty(X.shape[0], dtype=np.float)
+            sample_weight = np.empty(X.shape[0], dtype=np.float64)
             sample_weight[:] = 1. / X.shape[0]
         else:
             # Normalize existing weights
@@ -128,8 +128,8 @@ def fit(self, X, y, sample_weight=None):
 
         # Clear any previous fit results
         self.estimators_ = []
-        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
-        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
+        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
+        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
 
         for iboost in range(self.n_estimators):
            # Boosting step
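As an aside on the first hunk (a stylistic note, not part of the commit): the np.empty-then-fill pattern used to build the uniform sample weights can equivalently be written in one step with np.full; a sketch:

    import numpy as np

    n_samples = 5
    # Same result as np.empty(n_samples, dtype=np.float64)
    # followed by sample_weight[:] = 1. / n_samples:
    sample_weight = np.full(n_samples, 1. / n_samples, dtype=np.float64)
    print(sample_weight)        # [0.2 0.2 0.2 0.2 0.2]
    print(sample_weight.sum())  # 1.0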

sklearn/feature_extraction/tests/test_image.py

Lines changed: 4 additions & 4 deletions
@@ -51,8 +51,8 @@ def test_grid_to_graph():
     assert_true(A.dtype == np.bool)
     A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
     assert_true(A.dtype == np.int)
-    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
-    assert_true(A.dtype == np.float)
+    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
+    assert_true(A.dtype == np.float64)
 
 
 def test_connect_regions():
@@ -75,12 +75,12 @@ def test_connect_regions_with_grid():
 
 
 def _downsampled_lena():
-    lena = sp.misc.lena().astype(np.float32)
+    lena = sp.misc.lena().astype(np.float64)
     lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
             + lena[1::2, 1::2])
     lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
             + lena[1::2, 1::2])
-    lena = lena.astype(np.float)
+    lena = lena.astype(np.float64)
     lena /= 16.0
     return lena

sklearn/feature_selection/tests/test_chi2.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ def test_chi2():
     chi2 = mkchi2(k=2).fit(X, y)
     assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
 
-    Xsp = csr_matrix(X, dtype=np.float)
+    Xsp = csr_matrix(X, dtype=np.float64)
     chi2 = mkchi2(k=2).fit(Xsp, y)
     assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
     Xtrans = chi2.transform(Xsp)

0 commit comments
