 from sklearn.ensemble import RandomTreesEmbedding
 from sklearn.grid_search import GridSearchCV
 from sklearn.svm import LinearSVC
+from sklearn.utils.fixes import bincount
 from sklearn.utils.validation import check_random_state

 from sklearn.tree.tree import SPARSE_SPLITTERS
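Note on the new import: in this era of the codebase, sklearn.utils.fixes collects small compatibility wrappers, and bincount is taken from there rather than from numpy directly. My recollection, hedged, is that very old numpy releases raised an error when np.bincount was handed an empty array; the wrapper would then look roughly like the sketch below (an illustration of the idea, not the actual shim):

    import numpy as np

    def bincount(x, weights=None, minlength=None):
        # Sketch of a compatibility wrapper: assume old numpy's bincount
        # raised on empty input, so handle that case by hand.
        if len(x) > 0:
            if minlength is None:
                return np.bincount(x, weights=weights)
            return np.bincount(x, weights=weights, minlength=minlength)
        # Empty input: return an all-zero count array of the requested length.
        return np.zeros(minlength or 0, dtype=np.intp)

On any numpy new enough to take the fast path, this behaves exactly like np.bincount.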
@@ -250,7 +251,7 @@ def entropy(samples):
     e = 0.
     n_samples = len(samples)

-    for count in np.bincount(samples):
+    for count in bincount(samples):
         p = 1. * count / n_samples
         if p > 0:
             e -= p * np.log2(p)
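For readers skimming the hunk: entropy computes the Shannon entropy of an integer label vector, in bits, from the per-class counts that bincount returns (H = -sum_k p_k * log2(p_k)). Two sanity checks of the expected values, assuming the entropy defined above:

    import numpy as np

    # Balanced binary labels: two classes at p = 0.5 each -> 1 bit.
    entropy(np.array([0, 0, 1, 1]))   # 1.0

    # A pure vector: a single class at p = 1.0 -> 0 bits.
    entropy(np.array([0, 0, 0, 0]))   # 0.0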
@@ -260,7 +261,7 @@ def entropy(samples):
 def mdi_importance(X_m, X, y):
     n_samples, p = X.shape

-    variables = range(p)
+    variables = list(range(p))
     variables.pop(X_m)
     imp = 0.

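The list(range(p)) change is a Python 3 compatibility fix rather than a behavior change: under Python 3, range returns a lazy sequence object with no pop method, so it has to be materialized before variables.pop(X_m) can drop the variable under study. Minimal illustration:

    p, X_m = 4, 2

    variables = list(range(p))   # [0, 1, 2, 3]; a bare range(p) has no .pop()
    variables.pop(X_m)           # remove the variable being scored
    # variables is now [0, 1, 3]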
@@ -691,7 +692,7 @@ def check_min_samples_leaf(name, X, y):
                           random_state=0)
     est.fit(X, y)
     out = est.estimators_[0].tree_.apply(X)
-    node_counts = np.bincount(out)
+    node_counts = bincount(out)
     # drop inner nodes
     leaf_count = node_counts[node_counts != 0]
     assert_greater(np.min(leaf_count), 4,
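What this assertion leans on: tree_.apply(X) maps each sample to the index of the leaf it lands in, so bincounting the output yields per-node sample counts, and inner nodes, which never appear in the output, show up as zeros that can be filtered away. A self-contained sketch of the same idea on a plain decision tree (the dataset and names here are illustrative, not the test's):

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_classification(n_samples=100, random_state=0)
    tree = DecisionTreeClassifier(min_samples_leaf=5, random_state=0).fit(X, y)

    leaf_idx = tree.apply(X)                     # leaf index for every sample
    node_counts = np.bincount(leaf_idx)          # samples per node id
    leaf_counts = node_counts[node_counts != 0]  # inner nodes count zero
    assert leaf_counts.min() >= 5                # min_samples_leaf is honored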
@@ -725,7 +726,7 @@ def check_min_weight_fraction_leaf(name, X, y):
         est.bootstrap = False
         est.fit(X, y, sample_weight=weights)
         out = est.estimators_[0].tree_.apply(X)
-        node_weights = np.bincount(out, weights=weights)
+        node_weights = bincount(out, weights=weights)
         # drop inner nodes
         leaf_weights = node_weights[node_weights != 0]
         assert_greater_equal(
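Same pattern, weighted: passing weights= to bincount sums the sample weights landing in each node instead of counting samples, which is exactly what a min_weight_fraction_leaf check needs. A toy example of the weighted aggregation:

    import numpy as np

    # Hypothetical leaf assignments for five samples, plus their weights.
    out = np.array([2, 2, 5, 5, 5])
    weights = np.array([0.1, 0.4, 0.2, 0.2, 0.1])

    node_weights = np.bincount(out, weights=weights)
    # array([0. , 0. , 0.5, 0. , 0. , 0.5])
    leaf_weights = node_weights[node_weights != 0]   # [0.5, 0.5]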