Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 42173fd

Browse files
authored
MNT add isort to ruff's rules (#26649)
1 parent 4a8b4f9 commit 42173fd

File tree

771 files changed

+5515
-5563
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

771 files changed

+5515
-5563
lines changed

.github/scripts/label_title_regex.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
11
"""Labels PRs based on title. Must be run in a github action with the
22
pull_request_target event."""
3-
from github import Github
4-
import os
53
import json
4+
import os
65
import re
76

7+
from github import Github
8+
89
context_dict = json.loads(os.getenv("CONTEXT_GITHUB"))
910

1011
repo = context_dict["repository"]

.pre-commit-config.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ repos:
1414
rev: v0.0.272
1515
hooks:
1616
- id: ruff
17+
args: ["--fix", "--show-source"]
1718
- repo: https://github.com/pre-commit/mirrors-mypy
1819
rev: v1.3.0
1920
hooks:

asv_benchmarks/benchmarks/cluster.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from sklearn.cluster import KMeans, MiniBatchKMeans
22

33
from .common import Benchmark, Estimator, Predictor, Transformer
4-
from .datasets import _blobs_dataset, _20newsgroups_highdim_dataset
4+
from .datasets import _20newsgroups_highdim_dataset, _blobs_dataset
55
from .utils import neg_mean_inertia
66

77

asv_benchmarks/benchmarks/common.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
1-
import os
1+
import itertools
22
import json
3-
import timeit
3+
import os
44
import pickle
5-
import itertools
5+
import timeit
66
from abc import ABC, abstractmethod
7-
from pathlib import Path
87
from multiprocessing import cpu_count
8+
from pathlib import Path
99

1010
import numpy as np
1111

asv_benchmarks/benchmarks/datasets.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,22 @@
1+
from pathlib import Path
2+
13
import numpy as np
24
import scipy.sparse as sp
35
from joblib import Memory
4-
from pathlib import Path
56

6-
from sklearn.decomposition import TruncatedSVD
77
from sklearn.datasets import (
8-
make_blobs,
98
fetch_20newsgroups,
9+
fetch_olivetti_faces,
1010
fetch_openml,
1111
load_digits,
12-
make_regression,
12+
make_blobs,
1313
make_classification,
14-
fetch_olivetti_faces,
14+
make_regression,
1515
)
16-
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
16+
from sklearn.decomposition import TruncatedSVD
1717
from sklearn.feature_extraction.text import TfidfVectorizer
1818
from sklearn.model_selection import train_test_split
19+
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
1920

2021
# memory location for caching datasets
2122
M = Memory(location=str(Path(__file__).resolve().parent / "cache"))

asv_benchmarks/benchmarks/decomposition.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
from sklearn.decomposition import PCA, DictionaryLearning, MiniBatchDictionaryLearning
22

33
from .common import Benchmark, Estimator, Transformer
4-
from .datasets import _olivetti_faces_dataset, _mnist_dataset
5-
from .utils import make_pca_scorers, make_dict_learning_scorers
4+
from .datasets import _mnist_dataset, _olivetti_faces_dataset
5+
from .utils import make_dict_learning_scorers, make_pca_scorers
66

77

88
class PCABenchmark(Transformer, Estimator, Benchmark):

asv_benchmarks/benchmarks/ensemble.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from sklearn.ensemble import (
2-
RandomForestClassifier,
32
GradientBoostingClassifier,
43
HistGradientBoostingClassifier,
4+
RandomForestClassifier,
55
)
66

77
from .common import Benchmark, Estimator, Predictor

asv_benchmarks/benchmarks/linear_model.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
from sklearn.linear_model import (
2-
LogisticRegression,
3-
Ridge,
42
ElasticNet,
53
Lasso,
64
LinearRegression,
5+
LogisticRegression,
6+
Ridge,
77
SGDRegressor,
88
)
99

benchmarks/bench_20newsgroups.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,19 @@
1-
from time import time
21
import argparse
3-
import numpy as np
2+
from time import time
43

5-
from sklearn.dummy import DummyClassifier
4+
import numpy as np
65

76
from sklearn.datasets import fetch_20newsgroups_vectorized
8-
from sklearn.metrics import accuracy_score
9-
from sklearn.utils.validation import check_array
10-
11-
from sklearn.ensemble import RandomForestClassifier
12-
from sklearn.ensemble import ExtraTreesClassifier
13-
from sklearn.ensemble import AdaBoostClassifier
7+
from sklearn.dummy import DummyClassifier
8+
from sklearn.ensemble import (
9+
AdaBoostClassifier,
10+
ExtraTreesClassifier,
11+
RandomForestClassifier,
12+
)
1413
from sklearn.linear_model import LogisticRegression
14+
from sklearn.metrics import accuracy_score
1515
from sklearn.naive_bayes import MultinomialNB
16+
from sklearn.utils.validation import check_array
1617

1718
ESTIMATORS = {
1819
"dummy": DummyClassifier(),

benchmarks/bench_covertype.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,20 +45,24 @@
4545
# Arnaud Joly <[email protected]>
4646
# License: BSD 3 clause
4747

48+
import argparse
4849
import os
4950
from time import time
50-
import argparse
51+
5152
import numpy as np
5253
from joblib import Memory
5354

5455
from sklearn.datasets import fetch_covtype, get_data_home
55-
from sklearn.svm import LinearSVC
56-
from sklearn.linear_model import SGDClassifier, LogisticRegression
56+
from sklearn.ensemble import (
57+
ExtraTreesClassifier,
58+
GradientBoostingClassifier,
59+
RandomForestClassifier,
60+
)
61+
from sklearn.linear_model import LogisticRegression, SGDClassifier
62+
from sklearn.metrics import zero_one_loss
5763
from sklearn.naive_bayes import GaussianNB
64+
from sklearn.svm import LinearSVC
5865
from sklearn.tree import DecisionTreeClassifier
59-
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
60-
from sklearn.ensemble import GradientBoostingClassifier
61-
from sklearn.metrics import zero_one_loss
6266
from sklearn.utils import check_array
6367

6468
# Memoize the data extraction and memory map the resulting

benchmarks/bench_feature_expansions.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
1+
from time import time
2+
13
import matplotlib.pyplot as plt
24
import numpy as np
35
import scipy.sparse as sparse
6+
47
from sklearn.preprocessing import PolynomialFeatures
5-
from time import time
68

79
degree = 2
810
trials = 3

benchmarks/bench_glm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,10 @@
55
66
"""
77
from datetime import datetime
8+
89
import numpy as np
9-
from sklearn import linear_model
1010

11+
from sklearn import linear_model
1112

1213
if __name__ == "__main__":
1314
import matplotlib.pyplot as plt

benchmarks/bench_glmnet.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,11 @@
1616
1717
In both cases, only 10% of the features are informative.
1818
"""
19-
import numpy as np
2019
import gc
2120
from time import time
21+
22+
import numpy as np
23+
2224
from sklearn.datasets import make_regression
2325

2426
alpha = 0.1
@@ -45,11 +47,11 @@ def bench(factory, X, Y, X_test, Y_test, ref_coef):
4547

4648

4749
if __name__ == "__main__":
48-
from glmnet.elastic_net import Lasso as GlmnetLasso
49-
from sklearn.linear_model import Lasso as ScikitLasso
50-
5150
# Delayed import of matplotlib.pyplot
5251
import matplotlib.pyplot as plt
52+
from glmnet.elastic_net import Lasso as GlmnetLasso
53+
54+
from sklearn.linear_model import Lasso as ScikitLasso
5355

5456
scikit_results = []
5557
glmnet_results = []

benchmarks/bench_hist_gradient_boosting.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,16 @@
1-
from time import time
21
import argparse
2+
from time import time
33

44
import matplotlib.pyplot as plt
55
import numpy as np
6-
from sklearn.model_selection import train_test_split
7-
from sklearn.ensemble import HistGradientBoostingRegressor
8-
from sklearn.ensemble import HistGradientBoostingClassifier
9-
from sklearn.datasets import make_classification
10-
from sklearn.datasets import make_regression
11-
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
126

7+
from sklearn.datasets import make_classification, make_regression
8+
from sklearn.ensemble import (
9+
HistGradientBoostingClassifier,
10+
HistGradientBoostingRegressor,
11+
)
12+
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
13+
from sklearn.model_selection import train_test_split
1314

1415
parser = argparse.ArgumentParser()
1516
parser.add_argument("--n-leaf-nodes", type=int, default=31)

benchmarks/bench_hist_gradient_boosting_adult.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,14 @@
44
import numpy as np
55
import pandas as pd
66

7-
from sklearn.model_selection import train_test_split
8-
from sklearn.compose import make_column_transformer, make_column_selector
7+
from sklearn.compose import make_column_selector, make_column_transformer
98
from sklearn.datasets import fetch_openml
10-
from sklearn.metrics import accuracy_score, roc_auc_score
119
from sklearn.ensemble import HistGradientBoostingClassifier
1210
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
11+
from sklearn.metrics import accuracy_score, roc_auc_score
12+
from sklearn.model_selection import train_test_split
1313
from sklearn.preprocessing import OrdinalEncoder
1414

15-
1615
parser = argparse.ArgumentParser()
1716
parser.add_argument("--n-leaf-nodes", type=int, default=31)
1817
parser.add_argument("--n-trees", type=int, default=100)

benchmarks/bench_hist_gradient_boosting_categorical_only.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,10 @@
11
import argparse
22
from time import time
33

4-
from sklearn.preprocessing import KBinsDiscretizer
54
from sklearn.datasets import make_classification
65
from sklearn.ensemble import HistGradientBoostingClassifier
76
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
8-
7+
from sklearn.preprocessing import KBinsDiscretizer
98

109
parser = argparse.ArgumentParser()
1110
parser.add_argument("--n-leaf-nodes", type=int, default=31)

benchmarks/bench_hist_gradient_boosting_higgsboson.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,17 @@
1-
from urllib.request import urlretrieve
1+
import argparse
22
import os
33
from gzip import GzipFile
44
from time import time
5-
import argparse
5+
from urllib.request import urlretrieve
66

77
import numpy as np
88
import pandas as pd
99
from joblib import Memory
10-
from sklearn.model_selection import train_test_split
11-
from sklearn.metrics import accuracy_score, roc_auc_score
10+
1211
from sklearn.ensemble import HistGradientBoostingClassifier
1312
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
14-
13+
from sklearn.metrics import accuracy_score, roc_auc_score
14+
from sklearn.model_selection import train_test_split
1515

1616
parser = argparse.ArgumentParser()
1717
parser.add_argument("--n-leaf-nodes", type=int, default=31)

benchmarks/bench_hist_gradient_boosting_threading.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,19 @@
1-
from time import time
21
import argparse
32
import os
43
from pprint import pprint
4+
from time import time
55

66
import numpy as np
77
from threadpoolctl import threadpool_limits
8+
89
import sklearn
9-
from sklearn.model_selection import train_test_split
10-
from sklearn.ensemble import HistGradientBoostingRegressor
11-
from sklearn.ensemble import HistGradientBoostingClassifier
12-
from sklearn.datasets import make_classification
13-
from sklearn.datasets import make_regression
10+
from sklearn.datasets import make_classification, make_regression
11+
from sklearn.ensemble import (
12+
HistGradientBoostingClassifier,
13+
HistGradientBoostingRegressor,
14+
)
1415
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
15-
16+
from sklearn.model_selection import train_test_split
1617

1718
parser = argparse.ArgumentParser()
1819
parser.add_argument("--n-leaf-nodes", type=int, default=31)
@@ -290,8 +291,8 @@ def one_run(n_threads, n_samples):
290291

291292

292293
if args.plot or args.plot_filename:
293-
import matplotlib.pyplot as plt
294294
import matplotlib
295+
import matplotlib.pyplot as plt
295296

296297
fig, axs = plt.subplots(2, figsize=(12, 12))
297298

benchmarks/bench_isolation_forest.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,13 @@
1717
"""
1818

1919
from time import time
20-
import numpy as np
20+
2121
import matplotlib.pyplot as plt
22+
import numpy as np
2223

24+
from sklearn.datasets import fetch_covtype, fetch_kddcup99, fetch_openml
2325
from sklearn.ensemble import IsolationForest
24-
from sklearn.metrics import roc_curve, auc
25-
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_openml
26+
from sklearn.metrics import auc, roc_curve
2627
from sklearn.preprocessing import LabelBinarizer
2728
from sklearn.utils import shuffle as sh
2829

benchmarks/bench_isotonic.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,15 @@
1010
This allows the scaling of the algorithm with the problem size to be
1111
visualized and understood.
1212
"""
13-
import numpy as np
13+
import argparse
1414
import gc
1515
from datetime import datetime
16-
from sklearn.isotonic import isotonic_regression
17-
from scipy.special import expit
16+
1817
import matplotlib.pyplot as plt
19-
import argparse
18+
import numpy as np
19+
from scipy.special import expit
20+
21+
from sklearn.isotonic import isotonic_regression
2022

2123

2224
def generate_perturbed_logarithm_dataset(size):

benchmarks/bench_kernel_pca_solvers_time_vs_n_components.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,13 +39,12 @@
3939

4040
import time
4141

42-
import numpy as np
4342
import matplotlib.pyplot as plt
44-
43+
import numpy as np
4544
from numpy.testing import assert_array_almost_equal
46-
from sklearn.decomposition import KernelPCA
47-
from sklearn.datasets import make_circles
4845

46+
from sklearn.datasets import make_circles
47+
from sklearn.decomposition import KernelPCA
4948

5049
print(__doc__)
5150

0 commit comments

Comments
 (0)