@@ -37,17 +37,16 @@ is an estimator object::
3737 >>> from sklearn.pipeline import Pipeline
3838 >>> from sklearn.svm import SVC
3939 >>> from sklearn.decomposition import PCA
40- >>> estimators = [('reduce_dim', PCA()), ('svm', SVC())]
41- >>> clf = Pipeline(estimators)
42- >>> clf # doctest: +NORMALIZE_WHITESPACE
40+ >>> estimators = [('reduce_dim', PCA()), ('clf', SVC())]
41+ >>> pipe = Pipeline(estimators)
42+ >>> pipe # doctest: +NORMALIZE_WHITESPACE
4343 Pipeline(steps=[('reduce_dim', PCA(copy=True, iterated_power=4,
4444 n_components=None, random_state=None, svd_solver='auto', tol=0.0,
45- whiten=False)), ('svm', SVC(C=1.0, cache_size=200, class_weight=None,
45+ whiten=False)), ('clf', SVC(C=1.0, cache_size=200, class_weight=None,
4646 coef0=0.0, decision_function_shape=None, degree=3, gamma='auto',
4747 kernel='rbf', max_iter=-1, probability=False, random_state=None,
4848 shrinking=True, tol=0.001, verbose=False))])
4949
50-
5150The utility function :func:`make_pipeline` is a shorthand
5251for constructing pipelines;
5352it takes a variable number of estimators and returns a pipeline,
@@ -64,23 +63,23 @@ filling in the names automatically::
6463
6665The estimators of a pipeline are stored as a list in the ``steps`` attribute::
6665
67- >>> clf.steps[0]
66+ >>> pipe.steps[0]
6867 ('reduce_dim', PCA(copy=True, iterated_power=4, n_components=None, random_state=None,
6968 svd_solver='auto', tol=0.0, whiten=False))
7069
7170and as a ``dict`` in ``named_steps``::
7271
73- >>> clf.named_steps['reduce_dim']
72+ >>> pipe.named_steps['reduce_dim']
7473 PCA(copy=True, iterated_power=4, n_components=None, random_state=None,
7574 svd_solver='auto', tol=0.0, whiten=False)
7675
7776Parameters of the estimators in the pipeline can be accessed using the
7877``<estimator>__<parameter>`` syntax::
7978
80- >>> clf.set_params(svm__C=10) # doctest: +NORMALIZE_WHITESPACE
79+ >>> pipe.set_params(clf__C=10) # doctest: +NORMALIZE_WHITESPACE
8180 Pipeline(steps=[('reduce_dim', PCA(copy=True, iterated_power=4,
8281 n_components=None, random_state=None, svd_solver='auto', tol=0.0,
83- whiten=False)), ('svm', SVC(C=10, cache_size=200, class_weight=None,
82+ whiten=False)), ('clf', SVC(C=10, cache_size=200, class_weight=None,
8483 coef0=0.0, decision_function_shape=None, degree=3, gamma='auto',
8584 kernel='rbf', max_iter=-1, probability=False, random_state=None,
8685 shrinking=True, tol=0.001, verbose=False))])
@@ -90,9 +89,17 @@ This is particularly important for doing grid searches::
9089
9190 >>> from sklearn.model_selection import GridSearchCV
9291 >>> params = dict(reduce_dim__n_components=[2, 5, 10],
93- ... svm__C=[0.1, 10, 100])
94- >>> grid_search = GridSearchCV(clf, param_grid=params)
92+ ... clf__C=[0.1, 10, 100])
93+ >>> grid_search = GridSearchCV(pipe, param_grid=params)
94+
95+ Individual steps may also be replaced as parameters, and non-final steps may be
96+ ignored by setting them to ``None``::
9597
98+ >>> from sklearn.linear_model import LogisticRegression
99+ >>> params = dict(reduce_dim=[None, PCA(5), PCA(10)],
100+ ... clf=[SVC(), LogisticRegression()],
101+ ... clf__C=[0.1, 10, 100])
102+ >>> grid_search = GridSearchCV(pipe, param_grid=params)
96103
97104.. topic:: Examples:
98105
@@ -172,6 +179,15 @@ Like pipelines, feature unions have a shorthand constructor called
172179:func:`make_union` that does not require explicit naming of the components.
173180
174181
182+ Like ``Pipeline``, individual steps may be replaced using ``set_params``,
183+ and ignored by setting to ``None``::
184+
185+ >>> combined.set_params(kernel_pca=None) # doctest: +NORMALIZE_WHITESPACE
186+ FeatureUnion(n_jobs=1, transformer_list=[('linear_pca', PCA(copy=True,
187+ iterated_power=4, n_components=None, random_state=None,
188+ svd_solver='auto', tol=0.0, whiten=False)), ('kernel_pca', None)],
189+ transformer_weights=None)
190+
175191.. topic:: Examples:
176192
 177193 * :ref:`sphx_glr_auto_examples_feature_stacker.py`
0 commit comments