|
122 | 122 | # Finally, we use a ridge regression model. When the number of features is |
123 | 123 | # larger than the number of samples, it is more efficient to solve a ridge |
124 | 124 | # regression using the (equivalent) dual formulation, kernel ridge regression |
125 | | -# with a linear kernel. |
| 125 | +# with a linear kernel [2]_. |
126 | 126 | # Here, we have 3600 training samples, and 1705 * 4 = 6820 features (we |
127 | 127 | # multiply by 4 since we use 4 time delays), therefore we use kernel ridge |
128 | 128 | # regression. |
|
137 | 137 | from himalaya.kernel_ridge import KernelRidgeCV |
138 | 138 |
|
139 | 139 | ############################################################################### |
140 | | -# Moreover, ``himalaya`` implements different computational backends, including |
141 | | -# GPU backends. The available GPU backends are "torch_cuda" and "cupy". (These |
142 | | -# backends are only available if you installed the corresponding package with |
143 | | -# CUDA enabled. Check the pytorch/cupy documentation for install instructions.) |
| 140 | +# Interestingly, ``himalaya`` implements different computational backends, |
| 141 | +# including two backends that use GPU for faster computations. The two |
| 142 | +# available GPU backends are "torch_cuda" and "cupy". (These backends are only |
| 143 | +# available if you installed the corresponding package with CUDA enabled. Check |
| 144 | +# the ``pytorch``/``cupy`` documentation for install instructions.) |
144 | 145 | # |
145 | 146 | # Here we use the "torch_cuda" backend, but if the import fails we continue |
146 | 147 | # with the default "numpy" backend. The "numpy" backend is expected to be |
|
162 | 163 |
|
163 | 164 | ############################################################################### |
164 | 165 | # We use ``scikit-learn``'s ``Pipeline`` to link the different steps together. |
165 | | -# A ``Pipeline`` can be used as a regular estimator, calling |
166 | | -# ``pipeline.fit``, ``pipeline.predict``, etc. |
167 | | -# Using a pipeline can be useful to clarify the different steps, avoid |
168 | | -# cross-validation mistakes, or automatically cache intermediate results. |
| 166 | +# A ``Pipeline`` can be used as a regular estimator, calling ``pipeline.fit``, |
| 167 | +# ``pipeline.predict``, etc. Using a ``Pipeline`` can be useful to clarify the |
| 168 | +# different steps, avoid cross-validation mistakes, or automatically cache |
| 169 | +# intermediate results. See the ``scikit-learn`` `documentation |
| 170 | +# <https://scikit-learn.org/stable/modules/compose.html>`_ for more
| 171 | +# information. |
169 | 172 | from sklearn.pipeline import make_pipeline |
170 | 173 | pipeline = make_pipeline( |
171 | 174 | scaler, |
|
174 | 177 | ) |
175 | 178 |
|
176 | 179 | ############################################################################### |
177 | | -# We can display the scikit-learn pipeline with an HTML diagram. |
| 180 | +# We can display the ``scikit-learn`` pipeline with an HTML diagram. |
178 | 181 | from sklearn import set_config |
179 | 182 | set_config(display='diagram') |
180 | 183 | pipeline |
|
359 | 362 | # A continuous semantic space describes the representation of thousands of |
360 | 363 | # object and action categories across the human brain. Neuron, 76(6), |
361 | 364 | # 1210-1224. |
| 365 | +# |
| 366 | +# .. [2] Saunders, C., Gammerman, A., & Vovk, V. (1998). |
| 367 | +# Ridge regression learning algorithm in dual variables. |
0 commit comments