|
3 | 3 | Compute the explainable variance |
4 | 4 | ================================ |
5 | 5 |
|
6 | | -Before fitting voxelwise models to the fMRI responses, we can compute the |
7 | | -explainable variance on the test set repeats. |
8 | | -
|
9 | | -The explainable variance is the part of the fMRI responses that can be |
10 | | -explained by voxelwise modeling. It is thus the upper bound of voxelwise |
11 | | -modeling performances. |
12 | | -
|
13 | | -Indeed, we can decompose the signal into a sum of two components, one |
14 | | -component that is repeated if we repeat the same experiment, and one component |
15 | | -that changes for each repeat. Because voxelwise modeling would use the same |
16 | | -features for each repeat, it can only model the component that is common to |
17 | | -all repeats. This shared component can be estimated by taking the mean over |
18 | | -repeats of the same experiment. |
| 6 | +Before fitting voxelwise models to the fMRI responses, we can estimate the |
| 7 | +*explainable variance*. The explainable variance is the part of the fMRI |
| 8 | +responses that can be explained by the voxelwise modeling framework. |
| 9 | +
|
| 10 | +Indeed, we can decompose the signal into a sum of two components, one component |
| 11 | +that is repeated if we repeat the same experiment, and one component that |
| 12 | +changes for each repeat. Because voxelwise modeling would use the same features |
| 13 | +for each repeat, it can only model the component that is common to all repeats. |
| 14 | +This shared component can be estimated by taking the mean over repeats of the |
| 15 | +same experiment. The variance of this shared component, that we call the |
| 16 | +explainable variance, is the upper bound of the voxelwise modeling |
| 17 | +performances. |
19 | 18 | """ |
20 | 19 | # sphinx_gallery_thumbnail_number = 2 |
21 | 20 | ############################################################################### |
22 | | - |
23 | | -# path of the data directory |
| 21 | +# Path of the data directory |
24 | 22 | import os |
25 | 23 | from voxelwise_tutorials.io import get_data_home |
26 | 24 | directory = os.path.join(get_data_home(), "vim-4") |
27 | 25 | print(directory) |
28 | 26 |
|
| 27 | +############################################################################### |
| 28 | + |
29 | 29 | # modify to use another subject |
30 | 30 | subject = "S01" |
31 | 31 |
|
32 | 32 | ############################################################################### |
33 | 33 | # Compute the explainable variance |
34 | 34 | # -------------------------------- |
35 | 35 | import numpy as np |
36 | | - |
37 | 36 | from voxelwise_tutorials.io import load_hdf5_array |
38 | 37 |
|
39 | 38 | ############################################################################### |
40 | | -# First, we load the fMRI responses on the test set, which contains 10 repeats. |
| 39 | +# First, we load the fMRI responses on the test set, which contains ten (10) |
| 40 | +# repeats. |
41 | 41 | file_name = os.path.join(directory, 'responses', f'{subject}_responses.hdf') |
42 | 42 | Y_test = load_hdf5_array(file_name, key="Y_test") |
| 43 | +print("(n_repeats, n_samples_test, n_voxels) =", Y_test.shape) |
43 | 44 |
|
44 | 45 | ############################################################################### |
45 | 46 | # Then, we compute the explainable variance per voxel. |
|
48 | 49 | # taking the variance of the average response. Then, we compute the |
49 | 50 | # explainable variance by dividing these two quantities. |
50 | 51 | # Finally, a correction can be applied to account for small numbers of repeat |
51 | | -# (parameter ``bias_correction``). |
| 52 | +# (through the parameter ``bias_correction``). |
52 | 53 |
|
53 | 54 | from voxelwise_tutorials.utils import explainable_variance |
54 | 55 | ev = explainable_variance(Y_test, bias_correction=False) |
| 56 | +print("(n_voxels,) =", ev.shape) |
55 | 57 |
|
56 | 58 | ############################################################################### |
57 | 59 | # We can plot the distribution of explainable variance over voxels. |
|
68 | 70 | ############################################################################### |
69 | 71 | # We see that most voxels have a rather low explainable variance, around 0.1 |
70 | 72 | # (when not using the bias correction). This is expected, since most voxels are |
71 | | -# not directly driven by a visual stimulus. |
72 | | -# We also see that some voxels reach an explainable variance of 0.7, which is |
73 | | -# quite high. It means that these voxels consistently record the same activity |
74 | | -# across a repeated stimulus, and thus are good targets for encoding models. |
| 73 | +# not directly driven by a visual stimulus, and their activity changes over
| 74 | +# repeats. We also see that some voxels reach an explainable variance of 0.7,
| 75 | +# which is quite high. It means that these voxels consistently record the same |
| 76 | +# activity across a repeated stimulus, and thus are good targets for encoding |
| 77 | +# models. |
75 | 78 |
|
76 | 79 | ############################################################################### |
77 | 80 | # Map to subject flatmap |
78 | 81 | # ---------------------- |
79 | 82 | # |
80 | 83 | # To better understand the distribution of explainable variance, we map the |
81 | | -# values to the subject brain. This can be done with |
82 | | -# `pycortex <https://gallantlab.github.io/pycortex/>`_, which can create |
83 | | -# interactive 3D viewers displayed in any modern browser. |
84 | | -# ``Pycortex`` can also display flatten maps of the cortical surface, to |
85 | | -# visualize the entire cortical surface at once. |
| 84 | +# values to the subject brain. This can be done with `pycortex |
| 85 | +# <https://gallantlab.github.io/pycortex/>`_, which can create interactive 3D |
| 86 | +# viewers to be displayed in any modern browser. ``pycortex`` can also display |
| 87 | +# flattened maps of the cortical surface, to visualize the entire cortical
| 88 | +# surface at once. |
86 | 89 | # |
87 | 90 | # Here, we do not share the anatomical information of the subjects for privacy |
88 | | -# concerns. Instead, we provide two mappers, (i) to map the voxels to a |
89 | | -# subject-specific flatmap, or (ii) to map the voxels to the Freesurfer average |
90 | | -# cortical surface ("fsaverage"). |
| 91 | +# concerns. Instead, we provide two mappers: |
91 | 92 | # |
92 | | -# The first mapper is a sparse CSR matrix that map each voxel to a set of pixel |
93 | | -# in a flatmap. To ease its use, we provide here an example function |
94 | | -# ``plot_flatmap_from_mapper``. |
| 93 | +# - to map the voxels to a (subject-specific) flatmap |
| 94 | +# - to map the voxels to the Freesurfer average cortical surface ("fsaverage") |
| 95 | +# |
| 96 | +# The first mapper is a 2D matrix of shape (n_pixels, n_voxels), that maps
| 97 | +# each voxel to a set of pixels in a flatmap. The matrix is efficiently stored
| 98 | +# using a ``scipy`` sparse CSR matrix format. To ease the use of this mapper,
| 99 | +# we provide an example function ``plot_flatmap_from_mapper``. This function
| 100 | +# mimics the behavior of ``pycortex.quickshow``.
95 | 101 |
|
96 | 102 | from voxelwise_tutorials.viz import plot_flatmap_from_mapper |
97 | 103 |
|
|
100 | 106 | plt.show() |
101 | 107 |
|
102 | 108 | ############################################################################### |
103 | | -# We can see that the explainable variance is mainly located in the visual |
104 | | -# cortex, in early regions like V1, V2, V3, or in higher-level regions like |
105 | | -# EBA, FFA or IPS. This was expected since this is a purely visual experiment. |
| 109 | +# This figure is a flattened map of the cortical surface. A number of regions
| 110 | +# interest (ROIs) have been labeled to ease the interpretation. If you have |
| 111 | +# never seen such a flatmap, we recommend taking a look at a `pycortex brain |
| 112 | +# viewer <https://gallantlab.org/huth2016/>`_, which displays the brain in 3D. |
| 113 | +# In this viewer, press "I" to inflate the brain, "F" to flatten the surface, |
| 114 | +# and "R" to reset the view (or use the ``surface/unfold`` cursor on the right |
| 115 | +# menu). This viewer should help you understand the correspondence between the
| 116 | +# flattened and the folded cortical surface of the brain.
| 117 | + |
| 118 | +############################################################################### |
| 119 | +# On this flatmap, we can see that the explainable variance is mainly located |
| 120 | +# in the visual cortex, in early visual regions like V1, V2, V3, or in |
| 121 | +# higher-level regions like EBA, FFA or IPS. This was expected since this is a |
| 122 | +# purely visual experiment. |
106 | 123 |
|
107 | 124 | ############################################################################### |
108 | | -# Map to fsaverage |
109 | | -# ---------------- |
| 125 | +# Map to "fsaverage" |
| 126 | +# ------------------ |
110 | 127 | # |
111 | 128 | # The second mapper we provide maps the voxel data to a Freesurfer |
112 | 129 | # average surface ("fsaverage"), that can be used in ``pycortex``. |
113 | | -# First, let's download the fsaverage surface if it does not exist |
| 130 | +# First, let's download the "fsaverage" surface. |
114 | 131 |
|
115 | 132 | import cortex |
116 | 133 |
|
|
120 | 137 | cortex.utils.download_subject(subject_id=surface) |
121 | 138 |
|
122 | 139 | ############################################################################### |
123 | | -# Then, we load the fsaverage mapper. The mapper is a sparse CSR matrix, which |
124 | | -# map each voxel to some vertices in the fsaverage surface. |
125 | | -# The mapper is applied with a dot product ``@``. |
| 140 | +# Then, we load the "fsaverage" mapper. The mapper is a matrix of shape |
| 141 | +# (n_vertices, n_voxels), which maps each voxel to some vertices in the
| 142 | +# fsaverage surface. It is also stored with a sparse CSR matrix format. The |
| 143 | +# mapper is applied with a dot product ``@`` (equivalent to ``np.dot``). |
126 | 144 | from voxelwise_tutorials.io import load_hdf5_sparse_array |
127 | 145 | voxel_to_fsaverage = load_hdf5_sparse_array(mapper_file, |
128 | 146 | key='voxel_to_fsaverage') |
129 | 147 | ev_projected = voxel_to_fsaverage @ ev |
| 148 | +print("(n_vertices,) =", ev_projected.shape) |
130 | 149 |
|
131 | 150 | ############################################################################### |
132 | | -# We can then create a ``Vertex`` object with the projected data. |
133 | | -# This object can be used either in a ``pycortex`` interactive 3D viewer, or |
134 | | -# in a ``matplotlib`` figure showing directly the flatmap. |
| 151 | +# We can then create a ``Vertex`` object in ``pycortex``, containing the |
| 152 | +# projected data. This object can be used either in a ``pycortex`` interactive |
| 153 | +# 3D viewer, or in a ``matplotlib`` figure showing only the flatmap. |
135 | 154 |
|
136 | 155 | vertex = cortex.Vertex(ev_projected, surface, vmin=0, vmax=0.7, cmap='inferno') |
137 | 156 |
|
|
0 commit comments