|
332 | 332 | ############################################################################### |
333 | 333 | # Here, we are only interested in the voxels with good generalization |
334 | 334 | # performances. We select an arbitrary threshold of 0.05 (R^2 score). |
335 | | -primal_coef = primal_coef[:, scores > 0.05] |
| 335 | +primal_coef_selection = primal_coef[:, scores > 0.05] |
336 | 336 |
|
337 | 337 | ############################################################################### |
338 | 338 | # Then, we aggregate the coefficients across the different delays. |
339 | 339 |
|
340 | 340 | # split the ridge coefficients per delays |
341 | 341 | delayer = pipeline.named_steps['delayer'] |
342 | | -primal_coef_per_delay = delayer.reshape_by_delays(primal_coef, axis=0) |
| 342 | +primal_coef_per_delay = delayer.reshape_by_delays(primal_coef_selection, |
| 343 | + axis=0) |
343 | 344 | print("(n_delays, n_features, n_voxels) =", primal_coef_per_delay.shape) |
344 | 345 |
|
345 | 346 | # average over delays |
|
397 | 398 | plt.show() |
398 | 399 |
|
399 | 400 | ############################################################################### |
400 | | -# According to the authors of [1]_, "this principal component distinguishes |
| 401 | +# According to the authors of [1]_, "this principal component distinguishes |
401 | 402 | # between categories with high stimulus energy (e.g. moving objects like |
402 | 403 | # `person` and `vehicle`) and those with low stimulus energy (e.g. stationary |
403 | 404 | # objects like `sky` and `city`)". |
404 | 405 | # |
405 | 406 | # Our result is slightly different from that in [1]_, since we only use one subject, |
406 | 407 | # and the voxel selection is slightly different. We also use a different |
407 | 408 | # regularization parameter in each voxel, while in [1]_ all voxels use the |
408 | | -# same regularization parameter. |
| 409 | +# same regularization parameter. Here, we do not aim to exactly reproduce the |
| 410 | +# results in [1]_; rather, we describe the general approach. |
| 411 | + |
| 412 | +############################################################################### |
| 413 | +# To project the principal component on the cortical surface, we first need to |
| 414 | +# transform the primal weights of all voxels using the fitted PCA. |
| 415 | + |
| 416 | +# split the ridge coefficients per delays |
| 417 | +primal_coef_per_delay = delayer.reshape_by_delays(primal_coef, axis=0) |
| 418 | +print("(n_delays, n_features, n_voxels) =", primal_coef_per_delay.shape) |
| 419 | + |
| 420 | +# average over delays |
| 421 | +average_coef = np.mean(primal_coef_per_delay, axis=0) |
| 422 | +print("(n_features, n_voxels) =", average_coef.shape) |
| 423 | + |
| 424 | +# transform with the fitted PCA |
| 425 | +average_coef_transformed = pca.transform(average_coef.T).T |
| 426 | +print("(n_components, n_voxels) =", average_coef_transformed.shape) |
| 427 | + |
| 428 | +# We make sure vmin = -vmax, so that the colormap is centered on 0. |
| 429 | +vmax = np.percentile(np.abs(average_coef_transformed), 99.9) |
| 430 | + |
| 431 | +# plot the primal weights projected on the first principal component. |
| 432 | +ax = plot_flatmap_from_mapper(average_coef_transformed[0], mapper_file, |
| 433 | + vmin=-vmax, vmax=vmax, cmap='coolwarm') |
| 434 | +plt.show() |
| 435 | + |
| 436 | +############################################################################### |
| 437 | +# This flatmap shows in which brain regions the model has the largest |
| 438 | +# projection on the first component. Again, this result is different from the |
| 439 | +# one in [1]_, and should only be considered as reproducing the general |
| 440 | +# approach. |
409 | 441 |
|
410 | 442 | ############################################################################### |
411 | 443 | # Following [1]_, we also plot the next three principal components on the |
|
416 | 448 | next_three_components = components[1:4].T |
417 | 449 | node_sizes = np.linalg.norm(next_three_components, axis=1) |
418 | 450 | node_colors = scale_to_rgb_cube(next_three_components) |
| 451 | +print("(n_nodes, n_channels) =", node_colors.shape) |
419 | 452 |
|
420 | 453 | plot_wordnet_graph(node_colors=node_colors, node_sizes=node_sizes) |
421 | 454 | plt.show() |
|
424 | 457 | # According to the authors of [1]_, "this graph shows that categories thought |
425 | 458 | # to be semantically related (e.g. athletes and walking) are represented |
426 | 459 | # similarly in the brain". |
427 | | -# |
428 | | -# Again, our results are slightly different than in [1]_, for the same reasons |
| 460 | + |
| 461 | +############################################################################### |
| 462 | +# Finally, we project these principal components on the cortical surface. |
| 463 | + |
| 464 | +from voxelwise_tutorials.viz import plot_3d_flatmap_from_mapper |
| 465 | + |
| 466 | +voxel_colors = scale_to_rgb_cube(average_coef_transformed[1:4].T, clip=3).T |
| 467 | +print("(n_channels, n_voxels) =", voxel_colors.shape) |
| 468 | + |
| 469 | +ax = plot_3d_flatmap_from_mapper(voxel_colors[0], voxel_colors[1], |
| 470 | + voxel_colors[2], mapper_file=mapper_file, |
| 471 | + vmin=0, vmax=1, vmin2=0, vmax2=1, vmin3=0, |
| 472 | + vmax3=1) |
| 473 | +plt.show() |
| 474 | + |
| 475 | +############################################################################### |
| 476 | +# Again, our results are different from the ones in [1]_, for the same reasons |
429 | 477 | # mentioned earlier. |
430 | 478 |
|
431 | 479 | ############################################################################### |
|
0 commit comments