6 Prepare (f)MRI Data
6.1 Regions of Interest
We want to run ROI-based representational similarity analyses to test how the hippocampal-entorhinal system represents the learned temporal relationships. We will focus our analysis on the anterior hippocampus (aHPC) and anterolateral entorhinal cortex (alEC).
Our previous work demonstrates representations reflecting the temporal relations of events from one sequence in the anterior hippocampus (21) and the anterior-lateral entorhinal cortex (27). More generally, these regions have been implicated in temporal coding and memory (for review, see (10)). Further, the hippocampus has been linked to inferential reasoning and generalization (46,48,49,51,53). We thus focused our analyses on these regions.
To define participant-specific ROI masks, we want to combine masks from the individual Freesurfer parcellations of the HPC and EC with masks dividing the subregions of the HPC and EC. Specifically, these are based on the Harvard-Oxford atlas distributed with FSL for the hippocampus and on the EC subregion masks from Navarro Schröder et al. (eLife, 2015).
We use the FreeSurfer segmentation as run by Lorena Deuker, which was obtained via the recon-all command. All masks will be co-registered to the analysis space, i.e. the space of the wholebrain functional images. This relies on the FSL transformation matrices and warp files obtained from running FEAT on the wholebrain functional images.
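The segmentation itself is not re-run here. For reference, a minimal sketch of how such a segmentation could be obtained and resampled into the highres space, assuming hypothetical file names and default settings (the actual calls may have differed):

# not run: example FreeSurfer calls for one participant (hypothetical paths)
# recon-all runs the full cortical/subcortical segmentation pipeline
system(paste("recon-all",
             "-i /path/to/VIRTEM_P001_T1.nii.gz",  # hypothetical input T1
             "-s VIRTEM_P001",                     # FreeSurfer subject ID
             "-all"))

# one way to resample the aparc+aseg parcellation into the participant's rawavg (highres) space
system(paste("mri_convert -rl rawavg.mgz -rt nearest",
             "aparc+aseg.mgz aparc+aseg-in-rawavg.nii"))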
FreeSurfer Masks
Region of interest (ROI) masks were based on participant-specific FreeSurfer segmentations (version 6.0.0-2), which yielded masks for the entire hippocampus and entorhinal cortex. These were co-registered to participants’ functional space.
Create Masks from Parcellation
First, we want to create nifti masks from the FreeSurfer parcellation. These are saved in the participant’s highres structural space.
# create a freesurfer subfolder in each ROI folder
invisible(lapply(file.path(dirs$rois_fs_dirs, "freesurfer_highres_space"),
                 function(x) if(!dir.exists(x)) dir.create(x)))

for (sub_id in subjects){

  # get the names of the ROIs (fs = from freesurfer, hs = highres space)
  out_roi_fn <- file.path(dirs$rois_fs_dirs, "freesurfer_highres_space",
                          sprintf("P%s_%s_fs_hs.nii.gz", sub_id, rois_fs))

  # load the structural image
  highres_fn <- here("data", "mri", "processed", "wholebrain",
                     paste0("VIRTEM_P", sub_id, ".feat"), "reg", "highres.nii.gz")
  highres_nii <- readNIfTI2(highres_fn)

  # load the freesurfer output
  freesurf_fn <- here("data", "mri", "freesurfer",
                      paste0("VIRTEM_P", sub_id), "mri", "aparc+aseg-in-rawavg.nii")
  aparc_nii <- readNIfTI2(freesurf_fn)

  for (i_roi in 1:length(rois_fs)){

    # initialize all mask voxels as zeros
    mask_idx <- array(0, dim(img_data(highres_nii)))

    # set voxels of the ROI to one
    mask_idx[img_data(aparc_nii) %in% labels_fs[[i_roi]]] <- 1

    # create nifti based on the Freesurfer image
    roi_nii <- highres_nii
    img_data(roi_nii) <- mask_idx

    # write the nifti to file
    writenii(nim = roi_nii, filename = out_roi_fn[i_roi])

    # find coordinates for sensible slices to plot (most frequent value in each dim)
    coords <- arrayInd(which(as.logical(mask_idx)), dim(mask_idx))
    coords <- c(min(statip::mfv(coords[,1])),
                min(statip::mfv(coords[,2])),
                min(statip::mfv(coords[,3])))

    # save a diagnostic plot as a PDF
    fname <- tools::file_path_sans_ext(out_roi_fn[i_roi], compression = "TRUE")
    pdf(paste0(fname, ".pdf"))
    ortho2(robust_window(highres_nii, probs = c(0, 0.999)), y = roi_nii,
           col.y = roi_colors_fs[i_roi], xyz = coords,
           text = sprintf("P%s: %s", sub_id, rois_fs[i_roi]),
           crosshairs = FALSE)
    invisible(dev.off())
  }
}
To get an overview of the ROIs we just created, we merge the diagnostic plots into one PDF per ROI. The files are saved to the respective ROI folders.
# create a PDF of diagnostic plots for each ROI
for (i_roi in 1:length(rois_fs)){

  # find all the PDF files of this ROI
  fnames <- tools::file_path_sans_ext(
    sprintf("P%s_%s_fs_hs.nii.gz", subjects, rois_fs[i_roi]), compression = "TRUE")
  fnames <- file.path(dirs$rois_fs_dirs[i_roi], "freesurfer_highres_space",
                      paste0(fnames, ".pdf"))

  # merge PDFs together
  merged_pdf <- here("data", "mri", "rois", rois_fs[i_roi],
                     paste0(rois_fs[i_roi], "_freesurfer_highres_space.pdf"))
  cmd = paste("pdfunite", paste(fnames, collapse = " "), merged_pdf)
  system(cmd)
}
Coregister FreeSurfer masks to functional space
Next, we coregister the FreeSurfer masks from the highres structural space to the functional analysis space. For this, we use FSL FLIRT with the registration files obtained during preprocessing of the wholebrain functional images. Further, we mask them with our partial field-of-view mask, which is based on the sequence used during the picture viewing tasks.
To make sure everything went well, we create a diagnostic image for every subject, which we collect in a PDF for visual inspection.
# create a samespace freesurfer subfolder in each ROI folder
invisible(lapply(file.path(dirs$rois_fs_dirs, "freesurfer_samespace"),
                 function(x) if(!dir.exists(x)) dir.create(x)))

# name of transformation matrix file to move from highres to functional space
highres2func <- here("data", "mri", "processed", "wholebrain",
                     paste0("VIRTEM_P", subjects, ".feat"),
                     "reg", "highres2example_func.mat")

# use the mean EPI of the wholebrain image as a reference
mean_epi <- here("data", "mri", "processed", "samespace", paste0("VIRTEM_P", subjects),
                 paste0("VIRTEM_P", subjects, "_wholebrain.nii.gz"))

# functional mask (picture viewing tasks not scanned with wholebrain coverage)
brain_mask_ss <- here("data", "mri", "processed", "samespace",
                      paste0("VIRTEM_P", subjects),
                      paste0("VIRTEM_P", subjects, "_common_mask.nii.gz"))

for (i_roi in 1:length(rois_fs)){

  # define the file names of ROI files in highres space
  roi_hs <- file.path(dirs$rois_fs_dirs[i_roi], "freesurfer_highres_space",
                      sprintf("P%s_%s_fs_hs.nii.gz", subjects, rois_fs[i_roi]))

  # define output files in samespace (ss) = analysis space based on the wholebrain EPI
  roi_ss <- file.path(dirs$rois_fs_dirs[i_roi], "freesurfer_samespace",
                      sprintf("P%s_%s_fs_ss.nii.gz", subjects, rois_fs[i_roi]))

  # define file names for masked ROI files
  roi_ss_masked <- file.path(dirs$rois_fs_dirs[i_roi], "freesurfer_samespace",
                             sprintf("P%s_%s_fs_ss_masked.nii.gz", subjects, rois_fs[i_roi]))

  # apply FSL flirt to move the ROIs from highres to wholebrain functional space
  out <- mapply(flirt_apply, infile = roi_hs, reffile = mean_epi,
                initmat = highres2func, outfile = roi_ss,
                verbose = FALSE, retimg = FALSE)

  # mask the ROIs to restrict them to the partial field of view
  invisible(mapply(fsl_maths, file = roi_ss, opts = sprintf("-min %s", brain_mask_ss),
                   outfile = roi_ss_masked, verbose = FALSE, retimg = FALSE))
}
# Diagnostic plotting
for (i_roi in 1:length(rois_fs)){

  # define the file names of the masked ROIs to plot
  roi_to_plot <- file.path(dirs$rois_fs_dirs[i_roi], "freesurfer_samespace",
                           sprintf("P%s_%s_fs_ss_masked.nii.gz", subjects, rois_fs[i_roi]))

  fnames <- vector(mode = "character", length(subjects))
  for (i_sub in 1:length(subjects)){

    # load niftis of the mean EPI and the ROI mask
    mean_epi_nii <- readNIfTI2(mean_epi[i_sub])
    roi_nii <- readNIfTI2(roi_to_plot[i_sub])

    # find coordinates for sensible slices to plot
    coords <- arrayInd(which(as.logical(img_data(roi_nii))), dim(img_data(roi_nii)))
    coords <- c(min(statip::mfv(coords[,1])),
                min(statip::mfv(coords[,2])),
                min(statip::mfv(coords[,3])))

    # save a diagnostic plot as a PDF
    fnames[i_sub] <- paste0(tools::file_path_sans_ext(roi_to_plot[i_sub], compression = "TRUE"), ".pdf")
    pdf(fnames[i_sub])
    ortho2(robust_window(mean_epi_nii, probs = c(0, 0.999)), y = roi_nii,
           col.y = roi_colors_fs[i_roi], xyz = coords,
           text = sprintf("P%s: %s\nmasked", subjects[i_sub], rois_fs[i_roi]),
           crosshairs = FALSE)
    invisible(dev.off())
  }

  # merge PDFs together
  merged_pdf <- here("data", "mri", "rois", rois_fs[i_roi],
                     paste0(rois_fs[i_roi], "_freesurfer_samespace_masked.pdf"))
  #pdf_combine(fnames, output = merged_pdf) # pdftools doesn't work on remote linux?
  cmd = paste("pdfunite", paste(fnames, collapse = " "), merged_pdf)
  system(cmd)
}
Subregion masks (MNI)
We defined anterior hippocampus using the Harvard-Oxford atlas mask (thresholded at 50% probability), selecting all voxels anterior to MNI y=-21 based on Poppenk et al. (2013). The resulting anterior hippocampus mask was also co-registered to participants’ functional space and intersected with the participant-specific hippocampal mask from FreeSurfer. The mask for the anterior-lateral entorhinal cortex was based on Navarro Schröder et al. (2015). It was co-registered to participants’ functional space and intersected with the entorhinal cortex mask from FreeSurfer.
Above, we generated masks of the hippocampus and entorhinal cortex based on the FreeSurfer segmentation of each participant’s structural image. Our hypotheses specifically concern the anterior portion of the hippocampus and the anterior-lateral subregion of the entorhinal cortex. Hence, we want to split the participant-specific masks.
- For the hippocampus, we will start out with the probabilistic masks based on the Harvard-Oxford atlas delivered with FSL. Masks for the right and left hippocampus will be combined, thresholded at 50% probability and split into an anterior and a posterior section. We will define the anterior hippocampus as all voxels anterior to MNI y = -21 based on Poppenk et al. (TiCS, 2013). They “propose that foci at or anterior to y = -21 mm in MNI space (y = -20 mm in Talairach space) may be regarded as falling in the aHPC, as this coordinate incorporates the uncal apex in the MNI152 template and current neuroanatomical atlases”.
- We will define the anterolateral entorhinal cortex based on Navarro Schröder et al. (eLife, 2015). Specifically, we will use a mask for the dominant mode of connectivity change within the EC, which is shown in Figure 2 of the original paper.
The code below assumes that masks for the left and right hippocampus were extracted from the Harvard-Oxford atlas (MNI space, 1mm resolution). Likewise, it assumes that the EC subregion masks (MNI space, 1mm resolution; obtained from Tobias Navarro Schröder) are included in the folder. After the hippocampus masks are combined, thresholded, and split, the masks will be co-registered to the functional analysis space.
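As a quick sanity check on the cut-off used below: the 1mm MNI152 grid has its origin (MNI x/y/z = 0 mm) at voxel (90, 126, 72), so MNI y = -21 mm corresponds to matrix row 105. A minimal sketch of this conversion (just the arithmetic, no files needed):

# voxel row in the 1mm MNI152 grid corresponding to MNI y = -21 mm
# (1mm voxels; the template's y origin sits at voxel index 126)
mni_y_mm <- -21
y_origin_vox <- 126
y_origin_vox + mni_y_mm  # 105, the value used in the -roi option of fsl_maths below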
# define the name of the ROIs in MNI space
rois_hpc_fnames <- here("data", "mri", "rois", "mni_masks", paste0(rois_hpc, ".nii.gz"))

# combine atlas mask across hemispheres and split into anterior & posterior if not done
atlas_fnames <- here("data", "mri", "rois", "mni_masks",
                     c("harvardoxford-subcortical_prob_Left_Hippocampus.nii.gz",
                       "harvardoxford-subcortical_prob_Right_Hippocampus.nii.gz",
                       "harvardoxford-hpc_lr.nii.gz",
                       "harvardoxford-hpc_lr_tresh50_bin.nii.gz"))

# combine the left and right HPC masks from the Harvard-Oxford atlas
fsl_add(atlas_fnames[1], atlas_fnames[2], outfile = atlas_fnames[3],
        retimg = FALSE, verbose = FALSE)

# threshold at 50 percent probability
fsl_thresh(atlas_fnames[3], outfile = atlas_fnames[4], thresh = 50, opts = "-bin",
           retimg = FALSE, verbose = FALSE)

# create anterior hippocampus ROI by creating a volume with all voxels anterior
# to MNI y=-21 (y=105 in matrix coordinates) and mask this with the binary HPC ROI
opts = sprintf("-mul 0 -add 1 -roi 0 182 105 -1 0 182 0 1 -mas %s", atlas_fnames[4])
fsl_maths(file = atlas_fnames[4], outfile = rois_hpc_fnames[1], opts = opts,
          retimg = FALSE, verbose = FALSE)
Here are some diagnostic plots for the ROIs that we will use (in MNI 1mm space).
# EC ROIs
rois_ec_fnames <- here("data", "mri", "rois", "mni_masks", paste0(rois_ec, ".nii.gz"))

# file names for mni ROIs
rois_mni_fnames <- c(rois_hpc_fnames, rois_ec_fnames)

# load mni 1mm (here we use the ch2better template from MRIcron)
#mni_nii <- here("data", "mri", "rois", "mni_masks", "MNI152_T1_1mm_brain.nii.gz") #%>% readNIfTI2()
mni_nii <- here("data", "mri", "rois", "mni_masks", "ch2better_mni1mm.nii.gz")

# register to MNI 1mm space if needed
if(!file.exists(mni_nii)){
  flirt(infile = here("data", "mri", "rois", "mni_masks", "ch2better.nii.gz"),
        reffile = here("data", "mri", "rois", "mni_masks", "MNI152_T1_1mm_brain.nii.gz"),
        outfile = mni_nii, omat = "ch2better_to_mni1mm")
}
mni_nii <- readNIfTI2(mni_nii)

for (i_roi in 1:length(rois_mni)){

  # load the ROI mask nifti
  roi_nii <- readNIfTI2(rois_mni_fnames[i_roi])

  # find coordinates for sensible slices to plot
  coords <- arrayInd(which(as.logical(img_data(roi_nii))), dim(img_data(roi_nii)))
  coords <- c(min(statip::mfv(coords[,1])),
              min(statip::mfv(coords[,2])),
              min(statip::mfv(coords[,3])))

  # create a diagnostic plot and save it as a PDF
  fname <- paste0(tools::file_path_sans_ext(rois_mni_fnames[i_roi], compression = "TRUE"), ".pdf")
  ortho2(robust_window(mni_nii, probs = c(0, 0.999)), y = roi_nii,
         col.y = roi_colors[i_roi], xyz = coords,
         text = rois_mni[i_roi], crosshairs = FALSE)
  dev.copy(pdf, fname)
  invisible(dev.off())
}
The resulting ROI masks can now be coregistered from MNI 1mm space to the analysis space of the wholebrain functional sequence. Finally, they are thresholded at 0.5 and binarized.
# names of transformation matrix files to move from standard MNI to functional space
standard2func <- here("data", "mri", "processed", "wholebrain",
                      paste0("VIRTEM_P", subjects, ".feat"),
                      "reg", "standard2example_func.mat")

# use the mean EPI of the wholebrain image as a reference
mean_epi <- here("data", "mri", "processed", "samespace", paste0("VIRTEM_P", subjects),
                 paste0("VIRTEM_P", subjects, "_wholebrain.nii.gz"))

for (i_roi in 1:length(rois_mni)){

  # file name of the ROI file in standard MNI space
  roi_mni <- rois_mni_fnames[i_roi]

  # define output files in samespace (ss) = analysis space based on the wholebrain EPI
  roi_ss <- file.path(dirs$rois_mni_ss_dirs[i_roi],
                      sprintf("P%s_%s_ss.nii.gz", subjects, rois_mni[i_roi]))

  # apply FSL flirt to move the ROI from standard to wholebrain functional space
  invisible(mapply(flirt_apply, infile = roi_mni, reffile = mean_epi,
                   initmat = standard2func, outfile = roi_ss,
                   verbose = FALSE, retimg = FALSE))

  # use fslmaths to binarize the co-registered ROIs using a threshold of 0.5
  out <- mapply(fsl_thresh, file = roi_ss, outfile = roi_ss, thresh = 0.5, opts = "-bin",
                verbose = FALSE, retimg = FALSE)
}
Final ROIs from intersection of masks
We defined anterior hippocampus using the Harvard-Oxford atlas mask (thresholded at 50% probability), selecting all voxels anterior to MNI y=-21 based on Poppenk et al. (2013). The resulting anterior hippocampus mask was also co-registered to participants’ functional space and intersected with the participant-specific hippocampal mask from FreeSurfer. The mask for the anterior-lateral entorhinal cortex was based on Navarro Schröder et al. (2015). It was co-registered to participants’ functional space and intersected with the entorhinal cortex mask from FreeSurfer.
In the last step, we intersect the masks from FreeSurfer with the respective subregion masks. At this point, both masks are in the functional analysis space and have been binarized. Finally, we generate a PDF of diagnostic plots to check the final ROI masks.
for (i_roi in 1:length(rois)){

  # define ROI files in samespace (ss) = analysis space based on the wholebrain EPI
  roi_ss <- file.path(dirs$rois_ss_dirs[i_roi],
                      sprintf("P%s_%s_ss.nii.gz", subjects, rois[i_roi]))

  # define file names for ROI files masked with freesurfer (i.e. our output)
  roi_ss_fs <- file.path(dirs$rois_ss_dirs[i_roi],
                         sprintf("P%s_%s_ss_fs.nii.gz", subjects, rois[i_roi]))

  # use HPC or EC mask from freesurfer? if not HPC or EC, use graymatter mask
  if(grepl("HPC", rois[i_roi])){
    fs_roi <- here("data", "mri", "rois", "hpc_lr", "freesurfer_samespace",
                   #sprintf("P%s_%s_fs_ss_masked_bin.nii.gz", subjects, "hpc_lr"))
                   sprintf("P%s_%s_fs_ss_masked.nii.gz", subjects, "hpc_lr"))
  } else if (grepl("EC", rois[i_roi])){
    fs_roi <- here("data", "mri", "rois", "ec_lr", "freesurfer_samespace",
                   #sprintf("P%s_%s_fs_ss_masked_bin.nii.gz", subjects, "ec_lr"))
                   sprintf("P%s_%s_fs_ss_masked.nii.gz", subjects, "ec_lr"))
  } else {
    #/data/pt_02261/virtem/data/mri/processed/samespace/VIRTEM_P035/VIRTEM_P035_common_graymatter_mask.nii.gz
    # mask gray matter mask with brain mask to account for partial FOV. Result will be used to mask ROIs
    gm_mask_fn <- here("data", "mri", "processed", "samespace", paste0("VIRTEM_P", subjects),
                       sprintf("VIRTEM_P%s_common_graymatter_mask.nii.gz", subjects))
    brain_mask_fn <- here("data", "mri", "processed", "samespace", paste0("VIRTEM_P", subjects),
                          sprintf("VIRTEM_P%s_common_mask_perBlock.nii.gz", subjects))
    fs_roi <- here("data", "mri", "processed", "samespace", paste0("VIRTEM_P", subjects),
                   sprintf("VIRTEM_P%s_common_graymatter_mask_brainmasked.nii.gz", subjects))
    invisible(mapply(fsl_mask, file = gm_mask_fn, mask = brain_mask_fn, outfile = fs_roi,
                     verbose = FALSE, retimg = FALSE))
  } #} else {stop("Don't know which Freesurfer mask to use!")}

  # mask the subregion mask with the FreeSurfer ROI
  invisible(mapply(fsl_maths, file = roi_ss, opts = sprintf("-min %s", fs_roi), outfile = roi_ss_fs,
                   verbose = FALSE, retimg = FALSE))
  #invisible(mapply(fsl_mask, file = roi_ss, mask = fs_roi, outfile = roi_ss_fs,
  #                 verbose = FALSE, retimg = FALSE))

  # threshold the resulting mask at a probability of 0.5 and binarize it
  invisible(mapply(fsl_thresh, file = roi_ss_fs, outfile = roi_ss_fs,
                   thresh = 0.500000000000000000000000000000000000001, opts = "-bin",
                   verbose = FALSE, retimg = FALSE))
}
6.2 Group-Level Masks
Lastly, we create some group-level masks for visualization and further analysis:
- probabilistic ROI image for visualization of ROIs
- a field of view mask in MNI space
- a gray matter mask in MNI space
- a small volume correction mask
We create these images in their respective folders, but also copy them to the analysis data folder so they can be shared.
# create the group mask folder
if (!dir.exists(file.path(dirs$data4analysis, "mni1mm_masks"))){
dir.create(file.path(dirs$data4analysis, "mni1mm_masks"))}
# copy MNI image there
file.copy(mni_fname(mm=1, brain = TRUE), file.path(dirs$data4analysis, "mni1mm_masks"),
overwrite=TRUE)
## [1] TRUE
Probabilistic ROIs for visualization
For later visualization, we move the masks to MNI space and threshold them at 0.5. We then add the images together and divide by the number of subjects to obtain an image giving us the probability of each voxel being included in the ROI.
Visualizations of the ROIs will be created before implementing ROI-based RSA.
# name of transformation matrix file to move from common functional to MNI 1mm space
func2standard <- here("data", "mri", "processed", "wholebrain",
                      paste0("VIRTEM_P", subjects, ".feat"),
                      "reg", "example_func2standard.mat")

# use the MNI 1mm image as a reference
mni_1mm <- mni_fname(mm=1)

for (i_roi in 1:length(rois)){

  # where to put the output
  out_dir <- here("data", "mri", "rois", rois[i_roi], "mni_1mm")
  if (!dir.exists(out_dir)){dir.create(out_dir)}

  # define input files in samespace (ss) = analysis space based on the wholebrain EPI
  roi_ss <- here("data", "mri", "rois", rois[i_roi], "samespace",
                 sprintf("P%s_%s_ss.nii.gz", subjects, rois[i_roi]))

  # file names of the output ROI files in standard MNI space
  roi_mni <- file.path(out_dir, sprintf("P%s_%s_mni1mm.nii.gz", subjects, rois[i_roi]))

  # apply FSL flirt to move the ROIs from wholebrain functional space to MNI space
  invisible(mapply(flirt_apply, infile = roi_ss, reffile = mni_1mm,
                   initmat = func2standard, outfile = roi_mni,
                   verbose = FALSE, retimg = FALSE))

  # use fslmaths to binarize the ROIs using a threshold of 0.5
  out <- mapply(fsl_thresh, file = roi_mni, outfile = roi_mni, thresh = 0.5, opts = "-bin",
                verbose = FALSE, retimg = FALSE)

  # create summary image: add up all subject masks, then divide by the number of subjects
  out_fn <- here("data", "mri", "rois", rois[i_roi],
                 sprintf("%s_group_prob_mni1mm.nii.gz", rois[i_roi]))
  fslmaths(file = roi_mni[1], outfile = out_fn,
           opts = paste(sprintf("-add %s", roi_mni[2:length(roi_mni)]), collapse = " "),
           verbose = FALSE)
  fslmaths(file = out_fn, outfile = out_fn,
           opts = sprintf("-div %d", length(roi_mni)),
           verbose = FALSE)

  # copy to the data sharing folder
  file.copy(from = out_fn, to = file.path(dirs$data4analysis, "mni1mm_masks"),
            overwrite = TRUE)
}
Field of view mask for visualization
Next, we create a mask of our field of view, i.e. voxels covered in our functional images. We do so by registering the subject-specific brain masks from FEAT (in whole-brain functional space, already combined across blocks) to MNI space. After thresholding, the result is a binary mask to illustrate the FOV when plotting brain images in the main analysis script.
# folder for output
if(!dir.exists(file.path(dirs$mask_dir, "fov"))){
dir.create(file.path(dirs$mask_dir, "fov"), recursive=TRUE)}
# name of transformation matrix file to move from the shared functional space to MNI 1mm
func2standard <- here("data", "mri", "processed", "wholebrain",
                      paste0("VIRTEM_P", subjects, ".feat"),
                      "reg", "example_func2standard.mat")

# subject-specific brain (FOV) masks in whole-brain functional space
subj_masks <- file.path(dirs$samespace_dir, sprintf("VIRTEM_P%s", subjects),
                        sprintf("VIRTEM_P%s_common_mask_perBlock.nii.gz", subjects))

# output file names after moving to MNI space
subj_masks_mni <- file.path(file.path(dirs$mask_dir, "fov"),
                            sprintf("%s_fov_mask_mni.nii.gz", subjects))

# apply FSL flirt to move from wholebrain functional space to 1mm MNI space
invisible(mapply(flirt_apply,
                 infile = subj_masks,
                 reffile = mni_fname("1"),
                 initmat = func2standard,
                 outfile = subj_masks_mni,
                 verbose = FALSE, retimg = FALSE))

# merge the files into a 4D file
mask_4d <- file.path(dirs$mask_dir, "fov", "4d_fov_mask_mni.nii.gz")
fsl_merge(infiles = subj_masks_mni, direction = "t", outfile = mask_4d,
          retimg = FALSE, verbose = FALSE)

# create a binary mask
fov_mask <- file.path(dirs$mask_dir, "fov", "fov_mask_mni.nii.gz")
fov_mask_nii <- fslmaths(mask_4d, outfile = fov_mask,
                         opts = "-thr 0.3 -bin -Tmean -thr 0.3 -bin",
                         verbose = FALSE, retimg = TRUE)

# quick plot
ortho2(fov_mask_nii, xyz=c(100,100,50))

# copy to the data sharing folder
file.copy(from = fov_mask, to = file.path(dirs$data4analysis, "mni1mm_masks"))
Gray matter mask
From the subject-specific gray matter masks (in whole-brain functional space) we create a merged, binary mask in MNI space. We use this to run FSL Randomise for all gray matter voxels (we only use gray matter voxels as features for our searchlight analysis).
Gray matter segmentation was done on the structural images and the results were mapped back to the space of the whole-brain functional scan for later use in the analysis.
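The segmentation itself was run as part of preprocessing and is not repeated here. For illustration only, a gray matter probability map could be obtained with FSL FAST along the following lines (a sketch with hypothetical file names; the settings of the actual pipeline may have differed):

# not run: example tissue-type segmentation with FSL FAST (hypothetical paths)
# for a T1 input, FAST writes partial volume estimates; *_pve_1 is the gray matter map
system(paste("fast",
             "-o /path/to/VIRTEM_P001_T1_brain",       # output basename (hypothetical)
             "/path/to/VIRTEM_P001_T1_brain.nii.gz"))  # skull-stripped T1 (hypothetical)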
# folder for output
if(!dir.exists(file.path(dirs$mask_dir, "gray_matter"))){
dir.create(file.path(dirs$mask_dir, "gray_matter"), recursive=TRUE)}
# name of transformation matrix file to move from the shared functional space to MNI 1mm
func2standard <- here("data", "mri", "processed", "wholebrain",
                      paste0("VIRTEM_P", subjects, ".feat"),
                      "reg", "example_func2standard.mat")

# subject-specific gray matter masks in whole-brain functional space
subj_masks <- file.path(dirs$samespace_dir, sprintf("VIRTEM_P%s", subjects),
                        sprintf("VIRTEM_P%s_common_graymatter_mask_brainmasked.nii.gz", subjects))

# output file names in MNI space
subj_masks_mni <- file.path(dirs$mask_dir, "gray_matter",
                            sprintf("%s_graymatter_mni.nii.gz", subjects))

# apply FSL flirt to move from wholebrain functional space to 1mm MNI space
invisible(mapply(flirt_apply,
                 infile = subj_masks,
                 reffile = mni_fname("1"),
                 initmat = func2standard,
                 outfile = subj_masks_mni,
                 verbose = FALSE, retimg = FALSE))

# merge the files into a 4D file
gm_4d <- file.path(dirs$mask_dir, "gray_matter", "4d_graymatter_mni.nii.gz")
fsl_merge(infiles = subj_masks_mni, direction = "t", outfile = gm_4d,
          retimg = FALSE, verbose = FALSE)

# create a binary gray matter mask by thresholding and combining across subjects liberally
mni_brain_mask <- mni_fname(mm = "1", brain=TRUE, mask=TRUE)
gm_mask <- file.path(dirs$mask_dir, "gray_matter", "gray_matter_mask.nii.gz")
gm_mask_nii <- fslmaths(gm_4d, outfile = gm_mask,
                        opts = sprintf("-thr 0.3 -bin -Tmean -thr 0.3 -bin -mas %s",
                                       mni_brain_mask),
                        verbose = FALSE, retimg = TRUE)
Small Volume Correction Mask
We corrected for multiple comparisons using a small volume correction mask including our a priori regions of interest, the anterior hippocampus and the anterior-lateral entorhinal cortex.
The small volume correction mask consists of our a priori ROIs, the anterior hippocampus and the anterior-lateral entorhinal cortex. We use the subject-specific masks, moved back to MNI space for this.
# folder for output
if(!dir.exists(file.path(dirs$mask_dir, "svc"))){
dir.create(file.path(dirs$mask_dir, "svc"), recursive=TRUE)}
# create the SVC mask by merging aHPC and alEC ROIs
aHPC_fn <- here("data", "mri", "rois", "aHPC_lr",
                sprintf("%s_group_prob_mni1mm.nii.gz", "aHPC_lr"))
alEC_fn <- here("data", "mri", "rois", "alEC_lr",
                sprintf("%s_group_prob_mni1mm.nii.gz", "alEC_lr"))
svc_mask_fn <- file.path(dirs$mask_dir, "svc", "svc_mask.nii.gz")
fsl_add(file = aHPC_fn, file2 = alEC_fn, outfile = svc_mask_fn,
        retimg = FALSE)
svc_nii <- fslmaths(file = svc_mask_fn, outfile = svc_mask_fn,
                    opts = sprintf("-thr 0.99 -bin -mas %s", gm_mask),
                    retimg = TRUE)

# let's have a look at the mask we created
mni_nii <- readNIfTI2(mni_fname("1"))
ortho2(mni_nii, y = svc_nii, xyz = c(63, 113, 50))
ortho2(mni_nii, y = svc_nii, xyz = c(71, 127, 39))
6.3 Quantify Representational Change
Get multi-voxel patterns from ROIs
We want to run RSA on the multi-voxel patterns corresponding to the object presentations during the picture viewing tasks. For this, we use the data as it was preprocessed by Lorena Deuker. We will do some further cleaning and extract the relevant volumes from the time series to eventually run RSA.
Preprocessing
Preprocessing was performed using FSL FEAT (version 6.00). Functional scans from the picture viewing tasks and the whole-brain functional scan were submitted to motion correction and high-pass filtering using FSL FEAT. For the two picture viewing tasks, data from each mini-block was preprocessed independently. For those participants with a field map scan, distortion correction was applied to the functional data sets. No spatial smoothing was performed. Functional images from the two picture viewing tasks were then registered to the preprocessed mean image of the whole-brain functional scan. The whole-brain functional images were registered to the individual structural scans. The structural scans were in turn normalized to the MNI template (1-mm resolution). Gray matter segmentation was done on the structural images, and the results were mapped back to the space of the whole-brain functional scan for later use in the analysis.
Extract ROI time series and calculate residuals from motion parameter regression
In this first section of the script, we will create a data frame containing the file names needed to extract the cleaned time series. These include the motion parameters from FSL FEAT, which was run on the functional data from the picture viewing tasks. As described above, these data were split into 10 blocks. We will use the preprocessed FEAT output images that were already co-registered to the analysis space (‘samespace’). Further, we will use a mask to only include voxels with data in all blocks; this mask has already been created and is available in the analysis space. Additionally, we will use the gray matter mask and the ROI masks (both already co-registered to the analysis space).
Representational similarity analysis (RSA) (Kriegeskorte et al., 2008) was first implemented separately for the pre- and post-learning picture viewing task. It was carried out in ROIs co-registered to the whole-brain functional image and in searchlight analyses (see below). For the ROI analyses, preprocessed data were intersected with the participant-specific anterior hippocampus and anterolateral entorhinal cortex ROI masks as well as a brain mask obtained during preprocessing (only voxels within the brain mask in all mini-blocks were analyzed) and the gray matter mask. For each voxel within the ROI mask, motion parameters from FSL MCFLIRT were used as predictors in a general linear model (GLM) with the voxel time series as the dependent variable. The residuals of this GLM (i.e. data that could not be explained by motion) were taken to the next analysis step.
Build data frame with files and folders:
# PREPARE DATA FRAME FOR CLEANING
# create a tibble with file names etc. for data cleaning, start with subject IDs
func_dat_df <- tibble(subject = rep(as.numeric(subjects), each = n_runs * n_blocks))

# add run info (runs 1 and 2 correspond to the pre and post picture viewing task)
func_dat_df <- add_column(func_dat_df, run = rep(rep(1:n_runs, each = n_blocks), n_subs))

# add block information (blocks 1 to 10 are the PVT blocks preprocessed separately)
func_dat_df <- add_column(func_dat_df, block = rep(1:n_blocks, n_subs*n_runs))

# add the motion parameter file
func_dat_df <- add_column(func_dat_df, mc_par_fn =
                            file.path(dirs$feat_dir,
                                      paste0("VIRTEM_P0", func_dat_df$subject),
                                      sprintf("RSA_%02d_Block%02d.feat",
                                              func_dat_df$run, func_dat_df$block),
                                      "mc", "prefiltered_func_data_mcf.par"))

# add the functional data file
func_dat_df <- add_column(func_dat_df, filtered_func =
                            file.path(dirs$samespace_dir,
                                      paste0("VIRTEM_P0", func_dat_df$subject),
                                      sprintf("VIRTEM_P%03d_RSA_%02d_Block%02d_func.nii.gz",
                                              func_dat_df$subject, func_dat_df$run, func_dat_df$block)))

# add the brain mask file
func_dat_df <- add_column(func_dat_df, brain_mask =
                            file.path(dirs$samespace_dir,
                                      paste0("VIRTEM_P0", func_dat_df$subject),
                                      sprintf("VIRTEM_P%03d_common_mask_perBlock.nii.gz",
                                              func_dat_df$subject)))

# add the graymatter mask file
func_dat_df <- add_column(func_dat_df, gray_mask =
                            file.path(dirs$samespace_dir,
                                      paste0("VIRTEM_P0", func_dat_df$subject),
                                      sprintf("VIRTEM_P%03d_common_graymatter_mask.nii.gz",
                                              func_dat_df$subject)))

# add the output directories
func_dat_df <- add_column(func_dat_df, out_dir = list(dirs$rsa_roi_clean_timeseries_dirs))

# add the list of ROIs
func_dat_df <- add_column(func_dat_df, rois = list(rois))

# add the list of ROI directories
func_dat_df <- add_column(func_dat_df, roi_dirs = list(dirs$rois_ss_dirs))
Define the function used to clean functional data
In this next section, we will first define a function and then run it on all datasets. In this function, the preprocessed data will be loaded and transformed to matrix format. A combined mask is generated from the respective ROI mask and the graymatter and brain masks. For every voxel within this mask, movement correction parameters are used as predictors in a GLM with the voxel time series as dependent variable. Finally, the residuals from this GLM (i.e. what could not be explained by motion) will be written to a text file.
# DEFINE THE FUNCTION TO GET THE RESIDUAL TIME SERIES
run_motion_glm <- function(df = func_dat_df[1,]){

  # load the motion params and convert to tibble
  mc_pars <- read.table(df$mc_par_fn, header = FALSE)
  mp_df <- as_tibble(mc_pars)
  colnames(mp_df) <- paste0("mp", 1:6)

  # load the brain mask and linearize it
  brain_mask_nii <- readNIfTI2(df$brain_mask)
  brain_mask_lin <- c(brain_mask_nii)
  assertthat::assert_that(all.equal(unique(brain_mask_lin), c(0,1)))

  # load the graymatter mask, linearize it and threshold it at 0.7
  gray_mask_nii <- readNIfTI2(df$gray_mask)
  gray_mask_lin <- c(gray_mask_nii) > 0.7

  # load the functional data
  func_nii <- readNIfTI2(df$filtered_func)

  # create matrix from the functional data with voxels in rows and volumes in columns
  n_vols <- dim(func_nii)[4]
  func_dat_mat <- matrix(nrow = prod(dim(func_nii)[1:3]), ncol = n_vols)
  for(i_vol in 1:n_vols){
    func_dat_mat[,i_vol] <- c(func_nii[,,,i_vol])
  }

  # define the roi files
  roi_files <- file.path(df$roi_dirs[[1]],
                         sprintf("P%03d_%s_ss_fs.nii.gz", df$subject, df$rois[[1]]))

  # loop over the ROIs
  for(i_roi in 1:length(roi_files)){

    # load the current ROI and linearize it
    roi_nii <- readNIfTI2(roi_files[i_roi])
    roi_lin <- c(roi_nii)

    # make sure masks and functional data have the same number of voxels
    assertthat::assert_that(length(roi_lin) == length(gray_mask_lin))
    assertthat::assert_that(length(roi_lin) == length(brain_mask_lin))
    assertthat::assert_that(length(roi_lin) == dim(func_dat_mat)[1])
    assertthat::assert_that(n_vols == nrow(mc_pars))

    # create a mask combining the ROI, the graymatter, and the functional brain mask
    comb_mask <- as.logical(roi_lin) #& gray_mask_lin & as.logical(brain_mask_lin)

    # run the GLM for each voxel in the combined mask
    roi_dat <- func_dat_mat[comb_mask,]

    # initialize output for cleaned timeseries and then loop over all voxels
    roi_dat_clean <- matrix(nrow = sum(comb_mask), ncol = n_vols)
    for(i_vox in 1:sum(comb_mask)){

      # extract this voxel's data and merge with motion params
      vox_dat <- roi_dat[i_vox,]
      vox_df <- tibble(vox_dat)
      vox_df <- cbind(vox_df, mp_df)

      # run the glm and store the residuals
      roi_dat_clean[i_vox,] <- resid(glm('vox_dat~mp1+mp2+mp3+mp4+mp5+mp6', data = vox_df))
    }

    # write clean timeseries data of this ROI to file
    fn <- sprintf("%03d_run%02d_block%02d_%s_clean_timeseries.txt",
                  df$subject, df$run, df$block, df$rois[[1]][i_roi])
    out_dir_sub <- file.path(df$out_dir[[1]][i_roi], sprintf("%03d", df$subject))
    if(!dir.exists(out_dir_sub)){dir.create(out_dir_sub)}
    write.table(roi_dat_clean, file.path(out_dir_sub, fn), append = FALSE, sep = ",", dec = ".",
                row.names = FALSE, col.names = FALSE)
  }
}
Apply cleaning function
Now we are ready to apply the cleaning function to all blocks of all participants. Typically, this should be run in parallel to save time.
# the next step depends on whether we are in parallel or serial mode
if (!run_parallel){ # run serially

  # run the function for each row of the data frame,
  # i.e. for each block in each run for each subject
  for(i in 1:nrow(func_dat_df)){
    tic(sprintf("Subject %s, run %d, block %d",
                func_dat_df$subject[i], func_dat_df$run[i], func_dat_df$block[i]))
    run_motion_glm(df = func_dat_df[i,])
    toc()
  }

} else if (run_parallel){ # run in parallel, assumes CBS HTCondor is available

  # expand the data frame and write it to file
  func_dat_df_long <- unnest(func_dat_df, cols = c(rois, out_dir, roi_dirs))
  fn <- file.path(here("data","mri", "rsa", "clean_roi_timeseries"),
                  "htc_config_clean_time_series.txt")
  fn_def <- cat(sprintf('"fn <- "%s"',fn))
  write.table(func_dat_df_long, fn,
              append = FALSE, sep = ",", dec = ".", row.names = FALSE, col.names = TRUE)

  # store the function definition as text
  func_def <- capture.output(run_motion_glm)
  func_def[1] <- paste0("run_motion_glm <- ", func_def[1])
  #func_def <- func_def[-length(func_def)]

  # write the Rscript that we want to run
  rscript_fn <- here("data","mri", "rsa", "clean_roi_timeseries", "run_clean_timeseries.R")
  con <- file(rscript_fn)
  open(con, "w")
  writeLines(c(
    "\n# handle input",
    "args = commandArgs()",
    "i <- as.numeric(args[length(args)])",
    "\n#load required packages",
    noquote(sprintf('lib_dir <- "%s"',"/data/pt_02261/virtem/virtem_code/R3.6.1/library/Linux")),
    '.libPaths(c(lib_dir,.libPaths()))',
    'lapply(c("oro.nifti", "assertthat", "dplyr", "neurobase"), library, character.only = TRUE)',
    "\n# read the data and transform ROI column back to list",
    noquote(sprintf('fn <- "%s"',fn)),
    'func_dat_df <- read.table(fn, sep = ",", header = TRUE, stringsAsFactors = FALSE)',
    "\n#define the function to run motion GLM",
    func_def,
    "\n# run the function on one line of the data frame",
    "run_motion_glm(df = func_dat_df[i,])"), con)
  close(con)

  # folder for condor output
  htc_dir <- here("htc_logs", "clean_timeseries")
  if(!dir.exists(htc_dir)){dir.create(htc_dir, recursive = TRUE)}

  # write the submit script
  fn <- here("data","mri", "rsa", "clean_roi_timeseries", "run_clean_timeseries.submit")
  con <- file(fn)
  open(con, "w")
  writeLines(c(
    "universe = vanilla",
    "executable = /afs/cbs.mpg.de/software/scripts/envwrap",
    "request_memory = 9000",
    "notification = error"
  ), con)

  for (i in 1:nrow(func_dat_df_long)){
    writeLines(c(
      sprintf("\narguments = R+ --version 3.6.1 Rscript %s %d", rscript_fn, i),
      sprintf("log = %s/%d.log", htc_dir, i),
      sprintf("output = %s/%d.out", htc_dir, i),
      sprintf("error = %s/%d.err", htc_dir, i),
      sprintf("Queue\n")), con)
  }
  close(con)

  # submit to condor
  #system("reset-memory-max")
  batch_id <- system(paste("condor_submit", fn), intern = TRUE)
  batch_id <- regmatches(batch_id[2], gregexpr("[[:digit:]]+", batch_id[2]))[[1]][2]
  #system("reset-memory-max")

  sprintf("submitted jobs (ID = %s) to clean time series in ROIs. Time to wait...", batch_id)
  pause_until_batch_done(batch_id = batch_id, wait_interval = 300)
}
Extract relevant volumes
As the presentation of images in the picture viewing tasks was locked to the onset of a new volume (see above), the second volume after image onset was selected for every trial, effectively covering the time between 2270 and 4540 ms after stimulus onset.
In this step, we split the presentation logfile from the picture viewing task into the 10 blocks. We reference the volume count to the first volume of each block and extract the relevant volumes from the cleaned ROI time series, accounting for the temporal offset. This is done separately for each ROI and each block, in both the pre and post runs, and the data are written to file. The output files contain the multi-voxel patterns (voxels in rows) for each image presented during the picture viewing tasks (columns), including the catch images. The columns are sorted based on the presentation order during the picture viewing task.
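As a quick check on this timing, assuming a TR of 2270 ms (implied by the reported window) and counting the onset volume as the first, the selected volume spans the interval from one to two TRs after image onset:

# sanity check on the volume timing (assumes TR = 2.27 s)
tr <- 2.27
offset_tr <- 2
c(window_start = (offset_tr - 1) * tr,  # 2.27 s after image onset
  window_end   = offset_tr * tr)        # 4.54 s after image onset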
# select the second volume after image onset (see above)
offset_tr = 2

for (i_sub in subjects){
  for (i_run in 1:n_runs){

    # load the logfile for this run (pre or post)
    log_fn <- file.path(dirs$pvt_log_dir, sprintf('P%s_%svirtem.txt', i_sub, runs[i_run]))
    log <- read.table(log_fn)
    colnames(log) <- c("pic", "fix_start", "pic_start", "volume", "response", "RT", "trial_end")

    # split the log file into the 10 blocks
    log_split <- split(log, rep(1:10, each = 21))

    # reference volume numbers to the first volume of that block
    vol_block <- lapply(log_split, function(x){x$volume - x$volume[1]})
    log_split <- mapply(cbind, log_split, "vol_block"=vol_block, SIMPLIFY=FALSE)

    for (i_roi in rois){
      for (i_block in 1:10){

        # load the cleaned ROI timeseries
        fn <- sprintf("%s_run%02d_block%02d_%s_clean_timeseries.txt",
                      i_sub, i_run, i_block, i_roi)
        dir <- file.path(dirs$rsa_dir, "clean_roi_timeseries", i_roi, i_sub)
        roi_dat <- read.table(file.path(dir, fn), sep = ",", dec = ".")

        # index of relevant volumes when accounting for the offset
        rel_vols <- log_split[[i_block]]$vol_block + offset_tr

        # extract the relevant volumes from the ROI timeseries data
        if(i_block == 1){rel_dat <- roi_dat[,rel_vols]} else{
          rel_dat <- cbind(rel_dat, roi_dat[,rel_vols])}
      }

      # save the relevant data
      out_dir <- file.path(dirs$rsa_dir, "relevant_roi_volumes", i_roi)
      if(!dir.exists(out_dir)){dir.create(out_dir, recursive = TRUE)}
      fn <- sprintf("%s_%s_%s_relevant_volumes.txt", i_sub, i_roi, runs[i_run])
      write.table(rel_dat, file.path(out_dir, fn), append = FALSE, sep = ",",
                  dec = ".", row.names = FALSE, col.names = FALSE)
    }
  }
}
Calculate correlation matrices
Only data for the 20 event images that were shown in the learning task were analyzed; data for the target stimulus were discarded. The similarity between the multi-voxel activity pattern for every event image in every mini-block with the pattern of every other event in every other mini-block was quantified using Pearson correlation coefficients. Thus, comparisons of scenes from the same mini-block were excluded. Next, we calculated mean, Fisher z-transformed correlation coefficients for every pair of events, yielding separate matrices of pattern similarity estimates for the pre- and the post-learning picture viewing tasks (Figure 3).
Next, we need to calculate pair-wise correlations between the multi-voxel patterns from the picture viewing tasks. We will restrict the analysis to the 20 event scenes shown in the learning task and discard the data for the target scene. The correlation matrix will first be calculated for trial-by-trial comparisons of multi-voxel patterns. These will then be averaged while excluding comparisons of patterns from the same block, so that each entry of the resulting 20×20 matrix is the mean of the 90 off-diagonal cells of the corresponding 10×10 block of trial-wise correlations. The correlation matrices that are saved are ordered according to picture identity (i.e. picture 1-20 x picture 1-20).
# for all 10x10 comparisons we will be averaging all comparisons apart from the diagonal
# to exclude same-block comparisons
no_diag <- matrix(data = TRUE, nrow=10, ncol = 10)
diag(no_diag) <- FALSE

for (i_sub in subjects){
  for (i_run in 1:n_runs){
    for(i_roi in 1:length(rois)){

      # load the logfile for this run (pre or post) (Note to self: moved here 6 March when hunting ghosts)
      log_fn <- file.path(dirs$pvt_log_dir, sprintf('P%s_%svirtem.txt', i_sub, runs[i_run]))
      log <- read.table(log_fn)
      colnames(log) <- c("pic", "fix_start", "pic_start", "volume", "response", "RT", "trial_end")

      # load the relevant MRI volumes (i.e. multi-voxel patterns)
      fn <- file.path(dirs$rsa_roi_rel_vol_dirs[i_roi],
                      sprintf("%s_%s_%s_relevant_volumes.txt", i_sub, rois[i_roi], runs[i_run]))
      rel_dat <- read.table(fn, sep = ",", dec = ".")

      # remove patterns & log entries corresponding to the target picture (catch trials)
      rel_dat <- rel_dat[, log$pic != 21]
      log <- log[log$pic != 21,]

      # order the data according to picture identity
      rel_dat <- rel_dat[, order(log$pic)]
      colnames(rel_dat) <- log$pic[order(log$pic)]

      # write to file for diagnostic purposes
      fn <- file.path(dirs$rsa_roi_rel_vol_dirs[i_roi],
                      sprintf("%s_%s_%s_relevant_volumes_ordered.txt", i_sub, rois[i_roi], runs[i_run]))
      write.table(rel_dat, fn, append = FALSE, sep = ",",
                  dec = ".", row.names = FALSE, col.names = FALSE)

      # calculate the correlation matrix (trial-by-trial correlations at this point)
      corr_mat_trial <- cor(rel_dat)

      # save the correlation matrix
      fn <- file.path(dirs$rsa_roi_corr_mat_dirs[i_roi],
                      sprintf("%s_%s_%s_corr_mat_trial.txt", i_sub, rois[i_roi], runs[i_run]))
      write.table(corr_mat_trial, fn, append = FALSE, sep = ",",
                  dec = ".", row.names = FALSE, col.names = FALSE)

      # initialize the condition-by-condition correlation matrix
      corr_mat <- matrix(nrow = 20, ncol = 20, dimnames = c(list(1:20), list(1:20)))

      # loop over all picture comparisons
      for(i_pic1 in 1:20){
        for(i_pic2 in 1:20){

          # extract the current 10x10 correlation matrix
          i1 <- (1+(i_pic1-1)*10):(i_pic1*10)
          i2 <- (1+(i_pic2-1)*10):(i_pic2*10)
          curr_mat <- corr_mat_trial[i1, i2]

          # average the correlations while excluding the diagonal (same-block comparisons)
          corr_mat[i_pic1, i_pic2] <- mean(curr_mat[no_diag])
        }
      }

      # diagnostic plot
      #corrplot(corr_mat, method = "color", is.corr=FALSE,
      #         cl.lim = c(min(corr_mat),max(corr_mat)), addgrid.col = NA)

      # save the correlation matrix
      fn <- file.path(dirs$rsa_roi_corr_mat_dirs[i_roi],
                      sprintf("%s_%s_%s_corr_mat.txt", i_sub, rois[i_roi], runs[i_run]))
      write.table(corr_mat, fn, append = FALSE, sep = ",",
                  dec = ".", row.names = FALSE, col.names = FALSE)
    }
  }
}
Calculate Pattern Similarity Change
In the next step, we calculate how the correlations between patterns change from the first to the second picture viewing task, i.e. through the day learning task. To do this, we load both correlation matrices and reduce them to the upper triangle, excluding the diagonal. The correlations are then Fisher z-transformed (z(r) = atanh(r)) and the similarity values from the pre-learning picture viewing task are subtracted from those of the post-learning picture viewing task to isolate pattern similarity change. The correlations and their changes are then saved together with information about the pictures that were compared.
In order to assess changes in representational similarity due to the day learning task, which took place in between the two picture viewing tasks, we quantified pattern similarity changes as the difference of the respective correlation coefficients for every pair of events between the post-learning picture viewing task and its pre-learning baseline equivalent (Figure 3).
for(i_sub in subjects){
  for(i_roi in 1:length(rois)){

    # load the pre-learning correlation matrix
    fn <- file.path(dirs$rsa_roi_corr_mat_dirs[i_roi],
                    sprintf("%s_%s_pre_corr_mat.txt", i_sub, rois[i_roi]))
    corr_mat_pre <- read.table(fn, sep = ",", dec = ".")

    # load the post-learning correlation matrix
    fn <- file.path(dirs$rsa_roi_corr_mat_dirs[i_roi],
                    sprintf("%s_%s_post_corr_mat.txt", i_sub, rois[i_roi]))
    corr_mat_post <- read.table(fn, sep = ",", dec = ".")

    # reduce to the upper triangle of correlations (without the diagonal)
    pre_corrs <- corr_mat_pre[upper.tri(corr_mat_pre, diag = FALSE)]
    post_corrs <- corr_mat_post[upper.tri(corr_mat_post, diag = FALSE)]

    # create a tibble with the correlations
    pics <- which(upper.tri(corr_mat_post), arr.ind=TRUE)
    corr_df <- tibble(pic1 = pics[,1], pic2 = pics[,2], pre_corrs, post_corrs)

    # Fisher Z transform the correlations and calculate pattern similarity change
    # by subtracting the pre-correlations from the post-correlations
    corr_df$ps_change <- FisherZ(corr_df$post_corrs) - FisherZ(corr_df$pre_corrs)

    # write to file
    fn <- file.path(dirs$rsa_roi_ps_change_dirs[i_roi],
                    sprintf("%s_%s_pattern_similarity_change.txt", i_sub, rois[i_roi]))
    write.table(corr_df, fn, append = FALSE, sep = ",",
                dec = ".", row.names = FALSE, col.names = TRUE)
  }
}
6.4 Combine behavioral and fMRI data for RSA
To create the input for the RSA, the next step is to combine the similarity change data with the behavioral data so that we can run meaningful analyses. First, the behavioral data are loaded and brought into the same pair-wise format as the similarity change data. Both datasets are then combined for each subject and written to disk.
for (i_sub in subjects){

  # load the behavioral data
  fn <- file.path(dirs$timeline_dat_dir, sprintf("%s_behavior_tbl_timeline.txt", i_sub))
  col_types_list <- cols_only(sub_id = col_character(), day = col_factor(),
                              event = col_integer(), pic = col_integer(),
                              virtual_time = col_double(), real_time = col_double(),
                              memory_time = col_double(), memory_order = col_double(),
                              sorted_day = col_integer())
  beh_dat <- read_csv(fn, col_types = col_types_list)

  # sort the behavioral data according to picture identity
  beh_dat_ordered <- beh_dat[order(beh_dat$pic),]

  # find the order of comparisons
  pairs <- which(upper.tri(matrix(nrow = 20, ncol = 20)), arr.ind=TRUE)

  # extract the data for the first and second picture in each pair from the behavioral data
  pic1_dat <- beh_dat_ordered[pairs[,1],]
  pic2_dat <- beh_dat_ordered[pairs[,2],]
  colnames(pic1_dat) <- paste0(colnames(pic1_dat), "1")
  colnames(pic2_dat) <- paste0(colnames(pic2_dat), "2")

  # combine the two tibbles
  pair_dat <- cbind(pic1_dat, pic2_dat)

  # reorder the tibble columns to make a bit more sense
  pair_dat <- pair_dat %>%
    select(sub_id1,
           day1, day2,
           pic1, pic2,
           event1, event2,
           virtual_time1, virtual_time2,
           real_time1, real_time2,
           memory_order1, memory_order2,
           memory_time1, memory_time2,
           sorted_day1, sorted_day2) %>%
    rename(sub_id = sub_id1)

  rsa_dat <- tibble()
  for (i_roi in 1:length(rois)){

    # load the pattern similarity change data for this ROI
    fn <- file.path(dirs$rsa_roi_ps_change_dirs[i_roi],
                    sprintf("%s_%s_pattern_similarity_change.txt", i_sub, rois[i_roi]))
    ps_change_dat <- read.csv(fn)

    # make sure the files have the same order
    assertthat::are_equal(c(pair_dat$pic1, pair_dat$pic2), c(ps_change_dat$pic1, ps_change_dat$pic2))

    # add a column with the ROI name
    ps_change_dat <- add_column(ps_change_dat, roi = rois[i_roi])

    # collect the data from this ROI and merge into a long data frame
    roi_dat <- cbind(pair_dat, ps_change_dat[,3:6])
    rsa_dat <- rbind(rsa_dat, roi_dat)
  }

  # write to file
  fn <- file.path(dirs$rsa_dat_dir, sprintf("%s_data_for_rsa.txt", i_sub))
  write.table(rsa_dat, fn, append = FALSE, sep = ",",
              dec = ".", row.names = FALSE, col.names = TRUE)
}
Finally, the datasets are combined across subjects.
# set up a data frame to collect the data
rsa_dat = tibble()

for (i_sub in subjects){

  # load the data from CSV
  fn <- file.path(dirs$rsa_dat_dir, sprintf("%s_data_for_rsa.txt", i_sub))
  col_types_list <- cols_only(
    sub_id = col_character(),
    day1 = col_integer(), day2 = col_integer(),
    event1 = col_integer(), event2 = col_integer(),
    pic1 = col_integer(), pic2 = col_integer(),
    virtual_time1 = col_double(), virtual_time2 = col_double(),
    real_time1 = col_double(), real_time2 = col_double(),
    memory_time1 = col_double(), memory_time2 = col_double(),
    memory_order1 = col_double(), memory_order2 = col_double(),
    sorted_day1 = col_integer(), sorted_day2 = col_integer(),
    pre_corrs = col_double(), post_corrs = col_double(),
    ps_change = col_double(), roi = col_factor()
  )
  sub_dat <- as_tibble(read_csv(fn, col_types = col_types_list))

  # append to the table with data from all subjects
  rsa_dat <- bind_rows(sub_dat, rsa_dat)
}

# sort the data
rsa_dat <- rsa_dat[with(rsa_dat, order(sub_id, day1, day2, event1, event2)),]

# write to file
fn <- file.path(dirs$data4analysis, "rsa_data_rois.txt")
write.table(rsa_dat, fn, append = FALSE, sep = ",",
            dec = ".", row.names = FALSE, col.names = TRUE)