Preprocess some resting state fMRI data with NiPype
"""A helper node to concatenate images in nipype"""
import os
from nipype.interfaces.base import TraitedSpec, File, CommandLineInputSpec, CommandLine
from nipype.utils.misc import isdefined
class ConcatImgInputSpec(CommandLineInputSpec):
in_file1 = File(exists=True, argstr="-append %s",
position=1, mandatory=True)
in_file2 = File(argstr="%s", position=2, mandatory=True)
out_file = File(argstr="%s", position=3, genfile=True)
class ConcatImgOutputSpec(TraitedSpec):
merged_file = File(exists=True)
class ConcatImg(CommandLine):
input_spec = ConcatImgInputSpec
output_spec = ConcatImgOutputSpec
_cmd = "convert"
def _gen_filename(self, name):
if name == 'out_file':
in_file1 = self.inputs.in_file1
return '%s_merged%s' % os.path.splitext(in_file1)
else:
raise NotImplementedError
def _list_outputs(self):
outputs = self._outputs().get()
outputs['merged_file'] = self.inputs.out_file
if not isdefined(outputs['merged_file']):
outputs['merged_file'] = self._gen_filename('out_file')
return outputs
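# A minimal usage sketch (hypothetical paths): run standalone, the node
# builds a command line like ``convert -append a.png b.png a_merged.png``
# (presumably ImageMagick's ``convert``, where ``-append`` stacks the
# images vertically):
#
#     concat = ConcatImg()
#     concat.inputs.in_file1 = 'a.png'
#     concat.inputs.in_file2 = 'b.png'
#     result = concat.run()  # result.outputs.merged_file -> 'a_merged.png'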
"""
Preprocess the data with SPM
for parallel computer, execute, before::
ipcluster local -n 10 --logdir ~/pipeline.ipython
"""
import os
import glob
from nipype.interfaces import io
from nipype.interfaces import spm
from nipype.interfaces import matlab # how to run matlab
from nipype.interfaces import fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.algorithms.rapidart as ra # artifact detection
from nipype.pipeline import engine as pe
import irmas
from concat_img import ConcatImg
from slicer import Slicer, Resampler
################################################################################
# Constants
IN_DIR = \
    "/neurospin/unicog/protocols/IRMf/Baronnet_connectivity_2011"
#WORKING_DIR = os.path.expanduser("~/tmp/baronnet/")
WORKING_DIR = "/volatile/varoquau/tmp/baronnet/"
OUT_DIR = "/neurospin/unicog/protocols/IRMf/Baronnet_connectivity_2011/preproc/spm8/"
################################################################################
# Setup the FSL environment to work with neurodebian
fsl.FSLCommand.set_default_output_type('NIFTI')
os.environ['FSLDIR'] = '/usr/share/fsl/4.1'
os.environ['PATH'] = '%s:%s/bin' % (os.environ['PATH'],
                                    os.environ['FSLDIR'])
os.environ['LD_LIBRARY_PATH'] = '%s:/usr/lib/fsl/4.1/' % (
                                    os.environ.get('LD_LIBRARY_PATH', ''))
TEMPLATE_T1 = os.path.join(os.environ['FSLDIR'],
                           'data', 'standard', 'MNI152_T1_2mm.nii.gz')
TEMPLATE_EPI = "/i2bm/local/spm8/templates/EPI.nii"
# Set the way matlab should be called
#matlab.MatlabCommand.set_default_matlab_cmd("/volatile/varoquau/usr/bin/matlab")
# HACK: the command above does not work with the neurospin setup; we have
# to call the original mathworks shell script directly
matlab.MatlabCommand.set_default_matlab_cmd("/neurospin/local/matlab/bin/matlab")
matlab.MatlabCommand.set_default_paths('/i2bm/local/spm8/')
################################################################################
# Helper functions
def png_file_name(scan_id):
    # This function is used as a nipype connection function: imports and
    # constants must be local to it
    import irmas
    import os
    OUT_DIR = "/neurospin/unicog/protocols/IRMf/Baronnet_connectivity_2011/preproc/spm8/"
    subj_id, exam_nb = irmas.scan_to_subject(scan_id)
    subject_info = irmas.get_subject_info(subj_id)
    patient = ('patient' if subject_info['patient'] else 'control')
    file_name = '%s_scan_%03i_subj_%03i_exam_%03i.png' % \
                (patient, scan_id, subj_id, exam_nb)
    file_name = os.path.join(OUT_DIR, 'all_subject_report',
                             'registration', file_name)
    return file_name
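# For example (hypothetical ids): scan 173 of subject 42, exam 1, flagged as
# a patient, yields
#     OUT_DIR/all_subject_report/registration/patient_scan_173_subj_042_exam_001.png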
def has_lesion_mask(scan_id):
    if glob.glob(os.path.join(IN_DIR, 'subject_lesions',
                              'subject_%03i' % scan_id, '*.img')):
        return True
    else:
        return False
################################################################################
if not os.path.exists(WORKING_DIR):
    os.makedirs(WORKING_DIR)
if not os.path.exists(OUT_DIR):
    os.makedirs(OUT_DIR)
#subject_list = range(1, 154)[:40]
subject_list = range(173, 180)
CUT_COORDS = (2, -28, 18)
# A node just to generate the right paths
info_source = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                      name="infosource")
# Loading the data from the disk
data_source = pe.Node(io.DataGrabber(), name='source')
data_source.inputs.base_directory = IN_DIR
data_source.inputs.template = '%s/subject_%03i/%s*.img'
data_source.inputs.template_args = dict(
    func=[['preproc/spm8', 'subject_id',
           ['fMRI/acquisition1/rest1/fBaronnet_connectivity',
            'fMRI/acquisition1/rest2/fBaronnet_connectivity']
          ]],
    anat=[['preproc/spm8', 'subject_id',
           't1mri/acquisition1/sBaronnet_connectivity']],
    flair=[['subject_lesions', 'subject_id',
            'FLAIR/sIRMAS', ]],
    lesion=[['subject_lesions', 'subject_id',
             'lesion_subject_', ]],
    )
data_source.inputs.infields = ['subject_id']
data_source.inputs.outfields = ['func', 'anat']
data_source.inputs.raise_on_empty = False
data_source.inputs.sort_filelist = True
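# With the template above, the 'func' field for subject 1 expands to globs
# such as:
#     IN_DIR/preproc/spm8/subject_001/fMRI/acquisition1/rest1/fBaronnet_connectivity*.img
#     IN_DIR/preproc/spm8/subject_001/fMRI/acquisition1/rest2/fBaronnet_connectivity*.img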
# Converting to 4D files:
merge_to_4d = pe.MapNode(fsl.Merge(),
                         iterfield=['in_files'], name='3d_to_4d')
merge_to_4d.inputs.dimension = 't'
# Rename to avoid naming conflicts between the first and second sessions
rename = pe.MapNode(util.Rename(format_string='fBaronnet_connectivity_rest_%(session)s'),
                    iterfield=['in_file', 'session'],
                    name='rename')
rename.inputs.keep_ext = True
#rename.inputs.parse_string = '(?P<filename>.*)\..*'
rename.inputs.session = [1, 2]
# Slice timing
slice_timing = pe.Node(interface=spm.SliceTiming(), name="slice_timing")
TR = 2.460
num_slices = 41
slice_timing.inputs.num_slices = num_slices
slice_timing.inputs.time_repetition = TR
slice_timing.inputs.time_acquisition = TR - TR/float(num_slices)
slice_timing.inputs.slice_order = (range(1, num_slices+1, 2)
                                   + range(2, num_slices+1, 2))
slice_timing.inputs.ref_slice = num_slices/2
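# With TR = 2.460 s and 41 slices, TA = 2.460 - 2.460/41 = 2.400 s; the
# slice order above is interleaved, odd slices first:
#     [1, 3, 5, ..., 41, 2, 4, ..., 40], with ref_slice = 41/2 = 20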
# Realign for motion correction
realign = pe.Node(interface=spm.Realign(), name="realign")
realign.inputs.register_to_mean = True
# Artifact detection
art = pe.Node(interface=ra.ArtifactDetect(), name="art")
art.inputs.use_differences = [True, True]
art.inputs.use_norm = True
art.inputs.norm_threshold = 0.5
art.inputs.zintensity_threshold = 3
art.inputs.mask_type = 'file'
art.inputs.parameter_source = 'SPM'
# Resample the EPI to the anat (voxel sizes around 1), to apply coregister
# without downsampling
resample_high_res = pe.Node(Resampler(), name='resample_high_res')
# Coregister the functional images to the anatomical images
coregister = pe.Node(interface=spm.Coregister(), name="coregister")
coregister.inputs.jobtype = "estwrite"  # alternative: 'estimate'
coregister_flair = pe.Node(interface=spm.Coregister(),
                           name="coregister_flair")
coregister_flair.inputs.jobtype = "estwrite"  # alternative: 'estimate'
# Normalize to apply the transformations
# We need several of these nodes, as we apply the transformations to
# different sets of images
normalize_func = pe.Node(spm.Normalize(jobtype='write'), name='normalize_func')
normalize_anat = pe.Node(spm.Normalize(jobtype='write'), name='normalize_anat')
normalize_lesion = pe.Node(spm.Normalize(jobtype='write'),
                           name='normalize_lesion')
normalize = pe.Node(spm.Normalize(jobtype='estwrite'), name='normalize')
normalize.inputs.template = TEMPLATE_EPI
# Unified segmentation to segment tissues
segment = pe.Node(spm.Segment(), name='segment')
segment.inputs.gm_output_type = [False, False, True]
segment.inputs.wm_output_type = [False, False, True]
segment.inputs.csf_output_type = [False, False, True]
segment.inputs.affine_regularization = ''
# Some visual output to check the registration
slicer_epi = pe.Node(Slicer(), name="slicer_epi")
slicer_epi.inputs.image_edges = TEMPLATE_EPI
slicer_epi.inputs.cut_coords = CUT_COORDS
slicer_t1 = pe.Node(Slicer(), name="slicer_t1")
slicer_t1.inputs.image_edges = TEMPLATE_T1
slicer_t1.inputs.cut_coords = CUT_COORDS
slicer_overlay = pe.Node(Slicer(), name="slicer_overlay")
slicer_overlay.inputs.cut_coords = CUT_COORDS
concat_img = pe.Node(ConcatImg(), name='concat_img')
concat_img2 = pe.Node(ConcatImg(), name='concat_img2')
# And finally, a datasink, to store outputs in the right place
output = pe.Node(io.DataSink(parameterization=False), name='output')
output.inputs.base_directory = OUT_DIR
output.inputs.substitutions = [('c4wrs', 'rwc4s'),
                               ('c3wrs', 'rwc3s'),
                               ('c2wrs', 'rwc2s'),
                               ('c1wrs', 'rwc1s'),
                               ('wrs', 'rwms'),
                              ]
# Now build the workflow
workflow = pe.Workflow(name='spm8_epi')
workflow.base_dir = WORKING_DIR
def getcontainer(subject_id):
    return 'subject_%03i' % subject_id
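# e.g. getcontainer(5) -> 'subject_005'; connected below as the DataSink
# container, so each subject's outputs land in their own sub-directory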
################################################################################
workflow.connect([
    (info_source, data_source,
        [('subject_id', 'subject_id'),
         #(('subject_id', make_template_args), 'template_args')
        ]),
    (data_source, merge_to_4d,
        [('func', 'in_files')]),
    (merge_to_4d, rename,
        [('merged_file', 'in_file')]),
    (rename, slice_timing,
        [('out_file', 'in_files')]),
    (slice_timing, realign,
        [('timecorrected_files', 'in_files')]),
    (realign, normalize,
        [('mean_image', 'source')]),
    (normalize, normalize_func,
        [('normalization_parameters', 'parameter_file')]),
    (normalize, normalize_anat,
        [('normalization_parameters', 'parameter_file')]),
    (realign, resample_high_res,
        [('mean_image', 'in_file')]),
    (data_source, resample_high_res,
        [('anat', 'target')]),
    (resample_high_res, coregister,
        [('out_file', 'target')]),
    (data_source, coregister,
        [('anat', 'source'),
         #('anat', 'apply_to_files')
        ]),
    (coregister, normalize_anat,
        [('coregistered_source', 'apply_to_files')]),
    (realign, normalize_func,
        [('realigned_files', 'apply_to_files')]),
    (normalize_anat, segment,
        [('normalized_files', 'data')]),
    (segment, slicer_overlay,
        [('native_gm_image', 'stat_image')]),
    (normalize, slicer_epi,
        [('normalized_source', 'background_image')]),
    (normalize_anat, slicer_overlay,
        [('normalized_files', 'background_image')]),
    (normalize_anat, slicer_t1,
        [('normalized_files', 'background_image')]),
    #(realign, art,
    #    [('realignment_parameters', 'realignment_parameters')]),
    #(normalize_func, art,
    #    [('normalized_files', 'realigned_files')]),
    #(art, output,
    #    [('outlier_files',
    #      'QualityCheck_files.@outliers'),
    #     ('intensity_files',
    #      'QualityCheck_files.@intensity'),
    #     ('statistic_files',
    #      'QualityCheck_files.@statistics'),
    #    ]),
    (segment, output,
        [('native_wm_image',
          't1mri.acquisition1.@wc2sBaronnet_connectivity_2011'),
         ('native_gm_image',
          't1mri.acquisition1.@wc1sBaronnet_connectivity_2011'),
         ('native_csf_image',
          't1mri.acquisition1.@wc3sBaronnet_connectivity_2011'),
        ]),
    (info_source, output,
        [(('subject_id', getcontainer), 'container'),
        ]),
    (normalize_func, output,
        [('normalized_files',
          'fMRI.acquisition1.@wafBaronnet_connectivity_2011')]),
    (normalize_anat, output,
        [('normalized_files',
          't1mri.acquisition1.@wmsBaronnet_connectivity_2011')]),
    (realign, output,
        [('realignment_parameters', '@allNuisanceRegressors')]),
    (slicer_epi, concat_img,
        [("out_file", 'in_file1')]),
    (slicer_t1, concat_img,
        [("out_file", 'in_file2')]),
    (slicer_overlay, concat_img2,
        [("out_file", 'in_file2')]),
    (concat_img, concat_img2,
        [("merged_file", 'in_file1')]),
    (info_source, concat_img2,
        [(('subject_id', png_file_name), 'out_file')]),
    ])
# For debugging
import logging
iflogger = logging.getLogger('interface')
#iflogger.setLevel(-10)
################################################################################
# Run the workflow for patients without lesion masks
workflow.write_graph('preproc_graph.dot')
print 'Starting workflow without lesion masks'
#workflow.config['stop_on_first_crash'] = True
info_source.iterables = ('subject_id',
                         [s for s in subject_list if not has_lesion_mask(s)])
workflow.run()
def first_item(l):
    if isinstance(l, (list, tuple)):
        return l[0]
    else:
        return l
################################################################################
# Now the workflow for patients with lesion masks
print 'Starting workflow with lesion masks'
workflow.connect([
    (normalize, normalize_lesion,
        [('normalization_parameters', 'parameter_file')]),
    (resample_high_res, coregister_flair,
        [('out_file', 'target')]),
    (data_source, coregister_flair,
        [('flair', 'source'),
         ('lesion', 'apply_to_files')
        ]),
    (coregister_flair, normalize_lesion,
        [('coregistered_files', 'apply_to_files')]),
    (normalize_lesion, output,
        [('normalized_files',
          'lesion.@lesion')]),
    ])
info_source.iterables = ('subject_id',
                         [s for s in subject_list if has_lesion_mask(s)])
workflow.run()
"""
A slicer node, a la FSL, but using nipy.
The main advantage of this node is not to force resampling of the
different images.
"""
import os
import pylab as pl
from nipy.labs import viz, as_volume_img, save
from nipype.interfaces.base import TraitedSpec, BaseInterface, File, traits
from nipype.utils.misc import isdefined
################################################################################
# Node that generates an 'out_file'
class OutFileOutputSpec(TraitedSpec):
    out_file = File(exists=True)


class OutFileNode(BaseInterface):
    output_spec = OutFileOutputSpec
    _prefix = ''
    _candidate_files = []

    def _gen_filename(self, name):
        if name == 'out_file':
            # Name the output after the first candidate input that is defined
            for candidate in self._candidate_files:
                in_file = getattr(self.inputs, candidate)
                if not isdefined(in_file):
                    continue
                return os.path.join(os.getcwd(),
                        '%s%s.%s' %
                        (self._prefix,
                         os.path.basename(os.path.splitext(in_file)[0]),
                         self._suffix))
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = self.inputs.out_file
        if not isdefined(outputs['out_file']):
            outputs['out_file'] = self._gen_filename('out_file')
        return outputs
################################################################################
# Resampler node
class ResamplerInputSpec(TraitedSpec):
    in_file = File(exists=True, required=True)
    target = File(exists=True, required=True)
    out_file = File(genfile=True)


class Resampler(OutFileNode):
    input_spec = ResamplerInputSpec
    _suffix = 'nii'
    _prefix = 'w'
    _candidate_files = ['in_file', ]

    def _run_interface(self, runtime):
        in_img = as_volume_img(self.inputs.in_file)
        target_img = as_volume_img(self.inputs.target)
        in_img = in_img.resampled_to_img(target_img)
        save(self._list_outputs()['out_file'], in_img)
        runtime.returncode = 0
        return runtime
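# A minimal usage sketch (hypothetical paths): resample a mean EPI onto the
# grid of the anatomical image; the output name 'wmean_epi.nii' follows from
# the 'w' prefix and 'nii' suffix class attributes:
#
#     resample = Resampler()
#     resample.inputs.in_file = 'mean_epi.nii'
#     resample.inputs.target = 'anat.nii'
#     resample.run()  # writes 'wmean_epi.nii' in the current directory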
################################################################################
# Slicer node, for display
class SlicerInputSpec(TraitedSpec):
    background_image = File(exists=True)
    stat_image = File(exists=True)
    image_edges = File(exists=True,
                       desc='volume to display edge overlay for '
                            '(useful for checking registration)')
    out_file = File(genfile=True)
    cut_coords = traits.Either(
                    traits.Enum(('auto', )),
                    traits.Tuple(-2, -28, 17),
                    )
class Slicer(OutFileNode):
    input_spec = SlicerInputSpec
    _suffix = 'png'
    _candidate_files = ['background_image', 'stat_image', 'image_edges']

    def _run_interface(self, runtime):
        background_img = self.inputs.background_image
        if isdefined(background_img):
            background_img = as_volume_img(background_img)
        else:
            background_img = False
        edge_img = self.inputs.image_edges
        if isdefined(edge_img):
            edge_img = as_volume_img(edge_img)
        else:
            edge_img = False
        stat_img = self.inputs.stat_image
        if isdefined(stat_img):
            stat_img = as_volume_img(stat_img)
        else:
            stat_img = False
        pl.close(-251)
        cut_coords = self.inputs.cut_coords
        if cut_coords == 'auto':
            cut_coords = None
        if stat_img:
            if background_img:
                anat = background_img.get_data()
                anat_affine = background_img.affine
            else:
                anat = False
                anat_affine = None
            ortho_slicer = viz.plot_map(stat_img.get_data(),
                                        stat_img.affine,
                                        anat=anat, anat_affine=anat_affine,
                                        cut_coords=cut_coords,
                                        threshold='auto',
                                        black_bg=True, figure=-251,
                                        cmap=pl.cm.hot)
        elif background_img:
            ortho_slicer = viz.plot_anat(background_img.get_data(),
                                         background_img.affine, black_bg=True,
                                         cut_coords=cut_coords,
                                         figure=-251)
        else:
            raise ValueError('One input image, stat or background is required')
        if edge_img:
            ortho_slicer.edge_map(edge_img.get_data(), edge_img.affine)
        pl.savefig(self._list_outputs()['out_file'], facecolor='k',
                   edgecolor='k')
        runtime.returncode = 0
        return runtime
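# A minimal usage sketch (hypothetical paths): plot a normalized mean EPI
# with the edges of the template overlaid, for a quick registration check:
#
#     slicer = Slicer()
#     slicer.inputs.background_image = 'wmean_epi.nii'
#     slicer.inputs.image_edges = 'EPI.nii'
#     slicer.inputs.cut_coords = (2, -28, 18)
#     slicer.run()  # writes 'wmean_epi.png' in the current directory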