diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 000000000..b89aa4b9e --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,44 @@ +version: 2 +jobs: + build: + docker: + - image: continuumio/miniconda3:4.5.4 + environment: + ENV_NAME: mindboggle-env + vtk_cpp_tools: /src/mindboggle/vtk_cpp_tools/bin + working_directory: /src/mindboggle + steps: + - checkout + - run: + name: Install mindboggle + command: | + set -eu + apt-get update -qq && apt-get install -yq \ + build-essential \ + libgl1-mesa-dev \ + libglu1-mesa-dev \ + libsm-dev \ + libx11-dev \ + libxt-dev \ + libxext-dev + ln -sv /usr/lib/x86_64-linux-gnu /usr/lib64 + conda config --set show_channel_urls yes --set always_yes yes + conda config --add channels conda-forge + conda create -n $ENV_NAME python=3.6 pip + source activate $ENV_NAME + conda install -y cmake mesalib vtk pandas matplotlib \ + colormath nipype tbb-devel nose etelemetry + conda info --envs + python setup.py install + mkdir $vtk_cpp_tools && cd $vtk_cpp_tools && cmake ../ && make + - run: + name: Test mindboggle + command: | + source activate $ENV_NAME + export MB=/opt/conda/envs/${ENV_NAME}/lib/python3.6/site-packages/mindboggle + export PATH=$PATH:$vtk_cpp_tools + python -m "nose" --with-doctest ${MB}/thirdparty/ + python -m "nose" --with-doctest ${MB}/mio/ + python -m "nose" --with-doctest ${MB}/guts/ + python -m "nose" --with-doctest ${MB}/shapes/ + python -m "nose" --with-doctest ${MB}/features/ diff --git a/.gitignore b/.gitignore index 9e4f95ecb..28784d79a 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,5 @@ _$* *.tar.gz *.Z *.zip + +MANIFEST \ No newline at end of file diff --git a/CHANGELOG b/CHANGELOG new file mode 100644 index 000000000..08d428abc --- /dev/null +++ b/CHANGELOG @@ -0,0 +1,35 @@ +CHANGELOG + +This document is an excerpt from Mindboggle's Docker Hub Updates: +https://cloud.docker.com/u/nipy/repository/docker/nipy/mindboggle/general + +November 4, 2019 (v1.3.8): + - Fix issue #190 in v1.3.7, an error caused by a unicode character within docstrings + - Introduce Singularity recipe in neurodocker.sh + +September 24, 2019 (v1.3.7): +Satrajit Ghosh added etelemetry capabilities. + +June 11, 2019 (v1.3.4): +Conda installed a version of vtk that broke the C++ code +(https://github.com/nipy/mindboggle/issues/171). + - updated neurodocker.sh to install VTK 8.2 +Nipype was breaking when it tried to generate reports. + - added "encoding='utf-8'" in nipype files in the docker image to resolve issue #175 +Zernike moments stopped working because scipy.misc was deprecated. + - replaced with scipy.special +Non-default (Gaussian, min, max) curvature file names were incomplete. + - fixed the names so that they have the same filestem as the mean curvature file +Image links were broken in docs and in the Jupyter notebook tutorial. + - moved images to a new GitHub repository "nipy/mindboggle-assets" + +September 14, 2018: +neurodocker.sh updated to remove __macosx files to allow for Singularity builds. + +August 26, 2018: +neurodocker.sh replaces the earlier Dockerfile-creating scripts. + +March 8, 2018: +mindboggle123 script revised to run antsCorticalThickness.sh with "-u 0" flag, +which turns off a default setting in ANTs that introduces a stochastic element +in the Atropos segmentation algorithm. \ No newline at end of file diff --git a/README.rst b/README.rst index 74c2d1389..cb15b549a 100644 --- a/README.rst +++ b/README.rst @@ -11,59 +11,60 @@ of results. 
The software runs on Linux and is written in Python 3 and Python-wrapped C++ code called within a `Nipype `_ pipeline framework. We have tested the software most extensively with Python 3.5.1 on Ubuntu Linux 14.04. -.. - 1. Reference - 2. Getting help - 3. Installation - 4. Run one command - 5. Run separate commands - 6. Appendix: processing - 7. Appendix: output - :Release: |version| :Date: |today| +:ref:`modindex` and :ref:`genindex` -Links: - -.. toctree:: - :maxdepth: 1 - - FAQ - license +------------------------------------------------------------------------------ +Contents +------------------------------------------------------------------------------ +- `Links`_ +- `Reference`_ +- `Help`_ +- `Installation`_ +- `Tutorial`_ +- `Run one command`_ +- `Run separate commands`_ +- `Visualize output`_ +- `Appendix: processing`_ +- `Appendix: output`_ +------------------------------------------------------------------------------ +_`Links` +------------------------------------------------------------------------------ +- `FAQ `_ - `GitHub `_ and `Circleci tests `_ - `Contributors `_ - -* :ref:`modindex` -* :ref:`genindex` +- `License `_ ------------------------------------------------------------------------------ -Reference +_`Reference` ------------------------------------------------------------------------------ A Klein, SS Ghosh, FS Bao, J Giard, Y Hame, E Stavsky, N Lee, B Rossa, M Reuter, EC Neto, A Keshavan. 2017. **Mindboggling morphometry of human brains**. *PLoS Computational Biology* 13(3): e1005350. -`doi:10.1371/journal.pcbi.1005350 `_ +`doi:10.1371/journal.pcbi.1005350 `_ ------------------------------------------------------------------------------ -Getting help +_`Help` ------------------------------------------------------------------------------ -If you have any questions about Mindboggle, please post to -`NeuroStars `_ with the tag -"mindboggle". If you have found a bug, big or small, please +General questions about Mindboggle, or having some difficulties getting started? +Please search for relevant mindboggle posts in +`NeuroStars `_ +or post your own message with the tag "mindboggle". + +Found a bug, big or small? Please `submit an issue `_ on GitHub. ------------------------------------------------------------------------------ -Installation +_`Installation` ------------------------------------------------------------------------------ We recommend installing Mindboggle and its dependencies as a cross-platform Docker container for greater convenience and reproducibility of results. All the examples below assume you are using this Docker container, with the path /home/jovyan/work/ pointing to your host machine. -(Alternatively, Mindboggle can be installed from scratch on a Linux machine -using -`this script `_). +(Alternatively, one can `create a Singularity image `_.) 1. `Install and run Docker `_ on your (macOS, Linux, or Windows) host machine. @@ -73,42 +74,61 @@ terminal window):: docker pull nipy/mindboggle -*Note 1: This contains FreeSurfer, ANTs, and Mindboggle, so it is currently +*Note 1:* This contains FreeSurfer, ANTs, and Mindboggle, so it is currently over 6GB.* -*Note 2: You may need to increase memory allocated by Docker to at least 5GB. -For example: `By default, Docker for Mac is set to use 2 GB runtime memory `_.* +*Note 2:* You may need to increase memory allocated by Docker to at least 5GB. +For example: By default, Docker for `Mac is set to use 2 GB runtime memory `_. 3. 
Recommended: download sample data. To try out the ``mindboggle`` examples below, download and unzip the directory of example input data `mindboggle_input_example.zip `_ (455 MB). For example MRI data to preprocess with FreeSurfer and ANTs software, download and unzip `example_mri_data.zip `_ (29 MB). -4. Optionally set environment variables for clarity in the commands below -(modify accordingly, except for DOCK):: +4. Recommended: set environment variables for clarity in the commands below +(modify accordingly, except for DOCK -- careful, this step is tricky!):: - HOST=/Users/binarybottle # path on host to access input/output - DOCK=/home/jovyan/work # path to HOST from Docker container - IMAGE=$DOCK/example_mri_data/T1.nii.gz # input image on HOST + HOST=/Users/binarybottle # path on local host seen from Docker container to access/save data + DOCK=/home/jovyan/work # path to HOST from Docker container (DO NOT CHANGE) + IMAGE=$DOCK/example_mri_data/T1.nii.gz # brain image in $HOST to process ID=arno # ID for brain image + OUT=$DOCK/mindboggle123_output # output path ('--out $OUT' below is optional) + +------------------------------------------------------------------------------ +_`Tutorial` +------------------------------------------------------------------------------ +To run the Mindboggle jupyter notebook tutorial, first install the Mindboggle +Docker container (above) and run the notebook in a web browser as follows +(replacing $HOST with the absolute path where you want to access/save data):: + + docker run --rm -ti -v $HOST:/home/jovyan/work -p 8888:8888 nipy/mindboggle jupyter notebook /opt/mindboggle/docs/mindboggle_tutorial.ipynb --ip=0.0.0.0 --allow-root + +In the output on the command line you'll see something like:: + + [I 20:47:38.209 NotebookApp] The Jupyter Notebook is running at: + [I 20:47:38.210 NotebookApp] http://(057a72e00d63 or 127.0.0.1):8888/?token=62853787e0d6e180856eb22a51609b25e + +You would then copy and paste the corresponding address into your web browser +(in this case, ``http://127.0.0.1:8888/?token=62853787e0d6e180856eb22a51609b25e``), +and click on "mindboggle_tutorial.ipynb". ------------------------------------------------------------------------------ -Run one command +_`Run one command` ------------------------------------------------------------------------------ The Mindboggle Docker container can be run as a single command to process a T1-weighted MR brain image through FreeSurfer, ANTs, and Mindboggle. Skip to the next section if you wish to run ``recon-all``, ``antsCorticalThickness.sh``, and ``mindboggle`` differently:: - docker run --rm -ti -v $HOST:$DOCK nipy/mindboggle $IMAGE --id $ID + docker run --rm -ti -v $HOST:$DOCK nipy/mindboggle mindboggle123 $IMAGE --id $ID Outputs are stored in $DOCK/mindboggle123_output/ by default, but you can set a different output path with ``--out $OUT``. ------------------------------------------------------------------------------ -Run separate commands +_`Run separate commands` ------------------------------------------------------------------------------ If finer control is needed over the software in the Docker container, the following instructions outline how to run each command separately. @@ -119,12 +139,17 @@ on the cortical surfaces and in the cortical and non-cortical volumes (v5.3 generates these surface labels by default; older versions require "-gcs DKTatlas40.gcs" to generate these surface labels).* -1. 
Enter the Docker container's bash shell to run ``recon-all``, -``antsCorticalThickness.sh``, and ``mindboggle`` commands:: +1. Enter the Docker container's bash shell to run ``recon-all``, ``antsCorticalThickness.sh``, and ``mindboggle`` commands:: - docker run --rm -ti -v $HOST:$DOCK --entrypoint /bin/bash nipy/mindboggle + docker run --rm -ti -v $HOST:$DOCK -p 5000:5000 nipy/mindboggle -2. `FreeSurfer `_ generates labeled +2. Recommended: reset environment variables as above within the Docker container:: + + DOCK=/home/jovyan/work # path to HOST from Docker container + IMAGE=$DOCK/example_mri_data/T1.nii.gz # input image on HOST + ID=arno # ID for brain image + +3. `FreeSurfer `_ generates labeled cortical surfaces, and labeled cortical and noncortical volumes. Run ``recon-all`` on a T1-weighted IMAGE file (and optionally a T2-weighted image), and set the output ID name as well as the $FREESURFER_OUT output @@ -134,12 +159,12 @@ directory:: recon-all -all -i $IMAGE -s $ID -sd $FREESURFER_OUT -3. `ANTs `_ provides brain volume extraction, +4. `ANTs `_ provides brain volume extraction, segmentation, and registration-based labeling. ``antsCorticalThickness.sh`` generates transforms and segmentation files used by Mindboggle, and is run on the same IMAGE file and ID as above, with $ANTS_OUT output directory. TEMPLATE points to the `OASIS-30_Atropos_template `_ folder -already installed in the Docker container ("\\" splits the command for readability):: +already installed in the Docker container (backslashes split the command for readability):: ANTS_OUT=$DOCK/ants_subjects TEMPLATE=/opt/data/OASIS-30_Atropos_template @@ -149,9 +174,10 @@ already installed in the Docker container ("\\" splits the command for readabili -t $TEMPLATE/T_template0_BrainCerebellum.nii.gz \ -m $TEMPLATE/T_template0_BrainCerebellumProbabilityMask.nii.gz \ -f $TEMPLATE/T_template0_BrainCerebellumExtractionMask.nii.gz \ - -p $TEMPLATE/Priors2/priors%d.nii.gz + -p $TEMPLATE/Priors2/priors%d.nii.gz \ + -u 0 -4. **Mindboggle** can be run on data preprocessed by ``recon-all`` and +5. **Mindboggle** can be run on data preprocessed by ``recon-all`` and ``antsCorticalThickness.sh`` as above by setting:: FREESURFER_SUBJECT=$FREESURFER_OUT/$ID @@ -174,17 +200,22 @@ To learn about Mindboggle's command options, type this in a terminal window:: mindboggle -h **Example 1:** -This command runs Mindboggle on data run through FreeSurfer but not ANTs:: +Run Mindboggle on data processed by FreeSurfer but not ANTs:: mindboggle $FREESURFER_SUBJECT --out $OUT **Example 2:** +Same as Example 1 with output to visualize surface data with roygbiv:: + + mindboggle $FREESURFER_SUBJECT --out $OUT --roygbiv + +**Example 3:** Take advantage of ANTs output as well ("\\" splits for readability):: - mindboggle $FREESURFER_SUBJECT --out $OUT \ + mindboggle $FREESURFER_SUBJECT --out $OUT --roygbiv \ --ants $ANTS_SUBJECT/antsBrainSegmentation.nii.gz -**Example 3:** +**Example 4:** Generate only volume (no surface) labels and shapes:: mindboggle $FREESURFER_SUBJECT --out $OUT \ @@ -192,7 +223,20 @@ Generate only volume (no surface) labels and shapes:: --no_surfaces ------------------------------------------------------------------------------ -Appendix: processing +_`Visualize output` +------------------------------------------------------------------------------ +To visualize Mindboggle output with roygbiv, start the Docker image (#1 above), +then run roygbiv on an output directory:: + + roygbiv $OUT/$ID + +and open a browser to `localhost:5000`. 
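If you would rather load the per-vertex output into Python yourself, the
scalars in any of Mindboggle's output VTK surfaces can be read with
Mindboggle's own I/O helpers. A minimal sketch, assuming the default output
layout (the exact file path below is hypothetical; adjust it to your own
$OUT and subject ID)::

    from mindboggle.mio.vtks import read_scalars

    # per-vertex travel depth on the left cortical surface (hypothetical path)
    depth_file = 'mindboggled/arno/shapes/left_cortical_surface/travel_depth.vtk'
    depths, name = read_scalars(depth_file, True, True)
    print(len(depths), 'per-vertex values read from', depth_file)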
+ +Currently roygbiv only shows summarized data, but one of our goals is to work +on by-vertex visualizations (for the latter, try `Paraview `_). + +------------------------------------------------------------------------------ +_`Appendix: processing` ------------------------------------------------------------------------------ The following steps are performed by Mindboggle (with links to code on GitHub): @@ -201,7 +245,6 @@ The following steps are performed by Mindboggle (with links to code on GitHub): 3. Compute volume shape measures for each labeled region: - volume (`volume_per_brain_region `_) - - thickness of cortical labels (`thickinthehead `_) 4. Compute surface shape measures for every cortical mesh vertex: @@ -232,13 +275,13 @@ The following steps are performed by Mindboggle (with links to code on GitHub): - median absolute deviation - mean - standard deviation - - skewhttps://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/volume_shapes.py#L132 + - skew - kurtosis - lower quartile - upper quartile ------------------------------------------------------------------------------ -Appendix: output +_`Appendix: output` ------------------------------------------------------------------------------ Example output data can be found on Mindboggle's `examples `_ site on osf.io. @@ -307,10 +350,6 @@ The following include outputs from most, but not all, optional arguments. **volumes_per_ants_label.csv**: *volume per ANTs label* - **thickinthehead_per_freesurfer_cortex_label.csv**: *FS cortex label thickness* - - **thickinthehead_per_ants_cortex_label.csv**: *ANTs cortex label thickness* - [left,right]_cortical_surface / **label_shapes.csv**: *per-label surface shape statistics* diff --git a/circle.yml b/circle.yml deleted file mode 100644 index cec10265f..000000000 --- a/circle.yml +++ /dev/null @@ -1,75 +0,0 @@ -## Configuration file for circleci.com continuous integration (testing) -## -## All dependencies are accounted for in the environment.yml file. -## -## Authors: -## - Arno Klein, 2016 (arno@mindboggle.info) http://binarybottle.com -## -## Copyright 2016, Mindboggle team (mindboggle.info), Apache v2.0 License - -#----------------------------------------------------------------------------- -# Customize the test machine: -#----------------------------------------------------------------------------- -machine: - # Add some environment variables - environment: - HOME: /home/ubuntu - CONDA_ROOT: ${HOME}/miniconda - ENV_NAME: mindboggle-env - ENV_PREFIX: ${CONDA_ROOT}/envs/${ENV_NAME} - python: ${ENV_PREFIX}/bin/python - vtk_cpp_tools: ${HOME}/mindboggle/vtk_cpp_tools/bin - PATH: ${ENV_PREFIX}/bin:${CONDA_ROOT}/bin:${vtk_cpp_tools}:${PATH} - SITEPKGS: ${ENV_PREFIX}/lib/python3.5/site-packages - PYTHONPATH: ${SITEPKGS}/vtk:${PYTHONPATH} - MB: ${SITEPKGS}/mindboggle - -#----------------------------------------------------------------------------- -# Customize dependencies: -#----------------------------------------------------------------------------- -dependencies: - #cache_directories: - # - $CONDA_ROOT - - pre: - - sudo apt-get update; sudo apt-get install libllvm-3.3 - - override: - - #------------------------------------------------------------------------- - # Download/install conda (if not already cached) - #------------------------------------------------------------------------- - - > - if [[ ! 
-d $CONDA_ROOT ]]; then - echo "Installing Miniconda3..."; - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && - bash Miniconda3-latest-Linux-x86_64.sh -b -p $CONDA_ROOT; - else - echo "Using cached Miniconda install"; - fi - - conda config --set show_channel_urls yes --set always_yes yes - - conda update conda conda-env - #------------------------------------------------------------------------- - # Create a Python environment (see environment.yml file): - #------------------------------------------------------------------------- - - conda env create - - source activate mindboggle-env - - conda info --envs - #------------------------------------------------------------------------- - # Install Mindboggle and its C++ code: - #------------------------------------------------------------------------- - - $python setup.py install - - mkdir $vtk_cpp_tools && cd $vtk_cpp_tools && cmake ../ && make - -#----------------------------------------------------------------------------- -# Customize test commands: -#----------------------------------------------------------------------------- -test: - override: - - $python -m "nose" --with-doctest ${MB}/thirdparty/ - - $python -m "nose" --with-doctest ${MB}/mio/ - - $python -m "nose" --with-doctest ${MB}/guts/ - - $python -m "nose" --with-doctest ${MB}/shapes/ - - $python -m "nose" --with-doctest ${MB}/features/ - #- $python -m "nose" --with-doctest --with-coverage ${MB}/evaluate/ - #- coveralls diff --git a/colors_script/calc_colormap.py b/colors_script/calc_colormap.py new file mode 100644 index 000000000..9f36c4040 --- /dev/null +++ b/colors_script/calc_colormap.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + + +import os +import argparse +import numpy as np + +from mindboggle.mio.colors import distinguishable_colors, label_adjacency_matrix + + +if __name__ == "__main__": + + description = ('calculate colormap for labeled image;' + 'calculated result is stored in output_dirname/colors.npy') + parser = argparse.ArgumentParser(description=description) + parser.add_argument('label_filename', help='path to the label image') + parser.add_argument('output_dirname', help='path to the folder storing ' + 'temporary files and result') + parser.add_argument('-v', '--verbose', action='store_true', default=False) + args = parser.parse_args() + + if not os.path.isdir(args.output_dirname): + os.makedirs(args.output_dirname) + + matrix_filename = os.path.join(args.output_dirname, 'matrix.npy') + colormap_filename = os.path.join(args.output_dirname, 'colormap.npy') + labels_filename = os.path.join(args.output_dirname, 'labels.npy') + colors_filename = os.path.join(args.output_dirname, 'colors.npy') + + if args.verbose: + print('finding adjacency maps...') + + if not os.path.isfile(matrix_filename) or \ + not os.path.isfile(labels_filename): + labels, matrix = label_adjacency_matrix(args.label_filename, + out_dir=args.output_dirname)[:2] + matrix = matrix.as_matrix()[:, 1:] + np.save(matrix_filename, matrix) + np.save(labels_filename, labels) + else: + labels = np.load(labels_filename) + matrix = np.load(matrix_filename) + + if args.verbose: + print('finding colormap...') + + if not os.path.isfile(colormap_filename): + num_colors = len(labels) + colormap = distinguishable_colors(ncolors=num_colors, + plot_colormap=False, + save_csv=False, + out_dir=args.output_dirname) + np.save(colormap_filename, colormap) + else: + colormap = np.load(colormap_filename) + + if args.verbose: + print('finding label colors') + + if not 
os.path.isfile(colors_filename): + # group_colors was never imported at the top of this script, + # so import it here before use + from mindboggle.mio.colors import group_colors + label_colors = group_colors(colormap, + args.label_filename, + IDs=labels, + adjacency_matrix=matrix, + out_dir=args.output_dirname, + plot_colors=False, + plot_graphs=False) + np.save(colors_filename, label_colors) diff --git a/colors_script/convert_to_mipav_lut.py b/colors_script/convert_to_mipav_lut.py new file mode 100644 index 000000000..1f28332cd --- /dev/null +++ b/colors_script/convert_to_mipav_lut.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import numpy as np +import argparse + +def parse_inputs(): + des = 'convert colormap to mipav lut file.' + parser = argparse.ArgumentParser(description=des) + parser.add_argument('colormap_filename') + parser.add_argument('ids_filename') + parser.add_argument('output_filename') + args = parser.parse_args() + return args + +def main(args): + + labels = np.load(args.ids_filename) + colors = np.load(args.colormap_filename) * 255 + + contents = list() + contents.append('') + contents.append('256\t# Size of LUT Arrays') + + for i in range(256): + if i in labels: + idx = np.where(labels == i)[0][0] + c = colors[idx].tolist() + else: + c = [0.0, 0.0, 0.0] + c_str = [str(cc) for cc in c] + line = '\t'.join([str(i), '1.0', *c_str]) + contents.append(line) + + with open(args.output_filename, 'w') as file: + file.write('\n'.join(contents)) + + +if __name__ == "__main__": + args = parse_inputs() + main(args) diff --git a/docs/faq.rst b/docs/faq.rst index 93d793cee..d35c25920 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -11,3 +11,4 @@ faq/citing_mindboggle faq/run_time labels + faq/singularity diff --git a/docs/faq/citing_mindboggle.rst b/docs/faq/citing_mindboggle.rst index 491fbcb26..6741ce0f4 100644 --- a/docs/faq/citing_mindboggle.rst +++ b/docs/faq/citing_mindboggle.rst @@ -9,7 +9,7 @@ When using Mindboggle please cite the primary reference: A Klein, SS Ghosh, FS Bao, J Giard, Y Hame, E Stavsky, N Lee, B Rossa, M Reuter, EC Neto, A Keshavan. 2017. **Mindboggling morphometry of human brains**. -*PLoS Computational Biology* 13(3): e1005350. `doi:10.1371/journal.pcbi.1005350 `_ +*PLoS Computational Biology* 13(3): e1005350. `doi:10.1371/journal.pcbi.1005350 `_ Contributors are listed on Mindboggle's `team page `_. diff --git a/docs/faq/singularity.rst b/docs/faq/singularity.rst new file mode 100644 index 000000000..de83f7e89 --- /dev/null +++ b/docs/faq/singularity.rst @@ -0,0 +1,27 @@ +.. _singularity: + +------------------------------------------------------------------------------ + How do I create and use a Singularity image rather than Docker? +------------------------------------------------------------------------------ + +To convert the Mindboggle Docker image to a Singularity image, run:: + + singularity pull mindboggle.img docker://nipy/mindboggle + +After the conversion, you'll need 3 mounts (data, working directory, and home directory) for it to work. 
Here is an example of the full command +together with some of the optional arguments (thanks, Satra!), using the same +environment variables as in the `README `_:: + + singularity run \ + -B $HOST:$DOCK:ro \ + -B $PWD:$DOCK \ + -B $PWD/jovyan:/home/jovyan \ + -e mindboggle.img \ + $DOCK/example_mri_data/T1.nii.gz \ + --id arno \ + --fs_T2image $DOCK/example_mri_data/T2.nii.gz \ + --plugin MultiProc --plugin_args "dict(n_procs=2)" \ + --fs_openmp 5 --ants_num_threads 5 --mb_num_threads 10 + + diff --git a/docs/mindboggle_tutorial.ipynb b/docs/mindboggle_tutorial.ipynb index ffcf7c211..b600bd224 100644 --- a/docs/mindboggle_tutorial.ipynb +++ b/docs/mindboggle_tutorial.ipynb @@ -2,56 +2,36 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "\n", - "*This jupyter notebook provides a tutorial for [Mindboggle](http://mindboggle.info), and assumes that a docker container has been installed ``[1]`` and that the notebook was run within the container ``[2]``. Figures are taken from the primary [Mindboggle article](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005350#sec007).* \n", - "\n", - "   ``[1] docker run --rm -ti -v /:/root/data -p 8888:8888 --entrypoint /bin/bash bids/mindboggle;`` \n", - "   ``[2] jupyter notebook --ip 0.0.0.0`` \n", - "   Written by Arno Klein (CC-BY)" + "\n", + "This jupyter notebook provides a tutorial for the Mindboggle brain image analysis software.
    Website: http://mindboggle.info
    Reference: http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005350#sec007" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Mindboggle tutorial\n", "\n", - "1. [Introduction](#introduction)\n", - "2. [Mindboggle processing steps](#processing)\n", - "3. [Run \"mindboggle --help\" on the command line](#help)\n", - "4. [Run \"mindboggle\" command with example data](#command)\n", - "5. [Mindboggle Python library](#library)\n", - "6. Run individual functions:\n", - " - [Example 1: Compute histogram values from a nibabel-readable image](#example1)\n", - " - [Example 2: Compute statistical measures across vertices per label](#example2)\n", - " - [Example 3: Decimate a left postcentral pial surface](#example3)\n", - " - [Example 4: Relabel a VTK surface](#example4)\n", - " - [Example 5: Segment deep vertices of surface into contiguous regions](#example5)\n", - " - [Example 6: Extract sulcus label borders](#example6)" + "1. [Mindboggle processing steps](#processing)\n", + "2. [Run \"mindboggle --help\" on the command line](#help)\n", + "3. [Mindboggle on the command line](#command)\n", + "4. [Mindboggle Python library](#library)\n", + "5. [Run individual functions](#functions)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "\n", "\n", - "## Mindboggle processing steps\n", + "# Mindboggle processing steps\n", "\n", - "\n", + "\n", "\n", - "### Output\n", + "## Output\n", "Mindboggle takes in (FreeSurfer- and optionally ANTs-) preprocessed T1-weighted MRI data, and outputs nifti volumes, vtk surfaces, and csv tables containing label, feature, and shape information for further analysis:\n", "\n", "- **labels/**: *integer-labeled vtk surfaces and nifti volumes*\n", @@ -59,53 +39,128 @@ "- **shapes/**: *float shape value for every point on vtk surfaces*\n", "- **tables/**: *csv tables of shape values for every label/feature/vertex*\n", "\n", - "### Processing steps\n", + "
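For example, once Mindboggle has finished, any of these output tables can be loaded for further analysis. A small sketch with pandas, assuming the default output folder inside the Docker container and the example subject ID `arno` (both paths are hypothetical here):

```python
import pandas as pd

# hypothetical default location of one per-label output table
tables = '/home/jovyan/work/mindboggled/arno/tables/'
df = pd.read_csv(tables + 'left_cortical_surface/label_shapes.csv')
print(df.head())  # per-label surface shape statistics
```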

" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Processing steps\n", "\n", - "1. **Combine FreeSurfer and ANTs gray/white segmentations.**\n", - "\n", - "2. **Fill hybrid segmentation with (FreeSurfer- or ANTs-registered) labels.**\n", - "3. **Compute volume shape measures for each labeled region:**\n", - " - **volume** and \"thickinthehead\" **cortical thickness**\n", - "\n", - "4. **Compute surface shape measures for every cortical mesh vertex** (in addition to FreeSurfer's convexity and thickness)\n", - " - **surface area**\n", - "\n", - " - **geodesic depth** and **travel depth**\n", - "\n", - " - **mean curvature**\n", - "\n", - "5. **Extract cortical surface features**:\n", - " - **folds** (left and upper right, with manual labels) \n", - " - **sulci** (lower right)\n", - "\n", - " - **fundi**\n", - "\n", - "6. **For each cortical surface label/sulcus, compute**:\n", - " - **area**\n", - " - **mean coordinates** (in native and in MNI152 space)\n", - " - **Zernike moments**\n", - " - **Laplace-Beltrami spectrum** (2nd, 3rd, and 9th spectral components shown for two brains)\n", - " \n", - "7. **Compute statistics for each shape measure across vertices for each label/feature**:\n", + "### 1. Combine FreeSurfer and ANTs gray/white segmentations:\n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Fill hybrid segmentation with (FreeSurfer- or ANTs-registered) labels.\n", + "### 3. Compute volume shape measures for each labeled region:\n", + "        \n", + "volume\n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Compute surface shape measures for every cortical mesh vertex\n", + "        \n", + "surface area\n", + "
\n", + "\n", + "

\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "        \n", + "geodesic depth and travel depth\n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "        \n", + "mean curvature\n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. Extract cortical surface features:\n", + "        \n", + "folds (left and upper right, with manual labels) \n", + "
\n", + "        \n", + "sulci (lower right)\n", + "
\n", + "\n", + "\n", + "        \n", + "fundi\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 6. For each cortical surface label/sulcus, compute:\n", + "        \n", + "Zernike moments\n", + "
\n", + "        \n", + "Laplace-Beltrami spectrum (2nd, 3rd, and 9th spectral components shown for two brains):\n", + "
\n", + "
\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 7. Compute statistics for each shape measure across vertices for each label/feature:\n", " - median\n", " - median absolute deviation\n", " - mean\n", " - standard deviation\n", " - skew\n", " - kurtosis\n", - " - lower and upper quartiles" + " - lower and upper quartiles\n", + "
" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "\n", "\n", - "## Run \"mindboggle --help\" on the command line\n", + "# Run \"mindboggle --help\" on the command line\n", "\n", "First, let's see what command-line options Mindboggle provides:" ] @@ -114,73 +169,14 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "usage: mindboggle [-h] [-v] [-c INT] [--ants STR] [--out STR] [--working STR]\r\n", - " [--cache STR] [--no_volumes] [--no_surfaces] [--no_labels]\r\n", - " [--no_shapes] [--no_sulci] [--no_points] [--no_moments]\r\n", - " [--no_spectra] [--no_thickness] [--fundi] [--moments INT]\r\n", - " [--spectra INT] [--my_atlas STR] [--my_atlases [...]]\r\n", - " [--my_graywhite STR] [--my_transform STR] [--graph STR]\r\n", - " [--plugin PLUGIN] [--plugin_args PLUGIN_ARGS]\r\n", - " DATA\r\n", - "\r\n", - "The Mindboggle software automates shape analysis of anatomical labels and\r\n", - "features extracted from human brain MR image data (http://mindboggle.info).\r\n", - "Example: mindboggle $HOME/freesurfer/subjects/arno --ants\r\n", - "$HOME/ants/subjects/arno/antsBrainSegmentation.nii.gz\r\n", - "\r\n", - "positional arguments:\r\n", - " DATA path to directory of a person's brain data,\r\n", - " usually generated by the FreeSurfer software\r\n", - "\r\n", - "optional arguments:\r\n", - " -h, --help show this help message and exit\r\n", - " -v, --version show mindboggle version number\r\n", - " -c INT, --cpus INT number of processors to use (1)\r\n", - "\r\n", - "recommended arguments:\r\n", - " --ants STR path to brain segmentation file (ex:\r\n", - " /data/arno/antsBrainSegmentation.nii.gz)\r\n", - " generated by antsCorticalThickness.sh command\r\n", - " (transforms are accessed from same directory)\r\n", - "\r\n", - "modify outputs:\r\n", - " --out STR output folder (if not $HOME/mindboggled)\r\n", - " --working STR working folder (if not $HOME/mindboggling)\r\n", - " --cache STR download folder (if not $HOME/mindboggle_cache)\r\n", - " --no_volumes no volume labels, features, or shape tables\r\n", - " --no_surfaces no surface labels, features, or shape tables\r\n", - " --no_labels no surface or volume labels\r\n", - " --no_shapes no surface or volume shape measures\r\n", - " --no_sulci no sulci from labeled folds\r\n", - " --no_points no table of per-vertex surface shape measures\r\n", - " --no_moments no Zernike moments per surface label or sulcus\r\n", - " --no_spectra no Laplace-Beltrami per surface label or sulcus\r\n", - " --no_thickness no volume-based cortical label thicknesses\r\n", - "\r\n", - "advanced settings:\r\n", - " --fundi extract, measure fundi (under evaluation, SLOW)\r\n", - " --moments INT reset order of Zernike moments (10)\r\n", - " --spectra INT reset number of Laplace-Beltrami eigenvalues (10)\r\n", - " --my_atlas STR new atlas, same labels, in MNI space (with\r\n", - " corresponding template if --ants is set)\r\n", - " --my_atlases [ ...] 
extra atlas(es) in MNI space with label numbers\r\n", - " from return_numbers_names_colors()\r\n", - " --my_graywhite STR new gray/white matter file (ex: edited Mindboggle\r\n", - " output); still call --ants for transforms\r\n", - " --my_transform STR different ITK affine transform to MNI space (if\r\n", - " different template used to get --ants output)\r\n", - " --graph STR plot workflow: \"hier\", \"exec\" (need graphviz)\r\n", - " --plugin PLUGIN nipype plugin (see nipype documentation)\r\n", - " --plugin_args PLUGIN_ARGS plugin arguments (see nipype documentation)\r\n" + "/bin/sh: mindboggle: command not found\r\n" ] } ], @@ -190,468 +186,265 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "\n", "\n", - "## Run \"mindboggle\" command with example data\n", + "# Mindboggle on the command line\n", "\n", - "In the following example, we will run Mindboggle on FreeSurfer (but not ANTs) data to get shape measures such as curvature, depth, and area for cortical surface labels, and to make it quick, we will turn off volume labels and volume shape measures, sulci, and surface-based Zernike moments and Laplace-Beltrami spectra." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Traceback (most recent call last):\n", - " File \"/opt/conda/bin/mindboggle\", line 193, in \n", - " raise IOError(\"Please provide correct path to DATA.\")\n", - "OSError: Please provide correct path to DATA.\n" - ] - } - ], - "source": [ - "%%bash\n", - "HOST=/root/data/Users/arno;\n", - "FREESURFER_SUBJECT=$HOST/mindboggle_input_example/freesurfer/subjects/arno;\n", - "ANTS_SUBJECT=$HOST/mindboggle_input_example/ants/subjects/arno;\n", - "MINDBOGGLING=$HOST/mindboggling;\n", - "MINDBOGGLED=$HOST/mindboggled;\n", + "The following command computes shape measures for cortical surface labels and sulci from [example FreeSurfer and ANTs data](https://osf.io/3xfb8/?action=download&version=1). Without adding more restrictive arguments, this command takes an hour or two to run.\n", "\n", - "mindboggle $FREESURFER_SUBJECT --working $MINDBOGGLING --out $MINDBOGGLED \\\n", - " --no_volumes --no_sulci --no_points --no_moments --no_spectra" + "mindboggle /home/jovyan/work/mindboggle_input_example/freesurfer/subjects/arno
", + "     --ants /home/jovyan/work/mindboggle_input_example/ants/subjects/arno/antsBrainSegmentation.nii.gz
", + "     --out /home/jovyan/work/mindboggled
", + "     --roygbiv


" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "\n", "\n", - "## Mindboggle Python library\n", - "\n", - "Rather than call Mindboggle from the command line, we can also call individual Python functions within the Mindboggle library, which includes the following files:\n", - "\n", - " - mindboggle/mindboggle/\n", - " - **mio**/ *-- input/output functions*\n", - " - **[colors](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/colors.py)**.py *-- colormap-related functions*\n", - " - **[convert_volumes](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/convert_volumes.py)**.py *-- read/write nifti volume files*\n", - " - **[fetch_data](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/fetch_data.py)**.py *-- fetch data from a URL or from third party software*\n", - " - **[labels](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/labels.py)**.py *-- information about labeling protocols*\n", - " - **[plots](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/plots.py)**.py *-- plot functions*\n", - " - **[tables](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/tables.py)**.py *-- read/write tables*\n", - " - **[vtks](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/vtks.py)**.py *-- read/write VTK surface files*\n", - " - **guts**/ *-- the \"guts\" underlying feature extraction and labeling code*\n", - " - **[compute](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/compute.py)**.py *-- compute distances, etc.*\n", - " - **[graph](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/graph.py)**.py *-- graph operations*\n", - " - **[kernels](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/kernels.py)**.py *-- kernels for graph operations*\n", - " - **[mesh](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/mesh.py)**.py *-- operate on surface mesh vertices*\n", - " - **[paths](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/paths.py)**.py *-- connect surface mesh vertices*\n", - " - **[rebound](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/rebound.py)**.py *-- adjust label borders on a surface mesh*\n", - " - **[relabel](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/relabel.py)**.py *-- relabel surface or volume files*\n", - " - **[segment](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py)**.py *-- segment a surface mesh*\n", - " - **features**/ *-- higher-level feature extraction (folds, fundi, sulci)*\n", - " - **[folds](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/folds.py)**.py *-- extract surface folds*\n", - " - **[fundi](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/fundi.py)**.py *-- extract fundus curves from folds*\n", - " - **[sulci](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/sulci.py)**.py *-- extract sulci from folds*\n", - " - **shapes**/ *-- shape measurement functions\n", - " - **[surface_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/surface_shapes.py)**.py *-- compute surface shapes (calls C++ library below)*\n", - " - **[volume_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/volume_shapes.py)**.py *-- compute volumes and thicknesses*\n", - " - **[laplace_beltrami](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/laplace_beltrami.py)**.py *-- compute a 
Laplace-Beltrami spectrum*\n", - " - **[zernike/zernike](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/zernike/zernike.py)**.py *-- compute Zernike moments of a collection of vertices*\n", - " - **[likelihood](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/likelihood.py)**.py *-- compute (fundus) likelihood values*\n", + "# Mindboggle Python library\n", + "\n", + "Rather than call Mindboggle from the command line, we can also call individual Python functions within the Mindboggle library, which includes the following files in mindboggle/mindboggle/:\n", + "\n", + "- **mio**/ *-- input/output functions*\n", + " - **[colors](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/colors.py)**.py *-- colormap-related functions*\n", + " - **[convert_volumes](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/convert_volumes.py)**.py *-- read/write nifti volume files*\n", + " - **[fetch_data](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/fetch_data.py)**.py *-- fetch data from a URL or from third party software*\n", + " - **[labels](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/labels.py)**.py *-- information about labeling protocols*\n", + " - **[plots](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/plots.py)**.py *-- plot functions*\n", + " - **[tables](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/tables.py)**.py *-- read/write tables*\n", + " - **[vtks](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/vtks.py)**.py *-- read/write VTK surface files*\n", + "\n", + "- **guts**/ *-- the \"guts\" underlying feature extraction and labeling code*\n", + " - **[compute](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/compute.py)**.py *-- compute distances, etc.*\n", + " - **[graph](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/graph.py)**.py *-- graph operations*\n", + " - **[kernels](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/kernels.py)**.py *-- kernels for graph operations*\n", + " - **[mesh](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/mesh.py)**.py *-- operate on surface mesh vertices*\n", + " - **[paths](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/paths.py)**.py *-- connect surface mesh vertices*\n", + " - **[rebound](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/rebound.py)**.py *-- adjust label borders on a surface mesh*\n", + " - **[relabel](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/relabel.py)**.py *-- relabel surface or volume files*\n", + " - **[segment](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py)**.py *-- segment a surface mesh*\n", + "\n", + "- **shapes**/ *-- shape measurement functions\n", + " - **[surface_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/surface_shapes.py)**.py *-- compute surface shapes (calls C++ library below)*\n", + " - **[volume_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/volume_shapes.py)**.py *-- compute volumes and thicknesses*\n", + " - **[laplace_beltrami](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/laplace_beltrami.py)**.py *-- compute a Laplace-Beltrami spectrum*\n", + " - **[zernike/zernike](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/zernike/zernike.py)**.py *-- compute Zernike moments of a collection of vertices*\n", + " - 
**[likelihood](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/likelihood.py)**.py *-- compute (fundus) likelihood values*\n", + "\n", + "- **features**/ *-- higher-level feature extraction (folds, fundi, sulci)*\n", + " - **[folds](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/folds.py)**.py *-- extract surface folds*\n", + " - **[fundi](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/fundi.py)**.py *-- extract fundus curves from folds*\n", + " - **[sulci](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/sulci.py)**.py *-- extract sulci from folds*\n", + "\n", + "\n", + "
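To see how these modules fit together, here is a small sketch (the input file names are hypothetical) that reads per-vertex values and labels with `mio.vtks` and summarizes the values per label with `guts.compute.stats_per_label`:

```python
from mindboggle.mio.vtks import read_scalars
from mindboggle.guts.compute import stats_per_label

# hypothetical per-vertex scalar (mean curvature) and label files
values, name = read_scalars('lh.mean_curvature.vtk', True, True)
labels, name = read_scalars('lh.freesurfer_labels.vtk')

# statistics of the scalar within each label, ignoring background label -1
medians, mads, means, sdevs, skews, kurts, lower_quarts, upper_quarts, \
    label_list = stats_per_label(values, labels, exclude_labels=[-1])
```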
\n", + "
" ] }, { "cell_type": "markdown", "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "source": [ "\n", "\n", - "## Run individual functions\n", + "# Run individual functions\n", + "\n", + "Let's run some functions within Mindboggle's Python library. The following code snippets are adapted from the above files' docstrings.\n", "\n", - "Let's run some functions within Mindboggle's Python library. The following examples are excerpts from the above files' docstrings." + "## Example: Compute statistics of depth measures in sulcus folds\n", + "### Measure travel depth for every vertex of a brain's left hemisphere\n", + "Convert a FreeSurfer surface file to VTK format:" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": { - "deletable": true, - "editable": true + "collapsed": true }, + "outputs": [], "source": [ - "\n", - "### Example 1: Compute histogram values from a nibabel-readable image.\n", - "\n", - "**[compute_image_histogram](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/compute.py#L1088)** (infile, nbins=100, threshold=0.0)\n", - "\n", - " Parameters\n", - " ----------\n", - " infile : string\n", - " input nibabel-readable image file name\n", - " nbins : integer\n", - " number of bins\n", - " threshold : float\n", - " remove values lower than threshold\n", - "\n", - " Returns\n", - " -------\n", - " histogram_values : numpy array\n", - " histogram bin values" + "from mindboggle.mio.vtks import freesurfer_surface_to_vtk\n", + "subject_path = '/home/jovyan/work/mindboggle_input_example/freesurfer/subjects/arno/'\n", + "surface_file = freesurfer_surface_to_vtk(surface_file=subject_path + 'surf/lh.pial', \n", + " orig_file=subject_path + 'mri/orig.mgz',\n", + " output_vtk='lh.pial.vtk')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Compute travel_depth for every vertex of the mesh in the VTK file:" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, - "outputs": [ - { - "data": { - "text/plain": [ - "array([35, 19, 19, 14, 17, 12, 5, 8, 7, 5, 7, 6, 3, 4, 0, 0, 3,\n", - " 5, 3, 7, 1, 1, 0, 3, 0, 4, 3, 0, 2, 3, 3, 0, 1, 1,\n", - " 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 3, 1, 1, 2, 1, 0, 3,\n", - " 0, 1, 0, 2, 4, 3, 1, 2, 0, 1, 4, 1, 3, 1, 0, 0, 0,\n", - " 1, 0, 2, 4, 0, 1, 1, 1, 0, 1, 1, 2, 2, 0, 1, 0, 1,\n", - " 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1])" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "from mindboggle.guts.compute import compute_image_histogram\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "urls, fetch_data = prep_tests()\n", - "labels_file = fetch_data(urls['OASIS-30_Atropos_template'], '', '.nii.gz')\n", - "nbins = 100\n", - "threshold = 0.5\n", - "histogram_values = compute_image_histogram(labels_file, nbins, threshold)\n", - "histogram_values" + "import os\n", + "from mindboggle.shapes.surface_shapes import travel_depth\n", + "from mindboggle.mio.vtks import read_scalars\n", + "ccode_path = '/opt/vtk_cpp_tools'\n", + "command = os.path.join(ccode_path, 'travel_depth', 'TravelDepthMain')\n", + "depth_file = travel_depth(command=command,\n", + " surface_file=surface_file,\n", + " verbose=True)\n", + "depths, name = read_scalars(depth_file)" ] }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot the depth 
values in 3-D: UNDER CONSTRUCTION" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { - "deletable": true, - "editable": true + "collapsed": true }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import nilearn.plotting\n", + "depths_array = np.asarray(depths)\n", + "fs_surf_mesh = subject_path + 'surf/lh.pial'\n", + "nilearn.plotting.plot_surf(surf_mesh=fs_surf_mesh, surf_map=depths_array,\n", + " bg_map=None, hemi='left', view='lateral',\n", + " cmap=None, colorbar=True,\n", + " avg_method='mean', threshold=None,\n", + " alpha='auto', bg_on_data=False, darkness=1,\n", + " vmin=None, vmax=None, cbar_vmin=None, cbar_vmax=None,\n", + " title=None, output_file=None, axes=None, figure=None)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ - "Plot histograms:" + "### Extract folds based on depth and curvature\n", + "Plot a histogram of the depth values:" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAESCAYAAADtzi4UAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAHbZJREFUeJzt3XucXWV97/HPlyQUJCggQxq5OIpRG+lpApF7cQwCUWxF\nXiigIqnYWCstVEQtVYT2qHgK4RQilHCRFAIBIzcptxgzJOEmEwwhAhXkwOvAKzcUFFqrJPmdP55n\nk8VmT2bPzJ7Mnud836/Xfs3az1p7rd9es+e7n732Ws8oIjAzs5Fvq+EuwMzMWsOBbmZWCAe6mVkh\nHOhmZoVwoJuZFcKBbmZWCAd6m5HUJWmJpLslzZO0Y2XeuyRtlPTWStvfSbo/P+ai3HaWpA9LGi3p\nKkmLJd0r6aQG25uVt/WgpGNy23aSbpC0VNKX+6j325IWVu53Sjq8Ffuiv4Zq2/l3cu5m5k+XtHWr\ntztUaq+PFq6vU9L8Vq3PBs6B3kYk7QRcABwdEe8DbgYurCzyMeBfgVrwbg8cCxwQEX8K/EPdKo8A\nVkfEIRFxIHBDg81+MW9rKvC13PZZ4LaIOBiYKmnXzZR9IPBrSbvk+53A60JV0pZ4rTXc9hYwHWhJ\noG+h/WSF8ounvRwJ3BgR6wAi4lpg/8ofeRdwOnBYvr8R2BnYR5Ii4oW69f0WmFTr0TeYT0T8Pk++\nAXg0Tx8I3JWnFwAHNCpW0mTgIWAecHRu/jxwrKRuSTtJelTS94CZknaT9KP8iWFWXsd0STdJui1/\nytg1t39R0n35U8LeksZI+mFeb7ekbRqUVL/t6Xmd90qamtfbLWlm/lRzlqQLJfVIOjXPv1LS5bnO\n6ySNqnvOM/Mnmp9ImiTpAGAScHuueef8fH4saW6Dx0/O27slP5+ufPuhpBuB6ZJOz3U+JOmw/Nzv\nqaxjrqR3Svqn/NwWSdpfyXfzc14kqUPSCZV1ndDgd3hGfj6LJf1x3bxZkvbL04dKOkfSuLzuJZLm\nN3h+3ZLG5un5ufe+jaSr8z65RdIbJe1Zqf2SRq8vG4CI8K1NbsBXgM/Vtd0HjAPeCVyc2y4D9sjT\nhwO3A08BM3LbWcCH8/TJwP3AI6SefKPtzgPWACfm+3cBO+Tpz9bW2+Bx3wL2B7YFbs5tXcC5lWV+\nA+yYp2cB0/L05cAhpN7tNbltGukTyh8Ci0kdjk7Sm8qewHV5OfVSz6vbBt4M3AEI2A7ozu3dwEF5\n3c+Swng0sCzPv7KyH88hvVFV1/uG/HMyMLeyzrF5+lxgauX3eUxdjbfm36WApXndXfn5qm4buwB3\n5+kLgH2BscDC3PYAMDpPbwX8OXBhZVtbVda1LfBQ9fUB7AXMyW1vqf0OK48/GDg/T8/O+2rryjb/\nhdS56ATmN9gX8/O8k4HP5LZjgS8BJwF/XatzuP/2SrmNxtrJKlJwVe0CPE8K1n0k3QHsRDrsMjMi\n7gLuyodf7pF0TfXBETELmCXp3cBlkmYAFwEbIuLQvMxxSsfqH5B0FfAi8Mb8803AM73UeySwd56e\nKKmjwTJPxqZPBu8AHszTDwITgA3AskrbKaQQeDgiNgJPS9ohIn6Re3RXA89IOjMiNvRSF6T9+B5g\nUb5frW1FRGyUtDpvJyS9UplfrWcCKThrTpf0gTy9vsF2JwL7STqTFKJX1c0fFxE/B5D000p7T+R0\nA06Q9EnSJ7DxuW0OcGKu6cbc9g3gCkm/zdN/BNxdW2F+jkdIOoX0BvKOBrUeKKk736/fn/cA/6z0\n/cDEiFguaTxwcX69vIX0Ce2JymOqY4mosp33Svo0MAZYAlwCnClpLnAn8G/YoPmQS3v5d+DoWjBK\nOh64PwfX4aQe9jTSIZEj8kfZ3fNjXwb+u7oySeMlbZfvPg8QEY9GRFctzCX9QZ7/X8BLOUTvBWqh\n9QFSD/81JE0ClkTEtFzTl0i92VeA6sfwjZXpJ0m9TID3sikIJuefU/IyT5MOFW0lqRN4Mdd5YUR8\nihTOBzXYf9VtPwWsAN4fEV2k3mXNq6FTCdGq+npqz/nNwGGRvq84lU2BVd3u48AZeR/vRwquqjWS\nJkhSXU3V/fQ3wPtJvVnlOpeRetSfIH2igtR7/zQpxGcAj5E+9dTq3Yr0vciRwAdJv+Oqx/M6uvI+\nmladmffNPaQ3ix/l5k8At0b63qX2CajqBWA3SaNJb6i17VyQt3MQ8HVgfUScHhGfBL4if3fQEu6h\nt5GI+GXuTd0gKYDVwOclTQBerPVII2K9pN+TDsVcrnQ8eRRwVUS8nLICgN2B8yWtJ/2u6780BbhO\n0g6kj9LfzG2XAVdL+gzpj/fZBo/7GLCwcr8bmAtcC3xb0veBv6x7zHeAOZLOAFZGxG
JJbwe2zp88\nxgLHR8RqSTeT3lg2kgLurfm5bgD+k9QzrPdI3bbnAXfnxzwC/G2DxzSyT34z/SUpfA7O7S8Av8o9\n2uqb3C3A9ZJ+QNqHl0o6O8/7MtBTWfbreR+tzs/jFVKvtWppvt1PeqOuuR04JCKez/dvym90o0nf\nH6wEpklamtf7cdIX4UtI++s136FExApJT0i6m7SfF5AOo1XNy3Xsle8vBK6S9Gek72jqXQR8n/Rm\nuia3zQZmS/qLfP88YKykk/P9O3NHwgZJjTsoZluGpOmkY66zhrsWSF+Kko6Xrxyi9Y+JiFdyj3QR\ncFxErGrysacBz0SETxG0htxDtz5JehPpFMqq0/JhgGEh6X3A2XXNh/ZxXL0d7CfpW2z6IrnZMD+T\ndKitZeePW3ncQzczK4S/iDAzK4QD3cysEFv0GPrOO+8cnZ2dW3KTZmYj3rJly56PiEbXebxGn4Ge\nT4lbDNROj5ofEd/IZwO8D/h1XnR6RCzf3Lo6Ozvp6enZ3CJmZlZHUm8X971GMz3035EuZX5Z0hhg\nqaTb87zTfQqVmVl76DPQ89VitYsbxuSbT40xM2szTX0pKmmUpOXAWmBBRNTGtvimpBWSzq9cQm5m\nZsOgqUCPiA0RMQnYDdhX0l7A3wPvJo3JsRNpZLnXkTRDabjQnnXr1rWobDMzq9ev0xYj4kXS5crT\nImJVJL8DvsemQZfqHzM7IqZExJSOjj6/pDUzswHqM9CVBsnfIU9vSxr/+PE8jCZ51LijSAMDmZnZ\nMGnmLJfxpBHyRpHeAK6PiFvzfx/pIA2fuRz4qyGs08zM+tDMWS4r2DQ+dLV96pBUZGZmA+JL/83M\nCjFihs/t7u5+dbqrq2vY6jAza1fuoZuZFcKBbmZWCAe6mVkhHOhmZoVwoJuZFcKBbmZWCAe6mVkh\nHOhmZoVwoJuZFcKBbmZWCAe6mVkhHOhmZoVwoJuZFcKBbmZWCAe6mVkhHOhmZoVwoJuZFcKBbmZW\nCAe6mVkhHOhmZoXoM9AlbSPpJ5IelvQzSWfn9rdJekDSk5Kuk7T10JdrZma9aaaH/jtgakT8CTAJ\nmCZpf+A7wPkR8Q7gBeCkoSvTzMz60megR/Jyvjsm3wKYCszP7XOAo4akQjMza0pTx9AljZK0HFgL\nLAB+AbwYEevzIs8Cu/by2BmSeiT1rFu3rhU1m5lZA00FekRsiIhJwG7AvsC7m91ARMyOiCkRMaWj\no2OAZZqZWV/6dZZLRLwILAIOAHaQNDrP2g14rsW1mZlZPzRzlkuHpB3y9LbAYcBjpGA/Ji92InDz\nUBVpZmZ9G933IowH5kgaRXoDuD4ibpX0KDBP0v8EfgpcPoR1mplZH/oM9IhYAUxu0P4U6Xi6mZm1\ngWZ66G2tu7v71emurq5hq8PMbLj50n8zs0I40M3MCuFANzMrhAPdzKwQDnQzs0I40M3MCuFANzMr\nhAPdzKwQDnQzs0I40M3MCuFANzMrhAPdzKwQDnQzs0I40M3MCuFANzMrhAPdzKwQDnQzs0I40M3M\nCuFANzMrhAPdzKwQfQa6pN0lLZL0qKSfSTolt58l6TlJy/PtQ0NfrpmZ9WZ0E8usB06LiIckbQ8s\nk7Qgzzs/Is4duvLMzKxZfQZ6RKwCVuXplyQ9Buw61IWZmVn/9OsYuqROYDLwQG46WdIKSVdI2rHF\ntZmZWT80HeiSxgI/AE6NiN8AFwN7ApNIPfjzenncDEk9knrWrVvXgpLNzKyRpgJd0hhSmM+NiBsA\nImJNRGyIiI3ApcC+jR4bEbMjYkpETOno6GhV3WZmVqeZs1wEXA48FhEzK+3jK4t9FFjZ+vLMzKxZ\nzZzlchBwAvCIpOW57QzgeEmTgACeBj43JBWamVlTmjnLZSmgBrNua305ZmY2UL5S1MysEA50M7NC\nONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cys\nEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NC9BnoknaXtEjS\no5J+JumU3L6TpAWSnsg/dxz6cs3MrDfN9NDXA6dFxERgf+ALkiYCXwUWRsQEYGG+b2Zmw6TPQI+I\nVRHxUJ5+CXgM2BX4CDAnLzYHOGqoijQzs7716xi6pE5gMvAAMC4iVuVZq4FxvTxmhqQeST3r1q0b\nRKlmZrY5TQe6pLHAD4BTI+I31XkREUA0elxEzI6IKRExpaOjY1DFmplZ75oKdEljSGE+NyJuyM1r\nJI3P88cDa4emRDMza0YzZ7kIuBx4LCJmVmbdApyYp08Ebm59eWZm1qzRTSxzEHAC8Iik5bntDOAc\n4HpJJwHPAB8fmhLNzKwZfQZ6RCwF1MvsQ1tbjpmZDZSvFDUzK4QD3cysEA50M7NCONDNzArhQDcz\nK4QD3cysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDN\nzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK0SfgS7pCklrJa2stJ0l6TlJy/PtQ0NbppmZ9aWZ\nHvqVwLQG7edHxKR8u621ZZmZWX/1GegRsRj41RaoxczMBmEwx9BPlrQiH5LZsbeFJM2Q1COpZ926\ndYPYnJmZbc5AA/1iYE9gErAKOK+3BSNidkRMiYgpHR0dA9ycmZn1ZUCBHhFrImJDRGwELgX2bW1Z\nZmbWXwMKdEnjK3c/CqzsbVkzM9syRve1gKRrgS5gZ0nPAt8AuiRNAgJ4GvjcENZoZmZN6DPQI+L4\nBs2XD0EtZmY2CL5S1MysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cys\nEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArhQDczK4QD3cysEA50M7NCONDNzArR5/8U\n/f9Jd3f3q9NdXV3DVoeZ2UC4h25mVggHuplZIfoMdElXSForaWWlbSdJCyQ9kX/uOLRlmplZX5rp\noV8JTKtr+yqwMCImAAvzfTMzG0Z9BnpELAZ+Vdf8EWBOnp4DHNXiuszMrJ8Gegx9XESsytOrgXG9\nLShphqQeST3r1q0b4ObMzKwvg/5SNCICiM3Mnx0RUyJiSkdHx2A3Z2ZmvRhooK+RNB4g/1zbupLM\nzGwgBhrotwAn5ukTgZtbU46ZmQ1UM6ctXgvcB7xL0rOSTgLOAQ6T9ATwgXzfzMyGUZ+X/kfE8b3M\nOrTFtZiZ2SD4SlEzs0I40M3MCuFANzMrhAPdzKwQDnQzs0I40M3MCuFANzMrhAPdzKwQDnQzs0I4\n0M3MCuFANzMrhAPdzKwQfQ7OVYLu7u5Xp7u6uoatDjOzoeQeuplZIRzoZmaFcKCbmRXCgW5mVggH\nuplZIRzoZmaFcKCbmRXCgW5mVohBXVgk6
[remainder of base64-encoded PNG data (grayscale-value histogram output removed by this diff) omitted]",
 -      "text/plain": [
 -       ""
 -      ]
 -     },
 -     "metadata": {},
 -     "output_type": "display_data"
 -    }
 -   ],
 +   "outputs": [],
    "source": [
 +    "%matplotlib inline\n",
     "from mindboggle.mio.plots import histograms_of_lists\n",
 -    "columns = [histogram_values]\n",
 -    "column_name = 'OASIS-30_Atropos_template grayscale values'\n",
 -    "ignore_columns = []\n",
 -    "nbins = 100\n",
 -    "axis_limits = []\n",
 -    "titles = ['OASIS-30_Atropos_template grayscale values']\n",
 -    "histograms_of_lists(columns, column_name, ignore_columns, nbins, axis_limits, titles)"
 +    "histograms_of_lists(columns=[depths],\n",
 +    "                    column_name='Depth values',\n",
 +    "                    ignore_columns=[],\n",
 +    "                    nbins=100,\n",
 +    "                    axis_limits=[],\n",
 +    "                    titles='depth values')"
    ]
   },
   {
    "cell_type": "markdown",
 -   "metadata": {
 -    "deletable": true,
 -    "editable": true
 -   },
 +   "metadata": {},
    "source": [
 -    "\n",
 -    "### Example 2: Compute statistical measures across vertices per label.\n",
 -    "\n",
 -    "**[stats_per_label](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/compute.py#L716)** (values, labels, include_labels=[], exclude_labels=[], weights=[], precision=1)\n",
 -    "\n",
 -    "    When computing statistical measures across vertices per label, \n",
 -    "    you can optionally use weights (such as surface area per vertex),\n",
 -    "    to compute, for example, a surface area-weighted mean:\n",
 -    "\n",
 -    "    average value = sum(a_i * v_i) / total_surface_area,\n",
 -    "    where *a_i* and *v_i* are the area and value for each vertex *i*.\n",
 -    "\n",
 -    "    Reference:\n",
 -    "    Weighted skewness and kurtosis unbiased by sample size\n",
 -    "    Lorenzo Rimoldini, arXiv:1304.6564 (2013)\n",
 -    "    http://arxiv.org/abs/1304.6564\n",
 -    "\n",
 -    "    Parameters\n",
 -    "    ----------\n",
 -    "    values : numpy array of individual or lists of integers or floats\n",
 -    "        values for all vertices\n",
 -    "    labels : list or array of integers\n",
 -    "        label for each value\n",
 -    "    include_labels : list of integers\n",
 -    "        labels to include\n",
 -    "    exclude_labels : list of integers\n",
 -    "        labels to be excluded\n",
 -    "    weights : numpy array of floats\n",
 -    "        weights to compute weighted statistical measures\n",
 -    "    precision : integer\n",
 -    "        number of decimal places to consider weights\n",
 -    "\n",
 -    "    Returns\n",
 -    "    -------\n",
 -    "    medians : list of floats\n",
 -    "        median for each label\n",
 -    "    mads : list of floats\n",
 -    "        median absolute deviation for each label\n",
 -    "    means : list of floats\n",
 -    "        mean for each label\n",
 -    "    sdevs : list of floats\n",
 -    "        standard deviation for each label\n",
 -    "    skews : list 
of floats\n", - " skew for each label\n", - " kurts : list of floats\n", - " kurtosis value for each label\n", - " lower_quarts : list of floats\n", - " lower quartile for each label\n", - " upper_quarts : list of floats\n", - " upper quartile for each label\n", - " label_list : list of integers\n", - " list of unique labels" + "Find a depth threshold to extract folds from the surface:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, - "outputs": [ - { - "ename": "SyntaxError", - "evalue": "invalid syntax (, line 20)", - "traceback": [ - "\u001b[0;36m File \u001b[0;32m\"\"\u001b[0;36m, line \u001b[0;32m20\u001b[0m\n\u001b[0;31m [np.float(\"{0}:.{1}f {2}:.{1}f {3}:.{1}f {4}:.{1}f\".format(medians[i], 5, mads[i], means[i], kurts[i]) for i in range(len(mads))]\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n" - ], - "output_type": "error" - } - ], + "outputs": [], "source": [ - "from IPython.core.interactiveshell import InteractiveShell\n", - "InteractiveShell.ast_node_interactivity = \"all\"\n", - "\n", - "import numpy as np\n", - "from mindboggle.mio.vtks import read_scalars\n", - "from mindboggle.guts.compute import stats_per_label\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "urls, fetch_data = prep_tests()\n", - "values_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')\n", - "labels_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')\n", - "area_file = fetch_data(urls['left_area'], '', '.vtk')\n", - "values, name = read_scalars(values_file, True, True)\n", - "areas, name = read_scalars(area_file, True, True)\n", - "labels, name = read_scalars(labels_file)\n", - "include_labels = []\n", - "exclude_labels = [-1]\n", - "weights = areas\n", - "precision = 1\n", - "medians, mads, means, sdevs, skews, kurts, lower_quarts, upper_quarts, label_list = stats_per_label(values, labels, include_labels, exclude_labels, weights, precision)\n", - "[np.float(\"{0}:.{1}f {2}:.{1}f {3}:.{1}f {4}:.{1}f\".format(medians[i], 5, mads[i], means[i], kurts[i]) for i in range(len(mads))]" + "from mindboggle.features.folds import find_depth_threshold\n", + "depth_threshold, bins, bin_edges = find_depth_threshold(depth_file=depth_file,\n", + " min_vertices=10000,\n", + " verbose=True)\n", + "depth_threshold" ] }, { "cell_type": "markdown", - "metadata": { - "collapsed": true, - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "\n", - "### Example 3: Decimate a left postcentral pial surface.\n", - "\n", - "**[stats_per_label](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/mesh.py#L750)** (points, faces, reduction=0.75, smooth_steps=25, scalars=[], save_vtk=False, output_vtk='')\n", - "\n", - " Decimate vtk triangular mesh with vtk.vtkDecimatePro.\n", - "\n", - " Parameters\n", - " ----------\n", - " points : list of lists of floats\n", - " each element is a list of 3-D coordinates of a vertex on a surface mesh\n", - " faces : list of lists of integers\n", - " each element is list of 3 indices of vertices that form a face\n", - " on a surface mesh\n", - " reduction : float\n", - " fraction of mesh faces to remove\n", - " smooth_steps : integer\n", - " number of smoothing steps\n", - " scalars : list of integers or floats\n", - " optional scalars for output VTK file\n", - " save_vtk : bool\n", - " output decimated vtk file?\n", - " output_vtk : string\n", - " 
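
A minimal sketch of the surface area-weighted mean defined in the stats_per_label docstring above, average value = sum(a_i * v_i) / total_surface_area, using toy numpy arrays. This is only an illustration of the formula, not Mindboggle's implementation (which lives in mindboggle/guts/compute.py):

```python
# Area-weighted mean per label, as in the stats_per_label docstring:
#     average value = sum(a_i * v_i) / total_surface_area
# The arrays below are invented for illustration only.
import numpy as np

values = np.array([2.0, 4.0, 6.0, 8.0])  # one shape value per vertex
areas = np.array([1.0, 1.0, 2.0, 4.0])   # surface area per vertex
labels = np.array([1, 1, 2, 2])          # label assigned to each vertex

for label in np.unique(labels):
    mask = labels == label
    # weight each vertex value by its area, normalize by the label's total area
    weighted_mean = (areas[mask] * values[mask]).sum() / areas[mask].sum()
    print(label, weighted_mean)  # label 1 -> 3.0, label 2 -> 44/6 ~= 7.33
```
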
output decimated vtk file name\n", - "\n", - " Returns\n", - " -------\n", - " points : list of lists of floats\n", - " decimated points\n", - " faces : list of lists of integers\n", - " decimated faces\n", - " scalars : list of integers or floats\n", - " scalars for output VTK file\n", - " output_vtk : string\n", - " output decimated vtk file" + "Extract folds with the depth threshold:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, - "outputs": [ - { - "data": { - "text/plain": [ - "(290134, 145066)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "from mindboggle.guts.mesh import decimate\n", - "from mindboggle.mio.vtks import read_vtk\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "urls, fetch_data = prep_tests()\n", - "input_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')\n", - "points, f1, f2, faces, scalars, f3, f4, f5 = read_vtk(input_vtk)\n", - "reduction = 0.5\n", - "smooth_steps = 25\n", - "save_vtk = True\n", - "output_vtk = 'decimate.vtk'\n", - "points2, faces2, scalars, output_vtk = decimate(points, faces, reduction, smooth_steps, scalars, save_vtk, output_vtk)\n", - "(len(points), len(points2))\n", - "(len(faces), len(faces2))" + "from mindboggle.features.folds import extract_folds\n", + "folds, n_folds, folds_file = extract_folds(depth_file=depth_file,\n", + " depth_threshold=depth_threshold,\n", + " min_fold_size=50,\n", + " save_file=True,\n", + " output_file='folds.vtk',\n", + " background_value=-1,\n", + " verbose=True)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "View the decimated surface:" + "Remove all vertices but the folds:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ - "from mindboggle.mio.plots import plot_surfaces\n", - "plot_surfaces('decimate.vtk')" + "from mindboggle.mio.vtks import rewrite_scalars\n", + "rewrite_scalars(input_vtk=folds_file,\n", + " output_vtk='rewrite_scalars.vtk',\n", + " new_scalars=[folds],\n", + " new_scalar_names=['folds'],\n", + " filter_scalars=folds,\n", + " background_value=-1)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "\n", - "### Example 4: Relabel a VTK surface.\n", - "\n", - "**[relabel_surface](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/relabel.py#L300)** (vtk_file, hemi='', old_labels=[], new_labels=[], erase_remaining=True, erase_labels=[], erase_value=-1, output_file='')\n", - "\n", - " Parameters\n", - " ----------\n", - " vtk_file : string\n", - " input labeled VTK file\n", - " hemi : string\n", - " hemisphere ('lh' or 'rh' or '')\n", - " if set, add 1000 to left and 2000 to right hemisphere labels;\n", - " old_labels : list of integers\n", - " old labels (empty list if labels drawn from vtk scalars);\n", - " may be used in conjunction with hemi\n", - " new_labels : list of integers\n", - " new labels (empty list if labels drawn from vtk scalars);\n", - " may be used in conjunction with hemi\n", - " erase_remaining : bool\n", - " set all values not in old_labels to erase_value?\n", - " erase_labels : list of integers\n", - " values to erase (set to erase_value)\n", - " erase_value : integer\n", - " 
set vertices with labels in erase_labels to this value\n", - " output_file : string\n", - " new vtk file name\n", - "\n", - " Returns\n", - " -------\n", - " output_file : string\n", - " new vtk file name" + "Plot the folds in 3-D: UNDER CONSTRUCTION" ] }, { @@ -659,136 +452,82 @@ "execution_count": null, "metadata": { "collapsed": true, - "deletable": true, - "editable": true + "scrolled": false }, "outputs": [], "source": [ - "import numpy as np\n", - "from mindboggle.guts.relabel import relabel_surface\n", - "from mindboggle.mio.vtks import read_scalars\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "urls, fetch_data = prep_tests()\n", - "vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')\n", - "hemi = 'lh'\n", - "old_labels = [1003,1009,1030]\n", - "new_labels = [0,500,1000]\n", - "erase_remaining = True\n", - "erase_labels = [0]\n", - "erase_value = -1\n", - "output_file = 'relabel_surface.vtk'\n", - "output_file = relabel_surface(vtk_file, hemi, old_labels, new_labels,\n", - " ... erase_remaining, erase_labels, erase_value, output_file)\n", - "labels, name = read_scalars(output_file, True, True)\n", - "np.unique(labels)" + "folds_array = np.asarray(folds)\n", + "nilearn.plotting.plot_surf(surf_mesh=fs_surf_mesh, surf_map=folds_array,\n", + " bg_map=None, hemi='left', view='lateral',\n", + " cmap=None, colorbar=True,\n", + " avg_method='mean', threshold=None,\n", + " alpha='auto', bg_on_data=False, darkness=1,\n", + " vmin=None, vmax=None, cbar_vmin=None, cbar_vmax=None,\n", + " title=None, output_file=None, axes=None, figure=None)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "View relabeled surface file:" + "### Extract sulci from folds" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load a FreeSurfer .annot file and save as a VTK format file:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ - "from mindboggle.mio.plots import plot_surfaces\n", - "plot_surfaces(output_file)" + "from mindboggle.mio.vtks import freesurfer_annot_to_vtk\n", + "labels, label_file = freesurfer_annot_to_vtk(annot_file=subject_path + 'label/lh.aparc.annot',\n", + " vtk_file=surface_file,\n", + " output_vtk='lh.aparc.annot.vtk',\n", + " background_value=-1)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "\n", - "### Example 5: Segment deep vertices of surface into contiguous regions.\n", - "\n", - "**[segment_regions](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L181)** (vertices_to_segment, neighbor_lists, min_region_size=1, seed_lists=[], keep_seeding=False, spread_within_labels=False, labels=[], label_lists=[], values=[], max_steps='', background_value=-1, verbose=False)\n", - "\n", - " Parameters\n", - " ----------\n", - " vertices_to_segment : list of integers\n", - " indices to mesh vertices to be segmented\n", - " neighbor_lists : list of lists of integers\n", - " each list contains indices to neighboring vertices for each vertex\n", - " min_region_size : integer\n", - " minimum size of segmented set of vertices\n", - " seed_lists : list of lists, or empty list\n", - " each list contains indices to seed vertices to segment vertices_to_segment\n", - " keep_seeding : bool\n", - " grow from new seeds even after all seed lists have 
fully grown\n", - " spread_within_labels : bool\n", - " grow seeds only by vertices with labels in the seed labels?\n", - " labels : list of integers (required only if spread_within_labels)\n", - " label numbers for all vertices\n", - " label_lists : list of lists of integers (required only if spread_within_labels)\n", - " List of unique labels for each seed list to grow into\n", - " (If empty, set to unique labels for each seed list)\n", - " values : list of floats (default empty)\n", - " values for all vertices for use in preferentially directed segmentation\n", - " (segment in direction of lower values)\n", - " max_steps : integer (or empty string for infinity)\n", - " maximum number of segmentation steps to take for each seed list\n", - " background_value : integer or float\n", - " background value\n", - " verbose : bool\n", - " print statements?\n", - "\n", - " Returns\n", - " -------\n", - " segments : numpy array of integers\n", - " region numbers for all vertices" + "Relabel surface labels to match expected volume labels:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ - "# Example without seed lists:\n", - "import numpy as np\n", - "from mindboggle.guts.segment import segment_regions\n", - "from mindboggle.mio.vtks import read_vtk\n", - "from mindboggle.guts.mesh import find_neighbors\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "background_value = -1\n", - "urls, fetch_data = prep_tests()\n", - "depth_file = fetch_data(urls['left_travel_depth'], '', '.vtk')\n", - "f1,f2,f3, faces, depths, f4, npoints, t5 = read_vtk(depth_file, True, True)\n", - "vertices_to_segment = np.where(depths > 0.50)[0].tolist() # (sped up)\n", - "neighbor_lists = find_neighbors(faces, npoints)\n", - "segments = segment_regions(vertices_to_segment, neighbor_lists)\n", - "len_segments = [len(np.where(segments == x)[0]) for x in np.unique(segments) if x != background_value]\n", - "len_segments[0:10]" + "from mindboggle.guts.relabel import relabel_surface\n", + "from mindboggle.mio.labels import DKTprotocol\n", + "dkt = DKTprotocol()\n", + "relabel_file = relabel_surface(vtk_file=label_file,\n", + " hemi='lh', \n", + " old_labels=dkt.DKT31_numbers, \n", + " new_labels=[],\n", + " erase_remaining=True, \n", + " erase_labels=[0], \n", + " erase_value=-1, \n", + " output_file='relabeled.vtk')" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "Write results to vtk file and view:" + "Extract sulci from folds using pairs of labels in the DKT labeling protocol (SLOW):" ] }, { @@ -796,55 +535,28 @@ "execution_count": null, "metadata": { "collapsed": true, - "deletable": true, - "editable": true + "scrolled": true }, "outputs": [], "source": [ - "from mindboggle.mio.plots import plot_surfaces\n", - "from mindboggle.mio.vtks import rewrite_scalars\n", - "rewrite_scalars(depth_file, 'segment_regions_no_seeds.vtk', segments, 'segments', [], -1)\n", - "plot_surfaces('segment_regions_no_seeds.vtk')" + "from mindboggle.features.sulci import extract_sulci\n", + "sulci, n_sulci, sulci_file = extract_sulci(labels_file=relabel_file,\n", + " folds_or_file=folds,\n", + " hemi='lh',\n", + " min_boundary=10,\n", + " sulcus_names=[],\n", + " save_file=True,\n", + " output_file='sulci.vtk',\n", + " background_value=-1,\n", + " verbose=True)\n", + "n_sulci" ] }, { "cell_type": "markdown", - "metadata": { - 
"deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "\n", - "### Example 6: Extract sulcus label borders.\n", - "\n", - "**[extract_borders](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1445)** \n", - " (indices, labels, neighbor_lists,\n", - " ignore_values=[], return_label_pairs=False)\n", - "\n", - " Label borders are the set of all vertices\n", - " whose neighbors do not share the same label.\n", - "\n", - " Parameters\n", - " ----------\n", - " indices : list of integers\n", - " indices to (a subset of) vertices\n", - " labels : numpy array of integers\n", - " label numbers for all vertices\n", - " neighbor_lists : list of lists of integers\n", - " each list contains indices to neighboring vertices for each vertex\n", - " ignore_values : list of integers\n", - " integers to ignore (e.g., background)\n", - " return_label_pairs : bool\n", - " return label pairs?\n", - "\n", - " Returns\n", - " -------\n", - " border_indices : list of integers\n", - " indices to label boundary vertices\n", - " border_label_tuples : list of lists of sorted pairs of integers\n", - " sorted label pairs\n", - " unique_border_label_tuples : list of sorted pairs of integers\n", - " unique, sorted label pairs" + "Plot the sulci in 3-D: UNDER CONSTRUCTION" ] }, { @@ -852,78 +564,66 @@ "execution_count": null, "metadata": { "collapsed": true, - "deletable": true, - "editable": true + "scrolled": false }, "outputs": [], "source": [ - "from mindboggle.guts.segment import extract_borders\n", - "import numpy as np\n", - "from mindboggle.guts.mesh import find_neighbors\n", - "from mindboggle.guts.segment import extract_borders\n", - "from mindboggle.mio.vtks import read_vtk\n", - "from mindboggle.mio.fetch_data import prep_tests\n", - "urls, fetch_data = prep_tests()\n", - "label_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')\n", - "f1,f2,f3, faces, labels, f4, npoints, f5 = read_vtk(label_file, True, True)\n", - "neighbor_lists = find_neighbors(faces, npoints)\n", - "ignore_values = []\n", - "return_label_pairs = True\n", - "indices_borders, label_pairs, f1 = extract_borders(list(range(npoints)), labels, neighbor_lists, ignore_values, return_label_pairs)\n", - "indices_borders[0:10]\n", - "label_pairs[0:5]" + "sulci_array = np.asarray(sulci)\n", + "nilearn.plotting.plot_surf(surf_mesh=fs_surf_mesh, surf_map=sulci_array,\n", + " bg_map=None, hemi='left', view='lateral',\n", + " cmap=None, colorbar=True,\n", + " avg_method='mean', threshold=None,\n", + " alpha='auto', bg_on_data=False, darkness=1,\n", + " vmin=None, vmax=None, cbar_vmin=None, cbar_vmax=None,\n", + " title=None, output_file=None, axes=None, figure=None)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "Write borders on surfaces to vtk file and view:" + "### Compute statistics on sulcus depth values" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ - "from mindboggle.mio.plots import plot_surfaces\n", - "from mindboggle.mio.vtks import rewrite_scalars\n", - "IDs = -1 * np.ones(npoints)\n", - "IDs[indices_borders] = 1\n", - "rewrite_scalars(label_file, 'extract_borders.vtk', IDs, 'borders')\n", - "plot_surfaces('extract_borders.vtk')" + "from mindboggle.mio.tables import write_shape_stats\n", + "label_table, sulcus_table, fundus_table = write_shape_stats(labels_or_file=[], \n", + " 
sulci=sulci, fundi=[], affine_transform_files=[], inverse_booleans=[], \n", + " transform_format='itk', area_file='', normalize_by_area=False, \n", + " mean_curvature_file='',\n", + " travel_depth_file=depth_file, geodesic_depth_file='',\n", + " freesurfer_thickness_file='', freesurfer_curvature_file='',\n", + " freesurfer_sulc_file='',\n", + " labels_spectra=[], labels_spectra_IDs=[],\n", + " sulci_spectra=[], sulci_spectra_IDs=[],\n", + " labels_zernike=[], labels_zernike_IDs=[],\n", + " sulci_zernike=[], sulci_zernike_IDs=[],\n", + " exclude_labels=[-1], verbose=True)" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "Write just the borders to vtk file and view:" + "Show statistical summary table of sulcus depth values:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ - "rewrite_scalars(label_file, 'extract_borders_no_background.vtk', IDs, 'borders', IDs)\n", - "plot_surfaces('extract_borders_no_background.vtk')" + "pd.read_csv(sulcus_table)" ] } ], @@ -938,16 +638,16 @@ "language_info": { "codemirror_mode": { "name": "ipython", - "version": 3.0 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.6.3" } }, "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file + "nbformat_minor": 1 +} diff --git a/docs/mindboggle_tutorial_MIT_workshop.ipynb b/docs/mindboggle_tutorial_MIT_workshop.ipynb deleted file mode 100644 index 58760f194..000000000 --- a/docs/mindboggle_tutorial_MIT_workshop.ipynb +++ /dev/null @@ -1,1579 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "## Update the Docker file\n", - "\n", - "Create a new file called Docker.mindboggle.new\n", - "\n", - "```\n", - "FROM nipype/workshops:latest-complete\n", - "WORKDIR /home/jovyan/work\n", - "RUN conda install -y flask\n", - "RUN git clone https://github.com/nipy/mindboggle && cd mindboggle && python setup.py install\n", - "RUN git clone https://github.com/akeshavan/roygbiv && cd roygbiv && python setup.py install\n", - "RUN git clone https://github.com/akeshavan/nbpapaya && cd nbpapaya && git submodule update --init --recursive && python setup.py install\n", - "RUN mkdir data && cd data && curl -sSL https://osf.io/svxht/?action=download | tar xzf - -C /data --strip-components=2\n", - "\n", - "```\n", - "\n", - "### Build the docker\n", - "\n", - "```\n", - "docker build -t nipypeshop_plus -f Docker.mindboggle.new .\n", - "```\n", - "\n", - "### Start the docker\n", - "\n", - "```\n", - "cd /path/to/workshop/\n", - "docker run -ti -v $PWD:/home/jovyan/work -p 9876:8888 -p 5000:5000 nipypeshop_plus bash\n", - "```\n", - "\n", - "## Datalad\n", - "\n", - "If you don't have the raw data downloaded, and want to run mindboggle, do the following:\n", - "\n", - "```\n", - "cd /home/jovyan/work/data/ds000114\n", - "git remote add datasets http://datasets.datalad.org/workshops/nipype-2017/ds000114/.git\n", - "datalad get . 
\n", - "```\n", - "\n", - "## Try running the roygbiv webapp\n", - "\n", - "```\n", - "cd ~/work\n", - "roygbiv $PWD/data/mindboggled/sub-02\n", - "\n", - "```\n", - "\n", - "on your **chrome** browser, navigate to `localhost:5000`\n", - "\n", - "## View this notebook in a browser\n", - "\n", - "### Clone mindboggle directory\n", - "\n", - "```\n", - "cd ~/work\n", - "git clone https://github.com/nipy/mindboggle\n", - "```\n", - "\n", - "### Change directory to mindboggle/docs and start the notebook\n", - "\n", - "```\n", - "cd mindboggle/docs\n", - "jupyter notebook\n", - "```\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "








" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "This [Mindboggle](http://mindboggle.info) tutorial is a jupyter notebook that runs in a docker container:\n", - "
\n", - "\n", - "  ``$ docker run -it --rm -p 8888:8888 -p 5000:5000 -v /path/to/workshops:/home/jovyan/work nipype/workshops:latest-complete bash``
\n", - "  ``$ git clone https://github.com/nipy/mindboggle.git; cd mindboggle; python setup.py install; cd ..;``
\n", - "  ``$ jupyter notebook``\n", - "
\n", - "<-->\n", - "  -- Arno Klein and Anisha Keshavan (please refer to the [Mindboggle reference](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005350#sec007))\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "# Mindboggle tutorial\n", - "\n", - "1. [Mindboggle processing steps](#processing)\n", - "2. [Run \"mindboggle --help\" on the command line](#help)\n", - "3. [Mindboggle on the command line](#command)\n", - "4. [Mindboggle Python library](#library)\n", - "5. [Run individual functions](#functions)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "# Mindboggle processing steps\n", - "\n", - "\n", - "\n", - "## Output\n", - "Mindboggle takes in (FreeSurfer- and optionally ANTs-) preprocessed T1-weighted MRI data, and outputs nifti volumes, vtk surfaces, and csv tables containing label, feature, and shape information for further analysis:\n", - "\n", - "- **labels/**: *integer-labeled vtk surfaces and nifti volumes*\n", - "- **features/**: *integer-labeled sulci or fundi on vtk surfaces*\n", - "- **shapes/**: *float shape value for every point on vtk surfaces*\n", - "- **tables/**: *csv tables of shape values for every label/feature/vertex*\n", - "\n", - "







" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "## Processing steps\n", - "\n", - "### 1. Combine FreeSurfer and ANTs gray/white segmentations:\n", - "
\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "### 2. Fill hybrid segmentation with (FreeSurfer- or ANTs-registered) labels.\n", - "### 3. Compute volume shape measures for each labeled region:\n", - "        \n", - "volume and \"thickinthehead\" (cortical thickness)\n", - "
\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "### 4. Compute surface shape measures for every cortical mesh vertex\n", - "        \n", - "surface area\n", - "
\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "        \n", - "geodesic depth and travel depth\n", - "
\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "        \n", - "mean curvature\n", - "
\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "### 5. Extract cortical surface features:\n", - "        \n", - "folds (left and upper right, with manual labels) \n", - "
\n", - "        \n", - "sulci (lower right)\n", - "
\n", - "\n", - "\n", - "        \n", - "fundi (right)\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "
\n", - "### 6. For each cortical surface label/sulcus, compute:\n", - "        \n", - "Zernike moments\n", - "
\n", - "        \n", - "Laplace-Beltrami spectrum (2nd, 3rd, and 9th spectral components shown for two brains):\n", - "
\n", - "
\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "### 7. Compute statistics for each shape measure across vertices for each label/feature:\n", - " - median\n", - " - median absolute deviation\n", - " - mean\n", - " - standard deviation\n", - " - skew\n", - " - kurtosis\n", - " - lower and upper quartiles\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "# Run \"mindboggle --help\" on the command line\n", - "\n", - "First, let's see what command-line options Mindboggle provides:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, - "scrolled": false - }, - "outputs": [], - "source": [ - "! mindboggle --help" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "# Mindboggle on the command line\n", - "\n", - "In the following command, computes shape measures for cortical surface labels and sulci Mindboggle takes FreeSurfer (but not ANTs) data.\n", - "\n", - "mindboggle /home/jovyan/work/data/ds000114/derivatives/freesurfer/sub-01 \\\n", - "
    \n", - " --working /home/jovyan/work/data/mindboggling \\\n", - "
    \n", - " --out /home/jovyan/work/data/mindboggled \\\n", - "
    \n", - " --roygbiv\n", - "


" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "# Mindboggle Python library\n", - "\n", - "Rather than call Mindboggle from the command line, we can also call individual Python functions within the Mindboggle library, which includes the following files in mindboggle/mindboggle/:\n", - "\n", - "- **mio**/ *-- input/output functions*\n", - " - **[colors](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/colors.py)**.py *-- colormap-related functions*\n", - " - **[convert_volumes](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/convert_volumes.py)**.py *-- read/write nifti volume files*\n", - " - **[fetch_data](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/fetch_data.py)**.py *-- fetch data from a URL or from third party software*\n", - " - **[labels](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/labels.py)**.py *-- information about labeling protocols*\n", - " - **[plots](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/plots.py)**.py *-- plot functions*\n", - " - **[tables](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/tables.py)**.py *-- read/write tables*\n", - " - **[vtks](https://github.com/nipy/mindboggle/blob/master/mindboggle/mio/vtks.py)**.py *-- read/write VTK surface files*\n", - "- **guts**/ *-- the \"guts\" underlying feature extraction and labeling code*\n", - " - **[compute](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/compute.py)**.py *-- compute distances, etc.*\n", - " - **[graph](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/graph.py)**.py *-- graph operations*\n", - " - **[kernels](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/kernels.py)**.py *-- kernels for graph operations*\n", - " - **[mesh](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/mesh.py)**.py *-- operate on surface mesh vertices*\n", - " - **[paths](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/paths.py)**.py *-- connect surface mesh vertices*\n", - " - **[rebound](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/rebound.py)**.py *-- adjust label borders on a surface mesh*\n", - " - **[relabel](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/relabel.py)**.py *-- relabel surface or volume files*\n", - " - **[segment](https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py)**.py *-- segment a surface mesh*\n", - "- **shapes**/ *-- shape measurement functions\n", - " - **[surface_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/surface_shapes.py)**.py *-- compute surface shapes (calls C++ library below)*\n", - " - **[volume_shapes](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/volume_shapes.py)**.py *-- compute volumes and thicknesses*\n", - " - **[laplace_beltrami](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/laplace_beltrami.py)**.py *-- compute a Laplace-Beltrami spectrum*\n", - " - **[zernike/zernike](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/zernike/zernike.py)**.py *-- compute Zernike moments of a collection of vertices*\n", - " - **[likelihood](https://github.com/nipy/mindboggle/blob/master/mindboggle/shapes/likelihood.py)**.py *-- compute (fundus) likelihood values*\n", - "- **features**/ *-- higher-level feature extraction (folds, fundi, sulci)*\n", - " - 
**[folds](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/folds.py)**.py *-- extract surface folds*\n", - " - **[fundi](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/fundi.py)**.py *-- extract fundus curves from folds*\n", - " - **[sulci](https://github.com/nipy/mindboggle/blob/master/mindboggle/features/sulci.py)**.py *-- extract sulci from folds*\n", - "\n", - "\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": true, - "deletable": true, - "editable": true - }, - "source": [ - "\n", - "\n", - "# Run individual functions\n", - "\n", - "Let's run some functions within Mindboggle's Python library. The following examples are adapted from the above files' docstrings.\n", - "\n", - "## Example 1: Measure depth in a superior frontal gyrus\n", - "### Measure travel depth for every vertex of a brain's left hemisphere\n", - "Convert a FreeSurfer surface file to VTK format:" - ] - }, - { - "cell_type": "code", - "execution_count": 104, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [], - "source": [ - "from mindboggle.mio.vtks import freesurfer_surface_to_vtk\n", - "subject_path = '/home/jovyan/work/data/ds000114/derivatives/freesurfer/sub-01/'\n", - "surface_file = freesurfer_surface_to_vtk(surface_file=subject_path + 'surf/lh.pial', \n", - " orig_file=subject_path + 'mri/orig.mgz',\n", - " output_vtk='lh.pial.vtk')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Compute travel_depth for every vertex of the mesh in the VTK file:" - ] - }, - { - "cell_type": "code", - "execution_count": 105, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/opt/mindboggle/vtk_cpp_tools/travel_depth/TravelDepthMain lh.pial.vtk /home/jovyan/work/mindboggle/docs/lh.pial.travel_depth.vtk\n", - "170330-15:19:27,4 interface INFO:\n", - "\t stdout 2017-03-30T15:19:27.004299:Euclidean depth allocated for visible points\n", - "170330-15:19:27,6 interface INFO:\n", - "\t stdout 2017-03-30T15:19:27.004299:Geodesic propagation 1\n", - "170330-15:19:28,533 interface INFO:\n", - "\t stdout 2017-03-30T15:19:28.533030:Euclidean propagation 1\n", - "170330-15:19:29,35 interface INFO:\n", - "\t stdout 2017-03-30T15:19:29.035749:Geodesic propagation 2\n", - "170330-15:19:31,73 interface INFO:\n", - "\t stdout 2017-03-30T15:19:31.073775:Euclidean propagation 2\n", - "170330-15:19:31,577 interface INFO:\n", - "\t stdout 2017-03-30T15:19:31.577199:Geodesic propagation 3\n", - "170330-15:19:33,614 interface INFO:\n", - "\t stdout 2017-03-30T15:19:33.614722:Euclidean propagation 3\n", - "170330-15:19:34,117 interface INFO:\n", - "\t stdout 2017-03-30T15:19:34.116923:Geodesic propagation 4\n", - "170330-15:19:35,643 interface INFO:\n", - "\t stdout 2017-03-30T15:19:35.643546:Euclidean propagation 4\n", - "170330-15:19:36,146 interface INFO:\n", - "\t stdout 2017-03-30T15:19:36.146358:Geodesic propagation 5\n", - "170330-15:19:38,185 interface INFO:\n", - "\t stdout 2017-03-30T15:19:38.185267:Euclidean propagation 5\n", - "170330-15:19:38,688 interface INFO:\n", - "\t stdout 2017-03-30T15:19:38.688799:Geodesic propagation 6\n", - "170330-15:19:40,728 interface INFO:\n", - "\t stdout 2017-03-30T15:19:40.728875:Euclidean propagation 6\n", - "170330-15:19:41,232 interface INFO:\n", - "\t stdout 2017-03-30T15:19:41.232551:Geodesic propagation 7\n", - "170330-15:19:43,273 interface INFO:\n", - "\t stdout 2017-03-30T15:19:43.273044:Euclidean propagation 7\n", - "170330-15:19:43,775 interface INFO:\n", - "\t stdout 2017-03-30T15:19:43.775797:Travel depth main computation done\n", - "170330-15:19:45,304 interface INFO:\n", - "\t stdout 2017-03-30T15:19:45.303904:last propagation done\n", - "170330-15:19:45,305 interface INFO:\n", - "\t stdout 
2017-03-30T15:19:45.303904:Travel depth computed\n", - "170330-15:19:45,808 interface INFO:\n", - "\t stdout 2017-03-30T15:19:45.808331:vtk file written\n", - "170330-15:19:45,810 interface INFO:\n", - "\t stdout 2017-03-30T15:19:45.808331:Elapsed time (meshTest): 24 s\n" - ] - } - ], - "source": [ - "import os\n", - "from mindboggle.shapes.surface_shapes import travel_depth\n", - "from mindboggle.mio.vtks import read_scalars\n", - "ccode_path = '/opt/mindboggle/vtk_cpp_tools'\n", - "command = os.path.join(ccode_path, 'travel_depth', 'TravelDepthMain')\n", - "depth_file = travel_depth(command=command,\n", - " surface_file=surface_file,\n", - " verbose=True)\n", - "depths, name = read_scalars(depth_file)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Plot the depth values in 3-D:" - ] - }, - { - "cell_type": "code", - "execution_count": 106, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "doing checks /home/jovyan/.jupyter/custom/\n", - "{'/home/jovyan/work/mindboggle/docs/lh.pial.vtk': 'papaya_data/tmpfm3v7evm.vtk'}\n", - "/files/papaya_data/tmpfm3v7evm.vtk\n", - "{'/home/jovyan/work/mindboggle/docs/lh.pial.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 2, 'key': 'depth', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['depth'], 'vmax': 10, 'filename': '/files/papaya_data/tmpfv2t5y4f.csv', 'vmin': 2}} {'/files/papaya_data/tmpfm3v7evm.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 2, 'key': 'depth', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['depth'], 'vmax': 10, 'filename': '/files/papaya_data/tmpfv2t5y4f.csv', 'vmin': 2}}\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 106, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import os\n", - "import pandas as pd\n", - "from nbpapaya import Overlay\n", - "\n", - "df = pd.DataFrame(depths, columns=[\"depth\"])\n", - "df.to_csv('depths.csv', index=False)\n", - "\n", - "def getMeshOpts(vtk_file, csv_file, vmin, vmax, threshold):\n", - " cols = pd.read_csv(csv_file).columns.tolist()\n", - " MeshOpts = {}\n", - " MeshOpts[os.path.abspath(vtk_file)] = { \n", - " \"filename\": os.path.abspath(csv_file),\n", - " \"colormin\": \"#0000FF\", \n", - " \"colormax\": \"#FF0000\",\n", - " \"vmin\": vmin,\n", - " \"vmax\": vmax,\n", - " \"key\": cols[0],\n", - " \"key_options\": cols, \n", - " \"threshold\": threshold,\n", - " \"mesh_transparency\": 1,\n", - " \"mesh_visible\": True,\n", - " \"overlay_transparency\": 1\n", - " }\n", - " return MeshOpts\n", - "\n", - "MeshOpts = getMeshOpts(surface_file, \"depths.csv\" , 2,10,2)\n", - "Overlay(MeshOpts)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "### Extract folds based on depth and curvature\n", - "Plot a histogram of the depth values:" - ] - }, - { - "cell_type": "code", - "execution_count": 107, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYcAAAESCAYAAAAWtRmOAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAHDtJREFUeJzt3X2UVfV97/H3RxCjJgbU0WsBC0morXqNkvEhTaOIT2iy\nhGRpikkqyaWl16u9ye3VqE2XJhru0jyZeDV0kUhEa0AWUaEpSoiCelcVGXxGYphiIiNUxoI21hUU\n/d4/9m/idn5nns4ZOecwn9das84+3/3b+/z2HDif2Q/ntxURmJmZle1R7w6YmVnjcTiYmVnG4WBm\nZhmHg5mZZRwOZmaWcTiYmVnG4WANQ9I4SZ2S7pP0gKTvSNpngOsYKekzpedtg9S3VZLeOxjr6ml9\nkqZI+lQP7adJOmiwXt+sLw4HazT3R8Rk4CTgNeDrA1x+JPCZPls1oIi4JyLu7GH2NKDqcJDk/+s2\nIP4HYw0pim9nXg2cDSCpVdJKSQ9KujjVviZpgaR7JN0t6X3ABcBJ6S/zw4E9JN0gabWkS8uvIelY\nSXPStCQ9JGkPSd+VdL+kRyQd3W2ZL0i6KE1/UtLXSvUHJf2LpMmp9uNUWyVpXIXNvCrtId1QXrek\n/dMyKyXdJWk8MAX4saRvStpP0tLUx4WSRkgaLmmxpF9IulHSzWmdj0r6PvCPkv5rWu9DpdecJGm5\npDslPSHpz9PzRyQdUOPbaE3M4WANKyJeB0akp9cAn46Ij1N8+B+c6s9FxBRgCfBXwByKvY9JEfEM\nxZ7Et4A/Bf6i2/rXAEdLGg58FHg4It4C/j4iTgL+Grikr36mD9HpwInAacAVkvYEDgNOjIhJwPMV\nFr0rIk4EPiLp/aX6McAjEXFy2ubngHuAL0bEV4BZwLLUx3XptacBv4qIU4EnSusaBfzfiPgs0A6c\nHBEfBcZKmpDa7BERnwJuBKZHxBnAbcDUvrbddl/D690Bs55I2gvYkZ4eBdwpCYoPvLGpvjY9rgH+\nssJqtkfEb9L6fldh/n3AZOATwE9S7RJJp6bpnd3al8ebUXr8IHAEsDI9b4mINyTdCNwq6d+BrwKv\ndlvXY+nxBYoQ63I/8GeSbkttvt1tuQ8BP0zTa4CPAf/J27+LtRRhCMX2t6fp8UDXeZwPAH+Q6k+m\nx82l6ReAP8SGLO85WCO7HLgrTT8BTE1/hU/k7Q/CY9JjK8Vfxm8Aw0rr6GvwsAXAZ4GJEbE67QWc\nlvZQvszbAdBlOzAmTX84PW6k+FA9OfXvaEnDgEUR8XngReDTFV67UtAA7BkRX4+IzwGnSzq023a1\nA8el6WOBDanW9bs4prSut0rTFwDfSXscj5des9yPnvpkQ4z3HKzRnCRpJcUH4WrgilS/DLgjnVjd\nAXRd1TNW0s8pPtTOpfjrfG9JiynCpVcR8XQ6r3B3Km0HtklaBTxcYZFfABdLWkbx1/ULEfGSpIXA\n/ZLeBJ5K/V4iqevD+XP9/g3AsZJmU3ywd6Sfu4HvSfoFcANwm6TpFMFzbWo7XdK9FGH1RoX1/hPw\nfUm/xB/81gd5VFZrVulkcFtE/KzefWkEkvZMh7NmAaMi4tp698mal/cczHYfS9J3J3YAf17vzlhz\n856DmZllfELazMwyDgczM8s07TmHAw88MMaNG1fvbpiZNZW1a9e+FBEtfbVr2nAYN24cbW2DMqaa\nmdmQIek3/Wnnw0pmZpZxOJiZWcbhYGZmGYeDmZllHA5mZpZxOJiZWcbhYGZmGYeDmZllHA5mZpZp\n2m9I12LVqlW/n540aVLd+mFm1qi852BmZhmHg5mZZRwOZmaWcTiYmVnG4WBmZpk+w0HSPElbJT3d\nrf43kp6VtE7SN0v1yyW1p3lnlOpTUq1d0mWl+nhJqyVtkHS7pBGDtXFmZlad/uw53AxMKRcknQxM\nBY6KiCOAb6f64cB04Ii0zA8kDZM0DLgROBM4HDgvtQW4FrguIiYA24GZtW6UmZnVps9wiIgHgG3d\nyhcA10TEjtRma6pPBRZGxI6IeA5oB45LP+0RsTEiXgcWAlMlCZgMLE7Lzwem1bhNZmZWo2rPOfwR\n8PF0OOh+Scem+mhgU6ldR6r1VD8AeDkidnarVyRplqQ2SW2dnZ1Vdt3MzPpSbTgMB0YBJwCXAIvS\nXoAqtI0q6hVFxNyIaI2I1paWPu+PbWZmVap2+IwO4I6ICOARSW8BB6b62FK7McDmNF2p/hIwUtLw\ntPdQbm9mZnVS7Z7DXRTnCpD0R8AIig/6pcB0SXtJGg9MAB4B1gAT0pVJIyhOWi9N4bISOCetdwaw\npNqNMTOzwdHnnoOkBcAk4EBJHcCVwDxgXrq89XVgRvqgXydpEfAMsBO4MCLeTOu5CFgODAPmRcS6\n9BKXAgslfQN4DLhpELfPzMyq0Gc4RMR5Pcz6fA/tZwOzK9SXAcsq1DdSXM1kZmYNwt+QNjOzjMPB\nzMwyDgczM8s4HMzMLONwMDOzjMPBzMwyDgczM8s4HMzMLONwMDOzjMPBzMwyDgczM8s4HMzMLONw\nMDOzjMPBzMwyDgczM8s4HMzMLNNnOEiaJ2lruutb93kXSwpJB6bnknS9pHZJT0qaWGo7Q9KG9DOj\nVP+IpKfSMtdL0mBtnJmZVac/ew43A1O6FyWNBU4Dni+Vz6S4b/QEYBYwJ7Xdn+L2osdT3PXtSkmj\n0jJzUtuu5bLXMjOzXavPcIiIB4BtFWZdB3wFiFJtKnBLFB4GRko6BDgDWBER2yJiO7ACmJLm7RcR\nD6V7UN8CTKttk8zMrFZVnXOQdDbwQkQ80W3WaGBT6XlHqvVW76hQ7+l1Z0lqk9TW2dlZTdfNzKwf\nBhwOkvYBvgpcUWl2hVpUUa8oIuZGRGtEtLa0tPSnu2ZmVoVq9hw+CIwHnpD0a2AM8Kik/0Lxl//Y\nUtsxwOY+6mMq1M3MrI4GHA4R8VREHBQR4yJiHMUH/MSI+DdgKXB+umrpBOCViNgCLAdOlzQqnYg+\nHVie5v1W0gnpKqXzgSWDtG1mZlal/lzKugB4CDhMUoekmb00XwZsBNqBHwL/AyAitgFXA2vSz1Wp\nBnAB8KO0zL8Cd1e3KWZmNliG99UgIs7rY/640nQAF/bQbh4wr0K9DTiyr36Ymdmu429Im5lZxuFg\nZmYZh4OZmWUcDmZmlnE4mJlZxuFgZmYZh4OZmWUcDmZmlnE4mJlZxuFgZmYZh4OZmWUcDmZmlnE4\nmJlZxuFgZmYZh4OZmWX6c7OfeZK2Snq6VPuWpF9KelLSnZJGluZdLqld0rOSzijVp6Rau6TLSvXx\nklZL2iDpdkkjBnMDzcxs4Pqz53AzMKVbbQVwZEQcBfwKuBxA0uHAdOCItMwPJA2TNAy4ETgTOBw4\nL7UFuBa4LiImANuB3u40Z2Zmu0Cf4RARDwDbutV+HhE709OHgTFpeiqwMCJ2RMRzFLf+PC79tEfE\nxoh4HVgITE33jZ4MLE7Lzwem1bhNZmZWo8E45/DfePu+
z6OBTaV5HanWU/0A4OVS0HTVK5I0S1Kb\npLbOzs5B6LqZmVVSUzhI+iqwE7itq1ShWVRRrygi5kZEa0S0trS0DLS7ZmbWT8OrXVDSDOCTwCkR\n0fWB3gGMLTUbA2xO05XqLwEjJQ1Pew/l9mZmVidV7TlImgJcCpwdEa+VZi0FpkvaS9J4YALwCLAG\nmJCuTBpBcdJ6aQqVlcA5afkZwJLqNsXMzAZLfy5lXQA8BBwmqUPSTOAG4H3ACkmPS/oHgIhYBywC\nngHuAS6MiDfTXsFFwHJgPbAotYUiZP5WUjvFOYibBnULzcxswPo8rBQR51Uo9/gBHhGzgdkV6suA\nZRXqGymuZjIzswbhb0ibmVnG4WBmZhmHg5mZZRwOZmaWcTiYmVnG4WBmZhmHg5mZZRwOZmaWcTiY\nmVnG4WBmZhmHg5mZZRwOZmaWcTiYmVnG4WBmZhmHg5mZZRwOZmaW6c+d4OZJ2irp6VJtf0krJG1I\nj6NSXZKul9Qu6UlJE0vLzEjtN6T7T3fVPyLpqbTM9ZI02BtpZmYD0589h5uBKd1qlwH3RsQE4N70\nHOBMivtGTwBmAXOgCBPgSuB4iru+XdkVKKnNrNJy3V/LzMx2sT7DISIeALZ1K08F5qfp+cC0Uv2W\nKDwMjJR0CHAGsCIitkXEdmAFMCXN2y8iHoqIAG4prcvMzOqk2nMOB0fEFoD0eFCqjwY2ldp1pFpv\n9Y4K9YokzZLUJqmts7Ozyq6bmVlfBvuEdKXzBVFFvaKImBsRrRHR2tLSUmUXzcysL9WGw4vpkBDp\ncWuqdwBjS+3GAJv7qI+pUDczszqqNhyWAl1XHM0AlpTq56erlk4AXkmHnZYDp0salU5Enw4sT/N+\nK+mEdJXS+aV1mZlZnQzvq4GkBcAk4EBJHRRXHV0DLJI0E3geODc1XwacBbQDrwFfBIiIbZKuBtak\ndldFRNdJ7gsorojaG7g7/ZiZWR31GQ4RcV4Ps06p0DaAC3tYzzxgXoV6G3BkX/0wM7Ndx9+QNjOz\njMPBzMwyDgczM8s4HMzMLONwMDOzjMPBzMwyDgczM8s4HMzMLONwMDOzjMPBzMwyDgczM8s4HMzM\nLONwMDOzjMPBzMwyDgczM8s4HMzMLFNTOEj6X5LWSXpa0gJJ75E0XtJqSRsk3S5pRGq7V3renuaP\nK63n8lR/VtIZtW2SmZnVqupwkDQa+J9Aa0QcCQwDpgPXAtdFxARgOzAzLTIT2B4RHwKuS+2QdHha\n7ghgCvADScOq7ZeZmdWu1sNKw4G9JQ0H9gG2AJOBxWn+fGBamp6anpPmnyJJqb4wInZExHMU958+\nrsZ+mZlZDaoOh4h4Afg28DxFKLwCrAVejoidqVkHMDpNjwY2pWV3pvYHlOsVlnkHSbMktUlq6+zs\nrLbrZmbWh1oOK42i+Kt/PPAHwL7AmRWaRtciPczrqZ4XI+ZGRGtEtLa0tAy802Zm1i+1HFY6FXgu\nIjoj4g3gDuBPgZHpMBPAGGBzmu4AxgKk+e8HtpXrFZYxM7M6qCUcngdOkLRPOndwCvAMsBI4J7WZ\nASxJ00vTc9L8+yIiUn16upppPDABeKSGfpmZWY2G992ksohYLWkx8CiwE3gMmAv8M7BQ0jdS7aa0\nyE3ArZLaKfYYpqf1rJO0iCJYdgIXRsSb1fbLzMxqV3U4AETElcCV3cobqXC1UUT8Dji3h/XMBmbX\n0hczMxs8/oa0mZllHA5mZpZxOJiZWcbhYGZmGYeDmZllHA5mZpZxOJiZWcbhYGZmGYeDmZllHA5m\nZpZxOJiZWcbhYGZmGYeDmZllHA5mZpZxOJiZWaamcJA0UtJiSb+UtF7SRyXtL2mFpA3pcVRqK0nX\nS2qX9KSkiaX1zEjtN0ia0fMrmpnZrlDrnsP3gXsi4o+BDwPrgcuAeyNiAnBveg5wJsUtQCcAs4A5\nAJL2p7hh0PEUNwm6sitQzMysPqoOB0n7ASeSbgMaEa9HxMvAVGB+ajYfmJampwK3ROFhYKSkQ4Az\ngBURsS0itgMrgCnV9svMzGpXy57DB4BO4MeSHpP0I0n7AgdHxBaA9HhQaj8a2FRaviPVeqqbmVmd\n1BIOw4GJwJyIOAb4T94+hFSJKtSil3q+AmmWpDZJbZ2dnQPtr5mZ9VMt4dABdETE6vR8MUVYvJgO\nF5Eet5bajy0tPwbY3Es9ExFzI6I1IlpbWlpq6LqZmfWm6nCIiH8DNkk6LJVOAZ4BlgJdVxzNAJak\n6aXA+emqpROAV9Jhp+XA6ZJGpRPRp6eamZnVyfAal/8b4DZJI4CNwBcpAmeRpJnA88C5qe0y4Cyg\nHXgttSUitkm6GliT2l0VEdtq7JeZmdWgpnCIiMeB1gqzTqnQNoALe1jPPGBeLX0xM7PB429Im5lZ\nxuFgZmYZh4OZmWUcDmZmlnE4mJlZxuFgZmYZh4OZmWUcDmZmlnE4mJlZxuFgZmYZh4OZmWUcDmZm\nlnE4mJlZxuFgZmYZh4OZmWUcDmZmlqk5HCQNk/SYpJ+l5+MlrZa0QdLt6S5xSNorPW9P88eV1nF5\nqj8r6Yxa+2RmZrUZjD2HLwHrS8+vBa6LiAnAdmBmqs8EtkfEh4DrUjskHQ5MB44ApgA/kDRsEPpl\nZmZVqikcJI0BPgH8KD0XMBlYnJrMB6al6anpOWn+Kan9VGBhROyIiOco7jF9XC39MjOz2tS65/A9\n4CvAW+n5AcDLEbEzPe8ARqfp0cAmgDT/ldT+9/UKy7yDpFmS2iS1dXZ21th1MzPrSdXhIOmTwNaI\nWFsuV2gafczrbZl3FiPmRkRrRLS2tLQMqL9mZtZ/w2tY9mPA2ZLOAt4D7EexJzFS0vC0dzAG2Jza\ndwBjgQ5Jw4H3A9tK9S7lZczMrA6q3nOIiMsjYkxEjKM4oXxfRHwOWAmck5rNAJak6aXpOWn+fRER\nqT49Xc00HpgAPFJtvwZq1apVv/8xM7NCLXsOPbkUWCjpG8BjwE2pfhNwq6R2ij2G6QARsU7SIuAZ\nYCdwYUS8+S70y8zM+mlQwiEiVgGr0vRGKlxtFBG/A87tYfnZwOzB6IuZmdXO35A2M7OMw8HMzDIO\nBzMzyzgczMws43AwM7OMw8HMzDIOBzMzyzgczMws43AwM7PMuzF8RtMqj680adKkuvXDzKzevOdg\nZmYZh4OZmWUcDmZmlvE5hx74/IOZDWXeczAzs4zDwczMMlWHg6SxklZKWi9pnaQvpfr+klZI2pAe\nR6W6JF0vqV3Sk5ImltY1I7XfIGlGT69pZma7Ri17DjuB/x0RfwKcAFwo6XDgMuDeiJgA3JueA5xJ\ncX/oCcAsYA4UYQJcCRxPcQe5K7sCxczM6qPqE9IRsQXYkqZ/K2k9MBqYCkxKzeZT3D700lS/JSIC\neFjSSEmHpLYrImIbgKQVwBRgQbV9ezf5RLWZDQWDcrWSpHHAMcBq4OAUHETEFkkHpWajgU2lxTpS\nrad6pdeZRbH
XwaGHHjoYXe+XciCYmQ0FNZ+QlvRe4KfAlyPiP3prWqEWvdTzYsTciGiNiNaWlpaB\nd9bMzPqlpj0HSXtSBMNtEXFHKr8o6ZC013AIsDXVO4CxpcXHAJtTfVK3+qpa+rWr+BCTme2uqg4H\nSQJuAtZHxHdLs5YCM4Br0uOSUv0iSQspTj6/kgJkOfB/SiehTwcur7Zf9dLToSeHhpk1o1r2HD4G\n/AXwlKTHU+3vKEJhkaSZwPPAuWneMuAsoB14DfgiQERsk3Q1sCa1u6rr5PTuoKe9i10RJv15bYeX\nmVWi4uKh5tPa2hptbW1VLTsUTzD3J5hqWaeZNQdJayOita92HltpiHg3AnGg63SYmDUPD59hZmYZ\n7znYLuM9DbPm4XCwptA9WBwcZu8uh4M1rKF44YBZo3A4WFPy90rM3l0OB9ut+DscZoPD4WC7Le9d\nmFXP4WBDTi3nMhwsNlQ4HMwGwIetbKhwOJhVyUFhuzOHg9kg6M+hKgeINROHg9ku4m+IWzNxOJg1\nKF9tZfXkcDBrMr3tgTg4bLA4HMx2Iz73YYOlYcJB0hTg+8Aw4EcRcU2du2S2Wxqs73n4aq3dW0OE\ng6RhwI3AaUAHsEbS0oh4pr49M7OynoJlVw6S6CDaNRoiHIDjgPaI2AggaSEwFXA4mNk77G6j9TZq\n2DVKOIwGNpWedwDHd28kaRYwKz19VdKzVb7egcBLVS7bCJq9/9D82+D+11+zb0O9+v+H/WnUKOGg\nCrXIChFzgbk1v5jU1p8bbDeqZu8/NP82uP/11+zb0Oj9b5R7SHcAY0vPxwCb69QXM7Mhr1HCYQ0w\nQdJ4SSOA6cDSOvfJzGzIaojDShGxU9JFwHKKS1nnRcS6d/Elaz40VWfN3n9o/m1w/+uv2behofuv\niOzQvpmZDXGNcljJzMwaiMPBzMwyQyocJE2R9KykdkmX1bs/1ZD0a0lPSXpcUlu9+9MXSfMkbZX0\ndKm2v6QVkjakx1H17GNfetiGr0l6Ib0Pj0s6q5597I2ksZJWSlovaZ2kL6V6U7wPvfS/Kd4DSe+R\n9IikJ1L/v57q4yWtTr//29PFOA1jyJxzSEN0/IrSEB3Aec02RIekXwOtEdEUX/6RdCLwKnBLRByZ\nat8EtkXENSmkR0XEpfXsZ2962IavAa9GxLfr2bf+kHQIcEhEPCrpfcBaYBrwBZrgfeil/5+hCd4D\nSQL2jYhXJe0J/D/gS8DfAndExEJJ/wA8ERFz6tnXsqG05/D7IToi4nWga4gOexdFxAPAtm7lqcD8\nND2f4j96w+phG5pGRGyJiEfT9G+B9RSjEjTF+9BL/5tCFF5NT/dMPwFMBhanesP9/odSOFQaoqNp\n/oGVBPBzSWvTcCLN6OCI2ALFf3zgoDr3p1oXSXoyHXZqyEMy3UkaBxwDrKYJ34du/YcmeQ8kDZP0\nOLAVWAH8K/ByROxMTRru82gohUO/huhoAh+LiInAmcCF6ZCH7XpzgA8CRwNbgO/Utzt9k/Re4KfA\nlyPiP+rdn4Gq0P+meQ8i4s2IOJpi9IfjgD+p1GzX9qp3QykcdoshOiJic3rcCtxJ8Q+t2byYjiN3\nHU/eWuf+DFhEvJj+w78F/JAGfx/Sse6fArdFxB2p3DTvQ6X+N9t7ABARLwOrgBOAkZK6vojccJ9H\nQykcmn6IDkn7phNySNoXOB14uvelGtJSYEaangEsqWNfqtL1oZp8igZ+H9IJ0ZuA9RHx3dKspngf\neup/s7wHklokjUzTewOnUpw3WQmck5o13O9/yFytBJAudfsebw/RMbvOXRoQSR+g2FuAYuiTnzT6\nNkhaAEyiGJ74ReBK4C5gEXAo8DxwbkQ07AnfHrZhEsXhjAB+Dfx11/H7RiPpz4AHgaeAt1L57yiO\n2zf8+9BL/8+jCd4DSUdRnHAeRvEH+aKIuCr9f14I7A88Bnw+InbUr6fvNKTCwczM+mcoHVYyM7N+\ncjiYmVnG4WBmZhmHg5mZZRwOZmaWcTjYbk/SOEmdku6T9ICk70jaZ4DrGCnpM6XngzIirqRV6Zu/\nZg3F4WBDxf0RMRk4CXgN+PoAlx9JMQqo2ZDgcLAhJYov9lwNnA0gqTXdK+BBSRen2tckLZB0j6S7\n07fSLwBOSn/pHw7sIemGNB7/O4a5lnSspDlpWpIekrSHpO9Kuj+N7X90t2W+oOI+6kj6ZBoSvKv+\noKR/kTQ51X6caqvSQHRmg254303Mdi8R8XrpxirXAJ+OiO2S/knSran+XEScJ+m/A39FGuQtIs6B\n4jAT8C2KMbueAK4trX+NpOvTuDnHAQ9HxFuS/j4iXpN0DHAJ8Lne+inpAIphXk4E9gH+WdKDwGEU\nAzCGJP+BZ+8Kh4MNOZL2ArqGKTgKuLMYvodRvD0449r0uAb4ywqr2R4Rv0nr+12F+fdRjNf/CeAn\nqXaJpFPT9M5u7ctDFXSNIPxB4AiKMXgAWiLiDUk3ArdK+nfgqxQ3IjIbVP6rw4aiyynGd4Lir/6p\nETEJmMjboXBMemwF2oE3KMbG6dLXuDMLgM8CEyNiddoLOC0iPg58mXwI+e0UI3MCfDg9bgSeBE5O\n/TtaxR0NF0XE5ynGefp0n1trVgXvOdhQcZKklRQf8KuBK1L9MuCOdHhmB8XongBjJf2cIgTOpfjr\nfG9JiynCpVcR8XQ6r3B3Km0HtklaBTxcYZFfABdLWga8ALwQES9JWgjcL+lNioHnrgCWSOoagK7X\nQ1Nm1fLAe2bdpJPBbRHxs3r3xaxefFjJzMwy3nMwM7OM9xzMzCzjcDAzs4zDwczMMg4HMzPLOBzM\nzCzz/wHbUwZsyZBGgAAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from mindboggle.mio.plots import histograms_of_lists\n", - "histograms_of_lists(columns=[depths],\n", - " column_name='Depth values',\n", - " ignore_columns=[],\n", - " nbins=100,\n", - " axis_limits=[],\n", - " titles='depth values')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Find a depth threshold to extract folds from the surface:" - ] - }, - { - "cell_type": "code", - "execution_count": 108, - "metadata": { - "collapsed": false, - "deletable": 
true, - "editable": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Depth threshold: 1.4778676864945055\n" - ] - }, - { - "data": { - "text/plain": [ - "1.4778676864945055" - ] - }, - "execution_count": 108, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from mindboggle.features.folds import find_depth_threshold\n", - "depth_threshold, bins, bin_edges = find_depth_threshold(depth_file=depth_file,\n", - " min_vertices=10000,\n", - " verbose=True)\n", - "depth_threshold" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Extract folds with the depth threshold:" - ] - }, - { - "cell_type": "code", - "execution_count": 109, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Extract folds in surface mesh\n", - " Segment vertices deeper than 1.48 as folds\n", - " ...Segmented folds (2.36 seconds)\n", - " Remove folds smaller than 50\n", - " ...Extracted 10 folds (5.82 seconds)\n" - ] - } - ], - "source": [ - "from mindboggle.features.folds import extract_folds\n", - "folds, n_folds, folds_file = extract_folds(depth_file=depth_file,\n", - " depth_threshold=depth_threshold,\n", - " min_fold_size=50,\n", - " save_file=True,\n", - " output_file='folds.vtk',\n", - " background_value=-1,\n", - " verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Remove all vertices but the folds:" - ] - }, - { - "cell_type": "code", - "execution_count": 110, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [], - "source": [ - "from mindboggle.mio.vtks import rewrite_scalars\n", - "rewrite_scalars(input_vtk=folds_file,\n", - " output_vtk='rewrite_scalars.vtk',\n", - " new_scalars=[folds],\n", - " new_scalar_names=['folds'],\n", - " filter_scalars=folds,\n", - " background_value=-1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Plot the folds in 3-D:" - ] - }, - { - "cell_type": "code", - "execution_count": 111, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "doing checks /home/jovyan/.jupyter/custom/\n", - "{'/home/jovyan/work/mindboggle/docs/rewrite_scalars.vtk': 'papaya_data/tmpy8ghqbvl.vtk'}\n", - "/files/papaya_data/tmpy8ghqbvl.vtk\n", - "{'/home/jovyan/work/mindboggle/docs/rewrite_scalars.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 1, 'key': 'folds', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['folds'], 'vmax': 10, 'filename': '/files/papaya_data/tmp5ra4bwp2.csv', 'vmin': 1}} {'/files/papaya_data/tmpy8ghqbvl.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 1, 'key': 'folds', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['folds'], 'vmax': 10, 'filename': '/files/papaya_data/tmp5ra4bwp2.csv', 'vmin': 1}}\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 111, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "just_folds = np.ones(len(folds))\n", - "df = pd.DataFrame(just_folds, columns=[\"folds\"])\n", - 
"df.to_csv('folds.csv', index=False)\n", - "MeshOpts = getMeshOpts('rewrite_scalars.vtk', \"folds.csv\" , 1,10,1)\n", - "Overlay(MeshOpts)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Load a FreeSurfer .annot file and save as a VTK format file:" - ] - }, - { - "cell_type": "code", - "execution_count": 112, - "metadata": { - "collapsed": true, - "deletable": true, - "editable": true - }, - "outputs": [], - "source": [ - "from mindboggle.mio.vtks import freesurfer_annot_to_vtk\n", - "labels, label_file = freesurfer_annot_to_vtk(annot_file=subject_path + 'label/lh.aparc.annot',\n", - " vtk_file=surface_file,\n", - " output_vtk='lh.aparc.annot.vtk',\n", - " background_value=-1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Relabel surface labels to match expected volume labels:" - ] - }, - { - "cell_type": "code", - "execution_count": 113, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [], - "source": [ - "from mindboggle.guts.relabel import relabel_surface\n", - "from mindboggle.mio.labels import DKTprotocol\n", - "dkt = DKTprotocol()\n", - "relabel_file = relabel_surface(vtk_file=label_file,\n", - " hemi='lh', \n", - " old_labels=dkt.DKT31_numbers, \n", - " new_labels=[],\n", - " erase_remaining=True, \n", - " erase_labels=[0], \n", - " erase_value=-1, \n", - " output_file='relabeled.vtk')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Extract sulci from folds using pairs of labels in the DKT labeling protocol:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, - "scrolled": true - }, - "outputs": [], - "source": [ - "from mindboggle.features.sulci import extract_sulci\n", - "sulci, n_sulci, sulci_file = extract_sulci(labels_file=relabel_file,\n", - " folds_or_file=folds,\n", - " hemi='lh',\n", - " min_boundary=10,\n", - " sulcus_names=[],\n", - " save_file=True,\n", - " output_file='sulci.vtk',\n", - " background_value=-1,\n", - " verbose=True)\n", - "n_sulci" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Compute statistics on depth values for the sulci:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Plot the sulci in 3-D:" - ] - }, - { - "cell_type": "code", - "execution_count": 114, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "doing checks /home/jovyan/.jupyter/custom/\n", - "{'/home/jovyan/work/mindboggle/docs/sulci.vtk': 'papaya_data/tmp34l_tpws.vtk'}\n", - "/files/papaya_data/tmp34l_tpws.vtk\n", - "{'/home/jovyan/work/mindboggle/docs/sulci.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 1, 'key': 'sulci', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['sulci'], 'vmax': 10, 'filename': '/files/papaya_data/tmplgfl37v3.csv', 'vmin': 1}} {'/files/papaya_data/tmp34l_tpws.vtk': {'mesh_transparency': 1, 'overlay_transparency': 1, 'threshold': 1, 'key': 'sulci', 'mesh_visible': True, 'colormax': '#FF0000', 'colormin': '#0000FF', 'key_options': ['sulci'], 'vmax': 10, 'filename': 
'/files/papaya_data/tmplgfl37v3.csv', 'vmin': 1}}\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 114, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "df = pd.DataFrame(sulci, columns=[\"sulci\"])\n", - "df.to_csv('sulci.csv', index=False)\n", - "MeshOpts = getMeshOpts('sulci.vtk', \"sulci.csv\" , 1,10,1)\n", - "Overlay(MeshOpts)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Compute statistics on sulcus area:" - ] - }, - { - "cell_type": "code", - "execution_count": 115, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Compute statistics on sulcus area...\n" - ] - } - ], - "source": [ - "from mindboggle.mio.tables import write_shape_stats\n", - "label_table, sulcus_table, fundus_table = write_shape_stats(labels_or_file=[], \n", - " sulci=sulci, fundi=[], affine_transform_files=[], inverse_booleans=[], \n", - " transform_format='itk', area_file='', normalize_by_area=False, \n", - " mean_curvature_file='',\n", - " travel_depth_file=depth_file, geodesic_depth_file='',\n", - " freesurfer_thickness_file='', freesurfer_curvature_file='',\n", - " freesurfer_sulc_file='',\n", - " labels_spectra=[], labels_spectra_IDs=[],\n", - " sulci_spectra=[], sulci_spectra_IDs=[],\n", - " labels_zernike=[], labels_zernike_IDs=[],\n", - " sulci_zernike=[], sulci_zernike_IDs=[],\n", - " exclude_labels=[-1], verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, - "source": [ - "Show statistical summary table of sulcus depth values:" - ] - }, - { - "cell_type": "code", - "execution_count": 116, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
nameIDarea: medianarea: MADarea: meanarea: SDarea: skewarea: kurtosisarea: 25%area: 75%mean position: xmean position: ymean position: z
0frontomarginal sulcus00.0000000.0000000.0000000.0000000.0000000.0000000.0000000.0000000.0000000.0000000.000000
1superior frontal sulcus17.3619542.9506077.6021623.8045230.428667-0.3920694.47257810.413661-32.58852650.6735364.506325
2inferior frontal sulcus26.9183153.0337227.6166794.1504860.623865-0.3195944.26871110.395155-46.15919050.315655-8.545227
3precentral sulcus39.1584303.5687479.7705205.1602820.593787-0.1168125.85060913.154500-38.74525020.8604751.180676
4central sulcus49.9316743.96850410.0249514.9403620.175889-0.7259075.99687913.946191-36.735775-0.2299681.572990
5postcentral sulcus59.7694764.1556399.8230515.0934750.188446-0.8911225.47181713.764740-36.055273-12.678109-2.153404
6intraparietal sulcus69.0157913.6737739.4178914.7853700.246179-0.8782725.57493213.048308-25.537266-35.593797-10.367708
7primary intermediate sulcus/1st segment of pos...79.7604063.5895589.6243324.6561960.061746-0.9460745.77806812.987445-39.214054-27.669046-21.061552
8sylvian fissure811.4701575.22876311.9699926.7289040.332771-0.8481706.47895816.871922-45.071280-12.793651-21.928806
9lateral occipital sulcus96.4864442.8185706.8937263.6706090.458068-0.7128813.8146239.534060-21.348842-46.799553-35.280727
10anterior occipital sulcus106.6478183.1132196.8601683.4916020.145091-1.2037633.6970809.800769-36.469098-29.842632-44.340942
11superior temporal sulcus119.3690683.9286259.3960754.8582300.128603-1.0609975.21107412.973169-50.74881718.009113-41.205663
12inferior temporal sulcus126.6735872.4116336.5476602.9383300.017296-1.0849903.9793988.876130-49.4883541.960510-48.396361
13circular sulcus1320.3685773.85606920.0609015.804000-0.5835250.32714316.68554024.342303-37.65786427.145120-29.397835
141st transverse temporal sulcus and Heschl's su...1414.9456995.83939214.6905767.361789-0.031307-1.0200528.61899020.403642-45.4778437.364013-27.591008
15cingulate sulcus156.1438242.6362056.6759483.6688900.639725-0.3394463.6158138.913487-13.62611122.397674-2.107752
16paracentral sulcus167.0746912.6155927.0615623.3588930.149801-0.9071754.2665739.499668-12.1140582.53465610.683855
17parietooccipital fissure179.4605784.29231210.2876625.6057340.386713-0.8729175.69441414.597697-6.856120-37.365280-19.859171
18calcarine fissure184.3474471.8139825.9029373.8246250.930572-0.4083582.9763658.878260-7.384521-18.301137-30.957549
19superior rostral sulcus196.1550802.7472816.4375802.9932500.051843-1.2861073.8023659.186960-20.68679774.851368-8.641355
20lateral H-shaped orbital sulcus209.4075993.88592110.5695035.9548400.573488-0.6588306.09057814.489672-39.23579154.125187-25.677237
21olfactory sulcus216.2199402.5598276.6820053.4656030.605068-0.2041513.8774499.027775-19.27286450.488119-36.630798
22occipitotemporal sulcus225.6120372.3778795.9058062.9404250.370942-0.8177453.3723938.152838-26.860329-24.410149-51.867309
23collateral sulcus238.3679093.1604648.2547553.8832330.003075-1.1004805.00707511.358145-19.943090-8.897159-46.205846
\n", - "
" - ], - "text/plain": [ - " name ID area: median \\\n", - "0 frontomarginal sulcus 0 0.000000 \n", - "1 superior frontal sulcus 1 7.361954 \n", - "2 inferior frontal sulcus 2 6.918315 \n", - "3 precentral sulcus 3 9.158430 \n", - "4 central sulcus 4 9.931674 \n", - "5 postcentral sulcus 5 9.769476 \n", - "6 intraparietal sulcus 6 9.015791 \n", - "7 primary intermediate sulcus/1st segment of pos... 7 9.760406 \n", - "8 sylvian fissure 8 11.470157 \n", - "9 lateral occipital sulcus 9 6.486444 \n", - "10 anterior occipital sulcus 10 6.647818 \n", - "11 superior temporal sulcus 11 9.369068 \n", - "12 inferior temporal sulcus 12 6.673587 \n", - "13 circular sulcus 13 20.368577 \n", - "14 1st transverse temporal sulcus and Heschl's su... 14 14.945699 \n", - "15 cingulate sulcus 15 6.143824 \n", - "16 paracentral sulcus 16 7.074691 \n", - "17 parietooccipital fissure 17 9.460578 \n", - "18 calcarine fissure 18 4.347447 \n", - "19 superior rostral sulcus 19 6.155080 \n", - "20 lateral H-shaped orbital sulcus 20 9.407599 \n", - "21 olfactory sulcus 21 6.219940 \n", - "22 occipitotemporal sulcus 22 5.612037 \n", - "23 collateral sulcus 23 8.367909 \n", - "\n", - " area: MAD area: mean area: SD area: skew area: kurtosis area: 25% \\\n", - "0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 \n", - "1 2.950607 7.602162 3.804523 0.428667 -0.392069 4.472578 \n", - "2 3.033722 7.616679 4.150486 0.623865 -0.319594 4.268711 \n", - "3 3.568747 9.770520 5.160282 0.593787 -0.116812 5.850609 \n", - "4 3.968504 10.024951 4.940362 0.175889 -0.725907 5.996879 \n", - "5 4.155639 9.823051 5.093475 0.188446 -0.891122 5.471817 \n", - "6 3.673773 9.417891 4.785370 0.246179 -0.878272 5.574932 \n", - "7 3.589558 9.624332 4.656196 0.061746 -0.946074 5.778068 \n", - "8 5.228763 11.969992 6.728904 0.332771 -0.848170 6.478958 \n", - "9 2.818570 6.893726 3.670609 0.458068 -0.712881 3.814623 \n", - "10 3.113219 6.860168 3.491602 0.145091 -1.203763 3.697080 \n", - "11 3.928625 9.396075 4.858230 0.128603 -1.060997 5.211074 \n", - "12 2.411633 6.547660 2.938330 0.017296 -1.084990 3.979398 \n", - "13 3.856069 20.060901 5.804000 -0.583525 0.327143 16.685540 \n", - "14 5.839392 14.690576 7.361789 -0.031307 -1.020052 8.618990 \n", - "15 2.636205 6.675948 3.668890 0.639725 -0.339446 3.615813 \n", - "16 2.615592 7.061562 3.358893 0.149801 -0.907175 4.266573 \n", - "17 4.292312 10.287662 5.605734 0.386713 -0.872917 5.694414 \n", - "18 1.813982 5.902937 3.824625 0.930572 -0.408358 2.976365 \n", - "19 2.747281 6.437580 2.993250 0.051843 -1.286107 3.802365 \n", - "20 3.885921 10.569503 5.954840 0.573488 -0.658830 6.090578 \n", - "21 2.559827 6.682005 3.465603 0.605068 -0.204151 3.877449 \n", - "22 2.377879 5.905806 2.940425 0.370942 -0.817745 3.372393 \n", - "23 3.160464 8.254755 3.883233 0.003075 -1.100480 5.007075 \n", - "\n", - " area: 75% mean position: x mean position: y mean position: z \n", - "0 0.000000 0.000000 0.000000 0.000000 \n", - "1 10.413661 -32.588526 50.673536 4.506325 \n", - "2 10.395155 -46.159190 50.315655 -8.545227 \n", - "3 13.154500 -38.745250 20.860475 1.180676 \n", - "4 13.946191 -36.735775 -0.229968 1.572990 \n", - "5 13.764740 -36.055273 -12.678109 -2.153404 \n", - "6 13.048308 -25.537266 -35.593797 -10.367708 \n", - "7 12.987445 -39.214054 -27.669046 -21.061552 \n", - "8 16.871922 -45.071280 -12.793651 -21.928806 \n", - "9 9.534060 -21.348842 -46.799553 -35.280727 \n", - "10 9.800769 -36.469098 -29.842632 -44.340942 \n", - "11 12.973169 -50.748817 18.009113 -41.205663 \n", - "12 8.876130 -49.488354 
1.960510 -48.396361 \n", - "13 24.342303 -37.657864 27.145120 -29.397835 \n", - "14 20.403642 -45.477843 7.364013 -27.591008 \n", - "15 8.913487 -13.626111 22.397674 -2.107752 \n", - "16 9.499668 -12.114058 2.534656 10.683855 \n", - "17 14.597697 -6.856120 -37.365280 -19.859171 \n", - "18 8.878260 -7.384521 -18.301137 -30.957549 \n", - "19 9.186960 -20.686797 74.851368 -8.641355 \n", - "20 14.489672 -39.235791 54.125187 -25.677237 \n", - "21 9.027775 -19.272864 50.488119 -36.630798 \n", - "22 8.152838 -26.860329 -24.410149 -51.867309 \n", - "23 11.358145 -19.943090 -8.897159 -46.205846 " - ] - }, - "execution_count": 116, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "pd.read_csv(sulcus_table)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true, - "deletable": true, - "editable": true - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "anaconda-cloud": {}, - "celltoolbar": "Raw Cell Format", - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.2" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/environment.yml b/environment.yml deleted file mode 100644 index 5f7fb5f49..000000000 --- a/environment.yml +++ /dev/null @@ -1,52 +0,0 @@ -# Environment for installing Mindboggle -# -# This environment is configured for circleci.com continuous integration -# (testing). -# -# Authors: -# - Arno Klein, 2016-2017 (arno@mindboggle.info) http://binarybottle.com -# -# Copyright 2016-2017, Mindboggle team (mindboggle.info), Apache v2.0 License - -#----------------------------------------------------------------------------- -# mindboggle-env environment: -#----------------------------------------------------------------------------- -name: mindboggle-env -#----------------------------------------------------------------------------- -# anaconda cloud channel for installing VTK 7.0.0: -#----------------------------------------------------------------------------- -channels: - - clinicalgraphics -#----------------------------------------------------------------------------- -# mindboggle dependencies: -#----------------------------------------------------------------------------- -dependencies: - - python=3.5.1 - - vtk=7.0.0 - - matplotlib - - numpy - - scipy - - pandas - - cmake - - pip - - pip: - - nibabel - - nipype - - prov - - traits - - xvfbwrapper - - colormath - - pydotplus - - - networkx - - lxml - - simplejson - - future - - nose - #------------------------------------------------------------------------- - # additional testing tools: - #------------------------------------------------------------------------- - - ipython - - pytest - - coverage - diff --git a/info.py b/info.py index ee4e60123..d9190e1ca 100644 --- a/info.py +++ b/info.py @@ -5,18 +5,8 @@ In setup.py we execute this file, so it cannot import mindboggle. """ -# Mindboggle version information. An empty _version_extra corresponds to a -# full release. 
'.dev' as a _version_extra string means a development version -_version_major = 1 -_version_minor = 2 -_version_micro = 0 -_version_extra = '' - # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" -__version__ = "{0}.{1}.{2}{3}".format(_version_major, - _version_minor, - _version_micro, - _version_extra) +from mindboggle.version import __version__ as __version__ CLASSIFIERS = ["Development Status :: Beta", "Environment :: Console", @@ -26,7 +16,8 @@ "Programming Language :: Python 3", "Topic :: Scientific/Engineering"] -description = "Automated human brain image feature extraction, labeling, and shape analysis" +description = ("Automated human brain image feature extraction, labeling, " + "and shape analysis") # Note: this long_description is actually a copy/paste from the top-level # README.rst, so that it shows up nicely on PyPI. So please remember to edit @@ -48,7 +39,7 @@ # Main setup parameters NAME = 'Mindboggle' MAINTAINER = "Arno Klein" -MAINTAINER_EMAIL = "arno@mindboggle.info" +MAINTAINER_EMAIL = "arno@childmind.org" DESCRIPTION = description LONG_DESCRIPTION = long_description URL = "http://mindboggle.info/" @@ -56,13 +47,10 @@ LICENSE = "Apache v2.0" CLASSIFIERS = CLASSIFIERS AUTHOR = "Arno Klein" -AUTHOR_EMAIL = "arno@mindboggle.info" +AUTHOR_EMAIL = "arno@childmind.org" PLATFORMS = "Linux" -MAJOR = _version_major -MINOR = _version_minor -MICRO = _version_micro -ISRELEASE = _version_extra VERSION = __version__ PROVIDES = ["mindboggle"] -#REQUIRES = ["numpy (>={0})".format(NUMPY_MIN_VERSION)] +REQUIRES = ["nipype", "matplotlib", "colormath", "pandas", + "etelemetry"] diff --git a/install/Dockerfile b/install/Dockerfile new file mode 100644 index 000000000..1ab16c656 --- /dev/null +++ b/install/Dockerfile @@ -0,0 +1,299 @@ +# Generated by Neurodocker version 0.4.2-3-gf7055a1 +# Timestamp: 2019-11-05 01:29:27 UTC +# +# Thank you for using Neurodocker. If you discover any issues +# or ways to improve this software, please submit an issue or +# pull request on our GitHub repository: +# +# https://github.com/kaczmarj/neurodocker + +FROM neurodebian:stretch + +ARG DEBIAN_FRONTEND="noninteractive" + +ENV LANG="en_US.UTF-8" \ + LC_ALL="en_US.UTF-8" \ + ND_ENTRYPOINT="/neurodocker/startup.sh" +RUN export ND_ENTRYPOINT="/neurodocker/startup.sh" \ + && apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + apt-utils \ + bzip2 \ + ca-certificates \ + curl \ + locales \ + unzip \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ + && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ + && dpkg-reconfigure --frontend=noninteractive locales \ + && update-locale LANG="en_US.UTF-8" \ + && chmod 777 /opt && chmod a+s /opt \ + && mkdir -p /neurodocker \ + && if [ ! 
-f "$ND_ENTRYPOINT" ]; then \ + echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" \ + && echo 'set -e' >> "$ND_ENTRYPOINT" \ + && echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; \ + fi \ + && chmod -R 777 /neurodocker && chmod a+s /neurodocker + +ENTRYPOINT ["/neurodocker/startup.sh"] + +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + graphviz \ + tree \ + git-annex-standalone \ + vim \ + emacs-nox \ + nano \ + less \ + ncdu \ + tig \ + sed \ + build-essential \ + libsm-dev \ + libx11-dev \ + libxt-dev \ + libxext-dev \ + libglu1-mesa \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +RUN ln -s /usr/lib/x86_64-linux-gnu /usr/lib64 + +ENV CONDA_DIR="/opt/miniconda-latest" \ + PATH="/opt/miniconda-latest/bin:$PATH" +RUN export PATH="/opt/miniconda-latest/bin:$PATH" \ + && echo "Downloading Miniconda installer ..." \ + && conda_installer="/tmp/miniconda.sh" \ + && curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + && bash "$conda_installer" -b -p /opt/miniconda-latest \ + && rm -f "$conda_installer" \ + && conda update -yq -nbase conda \ + && conda config --system --prepend channels conda-forge \ + && conda config --system --set auto_update_conda false \ + && conda config --system --set show_channel_urls true \ + && sync && conda clean -tipsy && sync \ + && conda create -y -q --name mb \ + && conda install -y -q --name mb \ + 'python=3.6' \ + 'pip' \ + 'jupyter' \ + 'cmake' \ + 'nipype>=1.1.4' \ + 'mesalib' \ + 'vtk=8.2.0=py36ha8e561a_201' \ + 'pandas' \ + 'matplotlib' \ + 'colormath' \ + 'nilearn' \ + 'tbb-devel' \ + 'nose' \ + 'etelemetry' \ + && sync && conda clean -tipsy && sync \ + && bash -c "source activate mb \ + && pip install --no-cache-dir \ + 'datalad[full]' \ + 'duecredit'" \ + && rm -rf ~/.cache/pip/* \ + && sync \ + && sed -i '$isource activate mb' $ND_ENTRYPOINT + +WORKDIR /opt + +RUN mkdir -p /opt/data && cd /opt/data && \ + curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \ + unzip templates.zip && \ + rm -f /opt/data/templates.zip && \ + curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \ + mkdir OASIS-TRT-20_brains && \ + cd OASIS-TRT-20_brains && \ + unzip ../OASIS-TRT-20_brains.zip && \ + cd .. && \ + rm OASIS-TRT-20_brains.zip && \ + curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \ + mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + unzip ../cmalabels.zip && \ + cd .. 
&& \ + rm cmalabels.zip && \ + curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \ + rm -rf __MACOSX + +RUN bash -c 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + git clone https://github.com/nipy/mindboggle.git && \ + cd /opt/mindboggle && \ + python setup.py install && \ + mkdir /opt/vtk_cpp_tools && \ + cd /opt/vtk_cpp_tools && \ + cmake /opt/mindboggle/vtk_cpp_tools && \ + make' + +ENV vtk_cpp_tools="/opt/vtk_cpp_tools" + +RUN bash -c 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + conda install -y flask && \ + git clone https://github.com/akeshavan/roygbiv && \ + cd /opt/roygbiv && \ + git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \ + python setup.py install && \ + cd /opt && \ + rm -rf /opt/roygbiv' + +RUN mkdir -p /.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > /.jupyter/jupyter_notebook_config.py + +ENV ANTSPATH="/opt/ants-b43df4bfc8/bin" \ + PATH="/opt/ants-b43df4bfc8/bin:$PATH" \ + LD_LIBRARY_PATH="/opt/ants-b43df4bfc8/lib:$LD_LIBRARY_PATH" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + cmake \ + g++ \ + gcc \ + git \ + make \ + zlib1g-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ + && mkdir -p /tmp/ants/build \ + && git clone https://github.com/ANTsX/ANTs.git /tmp/ants/source \ + && cd /tmp/ants/source \ + && git fetch --tags \ + && git checkout b43df4bfc8 \ + && cd /tmp/ants/build \ + && cmake -DBUILD_SHARED_LIBS=ON /tmp/ants/source \ + && make -j 4 \ + && mkdir -p /opt/ants-b43df4bfc8 \ + && mv bin lib /opt/ants-b43df4bfc8/ \ + && mv /tmp/ants/source/Scripts/* /opt/ants-b43df4bfc8/bin \ + && rm -rf /tmp/ants + +ENV FREESURFER_HOME="/opt/freesurfer-6.0.0-min" \ + PATH="/opt/freesurfer-6.0.0-min/bin:$PATH" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + bc \ + libgomp1 \ + libxmu6 \ + libxt6 \ + perl \ + tcsh \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ + && echo "Downloading FreeSurfer ..." 
\ + && mkdir -p /opt/freesurfer-6.0.0-min \ + && curl -fsSL --retry 5 https://dl.dropbox.com/s/nnzcfttc41qvt31/recon-all-freesurfer6-3.min.tgz \ + | tar -xz -C /opt/freesurfer-6.0.0-min --strip-components 1 \ + && sed -i '$isource "/opt/freesurfer-6.0.0-min/SetUpFreeSurfer.sh"' "$ND_ENTRYPOINT" + +RUN curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt + +RUN echo '{ \ + \n "pkg_manager": "apt", \ + \n "instructions": [ \ + \n [ \ + \n "base", \ + \n "neurodebian:stretch" \ + \n ], \ + \n [ \ + \n "install", \ + \n [ \ + \n "graphviz", \ + \n "tree", \ + \n "git-annex-standalone", \ + \n "vim", \ + \n "emacs-nox", \ + \n "nano", \ + \n "less", \ + \n "ncdu", \ + \n "tig", \ + \n "sed", \ + \n "build-essential", \ + \n "libsm-dev", \ + \n "libx11-dev", \ + \n "libxt-dev", \ + \n "libxext-dev", \ + \n "libglu1-mesa" \ + \n ] \ + \n ], \ + \n [ \ + \n "run", \ + \n "ln -s /usr/lib/x86_64-linux-gnu /usr/lib64" \ + \n ], \ + \n [ \ + \n "miniconda", \ + \n { \ + \n "conda_install": [ \ + \n "python=3.6", \ + \n "pip", \ + \n "jupyter", \ + \n "cmake", \ + \n "nipype>=1.1.4", \ + \n "mesalib", \ + \n "vtk=8.2.0=py36ha8e561a_201", \ + \n "pandas", \ + \n "matplotlib", \ + \n "colormath", \ + \n "nilearn", \ + \n "tbb-devel", \ + \n "nose", \ + \n "etelemetry" \ + \n ], \ + \n "pip_install": [ \ + \n "datalad[full]", \ + \n "duecredit" \ + \n ], \ + \n "create_env": "mb", \ + \n "activate": true \ + \n } \ + \n ], \ + \n [ \ + \n "workdir", \ + \n "/opt" \ + \n ], \ + \n [ \ + \n "run", \ + \n "mkdir -p /opt/data && cd /opt/data && \\\\n curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \\\\n unzip templates.zip && \\\\n rm -f /opt/data/templates.zip && \\\\n curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \\\\n mkdir OASIS-TRT-20_brains && \\\\n cd OASIS-TRT-20_brains && \\\\n unzip ../OASIS-TRT-20_brains.zip && \\\\n cd .. && \\\\n rm OASIS-TRT-20_brains.zip && \\\\n curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \\\\n mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \\\\n cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \\\\n unzip ../cmalabels.zip && \\\\n cd .. 
&& \\\\n rm cmalabels.zip && \\\\n curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \\\\n rm -rf __MACOSX" \ + \n ], \ + \n [ \ + \n "run_bash", \ + \n "source /opt/miniconda-latest/etc/profile.d/conda.sh && \\\\n conda activate mb && \\\\n git clone https://github.com/nipy/mindboggle.git && \\\\n cd /opt/mindboggle && \\\\n python setup.py install && \\\\n mkdir /opt/vtk_cpp_tools && \\\\n cd /opt/vtk_cpp_tools && \\\\n cmake /opt/mindboggle/vtk_cpp_tools && \\\\n make" \ + \n ], \ + \n [ \ + \n "env", \ + \n { \ + \n "vtk_cpp_tools": "/opt/vtk_cpp_tools" \ + \n } \ + \n ], \ + \n [ \ + \n "run_bash", \ + \n "source /opt/miniconda-latest/etc/profile.d/conda.sh && \\\\n conda activate mb && \\\\n conda install -y flask && \\\\n git clone https://github.com/akeshavan/roygbiv && \\\\n cd /opt/roygbiv && \\\\n git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \\\\n python setup.py install && \\\\n cd /opt && \\\\n rm -rf /opt/roygbiv" \ + \n ], \ + \n [ \ + \n "run", \ + \n "mkdir -p /.jupyter && echo c.NotebookApp.ip = \\\"0.0.0.0\\\" > /.jupyter/jupyter_notebook_config.py" \ + \n ], \ + \n [ \ + \n "ants", \ + \n { \ + \n "version": "b43df4bfc8", \ + \n "method": "source", \ + \n "cmake_opts": "-DBUILD_SHARED_LIBS=ON", \ + \n "make_opts": "-j 4" \ + \n } \ + \n ], \ + \n [ \ + \n "freesurfer", \ + \n { \ + \n "version": "6.0.0-min" \ + \n } \ + \n ], \ + \n [ \ + \n "run", \ + \n "curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt" \ + \n ] \ + \n ] \ + \n}' > /neurodocker/neurodocker_specs.json diff --git a/install/Dockerfile.mindboggle.base b/install/Dockerfile.mindboggle.base deleted file mode 100644 index baa86998f..000000000 --- a/install/Dockerfile.mindboggle.base +++ /dev/null @@ -1,171 +0,0 @@ -# Dockerfile.mindboggle.base -#----------------------------------------------------------------------------- -# This Dockerfile installs most of Mindboggle's dependencies, -# including preprocessing software packages FreeSurfer and ANTs, -# and visualization software roygbiv. -# -# -- Adapted from dockerfiles created by Satrajit Ghosh -# for a nipype workshop held at MIT in March 2017 -# (in turn adapted from https://github.com/miykael/nipype_env) -# -# Steps: -# 1. Update OS dependencies and set up neurodebian. -# 2. Install conda packages and nibabel. -# 3. Install FreeSurfer. -# 4. Install ANTS. -# 5. Install dependencies for mindboggle's C++ library. -# 6. Install mindboggle's OASIS-30_Atropos_template data for use by ANTs. -# Missing steps (see Dockerfile.mindboggle.complete): -# 7. Install mindboggle. -# 8. Install roygbiv for mindboggle output visualization. -# -# Build the docker image: -# docker build -t mindboggle-base -f Dockerfile.mindboggle.base . -# -# Push to Docker hub: -# (https://docs.docker.com/docker-cloud/builds/push-images/) -# export DOCKER_ID_USER="nipy" -# docker login -# docker tag mindboggle-base nipy/mindboggle-base -# docker push nipy/mindboggle-base -# -# Pull from Docker hub: -# docker pull nipy/mindboggle-base -# -# In the following, the Docker container can be the original (mindboggle-base) -# or the pulled version (nipy/mindboggle-base), and is given access to -# /Users/arno on the host machine. 
-# -# Enter the bash shell of the Docker container: -# docker run --rm -ti -v /Users/arno:/home/jovyan/work nipy/mindboggle-base bash -# -# (c) 2017 by Arno Klein (CC-BY license) -#----------------------------------------------------------------------------- - -FROM jupyter/base-notebook -MAINTAINER Arno Klein - -#----------------------------------------------------------------------------- -# 1. Update OS dependencies -# (graphviz conda version doesn't work): -#----------------------------------------------------------------------------- -USER root -RUN apt-get update -qq && \ - apt-get install -yq --no-install-recommends tcsh \ - bc \ - bzip2 \ - ca-certificates \ - curl \ - tree \ - unzip \ - wget \ - zip \ - vim \ - emacs-nox \ - xvfb \ - graphviz \ - less \ - git && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - mkdir /opt/data && \ - chown $NB_USER /opt && \ - chown $NB_USER /opt/data - -#----------------------------------------------------------------------------- -# 2. Install nibabel and conda packages for mindboggle -# (nipype, pandas, matplotlib, scipy, cmake, and vtk), -# and for mindboggle visualization in a jupyter notebook (pythreejs): -#----------------------------------------------------------------------------- -USER $NB_USER -RUN conda install --quiet --yes nomkl \ - pip \ - reprozip \ - reprounzip \ - nipype \ - pandas \ - matplotlib \ - scipy \ - cmake \ - pythreejs \ - xvfbwrapper && \ - conda install --quiet --yes -c clinicalgraphics vtk=7.1.0 && \ - conda clean -tipsy && \ - jupyter nbextension enable --py --sys-prefix widgetsnbextension && \ - jupyter nbextension enable --py --sys-prefix pythreejs - -RUN pip install --quiet https://github.com/nipy/nibabel/archive/ca977abeb77f95ed3a40b7b89c310b286b9885b7.zip && \ - rm -rf ~/.cache/pip - -#----------------------------------------------------------------------------- -# 3. Install FreeSurfer v6.0, minimized with reprozip -# (https://github.com/freesurfer/freesurfer/issues/70): -#----------------------------------------------------------------------------- -USER $NB_USER -WORKDIR /opt -RUN curl -sSL https://dl.dropbox.com/sh/mvgpn6cml04me6u/AADWHds-ZeRqBvmuNX7_RoUla/recon-all-freesurfer6%2BMCR.min.tgz?dl=0 | tar zx -C /opt && \ - ( echo "cHJpbnRmICJrcnp5c3p0b2YuZ29yZ29sZXdza2lAZ21haWwuY29tXG41MTcyXG4gKkN2dW12RVYzelRmZ1xuRlM1Si8yYzFhZ2c0RVxuIiA+IC9vcHQvZnJlZXN1cmZlci9saWNlbnNlLnR4dAo=" | base64 -d | sh ) -ENV FS_OVERRIDE=0 \ - OS=Linux \ - FSF_OUTPUT_FORMAT=nii.gz \ - FIX_VERTEX_AREA=\ - FREESURFER_HOME=/opt/freesurfer -ENV MNI_DIR=$FREESURFER_HOME/mni \ - SUBJECTS_DIR=/subjects -ENV PERL5LIB=$MNI_DIR/share/perl5 \ - MNI_PERL5LIB=$MNI_DIR/share/perl5 \ - MINC_BIN_DIR=$MNI_DIR/bin \ - MINC_LIB_DIR=$MNI_DIR/lib \ - MNI_DATAPATH=$MNI_DIR/data -ENV PATH=$FREESURFER_HOME/bin:$FREESURFER_HOME/tktools:$MINC_BIN_DIR:$PATH - -#----------------------------------------------------------------------------- -# 4. Install ANTS v2.2.0 -# (git checkout 0740f9111e5a9cd4768323dc5dfaa7c29481f9ef): -#----------------------------------------------------------------------------- -USER root -RUN apt-get update -qq && apt-get install -yq --no-install-recommends \ - build-essential && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -USER $NB_USER -WORKDIR /opt -RUN git clone https://github.com/stnava/ANTs.git && \ - cd ANTs && \ - git checkout 0740f9111e5a9cd4768323dc5dfaa7c29481f9ef && \ - mkdir build && cd build && cmake .. 
&& make && \ - mkdir -p /opt/ants && \ - cp bin/* /opt/ants && cp ../Scripts/* /opt/ants && \ - cd /opt && rm -rf ANTs -ENV ANTSPATH=/opt/ants/ \ - PATH=/opt/ants:$PATH - -#----------------------------------------------------------------------------- -# 5. Install dependencies for mindboggle's C++ library: -#----------------------------------------------------------------------------- -USER root -RUN apt-get update -qq && \ - apt-get install -yq --no-install-recommends \ - build-essential libsm-dev libx11-dev libxt-dev libxext-dev && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -#----------------------------------------------------------------------------- -# 6. Install mindboggle's OASIS-30_Atropos_template data for use by ANTs: -#----------------------------------------------------------------------------- -USER $NB_USER -WORKDIR /opt/data -RUN curl -sSL https://osf.io/rh9km/?action=download\&version=2 -o templates.zip && \ - unzip templates.zip && \ - rm -rf /opt/data/templates.zip - -#----------------------------------------------------------------------------- -# Clear apt cache and other empty folders: -#----------------------------------------------------------------------------- -USER root -RUN apt-get clean && apt-get autoremove -y && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /boot /media /mnt /srv && \ - chmod a+w /tmp - -WORKDIR /home/$NB_USER/work -USER $NB_USER diff --git a/install/Dockerfile.mindboggle.complete b/install/Dockerfile.mindboggle.complete deleted file mode 100644 index 596166935..000000000 --- a/install/Dockerfile.mindboggle.complete +++ /dev/null @@ -1,99 +0,0 @@ -# Dockerfile.mindboggle.complete -#----------------------------------------------------------------------------- -# This Dockerfile installs mindboggle (http://mindboggle.info) -# and visualization software roygbiv. This builds on the docker image -# nipy/mindboggle-base, which contains all of Mindboggle's dependencies -# and preprocessing software packages, including FreeSurfer and ANTs. -# -# Steps: -# 1. Install mindboggle. -# 2. Install roygbiv for mindboggle output visualization. -# -# Build the docker image: -# docker build -t mindboggle -f Dockerfile.mindboggle.complete . -# -# Push to Docker hub: -# (https://docs.docker.com/docker-cloud/builds/push-images/) -# export DOCKER_ID_USER="nipy" -# docker login -# docker tag mindboggle nipy/mindboggle -# docker push nipy/mindboggle -# -# Pull from Docker hub: -# docker pull nipy/mindboggle -# -# In the following, the Docker container can be the original (mindboggle) -# or the pulled version (nipy/mindboggle), and is given access to /Users/arno -# on the host machine. 
-# -# Enter the bash shell of the Docker container, and add port mappings: -# docker run --rm -ti -v /Users/arno:/home/jovyan/work -p 8888:8888 -p 5000:5000 nipy/mindboggle bash -# -# Run the Docker container as an executable (variables set for clarity): -# HOST=/Users/binarybottle # path on host to access input and output -# DOCK=/home/jovyan/work # path to HOST from Docker container -# IMAGE=$DOCK/example_mri_data/T1.nii.gz # input image (from container) -# ID=arno # ID for brain image -# OUT=$DOCK/mindboggle123_output # '--output $OUT' is OPTIONAL -# docker run --rm -ti -v $HOST:/home/jovyan/work nipy/mindboggle $IMAGE --id $ID --out $OUT -# -# (c) 2017 by Arno Klein (CC-BY license) -#----------------------------------------------------------------------------- - -FROM nipy/mindboggle-base -MAINTAINER Arno Klein -ENTRYPOINT ["mindboggle123"] -CMD ["IMAGE", "--id", "--out"] -#----------------------------------------------------------------------------- -# Original command: -# IMAGE=/home/jovyan/work/example_mri_data/T1.nii.gz -# ID=arno -# OUT=/home/jovyan/work/mindboggle123_output -# mindboggle123 $IMAGE --id $ID --out $OUT -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# 1. Install mindboggle (git tag v1.2.0_mindboggle123): -#----------------------------------------------------------------------------- -USER $NB_USER -WORKDIR /opt -RUN git clone https://github.com/nipy/mindboggle && \ - cd /opt/mindboggle && \ -# git checkout tags/v1.2.0_mindboggle123 && \ - python setup.py install && \ - mkdir /opt/vtk_cpp_tools && \ - cd /opt/vtk_cpp_tools && \ - cmake /opt/mindboggle/vtk_cpp_tools -DCMAKE_EXE_LINKER_FLAGS="-L /opt/conda/lib" && \ - make - #rm -rf /opt/mindboggle -ENV vtk_cpp_tools=/opt/vtk_cpp_tools - -#----------------------------------------------------------------------------- -# 2. Install roygbiv for mindboggle output visualization -# (git checkout 368e1844b164b599496db817f095c53c72332b9f; -# nbpapaya 60119b6e1de651f250af26a3541d9cb18e971526): -#----------------------------------------------------------------------------- -USER $NB_USER -WORKDIR /opt -RUN conda install -y flask && \ - git clone https://github.com/akeshavan/roygbiv && \ - cd /opt/roygbiv && \ - git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \ - python setup.py install && \ - cd /opt && \ - git clone https://github.com/akeshavan/nbpapaya && \ - cd /opt/nbpapaya && \ - git checkout 60119b6e1de651f250af26a3541d9cb18e971526 && \ - python setup.py install && \ - rm -rf /opt/roygbiv /opt/nbpapaya - -#----------------------------------------------------------------------------- -# Clear apt cache and other empty folders: -#----------------------------------------------------------------------------- -USER root -RUN apt-get clean && apt-get autoremove -y && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /boot /media /mnt /srv && \ - chmod a+w /tmp - -WORKDIR /home/$NB_USER/work -USER $NB_USER diff --git a/install/Singularity b/install/Singularity new file mode 100644 index 000000000..7a40da18c --- /dev/null +++ b/install/Singularity @@ -0,0 +1,309 @@ +# Generated by Neurodocker version 0.4.2-3-gf7055a1 +# Timestamp: 2019-11-05 01:29:29 UTC +# +# Thank you for using Neurodocker. 
If you discover any issues +# or ways to improve this software, please submit an issue or +# pull request on our GitHub repository: +# +# https://github.com/kaczmarj/neurodocker + +Bootstrap: docker +From: neurodebian:stretch + +%post +export ND_ENTRYPOINT="/neurodocker/startup.sh" +apt-get update -qq +apt-get install -y -q --no-install-recommends \ + apt-utils \ + bzip2 \ + ca-certificates \ + curl \ + locales \ + unzip +apt-get clean +rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen +dpkg-reconfigure --frontend=noninteractive locales +update-locale LANG="en_US.UTF-8" +chmod 777 /opt && chmod a+s /opt +mkdir -p /neurodocker +if [ ! -f "$ND_ENTRYPOINT" ]; then + echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" + echo 'set -e' >> "$ND_ENTRYPOINT" + echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; +fi +chmod -R 777 /neurodocker && chmod a+s /neurodocker + +apt-get update -qq +apt-get install -y -q --no-install-recommends \ + graphviz \ + tree \ + git-annex-standalone \ + vim \ + emacs-nox \ + nano \ + less \ + ncdu \ + tig \ + sed \ + build-essential \ + libsm-dev \ + libx11-dev \ + libxt-dev \ + libxext-dev \ + libglu1-mesa +apt-get clean +rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +ln -s /usr/lib/x86_64-linux-gnu /usr/lib64 + +export PATH="/opt/miniconda-latest/bin:$PATH" +echo "Downloading Miniconda installer ..." +conda_installer="/tmp/miniconda.sh" +curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh +bash "$conda_installer" -b -p /opt/miniconda-latest +rm -f "$conda_installer" +conda update -yq -nbase conda +conda config --system --prepend channels conda-forge +conda config --system --set auto_update_conda false +conda config --system --set show_channel_urls true +sync && conda clean -tipsy && sync +conda create -y -q --name mb +conda install -y -q --name mb \ + 'python=3.6' \ + 'pip' \ + 'jupyter' \ + 'cmake' \ + 'mesalib' \ + 'vtk=8.2.0=py36ha8e561a_201' \ + 'pandas' \ + 'matplotlib' \ + 'colormath' \ + 'nipype>=1.1.4' \ + 'nilearn' \ + 'tbb-devel' \ + 'nose' \ + 'etelemetry' +sync && conda clean -tipsy && sync +bash -c "source activate mb + pip install --no-cache-dir \ + 'datalad[full]' \ + 'duecredit'" +rm -rf ~/.cache/pip/* +sync +sed -i '$isource activate mb' $ND_ENTRYPOINT + + +cd /opt + +mkdir -p /opt/data && cd /opt/data && \ + curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \ + unzip templates.zip && \ + rm -f /opt/data/templates.zip && \ + curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \ + mkdir OASIS-TRT-20_brains && \ + cd OASIS-TRT-20_brains && \ + unzip ../OASIS-TRT-20_brains.zip && \ + cd .. && \ + rm OASIS-TRT-20_brains.zip && \ + curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \ + mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + unzip ../cmalabels.zip && \ + cd .. 
&& \ + rm cmalabels.zip && \ + curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \ + rm -rf __MACOSX + +bash -c 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + git clone https://github.com/nipy/mindboggle.git && \ + cd /opt/mindboggle && \ + python setup.py install && \ + mkdir /opt/vtk_cpp_tools && \ + cd /opt/vtk_cpp_tools && \ + cmake /opt/mindboggle/vtk_cpp_tools && \ + make' + +bash -c 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + conda install -y flask && \ + git clone https://github.com/akeshavan/roygbiv && \ + cd /opt/roygbiv && \ + git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \ + python setup.py install && \ + cd /opt && \ + rm -rf /opt/roygbiv' + +mkdir -p /.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > /.jupyter/jupyter_notebook_config.py + +apt-get update -qq +apt-get install -y -q --no-install-recommends \ + cmake \ + g++ \ + gcc \ + git \ + make \ + zlib1g-dev +apt-get clean +rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +mkdir -p /tmp/ants/build +git clone https://github.com/ANTsX/ANTs.git /tmp/ants/source +cd /tmp/ants/source +git fetch --tags +git checkout b43df4bfc8 +cd /tmp/ants/build +cmake -DBUILD_SHARED_LIBS=ON /tmp/ants/source +make -j 4 +mkdir -p /opt/ants-b43df4bfc8 +mv bin lib /opt/ants-b43df4bfc8/ +mv /tmp/ants/source/Scripts/* /opt/ants-b43df4bfc8/bin +rm -rf /tmp/ants + +apt-get update -qq +apt-get install -y -q --no-install-recommends \ + bc \ + libgomp1 \ + libxmu6 \ + libxt6 \ + perl \ + tcsh +apt-get clean +rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +echo "Downloading FreeSurfer ..." +mkdir -p /opt/freesurfer-6.0.0-min +curl -fsSL --retry 5 https://dl.dropbox.com/s/nnzcfttc41qvt31/recon-all-freesurfer6-3.min.tgz \ +| tar -xz -C /opt/freesurfer-6.0.0-min --strip-components 1 +sed -i '$isource "/opt/freesurfer-6.0.0-min/SetUpFreeSurfer.sh"' "$ND_ENTRYPOINT" + +curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt + +echo '{ +\n "pkg_manager": "apt", +\n "instructions": [ +\n [ +\n "base", +\n "neurodebian:stretch" +\n ], +\n [ +\n "_header", +\n { +\n "version": "generic", +\n "method": "custom" +\n } +\n ], +\n [ +\n "install", +\n [ +\n "graphviz", +\n "tree", +\n "git-annex-standalone", +\n "vim", +\n "emacs-nox", +\n "nano", +\n "less", +\n "ncdu", +\n "tig", +\n "sed", +\n "build-essential", +\n "libsm-dev", +\n "libx11-dev", +\n "libxt-dev", +\n "libxext-dev", +\n "libglu1-mesa" +\n ] +\n ], +\n [ +\n "run", +\n "ln -s /usr/lib/x86_64-linux-gnu /usr/lib64" +\n ], +\n [ +\n "miniconda", +\n { +\n "conda_install": [ +\n "python=3.6", +\n "pip", +\n "jupyter", +\n "cmake", +\n "mesalib", +\n "vtk=8.2.0=py36ha8e561a_201", +\n "pandas", +\n "matplotlib", +\n "colormath", +\n "nipype>=1.1.4", +\n "nilearn", +\n "tbb-devel", +\n "nose", +\n "etelemetry" +\n ], +\n "pip_install": [ +\n "datalad[full]", +\n "duecredit" +\n ], +\n "create_env": "mb", +\n "activate": true +\n } +\n ], +\n [ +\n "workdir", +\n "/opt" +\n ], +\n [ +\n "run", +\n "mkdir -p /opt/data && cd /opt/data && \\\\n curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \\\\n unzip templates.zip && \\\\n rm -f /opt/data/templates.zip && \\\\n curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \\\\n mkdir OASIS-TRT-20_brains && \\\\n cd OASIS-TRT-20_brains && \\\\n unzip ../OASIS-TRT-20_brains.zip && \\\\n 
cd .. && \\\\n rm OASIS-TRT-20_brains.zip && \\\\n curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \\\\n mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \\\\n cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \\\\n unzip ../cmalabels.zip && \\\\n cd .. && \\\\n rm cmalabels.zip && \\\\n curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \\\\n rm -rf __MACOSX" +\n ], +\n [ +\n "run_bash", +\n "source /opt/miniconda-latest/etc/profile.d/conda.sh && \\\\n conda activate mb && \\\\n git clone https://github.com/nipy/mindboggle.git && \\\\n cd /opt/mindboggle && \\\\n python setup.py install && \\\\n mkdir /opt/vtk_cpp_tools && \\\\n cd /opt/vtk_cpp_tools && \\\\n cmake /opt/mindboggle/vtk_cpp_tools && \\\\n make" +\n ], +\n [ +\n "env", +\n { +\n "vtk_cpp_tools": "/opt/vtk_cpp_tools" +\n } +\n ], +\n [ +\n "run_bash", +\n "source /opt/miniconda-latest/etc/profile.d/conda.sh && \\\\n conda activate mb && \\\\n conda install -y flask && \\\\n git clone https://github.com/akeshavan/roygbiv && \\\\n cd /opt/roygbiv && \\\\n git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \\\\n python setup.py install && \\\\n cd /opt && \\\\n rm -rf /opt/roygbiv" +\n ], +\n [ +\n "run", +\n "mkdir -p /.jupyter && echo c.NotebookApp.ip = \\\"0.0.0.0\\\" > /.jupyter/jupyter_notebook_config.py" +\n ], +\n [ +\n "ants", +\n { +\n "version": "b43df4bfc8", +\n "method": "source", +\n "cmake_opts": "-DBUILD_SHARED_LIBS=ON", +\n "make_opts": "-j 4" +\n } +\n ], +\n [ +\n "freesurfer", +\n { +\n "version": "6.0.0-min" +\n } +\n ], +\n [ +\n "run", +\n "curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt" +\n ] +\n ] +\n}' > /neurodocker/neurodocker_specs.json + +%environment +export LANG="en_US.UTF-8" +export LC_ALL="en_US.UTF-8" +export ND_ENTRYPOINT="/neurodocker/startup.sh" +export CONDA_DIR="/opt/miniconda-latest" +export PATH="/opt/miniconda-latest/bin:$PATH" +export vtk_cpp_tools="/opt/vtk_cpp_tools" +export ANTSPATH="/opt/ants-b43df4bfc8/bin" +export PATH="/opt/ants-b43df4bfc8/bin:$PATH" +export LD_LIBRARY_PATH="/opt/ants-b43df4bfc8/lib:$LD_LIBRARY_PATH" +export FREESURFER_HOME="/opt/freesurfer-6.0.0-min" +export PATH="/opt/freesurfer-6.0.0-min/bin:$PATH" + +%runscript +/neurodocker/startup.sh "$@" diff --git a/install/install_mindboggle.sh b/install/install_mindboggle.sh deleted file mode 100644 index c889ca999..000000000 --- a/install/install_mindboggle.sh +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -#============================================================================= -# This script provides directions for installing Mindboggle and dependencies -# (http://mindboggle.info) on a Linux machine (tested on Ubuntu 14.04) -# with Python 3. Running it requires a good Internet connection. -# -# This script assumes that if it isn't installing ANTs, the global environment -# file .bash_profile already includes the path to the ANTs bin directory. -# ANTs is used to perform affine registration to standard (MNI152) space, -# refine gray/white matter segmentation, and perform nonlinear volume -# registration for whole-brain labeling. Installing and running ANTs -# requires considerable (> 1 GB) RAM. 
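-#
-# For example (an illustrative sketch, not from the original script): when ANTs
-# is not installed by this script, the environment file is expected to already
-# contain lines like the two this script otherwise appends itself:
-#
-#     export ANTSPATH=$HOME/install/ants/bin
-#     export PATH=$ANTSPATH:$PATH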
-# -# Usage: -# source ./install_mindboggle.sh -# -# Or with arguments: -# source ./install_mindboggle.sh \ -# <path to download directory> \ -# <path to install directory> \ -# <path to environment file> \ -# <install ANTs? "yes" or "no"> -# -# Example: -# source ./install_mindboggle.sh /home/arno/downloads \ -# /home/arno/install /home/arno/.bash_profile yes -# -# Authors: -# - Daniel Clark, 2014 -# - Arno Klein, 2014-2016 (arno@mindboggle.info) http://binarybottle.com -# -# Copyright 2016, Mindboggle team, Apache v2.0 License -#============================================================================= - -#----------------------------------------------------------------------------- -# Path arguments: -#----------------------------------------------------------------------------- -DOWNLOAD=$1 -INSTALL=$2 -ENV=$3 -ANTS=$4 - -#----------------------------------------------------------------------------- -# OS and sudo: -#----------------------------------------------------------------------------- -OS=Linux -SUDO=1 - -#----------------------------------------------------------------------------- -# Create folders and file if they don't exist: -#----------------------------------------------------------------------------- -if [ -z "$DOWNLOAD" ]; then - DOWNLOAD="$HOME/downloads" -fi -if [ ! -d $DOWNLOAD ]; then - mkdir -p $DOWNLOAD; -fi - -if [ -z "$INSTALL" ]; then - INSTALL="$HOME/install" -fi -if [ ! -d $INSTALL ]; then - mkdir -p $INSTALL; -fi - -if [ -z "$ENV" ]; then - ENV="$HOME/.bash_profile" -fi -if [ ! -e "$ENV" ] ; then - touch "$ENV" -fi -if [ ! -w "$ENV" ] ; then - echo cannot write to $ENV - exit 1 -fi -if [ -z "$ANTS" ]; then - ANTS="yes" -fi -if [ -z "$OS" ]; then - OS="Linux" -fi -if [ -z "$SUDO" ]; then - SUDO=1 -fi - -#----------------------------------------------------------------------------- -# Install system-wide dependencies in linux: -#----------------------------------------------------------------------------- -if [ $OS = "Linux" ]; then - if [ $SUDO -eq 1 ]; then - sudo apt-get update - sudo apt-get install -y g++ git make xorg - else - apt-get update - apt-get install -y g++ git make xorg - fi -fi - -#----------------------------------------------------------------------------- -# Fix paths to Linux libraries using symbolic links: -#----------------------------------------------------------------------------- -# To avoid the following errors: -# "No rule to make target `/usr/lib/x86_64-linux-gnu/libGLU.so'" -# ... 
-# http://techtidings.blogspot.com/2012/01/problem-with-libglso-on-64-bit-ubuntu.html -if [ $OS = "Linux" ]; then - if [ $SUDO -eq 1 ]; then - sudo mkdir /usr/lib64 - sudo ln -s /usr/lib/x86_64-linux-gnu/libGLU.so.1 /usr/lib64/libGLU.so - sudo ln -s /usr/lib/x86_64-linux-gnu/libSM.so.6 /usr/lib64/libSM.so - sudo ln -s /usr/lib/x86_64-linux-gnu/libICE.so.6 /usr/lib64/libICE.so - sudo ln -s /usr/lib/x86_64-linux-gnu/libX11.so.6 /usr/lib64/libX11.so - sudo ln -s /usr/lib/x86_64-linux-gnu/libXext.so.6 /usr/lib64/libXext.so - sudo ln -s /usr/lib/x86_64-linux-gnu/libXt.so.6 /usr/lib64/libXt.so - sudo ln -s /usr/lib/x86_64-linux-gnu/mesa/libGL.so.1 /usr/lib64/libGL.so - else - mkdir /usr/lib64 - ln -s /usr/lib/x86_64-linux-gnu/libGLU.so.1 /usr/lib64/libGLU.so - ln -s /usr/lib/x86_64-linux-gnu/libSM.so.6 /usr/lib64/libSM.so - ln -s /usr/lib/x86_64-linux-gnu/libICE.so.6 /usr/lib64/libICE.so - ln -s /usr/lib/x86_64-linux-gnu/libX11.so.6 /usr/lib64/libX11.so - ln -s /usr/lib/x86_64-linux-gnu/libXext.so.6 /usr/lib64/libXext.so - ln -s /usr/lib/x86_64-linux-gnu/libXt.so.6 /usr/lib64/libXt.so - ln -s /usr/lib/x86_64-linux-gnu/mesa/libGL.so.1 /usr/lib64/libGL.so - fi -fi - -#----------------------------------------------------------------------------- -# Install Anaconda's latest miniconda Python 3 distribution: -#----------------------------------------------------------------------------- -CONDA_URL="https://repo.continuum.io/miniconda" -CONDA_FILE="Miniconda3-latest-$OS-x86_64.sh" -CONDA_DL="$DOWNLOAD/$CONDA_FILE" -CONDA_PATH="$INSTALL/miniconda3" -if [ $OS = "Linux" ]; then - wget -O $CONDA_DL $CONDA_URL/$CONDA_FILE -else - curl -o $CONDA_DL $CONDA_URL/$CONDA_FILE -fi -bash $CONDA_DL -b -p $CONDA_PATH - -# Set environment variables: -echo "# Conda" >> $ENV -echo "export PATH=$CONDA_PATH/bin:\$PATH" >> $ENV -source $ENV - -conda config --set always_yes yes - -#------------------------------------------------------------------------- -# Install VTK 7.0: -#------------------------------------------------------------------------- -conda install -c https://conda.anaconda.org/clinicalgraphics vtk -VTK_DIR="$CONDA_PATH/lib/cmake/vtk-7.0" - -#------------------------------------------------------------------------- -# Install nipype: -#------------------------------------------------------------------------- -conda install pip scipy nose networkx lxml future simplejson -pip install nibabel prov xvfbwrapper traits -pip install https://github.com/nipy/nipype/archive/master.zip - -#------------------------------------------------------------------------- -# Install optional graphviz and pygraphviz for generating nipype graphs: -#------------------------------------------------------------------------- -if [ $OS = "Linux" ]; then - if [ $SUDO -eq 1 ]; then - sudo apt-get install graphviz libgraphviz-dev - else - apt-get install graphviz libgraphviz-dev - fi - pip install --upgrade pygraphviz graphviz -fi - -#------------------------------------------------------------------------- -# Install additional testing tools: -#------------------------------------------------------------------------- -conda install ipython pytest coverage # nose - -#------------------------------------------------------------------------- -# Install Mindboggle's remaining dependencies and C++ code -#------------------------------------------------------------------------- -conda install cmake matplotlib numpy pandas - -vtk_cpp_tools=$INSTALL/mindboggle/vtk_cpp_tools/bin -git clone https://github.com/nipy/mindboggle.git 
$INSTALL/mindboggle -cd $INSTALL/mindboggle -python setup.py install -mkdir $vtk_cpp_tools -cd $vtk_cpp_tools -cmake ../ -DVTK_DIR:STRING=$VTK_DIR -make - -# Set environment variables: -echo "# Mindboggle" >> $ENV -echo "export vtk_cpp_tools=$vtk_cpp_tools" >> $ENV -echo "export PATH=$vtk_cpp_tools:\$PATH" >> $ENV -source $ENV - -#----------------------------------------------------------------------------- -# Install ANTs v2.1.0rc3 -# The antsCorticalThickness.h pipeline optionally provides gray/white matter -# segmentation, affine registration to standard space, and nonlinear volume -# registration for whole-brain labeling, to improve Mindboggle results. -#----------------------------------------------------------------------------- -if [ $ANTS = "yes" ]; then - ANTS_DL=$DOWNLOAD/ants - ANTSPATH=$INSTALL/ants/bin - git clone https://github.com/stnava/ANTs.git $ANTS_DL - cd $ANTS_DL - git checkout tags/v2.1.0rc3 - mkdir $INSTALL/ants - cd $INSTALL/ants - cmake $ANTS_DL # -DVTK_DIR:STRING=$VTK_DIR - make - cp -r $ANTS_DL/Scripts/* $ANTSPATH - - # Set environment variables: - echo "# ANTs" >> $ENV - echo "export ANTSPATH=$ANTSPATH" >> $ENV - echo "export PATH=$ANTSPATH:\$PATH" >> $ENV - source $ENV -fi - -#----------------------------------------------------------------------------- -# Remove non-essential directories -# (set to 0 to keep a complete box for easy git updates of ANTs): -#----------------------------------------------------------------------------- -rm_extras=1 -if [ $rm_extras -eq 1 ]; then - if [ $ANTS = "yes" ]; then - if [ $SUDO -eq 1 ]; then - sudo mv $ANTSPATH $INSTALL/ants_bin - sudo rm -rf $INSTALL/ants/* - sudo mv $INSTALL/ants_bin $ANTSPATH - else - mv $ANTSPATH $INSTALL/ants_bin - rm -rf $INSTALL/ants/* - mv $INSTALL/ants_bin $ANTSPATH - fi - fi - #rm -r $DOWNLOAD/* -fi - diff --git a/install/neurodocker.sh b/install/neurodocker.sh new file mode 100644 index 000000000..e08e4e942 --- /dev/null +++ b/install/neurodocker.sh @@ -0,0 +1,170 @@ +#!/bin/bash + +############################################################################### +# Generate a Dockerfile and Singularity recipe for building a Mindboggle container +# (https://mindboggle.info). +# The Dockerfile and/or Singularity recipe installs most of Mindboggle's dependencies, +# including preprocessing software packages FreeSurfer and ANTs, +# and visualization software roygbiv: +# - Set up neurodebian and miniconda and install conda packages +# - Install FreeSurfer and ANTs. +# - Install Mindboggle templates from the Open Science Framework +# - Install Mindboggle +# - Install roygbiv for output visualization +# +# Steps to build, upload, and deploy the Mindboggle docker and/or singularity image: +# +# 1. Create or update the Dockerfile and Singularity recipe: +# bash neurodocker.sh +# +# 2. Build the docker image: +# docker build -t mindboggle -f Dockerfile . +# +# and/or singularity image: +# singularity build mindboggle.simg Singularity +# +# 3. Push to Docker hub: +# (https://docs.docker.com/docker-cloud/builds/push-images/) +# export DOCKER_ID_USER="nipy" +# docker login +# docker tag mindboggle nipy/mindboggle # See: https://docs.docker.com/engine/reference/commandline/tag/ +# docker push nipy/mindboggle +# +# 4. Pull from Docker hub (or use the original): +# docker pull nipy/mindboggle +# +# In the following, the Docker container can be the original (mindboggle) +# or the pulled version (nipy/mindboggle), and is given access to /Users/arno +# on the host machine. +# +# 5. 
Enter the bash shell of the Docker container, and add port mappings: +# docker run --rm -ti -v /Users/arno:/home/jovyan/work -p 8888:8888 -p 5000:5000 nipy/mindboggle bash +# +# 6. Run the Docker container as an executable (variables set for clarity): +# HOST=/Users/binarybottle # path on host to access input and output +# DOCK=/home/jovyan/work # path to HOST from Docker container +# IMAGE=$DOCK/example_mri_data/T1.nii.gz # input image (from container) +# ID=arno # ID for brain image +# OUT=$DOCK/mindboggle123_output # '--out $OUT' is OPTIONAL +# docker run --rm -ti -v $HOST:/home/jovyan/work nipy/mindboggle $IMAGE --id $ID --out $OUT +# +############################################################################### + +image="kaczmarj/neurodocker:master@sha256:936401fe8f677e0d294f688f352cbb643c9693f8de371475de1d593650e42a66" + +# Generate a dockerfile for building a mindboggle container +docker run --rm ${image} generate docker \ + --base neurodebian:stretch \ + --pkg-manager apt \ + --install graphviz tree git-annex-standalone vim \ + emacs-nox nano less ncdu tig sed build-essential \ + libsm-dev libx11-dev libxt-dev libxext-dev libglu1-mesa \ + --run 'ln -s /usr/lib/x86_64-linux-gnu /usr/lib64' \ + --miniconda \ + conda_install="python=3.6 pip jupyter cmake nipype>=1.1.4 mesalib vtk=8.2.0=py36ha8e561a_201 pandas + matplotlib colormath nilearn tbb-devel nose etelemetry" \ + pip_install="datalad[full] duecredit" \ + create_env="mb" \ + activate=true \ + --workdir /opt \ + --run 'mkdir -p /opt/data && cd /opt/data && \ + curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \ + unzip templates.zip && \ + rm -f /opt/data/templates.zip && \ + curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \ + mkdir OASIS-TRT-20_brains && \ + cd OASIS-TRT-20_brains && \ + unzip ../OASIS-TRT-20_brains.zip && \ + cd .. && \ + rm OASIS-TRT-20_brains.zip && \ + curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \ + mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + unzip ../cmalabels.zip && \ + cd .. 
&& \ + rm cmalabels.zip && \ + curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \ + rm -rf __MACOSX' \ + --run-bash 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + git clone https://github.com/nipy/mindboggle.git && \ + cd /opt/mindboggle && \ + python setup.py install && \ + mkdir /opt/vtk_cpp_tools && \ + cd /opt/vtk_cpp_tools && \ + cmake /opt/mindboggle/vtk_cpp_tools && \ + make' \ + --env vtk_cpp_tools=/opt/vtk_cpp_tools \ + --run-bash 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + conda install -y flask && \ + git clone https://github.com/akeshavan/roygbiv && \ + cd /opt/roygbiv && \ + git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \ + python setup.py install && \ + cd /opt && \ + rm -rf /opt/roygbiv' \ + --run 'mkdir -p /.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > /.jupyter/jupyter_notebook_config.py' \ + --ants version=b43df4bfc8 method=source cmake_opts='-DBUILD_SHARED_LIBS=ON' make_opts='-j 4' \ + --freesurfer version=6.0.0-min \ + --run 'curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt' \ +> Dockerfile + + +# Generate a singularity recipe for building a mindboggle container +docker run --rm ${image} generate singularity \ + --base neurodebian:stretch \ + --pkg-manager apt \ + --install graphviz tree git-annex-standalone vim \ + emacs-nox nano less ncdu tig sed build-essential \ + libsm-dev libx11-dev libxt-dev libxext-dev libglu1-mesa \ + --run 'ln -s /usr/lib/x86_64-linux-gnu /usr/lib64' \ + --miniconda \ + conda_install="python=3.6 pip jupyter cmake mesalib vtk=8.2.0=py36ha8e561a_201 pandas + matplotlib colormath nipype>=1.1.4 nilearn tbb-devel nose etelemetry" \ + pip_install="datalad[full] duecredit" \ + create_env="mb" \ + activate=true \ + --workdir /opt \ + --run 'mkdir -p /opt/data && cd /opt/data && \ + curl -sSL https://osf.io/download/rh9km/?revision=2 -o templates.zip && \ + unzip templates.zip && \ + rm -f /opt/data/templates.zip && \ + curl -sSL https://files.osf.io/v1/resources/hvc52/providers/osfstorage/57c1a8f06c613b01f98d68a9/?zip= -o OASIS-TRT-20_brains.zip && \ + mkdir OASIS-TRT-20_brains && \ + cd OASIS-TRT-20_brains && \ + unzip ../OASIS-TRT-20_brains.zip && \ + cd .. && \ + rm OASIS-TRT-20_brains.zip && \ + curl -sSL https://files.osf.io/v1/resources/zevma/providers/osfstorage/5783dfcab83f6901f963735c/?zip= -o cmalabels.zip && \ + mkdir OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + cd OASIS-TRT-20_DKT31_CMA_labels_v2 && \ + unzip ../cmalabels.zip && \ + cd .. 
&& \ + rm cmalabels.zip && \ + curl -sSL https://osf.io/download/d2cmy/ -o OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz && \ + rm -rf __MACOSX' \ + --run-bash 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + git clone https://github.com/nipy/mindboggle.git && \ + cd /opt/mindboggle && \ + python setup.py install && \ + mkdir /opt/vtk_cpp_tools && \ + cd /opt/vtk_cpp_tools && \ + cmake /opt/mindboggle/vtk_cpp_tools && \ + make' \ + --env vtk_cpp_tools=/opt/vtk_cpp_tools \ + --run-bash 'source /opt/miniconda-latest/etc/profile.d/conda.sh && \ + conda activate mb && \ + conda install -y flask && \ + git clone https://github.com/akeshavan/roygbiv && \ + cd /opt/roygbiv && \ + git checkout fbbf31c29952d0ea22ed05d98e0a5a7e7d0827f9 && \ + python setup.py install && \ + cd /opt && \ + rm -rf /opt/roygbiv' \ + --run 'mkdir -p /.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > /.jupyter/jupyter_notebook_config.py' \ + --ants version=b43df4bfc8 method=source cmake_opts='-DBUILD_SHARED_LIBS=ON' make_opts='-j 4' \ + --freesurfer version=6.0.0-min \ + --run 'curl -sSL https://osf.io/download/n3ud2/?revision=1 -o /opt/freesurfer-6.0.0-min/license.txt' \ +> Singularity diff --git a/mindboggle/__init__.py b/mindboggle/__init__.py index b15ff6692..a6f8674c9 100644 --- a/mindboggle/__init__.py +++ b/mindboggle/__init__.py @@ -1,7 +1,7 @@ import os #from .info import (LONG_DESCRIPTION as __doc__, -# __version__) +# __version__) #__doc__ += """ #""" @@ -17,3 +17,17 @@ #from . import blah as blah # object imports #from .blah import blah, blah + +INIT_MSG = "Running {packname} version {version} (latest: {latest})".format +latest = {"version": 'Unknown'} +try: + from .version import __version__ + import etelemetry + latest = etelemetry.get_project("nipy/mindboggle") +except Exception as e: + print("Could not check for version updates: ", e) +finally: + print(INIT_MSG(packname='mindboggle', + version=__version__, + latest=latest["version"])) + diff --git a/mindboggle/evaluate/evaluate_features.py b/mindboggle/evaluate/evaluate_features.py index 15305dd74..6ea39ec74 100644 --- a/mindboggle/evaluate/evaluate_features.py +++ b/mindboggle/evaluate/evaluate_features.py @@ -554,7 +554,7 @@ def evaluate_deep_features(features_file, labels_file, sulci_file='', hemi='', print('Maximum distance = ' + str(data.max().max())) #maxd = data.max().max() data_summary = data.describe() - data_summary.to_csv(summary_file) + data_summary.to_csv(summary_file, encoding='utf-8') # Set up the data for plotting. We will need to have values for every # pair of subject/label names. Map the value to a color. 
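A note on the encoding='utf-8' argument threaded through the to_csv calls in
this changeset: without an explicit encoding, Python 3 writes files in the
locale's preferred encoding, which is plain ASCII inside containers running
with LANG=C, so any non-ASCII character in a column or label name (such as
the curly apostrophes replaced in docstrings elsewhere in this patch) raises
UnicodeEncodeError. A minimal sketch of the failure mode, separate from the
diff itself; the column name and filename below are illustrative:

import pandas as pd

# A column header containing a non-ASCII character (a curly apostrophe):
df = pd.DataFrame({"fold\u2019s depth": [1.0, 2.0]})

# With the default encoding=None, pandas opens the file with the locale's
# preferred encoding; under LANG=C that is ASCII, and writing this header
# raises UnicodeEncodeError. Forcing utf-8 writes the same bytes everywhere.
df.to_csv("depths.csv", index=False, encoding="utf-8")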
diff --git a/mindboggle/evaluate/evaluate_labels.py b/mindboggle/evaluate/evaluate_labels.py index a12eacdba..3d65bf8f3 100644 --- a/mindboggle/evaluate/evaluate_labels.py +++ b/mindboggle/evaluate/evaluate_labels.py @@ -371,8 +371,8 @@ def evaluate_surface_overlaps_cpp(command, labels_file1, labels_file2, df_jaccards = pd.DataFrame(jaccards, index=subjects, columns=labels) df_dices = pd.DataFrame(dices, index=subjects, columns=labels) - df_jaccards.to_csv(jaccard_file) - df_dices.to_csv(dice_file) + df_jaccards.to_csv(jaccard_file, encoding='utf-8') + df_dices.to_csv(dice_file, encoding='utf-8') # -------------------------------------------------------------------- # Plot heatmap for labels X subjects array: diff --git a/mindboggle/evaluate/evaluate_shapes.py b/mindboggle/evaluate/evaluate_shapes.py index 83e189d1b..e52ef90c9 100644 --- a/mindboggle/evaluate/evaluate_shapes.py +++ b/mindboggle/evaluate/evaluate_shapes.py @@ -104,25 +104,25 @@ def compare_surface_shape_measures_by_vertex(): index=label_names_bilateral, columns=[x for x in range(101)]) data.to_csv('mean_and_FS_curvature_distance_correlation_' - 'per_left_label_vertices_Mindboggle101.csv') + 'per_left_label_vertices_Mindboggle101.csv', encoding='utf-8') data = pd.DataFrame(dcors[:,:,1].transpose(), index=label_names_bilateral, columns=[x for x in range(101)]) data.to_csv('mean_and_FS_curvature_distance_correlation_' - 'per_right_label_vertices_Mindboggle101.csv') + 'per_right_label_vertices_Mindboggle101.csv', encoding='utf-8') data = pd.DataFrame(dcors[:,:,2].transpose(), index=label_names_bilateral, columns=[x for x in range(101)]) data.to_csv('geodesic_and_travel_depth_distance_correlation_' - 'per_left_label_vertices_Mindboggle101.csv') + 'per_left_label_vertices_Mindboggle101.csv', encoding='utf-8') data = pd.DataFrame(dcors[:,:,3].transpose(), index=label_names_bilateral, columns=[x for x in range(101)]) data.to_csv('geodesic_and_travel_depth_distance_correlation_' - 'per_right_label_vertices_Mindboggle101.csv') + 'per_right_label_vertices_Mindboggle101.csv', encoding='utf-8') data = dcors.mean(axis=0) data = pd.DataFrame(data, index=label_names_bilateral, @@ -131,7 +131,7 @@ def compare_surface_shape_measures_by_vertex(): 'geodesic / travel depth distance correlation (left)', 'geodesic / travel depth distance correlation (right)']) data.to_csv('mean_and_FS_curvature_geodesic_and_travel_depth_distance_correlations_' - 'per_label_vertices_avg_over_Mindboggle101.csv') + 'per_label_vertices_avg_over_Mindboggle101.csv', encoding='utf-8') # ------------------------------------------------------------------------ @@ -193,7 +193,7 @@ def compare_thickness_measures(): data = pd.DataFrame(dcors, index=label_names, #index=columns1.columns) columns=['freesurfer / thickinthehead cortical thickness distance correlation']) data.to_csv('thickinthehead_FSthickness_distance_correlations_' - 'per_label_Mindboggle101.csv') + 'per_label_Mindboggle101.csv', encoding='utf-8') # ------------------------------------------------------------------------ @@ -366,20 +366,20 @@ def compare_shapes_between_scans(): # Save csv files: # ---------------------------------------------------------------- data = pd.DataFrame(subject_shapes, index=subjects, columns=labels) - data.to_csv(name + '_scans.csv') + data.to_csv(name + '_scans.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_scans_summary.csv') + data_summary.to_csv(name + '_scans_summary.csv', encoding='utf-8') data = pd.DataFrame(subject2_shapes, 
index=subjects, columns=labels) - data.to_csv(name + '_rescans.csv') + data.to_csv(name + '_rescans.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_rescans_summary.csv') + data_summary.to_csv(name + '_rescans_summary.csv', encoding='utf-8') subject_shape_diffs = subject2_shapes - subject_shapes data = pd.DataFrame(subject_shape_diffs, index=subjects, columns=labels) - data.to_csv(name + '_differences.csv') + data.to_csv(name + '_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_differences_summary.csv') + data_summary.to_csv(name + '_differences_summary.csv', encoding='utf-8') subject_shape_abs_diffs = np.abs(subject_shape_diffs) max_diffs = subject_shape_abs_diffs.max(axis=0) @@ -388,9 +388,9 @@ def compare_shapes_between_scans(): index=subjects, columns=labels) #iInf, jInf = np.where(data.values == np.inf) #data.iloc[iInf, jInf] = 'NaN' - data.to_csv(name + '_fractional_differences.csv') + data.to_csv(name + '_fractional_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_fractional_differences_summary.csv') + data_summary.to_csv(name + '_fractional_differences_summary.csv', encoding='utf-8') # max_array = np.zeros((nsubjects, len(labels), 2)) # max_array[:, :, 0] = subject_shapes @@ -416,9 +416,9 @@ def compare_shapes_between_scans(): print("") #iInf, jInf = np.where(data.values == np.inf) #data.iloc[iInf, jInf] = 'NaN' - data.to_csv(name + '_fractional_abs_differences.csv') + data.to_csv(name + '_fractional_abs_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_fractional_abs_differences_summary.csv') + data_summary.to_csv(name + '_fractional_abs_differences_summary.csv', encoding='utf-8') data_means[:, ititle] = data_summary.loc['mean'].values data_summaries[ititle, :] = data_summary.mean(axis=1) @@ -511,12 +511,12 @@ def compare_shapes_between_scans(): data_means_df = pd.DataFrame(data_means, index=label_names, columns=names) - data_means_df.to_csv('means_of_rescan_fractional_abs_shape_differences.csv') + data_means_df.to_csv('means_of_rescan_fractional_abs_shape_differences.csv', encoding='utf-8') data_summaries_df = pd.DataFrame(data_summaries, index=names, columns=data_summary.index) - data_summaries_df.to_csv('summary_of_rescan_fractional_abs_shape_differences.csv') + data_summaries_df.to_csv('summary_of_rescan_fractional_abs_shape_differences.csv', encoding='utf-8') # ------------------------------------------------------------------------ @@ -643,28 +643,28 @@ def compare_shapes_between_hemispheres(): # Save csv files: # ---------------------------------------------------------------- data = pd.DataFrame(subject_shapesL, index=subjects, columns=labels_left) - data.to_csv(name + '_left.csv') + data.to_csv(name + '_left.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_left_summary.csv') + data_summary.to_csv(name + '_left_summary.csv', encoding='utf-8') data = pd.DataFrame(subject_shapesR, index=subjects, columns=labels_right) - data.to_csv(name + '_right.csv') + data.to_csv(name + '_right.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_right_summary.csv') + data_summary.to_csv(name + '_right_summary.csv', encoding='utf-8') subject_shape_diffs = subject_shapesL - subject_shapesR data = pd.DataFrame(subject_shape_diffs, index=subjects, 
columns=label_names) - data.to_csv(name + '_differences.csv') + data.to_csv(name + '_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_differences_summary.csv') + data_summary.to_csv(name + '_differences_summary.csv', encoding='utf-8') subject_shape_abs_diffs = np.abs(subject_shape_diffs) subject_shape_frac_diffs = subject_shape_diffs / subject_shapesL data = pd.DataFrame(subject_shape_frac_diffs, index=subjects, columns=label_names) - data.to_csv(name + '_fractional_differences.csv') + data.to_csv(name + '_fractional_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_fractional_differences_summary.csv') + data_summary.to_csv(name + '_fractional_differences_summary.csv', encoding='utf-8') subject_shape_frac_abs_diffs = np.abs(subject_shape_abs_diffs / subject_shapesL) data = pd.DataFrame(subject_shape_frac_abs_diffs, @@ -676,9 +676,9 @@ def compare_shapes_between_hemispheres(): print("Fractional absolute differences above " "0.5: {0}; 0.25: {1}; 0.1: {2}".format(n50, n25, n10)) print("") - data.to_csv(name + '_fractional_abs_differences.csv') + data.to_csv(name + '_fractional_abs_differences.csv', encoding='utf-8') data_summary = data.describe(include='all') - data_summary.to_csv(name + '_fractional_abs_differences_summary.csv') + data_summary.to_csv(name + '_fractional_abs_differences_summary.csv', encoding='utf-8') data_means[:, ititle] = data_summary.loc['mean'].values data_summaries[ititle, :] = data_summary.mean(axis=1) @@ -761,12 +761,12 @@ def compare_shapes_between_hemispheres(): data_means_df = pd.DataFrame(data_means, index=label_names, columns=names) - data_means_df.to_csv('means_of_interhemispheric_fractional_abs_shape_differences.csv') + data_means_df.to_csv('means_of_interhemispheric_fractional_abs_shape_differences.csv', encoding='utf-8') data_summaries_df = pd.DataFrame(data_summaries, index=names, columns=data_summary.index) - data_summaries_df.to_csv('summary_of_interhemispheric_fractional_abs_shape_differences.csv') + data_summaries_df.to_csv('summary_of_interhemispheric_fractional_abs_shape_differences.csv', encoding='utf-8') # def compare_shapes_between_hemispheres(): @@ -890,5 +890,5 @@ def compare_shapes_between_hemispheres(): # 'freesurfer curvature', 'travel depth', # 'geodesic depth']) # data.to_csv('distance_correlations_for_shapes_between_hemispheres_' - # 'per_label_Mindboggle101.csv') + # 'per_label_Mindboggle101.csv', encoding='utf-8') diff --git a/mindboggle/features/fundi.py b/mindboggle/features/fundi.py index de279b9bc..a1dac04da 100644 --- a/mindboggle/features/fundi.py +++ b/mindboggle/features/fundi.py @@ -32,12 +32,12 @@ def extract_fundi(folds, curv_file, depth_file, min_separation=10, To find the endpoints, the find_outer_endpoints function propagates multiple tracks from seed vertices at median depth in the fold through - concentric rings toward the fold’s edge, selecting maximal values within + concentric rings toward the fold's edge, selecting maximal values within each ring, and terminating at candidate endpoints. The final endpoints are those candidates at the end of tracks that have a high median value, with the higher value chosen if two candidate endpoints are within (a default of) 10 edges from each other (otherwise, the resulting fundi - can have spurious branching at the fold’s edge). + can have spurious branching at the fold's edge). 
The connect_points_erosion function connects the deepest fold vertices to the endpoints with a skeleton of 1-vertex-thick curves by erosion. @@ -254,4 +254,4 @@ def extract_fundi(folds, curv_file, depth_file, min_separation=10, # ============================================================================ if __name__ == "__main__": import doctest - doctest.testmod(verbose=True) # py.test --doctest-modules \ No newline at end of file + doctest.testmod(verbose=True) # py.test --doctest-modules diff --git a/mindboggle/features/sulci.py b/mindboggle/features/sulci.py index 38ce451eb..6ebe9cdde 100644 --- a/mindboggle/features/sulci.py +++ b/mindboggle/features/sulci.py @@ -31,7 +31,7 @@ def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1, This function assigns vertices in a fold to a sulcus in one of two cases. In the first case, vertices whose labels are in only one label pair in - the fold are assigned to the label pair’s sulcus if they are connected + the fold are assigned to the label pair's sulcus if they are connected through similarly labeled vertices to the boundary between the two labels. In the second case, the segment_regions function propagates labels from label borders to vertices whose labels are in multiple label pairs in the @@ -492,4 +492,4 @@ def extract_sulci(labels_file, folds_or_file, hemi, min_boundary=1, # ============================================================================ if __name__ == "__main__": import doctest - doctest.testmod(verbose=True) # py.test --doctest-modules \ No newline at end of file + doctest.testmod(verbose=True) # py.test --doctest-modules diff --git a/mindboggle/guts/compute.py b/mindboggle/guts/compute.py index 1a0cdedad..27ffef6cb 100644 --- a/mindboggle/guts/compute.py +++ b/mindboggle/guts/compute.py @@ -211,10 +211,10 @@ def pairwise_vector_distances(vectors, save_file=False, normalize=False): ... save_file, normalize) >>> print(np.array_str(np.array(vector_distances), ... precision=5, suppress_small=True)) - [[ 0. 0.8165 0.89753 0.74536] - [ 0. 0. 0.16667 1.52753] - [ 0. 0. 0. 1.60728] - [ 0. 0. 0. 0. ]] + [[0. 0.8165 0.89753 0.74536] + [0. 0. 0.16667 1.52753] + [0. 0. 0. 1.60728] + [0. 0. 0. 0. ]] """ import os @@ -1080,7 +1080,7 @@ def compute_overlaps(targets, list1, list2, output_file='', save_output=True, df2 = pd.DataFrame({'Dice overlap': dice_overlaps}) df3 = pd.DataFrame({'Jaccard overlap': jacc_overlaps}) df = pd.concat([df1, df2, df3], axis=1) - df.to_csv(output_file, index=False) + df.to_csv(output_file, index=False, encoding='utf-8') return dice_overlaps, jacc_overlaps, output_file diff --git a/mindboggle/guts/graph.py b/mindboggle/guts/graph.py index 305f84251..7f4d5d8b4 100644 --- a/mindboggle/guts/graph.py +++ b/mindboggle/guts/graph.py @@ -40,8 +40,8 @@ def diagonal_degree_matrix(W, inverse=False, square_root=False): >>> from mindboggle.guts.graph import diagonal_degree_matrix >>> W = np.array([[10,2.3,3], [0,0,3], [0,1.5,0]]) >>> tocsr = diagonal_degree_matrix(W, inverse=False, square_root=False) - >>> tocsr.data - array([ 15.3, 3. , 1.5]) + >>> np.allclose(tocsr.data, [ 15.3, 3. , 1.5]) + True """ import numpy as np @@ -109,8 +109,8 @@ def weight_graph(Nodes, Indices, Meshes, kernel=rbf_kernel, add_to_graph=True, ... 
add_to_graph, G, sigma, verbose) >>> G.size() 9 - >>> G.degree() - {0.0: 4, 1.0: 4, 2.0: 3, 3.0: 4, 4.0: 3} + >>> sorted(dict(G.degree()).items()) + [(0.0, 4), (1.0, 4), (2.0, 3), (3.0, 4), (4.0, 3)] """ import numpy as np @@ -267,4 +267,4 @@ def graph_laplacian(W, type_of_laplacian='norm1', verbose=False): # ============================================================================ if __name__ == "__main__": import doctest - doctest.testmod(verbose=True) # py.test --doctest-modules \ No newline at end of file + doctest.testmod(verbose=True) # py.test --doctest-modules diff --git a/mindboggle/guts/paths.py b/mindboggle/guts/paths.py index 799cd3734..d6ef5fd74 100644 --- a/mindboggle/guts/paths.py +++ b/mindboggle/guts/paths.py @@ -699,7 +699,7 @@ def smooth_skeletons(skeletons, bounds, vtk_file, likelihoods, wN_max=1.0, ... bounds, vtk_file, likelihoods, wN_max, do_erode, save_file, ... output_file, background_value, verbose) >>> np.where(np.array(smoothed_skeletons)!=-1)[0][0:8] - array([112572, 113453, 113454, 113469, 114294, 114295, 114312, 114313]) + array([112572, 113435, 113454, 113469, 114294, 114295, 114296, 114312]) Write out vtk file and view (skip test): diff --git a/mindboggle/guts/segment.py b/mindboggle/guts/segment.py index 31647cc9d..da850c0af 100644 --- a/mindboggle/guts/segment.py +++ b/mindboggle/guts/segment.py @@ -94,7 +94,7 @@ def propagate(points, faces, region, seeds, labels, >>> segments = propagate(points, faces, region, seeds, labels, ... max_iters, tol, sigma, background_value, verbose) >>> np.unique(segments)[0:10] - array([ -1., 3., 12., 22.]) + array([-1., 3., 12., 22.]) >>> len_segments = [len(np.where(segments == x)[0]) ... for x in np.unique(segments) if x != background_value] >>> len_segments[0:10] @@ -282,7 +282,7 @@ def segment_regions(vertices_to_segment, neighbor_lists, min_region_size=1, >>> len(np.unique(segments)) 122 >>> np.unique(segments)[0:10] - array([ -1., 1., 3., 4., 5., 6., 7., 8., 9., 11.]) + array([-1., 1., 3., 4., 5., 6., 7., 8., 9., 11.]) >>> len_segments = [len(np.where(segments == x)[0]) ... for x in np.unique(segments) if x != background_value] >>> len_segments[0:10] diff --git a/mindboggle/mindboggle b/mindboggle/mindboggle old mode 100755 new mode 100644 index d5815071f..0b1adbebe --- a/mindboggle/mindboggle +++ b/mindboggle/mindboggle @@ -1,8 +1,8 @@ #!/usr/bin/env python """ -This is the main program to run Mindboggle (http://mindboggle.info). +This is the main program to run Mindboggle (https://mindboggle.info). -Mindboggle is an open source brain morphometry platform +Mindboggle is an open source brain anatomy/morphometry platform that takes in preprocessed T1-weighted MRI data and outputs volume, surface, and tabular data containing label, feature, and shape information for further analysis. @@ -11,25 +11,25 @@ Mindboggle can be run on the command line as "mindboggle" and can be installed as a cross-platform virtual machine for convenience and reproducibility of results. The software runs on Linux and is written in Python 3 and Python-wrapped C++ code called within a modular Nipype -pipeline framework (http://www.nipy.org/nipype/) to promote a modular, +pipeline framework (https://www.nipy.org/nipype/) to promote a modular, flexible design that captures provenance information (this file). We have tested the software most extensively for Python 3.5 on Ubuntu 14.04. 
For help in using Mindboggle :: - - Online `documentation `_ + - Online `documentation `_ - README file - Help on the command line:: $ mindboggle --help Authors: - - Arno Klein, 2010-2016 (arno@mindboggle.info) http://binarybottle.com - - Satrajit S. Ghosh, 2013 (satra@mit.edu) http://www.mit.edu/~satra/ + - Arno Klein, 2010-2019 (arno@childmind.org) https://binarybottle.com + - Satrajit S. Ghosh, 2013 (satra@mit.edu) https://www.mit.edu/~satra/ - Each file lists Mindboggle team members who contributed to its content. -Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License +Copyright 2010-2019, Mindboggle team (https://mindboggle.info), Apache v2.0 License """ @@ -73,6 +73,9 @@ from mindboggle.shapes.volume_shapes import thickinthehead, \ from mindboggle.shapes.zernike.zernike import zernike_moments_per_label from mindboggle.thirdparty.ants import PropagateLabelsThroughMask +from mindboggle.version import __version__ as mbversion +mindboggle_version = 'mindboggle version {0}'.format(mbversion) + # ============================================================================ # # Command-line arguments @@ -81,7 +84,7 @@ from mindboggle.thirdparty.ants import PropagateLabelsThroughMask parser = argparse.ArgumentParser(description=""" The Mindboggle software automates shape analysis of anatomical labels and features extracted from human brain - MR image data (http://mindboggle.info). Example: + MR image data (https://mindboggle.info). Example: mindboggle /home/jovyan/work/freesurfer_subjects/arno --ants /home/jovyan/work/ants_subjects/arno/antsBrainSegmentation.nii.gz""", formatter_class = lambda prog: @@ -97,8 +100,9 @@ parser.add_argument("DATA", help=("path to directory of a person's brain " "data, usually generated by the FreeSurfer " "software")) # "optional arguments": -parser.add_argument("-v", "--version", help="show mindboggle version number", - action='version', version='%(prog)s 1.2.0') +parser.add_argument("--version", help="show mindboggle version number", + action='version', + version='%(prog)s {}'.format(mbversion)) parser.add_argument("-c", "--cpus", help='number of processors to use (1)', type=int, default=1, metavar='INT') @@ -134,9 +138,9 @@ out_args.add_argument("--no_moments", action='store_true', help="no Zernike moments per surface label or sulcus") out_args.add_argument("--no_spectra", action='store_true', help="no Laplace-Beltrami per surface label or sulcus") -out_args.add_argument("--no_thickness", action='store_true', - help="no volume-based cortical label thicknesses") +adv_args.add_argument("--thickinthehead", action='store_true', + help="volume-based cortical label thicknesses") adv_args.add_argument("--fundi", action='store_true', help="extract, measure fundi (under evaluation, SLOW)") adv_args.add_argument("--moments", @@ -178,6 +182,8 @@ adv_args.add_argument("--plugin", dest="plugin", help="nipype plugin (see nipype documentation)") adv_args.add_argument("--plugin_args", dest="plugin_args", help="plugin arguments (see nipype documentation)") +adv_args.add_argument("--prov", action='store_true', + help="Capture provenance") args = parser.parse_args() # ---------------------------------------------------------------------------- @@ -247,6 +253,7 @@ if args.no_sulci: do_fundi = False else: do_sulci = True + do_label = True if args.fundi: do_fundi = True else: @@ -255,10 +262,15 @@ if args.no_points: do_points = False else: do_points = True -if args.no_thickness: - do_thickinthehead = False -else: +if args.roygbiv: + do_points = True +if 
do_points: + do_label = True + +if args.thickinthehead: do_thickinthehead = True +else: + do_thickinthehead = False # Set Laplace-Beltrami spectra: if args.no_spectra: @@ -342,6 +354,10 @@ def first_string_containing_substring(substring, List): first_matching_string = [x for x in List if substring in x][0] return first_matching_string +# Ensure provenance configuration is inherited by workflow +if args.prov: + config.enable_provenance() + # ============================================================================ # # Initialize workflow inputs and outputs @@ -2249,7 +2265,6 @@ if do_label and not args.no_volumes: 'noncortex_value', 'labels', 'names', - 'resize', 'propagate', 'output_dir', 'save_table', @@ -2289,7 +2304,6 @@ if do_label and not args.no_volumes: FSthicknesses.inputs.noncortex_value = 3 FSthicknesses.inputs.labels = dkt.cerebrum_cortex_numbers FSthicknesses.inputs.names = dkt.cerebrum_cortex_names - FSthicknesses.inputs.resize = True FSthicknesses.inputs.propagate = False FSthicknesses.inputs.output_dir = '' FSthicknesses.inputs.save_table = True @@ -2357,11 +2371,12 @@ if __name__ == '__main__': time0 = time() # ------------------------------------------------------------------------ - # Workflow configuration: provenance tracking, content hashing, etc.: + # Workflow configuration: content hashing, crashfiles, etc.: # ------------------------------------------------------------------------ - # config.enable_provenance() mbFlow.config['execution']['hash_method'] = 'content' - # mbFlow.config['execution']['use_relative_paths'] = True + mbFlow.config['execution']['crashfile_format'] = 'txt' + # Do not propagate the check to sub nodes + mbFlow.config['execution']['check_version'] = False # ------------------------------------------------------------------------ # Generate a visual graph: diff --git a/mindboggle/mindboggle123 b/mindboggle/mindboggle123 old mode 100755 new mode 100644 index da1e9037e..be3425e35 --- a/mindboggle/mindboggle123 +++ b/mindboggle/mindboggle123 @@ -4,7 +4,7 @@ This nipype (python) script runs a complete brain image morphology pipeline:: 1. FreeSurfer's recon-all (12hrs on macOS 10.12, 2.6GHz, 16GB RAM) 2. ANTs's antsCorticalThickness.sh (5.8hrs) - 3. Mindboggle (http://mindboggle.info) (1.8hrs) + 3. Mindboggle (https://mindboggle.info) (1.5hrs) mindboggle123 is intended to be run within the Mindboggle Docker container. Don't use this script if you wish to use different arguments than those below. @@ -20,21 +20,31 @@ Example (we set environment variables for clarity):: Authors: - - Arno Klein, 2017 (arno@mindboggle.info) http://binarybottle.com - - Satrajit S. Ghosh, 2017 (satra@mit.edu) http://www.mit.edu/~satra/ + - Arno Klein, 2017 (arno@mindboggle.info) https://binarybottle.com + - Satrajit S. 
Ghosh, 2017 (satra@mit.edu) https://www.mit.edu/~satra/ -Copyright 2017, Mindboggle team (http://mindboggle.info), Apache v2.0 License +Copyright 2017, Mindboggle team (https://mindboggle.info), Apache v2.0 License """ import os import argparse +from glob import glob from nipype import config, logging -from nipype.pipeline.engine import Workflow, Node +from nipype.pipeline.engine import Workflow, Node, MapNode from nipype.interfaces.utility import Function as Fn +from nipype.interfaces.utility import Merge from nipype.interfaces.freesurfer import ReconAll -from nipype.interfaces.ants.segmentation import antsCorticalThickness +from nipype.interfaces.ants.segmentation import CorticalThickness +from nipype.interfaces.ants import (ApplyTransforms, AntsJointFusion, + LabelGeometry, Registration, + MultiplyImages) +from nipype.utils.misc import human_order_sorted + + +from mindboggle.version import __version__ as mbversion +mindboggle_version = 'mindboggle version {0}'.format(mbversion) # ---------------------------------------------------------------------------- # Command-line arguments @@ -43,7 +53,7 @@ parser = argparse.ArgumentParser(description=""" mindboggle123 runs a complete brain MR image morphology pipeline: (1) FreeSurfer's recon-all, (2) ANTs's antsCorticalThickness.sh, and - (3) Mindboggle (http://mindboggle.info). + (3) Mindboggle (https://mindboggle.info). Example: mindboggle123 IMAGE --id ID --out OUT""", formatter_class = lambda prog: argparse.HelpFormatter(prog, @@ -56,6 +66,9 @@ adv_args = parser.add_argument_group('advanced settings') parser.add_argument("IMAGE", help=("T1-weighted MR human brain image")) # "additional arguments": +parser.add_argument("--version", help="show mindboggle version number", + action='version', + version='%(prog)s {}'.format(mbversion)) add_args.add_argument("--id", help='ID for the brain image', metavar='STR') add_args.add_argument("--out", help='output folder for all commands', default='/home/jovyan/work/mindboggle123_output', @@ -66,6 +79,8 @@ adv_args.add_argument("--working", default=os.path.join('/home/jovyan/work/mindboggle123_output', 'working'), metavar='STR') +adv_args.add_argument("--template", help="folder with OASIS-30 template", + default='/opt/data/OASIS-30_Atropos_template', metavar='STR') adv_args.add_argument("--skip_freesurfer", action='store_true', help="skip FreeSurfer for debugging (nipype skips when run)") adv_args.add_argument("--skip_ants", action='store_true', @@ -81,12 +96,27 @@ adv_args.add_argument("--fs_openmp", dest="openmp", adv_args.add_argument("--fs_T2image", dest="T2image", type=str, help="Optional T2 image to use with FreeSurfer") +adv_args.add_argument("--fs_flags", dest="fs_flags", + nargs='+', + help="include additional recon-all flags like e.g. 
nuintensitycor-3T ") adv_args.add_argument("--ants_num_threads", dest="num_threads", default=1, type=int, help="Number of threads to use with ANTs") +def SegOptions(option): + if option in ['quick', 'fusion']: + return option + else: + raise argparse.ArgumentError('ants_seg value must be one of "quick" or "fusion".') +adv_args.add_argument("--ants_seg", dest="seg", + default="quick", type=SegOptions, + help="Use ANTs 'quick' or 'fusion' to label subcortical structures") +adv_args.add_argument("--ants_segN", dest="segN", type=int, + help="Number of images to use for joint fusion (2-20)") adv_args.add_argument("--mb_num_threads", dest="mb_num_threads", default=1, type=int, help="Number of threads to use with mindboggle") +adv_args.add_argument("--prov", action='store_true', + help="Capture provenance") args = parser.parse_args() # ---------------------------------------------------------------------------- @@ -96,9 +126,20 @@ IMAGE = args.IMAGE ID = args.id OUT = args.out WORK = args.working +TDIR = args.template if args.skip_freesurfer and args.skip_ants: print("Use only one of the skip arguments: --skip_freesurfer, --skip_ants.") +# Ensure provenance configuration is inherited by workflow +if args.prov: + config.enable_provenance() + +# ---------------------------------------------------------------------------- +# Initialize workflow inputs and outputs +# ---------------------------------------------------------------------------- +mbFlow = Workflow(name='Mindboggle123') +mbFlow.base_dir = WORK + # ---------------------------------------------------------------------------- # Output directories # ---------------------------------------------------------------------------- @@ -118,12 +159,6 @@ mindboggle_output = os.path.join(OUT, 'mindboggled') if not os.path.isdir(mindboggle_output): os.makedirs(mindboggle_output) -# ---------------------------------------------------------------------------- -# Initialize workflow inputs and outputs -# ---------------------------------------------------------------------------- -mbFlow = Workflow(name='Mindboggle123') -mbFlow.base_dir = WORK - # ---------------------------------------------------------------------------- # Use recon-all to generate surfaces and parcellations of structural data: # recon-all -all -i -subjid -sd <.> @@ -138,6 +173,10 @@ if args.openmp and args.openmp > 1: if args.T2image: reconall.inputs.T2_file = args.T2image reconall.inputs.use_T2 = True +if args.fs_flags: + for i in range(len(args.fs_flags)): + args.fs_flags[i]= "-" + args.fs_flags[i] + reconall.inputs.flags = args.fs_flags # ---------------------------------------------------------------------------- # Use antsCorticalThickness.sh to generate segmentations of structural data: @@ -148,8 +187,8 @@ if args.T2image: # -f $TEMPLATE/T_template0_BrainCerebellumExtractionMask.nii.gz \ # -p $TEMPLATE/Priors2/priors%d.nii.gz \ # -o $PREFIX +# -u 0 # ---------------------------------------------------------------------------- -TDIR = '/opt/data/OASIS-30_Atropos_template' TEMPLATE = os.path.join(TDIR, 'T_template0.nii.gz') REG = os.path.join(TDIR, 'T_template0_BrainCerebellum.nii.gz') PROB = os.path.join(TDIR, 'T_template0_BrainCerebellumProbabilityMask.nii.gz') @@ -162,7 +201,7 @@ PRIOR5 = os.path.join(TDIR, 'Priors2', 'priors5.nii.gz') PRIOR6 = os.path.join(TDIR, 'Priors2', 'priors6.nii.gz') PREFIX = os.path.join(ants_output, ID, 'ants') -corticalthickness = Node(antsCorticalThickness(), +corticalthickness = Node(CorticalThickness(), name='antsCorticalThickness') 
corticalthickness.inputs.dimension = 3 corticalthickness.inputs.anatomical_image = IMAGE @@ -173,6 +212,8 @@ corticalthickness.inputs.extraction_registration_mask = EXT corticalthickness.inputs.segmentation_priors = [PRIOR1, PRIOR2, PRIOR3, PRIOR4, PRIOR5, PRIOR6] corticalthickness.inputs.out_prefix = PREFIX +corticalthickness.inputs.use_random_seeding = 0 +corticalthickness.inputs.use_floatingpoint_precision = True if args.num_threads and args.num_threads > 1: corticalthickness.inputs.num_threads = args.num_threads @@ -180,7 +221,8 @@ if args.num_threads and args.num_threads > 1: # ---------------------------------------------------------------------------- # Create function to call mindboggle # ---------------------------------------------------------------------------- -def mindboggle(subjectid, fsdir, antsdir, antsseg, out, args, num_threads=1, verbose=True): +def mindboggle(subjectid, fsdir, antsdir, antsseg, out, prov, args, + num_threads=1): """ Run the mindboggle morphology pipeline (see http://mindboggle.info). @@ -196,50 +238,48 @@ def mindboggle(subjectid, fsdir, antsdir, antsseg, out, args, num_threads=1, ver name of antsCorticalThickness.sh output segmentation file out : string path to mindboggle output directory + prov : boolean + capture provenance args : string extra arguments - verbose : boolean - print to stdout? + num_threads : integer + number of threads + + Returns + ------- + command : string + command """ import os from nipype.interfaces.base import CommandLine DATA = os.path.join(fsdir, subjectid) ants = os.path.join(antsdir, subjectid, antsseg) - all_args = ' '.join([DATA, '--out', out, '--ants', ants, args]) - if num_threads > 1: - all_args += ' --plugin MultiProc --plugin_args "dict(n_procs={0})"'.format(num_threads) - command = "{0} {1}".format('mindboggle', all_args) + all_args = ' '.join([DATA, '--out', out, '--ants', ants, + '--working', os.getcwd()] + + [args] + (['--prov'] if prov else [])) - if verbose: - print(command) + if num_threads > 1: + all_args += ' --plugin MultiProc --plugin_args "dict(n_procs={0})"'.\ + format(num_threads) - from nipype import config - del config._sections['execution']['display_variable'] cli = CommandLine(command='mindboggle') cli.inputs.args = all_args - cli.cmdline + command = cli.cmdline + print(command) cli.run() - return command # ---------------------------------------------------------------------------- # Run mindboggle on the recon-all and antsCorticalThickness.sh results: # mindboggle $FREESURFER_SUBJECT --out $MINDBOGGLED -# --ants $ANTS_SUBJECT/antsBrainSegmentation.nii.gz --roygbiv --graph hier +# --ants $ANTS_SUBJECT/antsBrainSegmentation.nii.gz +# --roygbiv --graph hier # ---------------------------------------------------------------------------- Mindboggle = Node(name='mindboggle', interface=Fn(function=mindboggle, - input_names=['subjectid', - 'fsdir', - 'antsdir', - 'antsseg', - 'out', - 'args', - 'verbose'], output_names=['command'])) Mindboggle.inputs.subjectid = ID if args.skip_freesurfer: @@ -250,11 +290,153 @@ Mindboggle.inputs.antsdir = ants_output if args.skip_ants: Mindboggle.inputs.antsseg = PREFIX + 'BrainSegmentation.nii.gz' else: + comps = TDIR.split(os.sep) + IDIR = os.sep.join(comps[:(-2 if not comps[-1] else -1)] + + ['OASIS-TRT-20_brains']) + LDIR = os.sep.join(comps[:(-2 if not comps[-1] else -1)] + + ['OASIS-TRT-20_DKT31_CMA_labels_v2']) + T1s = human_order_sorted(glob(os.path.join(IDIR, '*.nii.gz'))) + labels = human_order_sorted(glob(os.path.join(LDIR, '*.nii.gz'))) + N = args.segN or len(T1s) + + 
def mask_labels(intensity_image, label_image, output_dir=None): + import nibabel as nb + import os + thick_data = nb.load(intensity_image).get_data() > 0 + limg = nb.load(label_image) + label_data = limg.get_data() + new_labels = thick_data * label_data * (label_data >= 1000) + \ + (label_data < 1000) * label_data + new_label_img = nb.Nifti1Image(new_labels, + header=limg.header, + affine=limg.affine) + new_label_file = os.path.join(os.getcwd(), 'newlabels.nii.gz') + new_label_img.to_filename(new_label_file) + if output_dir is None: + output_dir = os.path.dirname(intensity_image) + limg.to_filename(os.path.join(output_dir, 'OASISlabels.nii.gz')) + return new_label_file + masker = Node(Fn(function=mask_labels, input_names=['intensity_image', + 'label_image', + 'output_dir'], + output_names=['new_label_file']), + name='masker') + + tocsv = Node(LabelGeometry(), name='get_measures') + tocsv.inputs.output_file = os.path.join(ants_output, ID, + 'antslabelstats.csv') + + if args.seg and args.seg == "quick": + # ----------------------------------------------------- + # Label ANTs output with Labels in template space + # ----------------------------------------------------- + merge_transforms = Node(Merge(2), name="merge_transforms") + transformer_nn = Node(ApplyTransforms(), name="transformer_nn") + transformer_nn.inputs.dimension = 3 + transformer_nn.inputs.invert_transform_flags = [False, False] + transformer_nn.inputs.input_image = '/opt/data/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz' + transformer_nn.inputs.interpolation = 'NearestNeighbor' + + mbFlow.connect(corticalthickness, 'BrainSegmentationN4', + transformer_nn, 'reference_image') + mbFlow.connect(corticalthickness, 'TemplateToSubject1GenericAffine', + merge_transforms, 'in1') + mbFlow.connect(corticalthickness, 'TemplateToSubject0Warp', + merge_transforms, 'in2') + mbFlow.connect(merge_transforms, 'out', transformer_nn, 'transforms') + mbFlow.connect(transformer_nn, 'output_image', masker, 'label_image') + elif args.seg: + # ----------------------------------------------------- + # Create workflow to label ANTs output with JointFusion + # ----------------------------------------------------- + reg = MapNode(Registration(), iterfield=['moving_image'], + name="register") + reg.inputs.moving_image = T1s[:N] + + # mimic fmriprep's T1 - MNI registration + reg.inputs.dimension = 3 + reg.inputs.convergence_threshold = [1e-06, 1e-06, 1e-06] + reg.inputs.convergence_window_size = [20, 20, 10] + reg.inputs.metric = ["Mattes", "Mattes", "CC"] + reg.inputs.metric_weight = [1, 1, 1] + reg.inputs.radius_or_number_of_bins = [56, 56, 4] + reg.inputs.transforms = ["Rigid", "Affine", "SyN"] + reg.inputs.transform_parameters = [(0.05,), (0.08,), (0.1, 3.0, 0.0)] + reg.inputs.number_of_iterations = [[100, 100], [100, 100], [100, 70, 50, 20]] + reg.inputs.sampling_strategy = ["Regular", "Regular", "None"] + reg.inputs.sampling_percentage = [0.25, 0.25, 1] + reg.inputs.smoothing_sigmas = [[2, 1], [1, 0], [3, 2, 1, 0]] + reg.inputs.sigma_units = ["vox", "vox", "vox"] + reg.inputs.shrink_factors = [[2, 1], [2, 1], [8, 4, 2, 1]] + reg.inputs.winsorize_upper_quantile = 0.995 + reg.inputs.winsorize_lower_quantile = 0.005 + reg.inputs.use_estimate_learning_rate_once = [True, True, True] + reg.inputs.use_histogram_matching = [True, True, True] + reg.inputs.collapse_output_transforms = True + reg.inputs.write_composite_transform = True + reg.inputs.output_transform_prefix = "output_" + reg.inputs.output_warped_image = True + 
reg.inputs.output_warped_image = "output_warped_image.nii.gz" + reg.inputs.interpolation = "LanczosWindowedSinc" + reg.inputs.float = True + reg.inputs.initial_moving_transform_com = 0 + + if args.num_threads and args.num_threads > 1: + reg.inputs.num_threads = args.num_threads + + transformer_nn = MapNode(ApplyTransforms(), + iterfield=['input_image', 'transforms'], + name="transformer_nn") + transformer_nn.inputs.dimension = 3 + transformer_nn.inputs.input_image = labels[:N] + transformer_nn.inputs.interpolation = 'NearestNeighbor' + + labeler = Node(AntsJointFusion(), name='labeler') + labeler.inputs.dimension = 3 + labeler.inputs.out_label_fusion = 'label.nii.gz' + if args.num_threads and args.num_threads > 1: + labeler.inputs.num_threads = args.num_threads + + def tolist(x): + return [x] + + mask_brain = Node(MultiplyImages(dimension=3, + output_product_image='brain.nii.gz' + ), + name='mask_brain') + mbFlow.connect(corticalthickness, 'BrainSegmentationN4', + mask_brain, 'first_input') + mbFlow.connect(corticalthickness, 'BrainExtractionMask', mask_brain, + 'second_input') + mbFlow.connect(mask_brain, 'output_product_image', + reg, 'fixed_image') + mbFlow.connect(mask_brain, 'output_product_image', + transformer_nn, 'reference_image') + mbFlow.connect(mask_brain, ('output_product_image', tolist), + labeler, 'target_image') + mbFlow.connect(reg, 'composite_transform', transformer_nn, 'transforms') + mbFlow.connect(corticalthickness, 'BrainExtractionMask', labeler, + 'mask_image') + mbFlow.connect(reg, 'warped_image', labeler, 'atlas_image') + mbFlow.connect(transformer_nn, 'output_image', labeler, + 'atlas_segmentation_image') + mbFlow.connect(labeler, 'out_label_fusion', masker, 'label_image') + + + mbFlow.connect(corticalthickness, 'CorticalThickness', + tocsv, 'intensity_image') + mbFlow.connect(corticalthickness, 'CorticalThickness', + masker, 'intensity_image') + mbFlow.connect(masker, 'new_label_file', tocsv, 'label_image') + + # ---------------------------------------------------------------------------- + # Connect ants to Mindboggle + # ---------------------------------------------------------------------------- mbFlow.connect(corticalthickness, 'BrainSegmentation', Mindboggle, 'antsseg') Mindboggle.inputs.out = mindboggle_output -Mindboggle.inputs.args = '--roygbiv' #' --graph hier' -Mindboggle.inputs.verbose = True +Mindboggle.inputs.prov = False # args.prov +Mindboggle.inputs.args = '--roygbiv' # ' --graph hier' if args.mb_num_threads: Mindboggle.inputs.num_threads = args.mb_num_threads @@ -266,25 +448,27 @@ if __name__ == '__main__': from time import time time0 = time() - # ------------------------------------------------------------------------ - # Workflow configuration: provenance tracking, content hashing, etc.: - # ------------------------------------------------------------------------ - # config.enable_provenance() + # -------------------------------------------------------------------- + # Workflow configuration: content hashing, crashfiles, etc.: + # -------------------------------------------------------------------- mbFlow.config['execution']['hash_method'] = 'content' - # mbFlow.config['execution']['use_relative_paths'] = True + mbFlow.config['execution']['crashfile_format'] = 'txt' + mbFlow.config['execution']['crashdump_dir'] = WORK + # Do not propagate the version check to sub nodes + mbFlow.config['execution']['check_version'] = False - # ------------------------------------------------------------------------ + # 
-------------------------------------------------------------------- # Debug: http://nipy.org/nipype/users/config_file.html#debug-configuration - # ------------------------------------------------------------------------ + # -------------------------------------------------------------------- debug = False if debug: config.set('logging', 'workflow_level', 'DEBUG') logging.update_logging(config) mbFlow.config['execution']['stop_on_first_rerun'] = True - # ------------------------------------------------------------------------ + # -------------------------------------------------------------------- # Run with or without a plugin: - # ------------------------------------------------------------------------ + # -------------------------------------------------------------------- if args.plugin: if args.plugin_args: mbFlow.run(plugin=args.plugin, plugin_args=eval(args.plugin_args)) @@ -294,5 +478,5 @@ if __name__ == '__main__': mbFlow.run() print('mindboggle123 done running recon-all, antsCorticalThicness.sh, ' - 'and mindboggle on {0} after {1:0.2f} seconds.'. - format(ID, time() - time0)) + 'and mindboggle {0} on {1} after {2:0.2f} seconds.'. + format(mindboggle_version, ID, time() - time0)) diff --git a/mindboggle/mio/colors.py b/mindboggle/mio/colors.py index 2b3721333..b320b67ce 100644 --- a/mindboggle/mio/colors.py +++ b/mindboggle/mio/colors.py @@ -9,9 +9,11 @@ """ +import os def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], - save_csv=True, plot_colormap=True, verbose=True): + save_csv=True, plot_colormap=True, verbose=True, + out_dir='.'): """ Create a colormap of perceptually distinguishable colors. @@ -52,6 +54,7 @@ def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], Examples -------- >>> from mindboggle.mio.colors import distinguishable_colors + >>> import numpy as np >>> ncolors = 31 >>> backgrounds = [[0,0,0],[1,1,1]] >>> save_csv = False @@ -59,12 +62,12 @@ def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], >>> verbose = False >>> colors = distinguishable_colors(ncolors, backgrounds, ... save_csv, plot_colormap, verbose) - >>> colors[0] - array([ 0.62068966, 0.06896552, 1. ]) - >>> colors[1] - array([ 0. , 0.5862069, 0. ]) - >>> colors[2] - array([ 0.75862069, 0.20689655, 0. ]) + >>> np.allclose(colors[0], [ 0.62068966, 0.06896552, 1. ]) + True + >>> np.allclose(colors[1], [ 0. , 0.5862069, 0. ]) + True + >>> np.allclose(colors[2], [ 0.75862069, 0.20689655, 0. ]) + True """ import numpy as np @@ -74,6 +77,7 @@ def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], from colormath.color_diff import delta_e_cie2000 filename = "colormap_of_{0}_distinguishable_colors".format(ncolors) + filename = os.path.join(out_dir, filename) # ------------------------------------------------------------------------ # Generate a sizable number of RGB triples. 
This represents our space of @@ -158,7 +162,7 @@ def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], if verbose: print(rgb) plt.barh(0, 50, 1, 0, color=rgb) - plt.savefig(filename + ".png") + plt.savefig(filename + ".png") if verbose: print("Colormap image saved to {0}".format(filename + ".png")) @@ -176,7 +180,7 @@ def distinguishable_colors(ncolors, backgrounds=[[0,0,0],[1,1,1]], def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0, save_table=True, output_format='csv', - verbose=True): + verbose=True, out_dir='.'): """ Extract surface or volume label boundaries, find unique label pairs, and write adjacency matrix (useful for constructing a colormap). @@ -215,6 +219,7 @@ def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0, -------- >>> from mindboggle.mio.colors import label_adjacency_matrix >>> from mindboggle.mio.fetch_data import prep_tests + >>> import numpy as np >>> urls, fetch_data = prep_tests() >>> ignore_values = [-1, 0] >>> add_value = 0 @@ -224,15 +229,17 @@ def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0, >>> label_file = fetch_data(urls['left_manual_labels'], '', '.vtk') >>> labels, matrix, output_table = label_adjacency_matrix(label_file, ... ignore_values, add_value, save_table, output_format, verbose) - >>> matrix.lookup([20,21,22,23,24,25,26,27,28,29], + >>> out = matrix.lookup([20,21,22,23,24,25,26,27,28,29], ... [35,35,35,35,35,35,35,35,35,35]) - array([ 0., 1., 0., 0., 0., 0., 0., 1., 1., 1.]) + >>> np.allclose(out, [ 0., 1., 0., 0., 0., 0., 0., 1., 1., 1.]) + True >>> label_file = fetch_data(urls['freesurfer_labels'], '', '.nii.gz') >>> labels, matrix, output_table = label_adjacency_matrix(label_file, ... ignore_values, add_value, save_table, output_format, verbose) - >>> matrix.lookup([4,5,7,8,10,11,12,13,14,15], [4,4,4,4,4,4,4,4,4,4]) - array([ 1., 1., 0., 0., 0., 1., 0., 0., 1., 0.]) + >>> out = matrix.lookup([4,5,7,8,10,11,12,13,14,15], [4,4,4,4,4,4,4,4,4,4]) + >>> np.allclose(out, [ 1., 1., 0., 0., 0., 1., 0., 0., 1., 0.]) + True """ import numpy as np @@ -280,6 +287,8 @@ def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0, else: raise IOError("Use appropriate input file type.") + output_table = os.path.join(out_dir, output_table) + # Find unique pairs (or first two of each list): pairs = [] for pair in label_pairs: @@ -304,7 +313,7 @@ def label_adjacency_matrix(label_file, ignore_values=[-1, 999], add_value=0, if save_table: if output_format == 'csv': - matrix.to_csv(output_table, index=False) + matrix.to_csv(output_table, index=False, encoding='utf-8') if verbose: print("Adjacency matrix saved to {0}".format(output_table)) else: @@ -395,6 +404,7 @@ def group_colors(colormap, colormap_name, description='', adjacency_matrix=[], -------- >>> # Get colormap: >>> from mindboggle.mio.colors import distinguishable_colors + >>> import numpy as np >>> colormap = distinguishable_colors(ncolors=31, ... backgrounds=[[0,0,0],[1,1,1]], ... save_csv=False, plot_colormap=False, verbose=False) @@ -425,14 +435,14 @@ def group_colors(colormap, colormap_name, description='', adjacency_matrix=[], >>> colors = group_colors(colormap, colormap_name, description, ... adjacency_matrix, IDs, names, groups, ... 
save_text_files, plot_colors, plot_graphs, out_dir, verbose) - >>> colors[0] - [0.7586206896551724, 0.20689655172413793, 0.0] - >>> colors[1] - [0.48275862068965514, 0.4482758620689655, 0.48275862068965514] - >>> colors[2] - [0.3448275862068966, 0.3103448275862069, 0.034482758620689655] - >>> colors[-1] - [0.7931034482758621, 0.9655172413793103, 0.7931034482758621] + >>> np.allclose(colors[0], [0.7586206896551724, 0.20689655172413793, 0.0]) + True + >>> np.allclose(colors[1], [0.48275862068965514, 0.4482758620689655, 0.48275862068965514]) + True + >>> np.allclose(colors[2], [0.3448275862068966, 0.3103448275862069, 0.034482758620689655]) + True + >>> np.allclose(colors[-1], [0.7931034482758621, 0.9655172413793103, 0.7931034482758621]) + True No groups / subgraphs: @@ -440,14 +450,14 @@ def group_colors(colormap, colormap_name, description='', adjacency_matrix=[], >>> colors = group_colors(colormap, colormap_name, description, ... adjacency_matrix, IDs, names, groups, ... save_text_files, plot_colors, plot_graphs, out_dir, verbose) - >>> colors[0] - [0.5172413793103449, 0.8275862068965517, 1.0] - >>> colors[1] - [0.13793103448275862, 0.0, 0.24137931034482757] - >>> colors[2] - [0.3793103448275862, 0.27586206896551724, 0.48275862068965514] - >>> colors[-1] - [0.6206896551724138, 0.48275862068965514, 0.3448275862068966] + >>> np.allclose(colors[0], [0.5172413793103449, 0.8275862068965517, 1.0]) + True + >>> np.allclose(colors[1], [0.13793103448275862, 0.0, 0.24137931034482757]) + True + >>> np.allclose(colors[2], [0.3793103448275862, 0.27586206896551724, 0.48275862068965514]) + True + >>> np.allclose(colors[-1], [0.6206896551724138, 0.48275862068965514, 0.3448275862068966]) + True """ import os @@ -827,6 +837,7 @@ def write_json_colormap(colormap, label_numbers, label_names=[], -------- >>> from mindboggle.mio.colors import write_xml_colormap >>> from mindboggle.mio.labels import DKTprotocol + >>> import numpy as np >>> dkt = DKTprotocol() >>> colormap = dkt.colormap_normalized >>> colormap = [[x[2], x[3], x[4]] for x in colormap] @@ -835,8 +846,8 @@ def write_json_colormap(colormap, label_numbers, label_names=[], >>> colormap_file = '' >>> colormap_name = "DKT31colormap" >>> description = "Colormap for DKT31 human brain cortical labels" - >>> colormap[0] - [0.803921568627451, 0.24313725490196078, 0.3058823529411765] + >>> np.allclose(colormap[0], [0.803921568627451, 0.24313725490196078, 0.3058823529411765]) + True >>> write_json_colormap(colormap, label_numbers, label_names, ... 
colormap_file, colormap_name, description) """ @@ -846,7 +857,7 @@ def write_json_colormap(colormap, label_numbers, label_names=[], if not colormap_name: colormap_name = 'Colormap' - f = open(colormap_file, 'w') + f = open(colormap_file, 'w', encoding='utf-8') f.write("{\n") f.write(' "name": "{0}",\n'.format(colormap_name)) f.write(' "description": "{0}",\n'.format(description)) @@ -888,14 +899,15 @@ def write_xml_colormap(colormap, label_numbers, colormap_file='', -------- >>> from mindboggle.mio.colors import write_xml_colormap >>> from mindboggle.mio.labels import DKTprotocol + >>> import numpy as np >>> dkt = DKTprotocol() >>> colormap = dkt.colormap_normalized >>> colormap = [[x[2], x[3], x[4]] for x in colormap] >>> label_numbers = dkt.label_numbers >>> colormap_file = '' >>> colormap_name = 'DKT31colormap' - >>> colormap[0] - [0.803921568627451, 0.24313725490196078, 0.3058823529411765] + >>> np.allclose(colormap[0], [0.803921568627451, 0.24313725490196078, 0.3058823529411765]) + True >>> write_xml_colormap(colormap, label_numbers, colormap_file, ... colormap_name) """ @@ -905,7 +917,7 @@ def write_xml_colormap(colormap, label_numbers, colormap_file='', if not colormap_name: colormap_name = 'Colormap' - f = open(colormap_file,'w') + f = open(colormap_file, 'w', encoding='utf-8') f.write(''' @@ -1188,4 +1200,4 @@ def viridis_colormap(): [0.983868, 0.904867, 0.136897], [0.993248, 0.906157, 0.143936]] - return viridis \ No newline at end of file + return viridis diff --git a/mindboggle/mio/labels.py b/mindboggle/mio/labels.py index eb9d6a8c0..6f280cc93 100644 --- a/mindboggle/mio/labels.py +++ b/mindboggle/mio/labels.py @@ -4373,7 +4373,7 @@ class DKTprotocol(object): "superior temporal sulcus", "inferior temporal sulcus", "circular sulcus", - "1st transverse temporal sulcus and Heschl's sulcus", + "1st transverse temporal sulcus and Heschl sulcus", "cingulate sulcus", "paracentral sulcus", "parietooccipital fissure", diff --git a/mindboggle/mio/plots.py b/mindboggle/mio/plots.py index 438e31e2c..66fa0d534 100644 --- a/mindboggle/mio/plots.py +++ b/mindboggle/mio/plots.py @@ -34,10 +34,10 @@ def plot_surfaces(vtk_files, use_colormap=False, colormap_file=''): >>> from mindboggle.mio.plots import plot_surfaces >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() - >>> label_file = fetch_data(urls['freesurfer_labels'], '', '.nii.gz') + >>> labels_file = fetch_data(urls['left_manual_labels'], '', '.vtk') #'left_freesurfer_labels' >>> use_colormap = True >>> colormap_file = '/software/vtk_cpp_tools/colormap.xml' # doctest: +SKIP - >>> plot_surfaces(vtk_files, use_colormap, colormap_file) # doctest: +SKIP + >>> plot_surfaces(labels_file, use_colormap, colormap_file) # doctest: +SKIP Plot manual labels on folds of the left hemisphere: @@ -293,7 +293,7 @@ def histogram_of_vtk_scalars(vtk_file, nbins=100): # Histogram: fig = plt.figure() ax = fig.add_subplot(1,1,1) - ax.hist(values, nbins, normed=False, facecolor='gray', alpha=0.5) + ax.hist(values, bins=nbins, density=False, facecolor='gray', alpha=0.5) plt.show() @@ -351,7 +351,7 @@ def histograms_of_lists(columns, column_name='', ignore_columns=[], if icolumn not in ignore_columns: ax = fig.add_subplot(nplotrows, nplotcols, icolumn + 1) column = [np.float(x) for x in column] - ax.hist(column, nbins, normed=False, facecolor='gray', alpha=0.5) + ax.hist(column, bins=nbins, density=False, facecolor='gray', alpha=0.5) plt.xlabel(column_name, fontsize='small') if len(titles) == ncolumns: 
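A note on the plots.py hunks above: matplotlib 2.1 deprecated (and matplotlib
3.x later removed) the normed keyword of hist in favor of density, and the
doctest edits in guts/ and mio/ replace exact repr matching with np.allclose
because NumPy 1.14 tightened array printing (array([ 0., 1.]) became
array([0., 1.])), which broke doctests that compare printed output. A minimal
sketch of both patterns, assuming matplotlib >= 2.1 and NumPy >= 1.14:

import numpy as np
import matplotlib.pyplot as plt

values = np.random.RandomState(0).normal(size=1000)

fig, ax = plt.subplots()
# density=False keeps raw counts, as in histogram_of_vtk_scalars above;
# density=True would scale bin heights so the histogram integrates to 1.
ax.hist(values, bins=100, density=False, facecolor="gray", alpha=0.5)

# np.allclose keeps doctests stable across NumPy repr changes and tiny
# platform-dependent rounding differences in computed values.
assert np.allclose(np.array([0.0, 1.0]), [0.0, 1.0 + 1e-9])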
plt.title(titles[icolumn], fontsize='small') diff --git a/mindboggle/mio/tables.py b/mindboggle/mio/tables.py index 8cf00f794..4bbc4c2eb 100644 --- a/mindboggle/mio/tables.py +++ b/mindboggle/mio/tables.py @@ -28,7 +28,7 @@ def write_shape_stats(labels_or_file=[], sulci=[], fundi=[], There can be thousands of vertices in a single feature such as a gyrus, sulcus, or fundus, and for per-vertex shape measures, it makes sense to characterize their collective shape as a distribution of shape values. - Mindboggle’s stats_per_label function generates tables of summary + Mindboggle's stats_per_label function generates tables of summary statistical measures for these distributions, and includes the shape measures computed on cortical features as well. @@ -381,12 +381,12 @@ def write_shape_stats(labels_or_file=[], sulci=[], fundi=[], if columns: df1 = pd.DataFrame({'ID': label_numbers}) df2 = pd.DataFrame(np.transpose(columns), - columns = column_names) + columns=column_names) df = pd.concat([df1, df2], axis=1) if label_names: df0 = pd.DataFrame({'name': label_names}) df = pd.concat([df0, df], axis=1) - df.to_csv(output_table, index=False) + df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") @@ -585,7 +585,7 @@ def write_vertex_measures(output_table, labels_or_file, sulci=[], fundi=[], output_table = os.path.join(os.getcwd(), 'vertices.csv') df = pd.DataFrame(np.transpose(columns), columns = column_names) - df.to_csv(output_table, index=False) + df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") @@ -655,7 +655,7 @@ def write_face_vertex_averages(input_file, output_table='', area_file=''): output_table = os.path.join(os.getcwd(), 'average_face_values.csv') df = pd.DataFrame({'': columns}) - df.to_csv(output_table, index=False) + df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") @@ -769,7 +769,7 @@ def write_average_face_values_per_label(input_indices_vtk, # Write to table: # ---------------------------------------------------------------- df = pd.DataFrame({'': columns}) - df.to_csv(output_table, index=False) + df.to_csv(output_table, index=False, encoding='utf-8') if not os.path.exists(output_table): raise IOError(output_table + " not found") @@ -815,6 +815,7 @@ def select_column_from_tables(tables, index=0, write_table=True, -------- >>> from mindboggle.mio.tables import select_column_from_tables >>> from mindboggle.mio.fetch_data import prep_tests + >>> import numpy as np >>> urls, fetch_data = prep_tests() >>> tables = [fetch_data(urls['thickinthehead_freesurfer_labels_table'], '', '.csv'), ... fetch_data(urls['thickinthehead_freesurfer_labels_table'], '', '.csv')] @@ -824,12 +825,8 @@ def select_column_from_tables(tables, index=0, write_table=True, >>> output = select_column_from_tables(tables, index, write_table, ... 
output_table)
     >>> columns = output[1][0]
-    >>> columns[0]
-    2.8010000000000002
-    >>> columns[1]
-    3.9430000000000001
-    >>> columns[2]
-    4.0280000000000005
+    >>> np.allclose(columns[:3], [2.801, 3.943, 4.028])
+    True

     """
     import os
@@ -859,7 +856,7 @@ def select_column_from_tables(tables, index=0, write_table=True,
                 output_table = os.path.join(os.getcwd(),
                                             'select_column_from_tables.csv')
             df = pd.DataFrame({'': columns})
-            df.to_csv(output_table, index=False)
+            df.to_csv(output_table, index=False, encoding='utf-8')
         else:
             raise IOError('Not saving table.')
@@ -1027,7 +1024,7 @@ def explode_table(input_table='', column_headers=[], output_path=None,
             out_file = os.path.join(output_path,
                                     output_stem + str(label) + '.csv')
-            label_table.to_csv(out_file, index=False)
+            label_table.to_csv(out_file, index=False, encoding='utf-8')
             if not os.path.exists(out_file):
                 raise IOError(out_file + " not found")
@@ -1104,6 +1101,85 @@ def explode_mindboggle_tables(subject_path='', output_path='.',
                   format(output_dir))

+
+def short_name(filepath):
+    """
+    Generate a short name for a given branch of the mindboggle output.
+
+    Parameters
+    ----------
+    filepath: str
+        a path to a mindboggle output file
+
+    Returns
+    -------
+    short_name: str
+        initials of the words in the file path below the tables directory
+    """
+    return ''.join([v[0] for v in
+          filepath.split('/tables/')[-1].replace('/','_').split('_')])
+
+
+def fname2df(fname):
+    """
+    Read a single csv into a single dataframe row.
+
+    Parameters
+    ----------
+    fname: str
+        a path to a mindboggle output file
+
+    Returns
+    -------
+    df_row: pandas DataFrame
+        a single row of shape measures, with columns named via short_name()
+    """
+    import numpy as np
+    import pandas as pd
+
+    df = pd.read_csv(fname, na_values=[0.0]).dropna(axis=0)
+    sn = short_name(fname)
+    outerproduct = [[sn+'-'+x+'-'+y.lstrip() for x in df.name] for y in
+                    df.keys()[2:]]
+    outerproduct = np.array(outerproduct).flatten().tolist()
+    df_row = pd.DataFrame(data=df.iloc[:, 2:].values.flatten()[None, :],
+                          columns=outerproduct, index=[0])
+    return df_row
+
+
+def collate_participant_tables(subject_ids, base_dir):
+    """
+    Generate a pandas dataframe across all subjects.
+
+    Parameters
+    ----------
+    subject_ids: list
+        a list of subject identifiers
+    base_dir: str
+        path to a mindboggle output base directory (mindboggled)
+
+    Returns
+    -------
+    collated_table : pandas DataFrame
+        rows of subject_ids, and columns of shape measures
+
+    Examples
+    --------
+    >>> import os
+    >>> from mindboggle.mio.tables import collate_participant_tables
+    >>> subject_ids = ['arno', 'arno'] # normally two different subjects
+    >>> base_dir = os.environ['MINDBOGGLE_DATA'] # doctest: +SKIP
+    >>> dft = collate_participant_tables(subject_ids, base_dir) # doctest: +SKIP
+    >>> dft['lcsfs-sylvian fissure-area'] # doctest: +SKIP
+    arno    4.641015
+    arno    4.641015
+    Name: lcsfs-sylvian fissure-area, dtype: float64
+
+    """
+    from glob import glob
+    import os
+    import pandas as pd
+
+    out = None
+    for id in subject_ids:
+        fl = glob(os.path.join(base_dir, id, 'tables', '*.csv')) + \
+             glob(os.path.join(base_dir, id, 'tables', '*', '*.csv'))
+        # skip vertices outputs
+        dft = pd.concat([fname2df(val) for val in sorted(fl)
+                         if 'vertices' not in val], axis=1)
+        dft.index = [id]
+        out = dft if out is None else pd.concat((out, dft), axis=0)
+    return out
+
+
 # ============================================================================
 # Doctests
 # ============================================================================
diff --git a/mindboggle/mio/vtks.py b/mindboggle/mio/vtks.py
index a6f2d5681..aeeb7d826 100644
--- a/mindboggle/mio/vtks.py
+++ b/mindboggle/mio/vtks.py
@@ -707,7 +707,7 @@ def write_vtk(output_vtk, points, indices=[], lines=[], faces=[],
         output_vtk = 
os.path.join(os.getcwd(), output_vtk) - Fp = open(output_vtk,'w') + Fp = open(output_vtk,'w', encoding="utf-8") write_header(Fp) write_points(Fp, points) if indices: @@ -817,7 +817,7 @@ def rewrite_scalars(input_vtk, output_vtk, new_scalars, faces, points, original_indices = reindex_faces_points(faces, points) # Write VTK file - Fp = open(output_vtk,'w') + Fp = open(output_vtk,'w', encoding="utf-8") write_header(Fp) if points: write_points(Fp, points) diff --git a/mindboggle/shapes/laplace_beltrami.py b/mindboggle/shapes/laplace_beltrami.py index 0c7343dad..b33fa10a6 100644 --- a/mindboggle/shapes/laplace_beltrami.py +++ b/mindboggle/shapes/laplace_beltrami.py @@ -89,15 +89,16 @@ def computeAB(points, faces): [ 0. 0. 0. -1.20711 0. 1.20711 0. 0. ] [ 0. 0. 0. -0.5 0. 0. 0.5 0. ] [ 0. -0.5 0. 0. 0. 0. 0. 0.5 ]] + >>> print(np.array_str(B.toarray(), precision=5, suppress_small=True)) - [[ 0.25 0.08333 0.04167 0. 0.08333 0. 0. 0.04167] - [ 0.08333 0.16667 0. 0. 0.04167 0. 0. 0.04167] - [ 0.04167 0. 0.16667 0.04167 0.08333 0. 0. 0. ] - [ 0. 0. 0.04167 0.28452 0.10059 0.10059 0.04167 0. ] - [ 0.08333 0.04167 0.08333 0.10059 0.36785 0.05893 0. 0. ] - [ 0. 0. 0. 0.10059 0.05893 0.20118 0.04167 0. ] - [ 0. 0. 0. 0.04167 0. 0.04167 0.08333 0. ] - [ 0.04167 0.04167 0. 0. 0. 0. 0. 0.08333]] + [[0.25 0.08333 0.04167 0. 0.08333 0. 0. 0.04167] + [0.08333 0.16667 0. 0. 0.04167 0. 0. 0.04167] + [0.04167 0. 0.16667 0.04167 0.08333 0. 0. 0. ] + [0. 0. 0.04167 0.28452 0.10059 0.10059 0.04167 0. ] + [0.08333 0.04167 0.08333 0.10059 0.36785 0.05893 0. 0. ] + [0. 0. 0. 0.10059 0.05893 0.20118 0.04167 0. ] + [0. 0. 0. 0.04167 0. 0.04167 0.08333 0. ] + [0.04167 0.04167 0. 0. 0. 0. 0. 0.08333]] """ import numpy as np @@ -775,8 +776,8 @@ def spectrum_per_label(vtk_file, spectrum_size=10, exclude_labels=[-1], >>> spectrum_lists, label_list = spectrum_per_label(vtk_file, ... spectrum_size, exclude_labels, None, area_file, largest_segment, ... verbose) - >>> [np.float("{0:.{1}f}".format(x, 5)) for x in spectrum_lists[0]] - [0.0, 0.00054, 0.00244, 0.00291, 0.00456, 0.00575] + >>> [np.float("{0:.{1}f}".format(x, 5)) for x in spectrum_lists[0][1::]] + [0.00054, 0.00244, 0.00291, 0.00456, 0.00575] >>> label_list[0:10] [1029, 1005, 1011, 1021, 1008, 1025, 999, 1013, 1007, 1022] diff --git a/mindboggle/shapes/surface_shapes.py b/mindboggle/shapes/surface_shapes.py index 963f28bf2..270fe1ec4 100644 --- a/mindboggle/shapes/surface_shapes.py +++ b/mindboggle/shapes/surface_shapes.py @@ -43,9 +43,12 @@ def area(command, surface_file, verbose=False): >>> ccode_path = os.environ['vtk_cpp_tools'] >>> command = os.path.join(ccode_path, 'area', 'PointAreaMain') >>> area_file = area(command, surface_file, verbose) + >>> >>> scalars, name = read_scalars(area_file) - >>> [np.float("{0:.{1}f}".format(x, 5)) for x in scalars[0:8]] - [0.4827, 0.39661, 0.57813, 0.70574, 0.84318, 0.57643, 0.66942, 0.7063] + >>> np.allclose(scalars[0:8], + ... [0.48270401731, 0.39661528543, 0.57813454792, 0.70574099571, + ... 
0.84318527207, 0.57642554119, 0.66942016035, 0.70629953593]) + True """ import os @@ -60,7 +63,7 @@ def area(command, surface_file, verbose=False): cli = CommandLine(command=command) cli.inputs.args = args - cli.cmdline + cli.terminal_output = 'file' cli.run() if not os.path.exists(area_file): @@ -102,8 +105,8 @@ def travel_depth(command, surface_file, verbose=False): >>> command = os.path.join(ccode_path, 'travel_depth', 'TravelDepthMain') >>> depth_file = travel_depth(command, surface_file, verbose) >>> scalars, name = read_scalars(depth_file) - >>> [np.float("{0:.{1}f}".format(x, 5)) for x in scalars[0:8]] - [0.02026, 0.06009, 0.12859, 0.04564, 0.00774, 0.05284, 0.05354, 0.01316] + >>> np.allclose(scalars[0:8], [0.020259869839, 0.06009166489, 0.12858575442, 0.045639221313, 0.007742772964, 0.052839111255, 0.053538904296, 0.013158746337]) + True """ import os @@ -118,7 +121,7 @@ def travel_depth(command, surface_file, verbose=False): cli = CommandLine(command=command) cli.inputs.args = args - cli.cmdline + cli.terminal_output = 'file' cli.run() if not os.path.exists(depth_file): @@ -158,8 +161,8 @@ def geodesic_depth(command, surface_file, verbose=False): >>> command = os.path.join(ccode_path, 'geodesic_depth', 'GeodesicDepthMain') >>> depth_file = geodesic_depth(command, surface_file, verbose) >>> scalars, name = read_scalars(depth_file) - >>> [np.float("{0:.{1}f}".format(x, 5)) for x in scalars[0:8]] - [0.02026, 0.06009, 0.12859, 0.04564, 0.00774, 0.05284, 0.05354, 0.01316] + >>> np.allclose(scalars[0:8], [0.020259869839, 0.06009166489, 0.12858575442, 0.045639221313, 0.007742772964, 0.052839111255, 0.053538904296, 0.013158746337]) + True """ import os @@ -174,7 +177,7 @@ def geodesic_depth(command, surface_file, verbose=False): cli = CommandLine(command=command) cli.inputs.args = args - cli.cmdline + cli.terminal_output = 'file' cli.run() if not os.path.exists(depth_file): @@ -270,8 +273,8 @@ def curvature(command, method, arguments, surface_file, verbose=False): >>> mean_curvature_file, f1,f2,f3,f4 = curvature(command, method, ... 
arguments, surface_file, verbose) >>> scalars, name = read_scalars(mean_curvature_file) - >>> [np.float("{0:.{1}f}".format(x, 5)) for x in scalars[0:8]] - [-5.81361, -5.9313, -6.28055, -5.621, -5.69631, -5.80399, -5.87265, -5.7107] + >>> np.allclose(scalars[0:8], [-5.8136068088, -5.9312990469, -6.2805500474, -5.6210018286, -5.6963067208, -5.8039874097, -5.8726460688, -5.7106966401]) + True """ import os @@ -284,8 +287,8 @@ def curvature(command, method, arguments, surface_file, verbose=False): min_curvature_vector_file = None basename = os.path.splitext(os.path.basename(surface_file))[0] - mean_curvature_file = os.path.join(os.getcwd(), basename) + \ - '.mean_curvature.vtk' + stem = os.path.join(os.getcwd(), basename) + mean_curvature_file = stem + '.mean_curvature.vtk' if method in [0, 1]: gauss_curvature_file = stem + '.gauss_curvature.vtk' args.extend(['-g', gauss_curvature_file]) @@ -307,7 +310,7 @@ def curvature(command, method, arguments, surface_file, verbose=False): cli = CommandLine(command=command) cli.inputs.args = ' '.join(args) - cli.cmdline + cli.terminal_output = 'file' cli.run() return mean_curvature_file, gauss_curvature_file, \ diff --git a/mindboggle/shapes/volume_shapes.py b/mindboggle/shapes/volume_shapes.py index 0c0820b56..d9d472d62 100644 --- a/mindboggle/shapes/volume_shapes.py +++ b/mindboggle/shapes/volume_shapes.py @@ -87,8 +87,7 @@ def volume_per_brain_region(input_file, include_labels=[], exclude_labels=[], # Load labeled image volumes: img = nb.load(input_file) - hdr = img.get_header() - volume_per_voxel = np.product(hdr.get_zooms()) + volume_per_voxel = np.product(img.header.get_zooms()) labels = img.get_data().ravel() unique_labels, counts = count_per_label(labels, include_labels, @@ -102,7 +101,7 @@ def volume_per_brain_region(input_file, include_labels=[], exclude_labels=[], else: output_table = os.path.join(os.getcwd(), 'volume_for_each_label.csv') - fid = open(output_table, 'w') + fid = open(output_table, 'w', encoding='utf-8') if len(label_names) == len(unique_labels): fid.write("name, ID, volume\n") else: @@ -129,9 +128,9 @@ def volume_per_brain_region(input_file, include_labels=[], exclude_labels=[], return unique_labels, volumes, output_table -def thickinthehead(segmented_file, labeled_file, cortex_value=2, - noncortex_value=3, labels=[], names=[], resize=True, - propagate=True, output_dir='', save_table=False, +def thickinthehead(segmented_file, labeled_file, + cortex_value=2, noncortex_value=3, labels=[], names=[], + propagate=False, output_dir='', save_table=False, output_table='', verbose=False): """ Compute a simple thickness measure for each labeled cortex region volume. @@ -142,36 +141,38 @@ def thickinthehead(segmented_file, labeled_file, cortex_value=2, produce favorable results. For example, we found that at least a quarter of the over one hundred EMBARC brain images we processed through FreeSurfer clipped ventral cortical regions, resulting in bad surface - patches in those regions. For comparison, we built a function called + patches in those regions. For comparison, we built this function called thickinthehead which computes a simple thickness measure for each - cortical region using the hybrid segmentation volume rather than surfaces. 
-
-    The thickinthehead function first saves a brain volume that has been
-    segmented into cortex and non-cortex voxels into separate binary files,
-    then resamples these cortex and non-cortex files from, for example,
-    1mm^3 to 0.5mm^3 voxel dimensions to better represent the contours
-    of the cortex, then extracts outer and inner boundary voxels of the cortex
-    by morphologically eroding the cortex by one (resampled) voxel bordering
-    the outside of the brain and bordering the inside of the brain
-    (non-cortex). Then it estimates the middle cortical surface area by the
-    average volume of the outer and inner boundary voxels of the cortex.
-    Finally, it estimates the thickness of a labeled cortical region as the
-    volume of the labeled region divided by the surface area of that region.
-
-    We compared thickinthehead and FreeSurfer cortical thickness estimates
-    for 16 cortical regions in 40 EMBARC control subjects (unpublished
-    results) with published estimates based on manual delineations of MR
-    images (Kabani, 2001). Forty percent of FreeSurfer estimates for the 640
-    labels were in the range of the published values, whereas almost ninety
-    percent of thickinthehead’s estimates were within range. ANTs values
-    deviated further from the published estimates and were less reliable
-    (greater inter-subject ranges) than the FreeSurfer or thickinthehead
-    values.
+    cortical region using a segmentation volume rather than surfaces.
+
+    We have revised this algorithm from the original published version.
+    We removed upsampling to reduce memory issues for large image volumes,
+    and replaced the estimated volume of the middle cortical layer
+    with an estimate of its surface area. We made these revisions to be less
+    susceptible to deviations in voxel size from isotropic 1mm^3 voxels,
+    for which thickinthehead was originally built.
+
+    Steps ::
+
+        1. Extract noncortex and cortex into separate files.
+        2. Either mask labels with cortex or fill cortex with labels.
+        3. Extract outer and inner boundary voxels of the cortex,
+           by morphologically eroding the cortex (=2) by one voxel bordering
+           the outside of the brain (=0) and bordering the inside of the brain
+           (non-cortex=3).
+        4. Estimate middle cortical layer's surface area by the average
+           surface area of the outer and inner boundary voxels of the cortex,
+           where surface area is roughly estimated as the average face area
+           of a voxel times the number of voxels.
+        5. Compute the volume of a labeled region of cortex.
+        6. Estimate the thickness of the labeled cortical region as the
+           volume of the labeled region (#5) divided by the
+           estimate of the middle cortical surface area of that region (#4).

     Note::

       - Cortex, noncortex, & label files are from the same coregistered brain.
-      - Calls ANTs functions: ImageMath, Threshold, ResampleImageBySpacing
+      - Calls ANTs functions: ImageMath and ThresholdImage
       - There may be slight discrepancies between volumes computed by
         thickinthehead() and volumes computed by volume_per_label();
         in 31 of 600+ ADNI 1.5T images, some volume_per_label() volumes
@@ -179,52 +180,6 @@
         label propagation through the cortex in thickinthehead().
         This is more pronounced in ANTs vs. FreeSurfer-labeled volumes.

-    Example preprocessing steps ::
-
-      1. Run Freesurfer and antsCorticalThickness.sh on T1-weighted image.
-      2. 
Convert FreeSurfer volume labels (e.g., wmparc.mgz or aparc+aseg.mgz) - to cortex (2) and noncortex (3) segments using relabel_volume() - function [refer to labels.rst or FreeSurferColorLUT labels file]. - 3. Convert ANTs Atropos-segmented volume (tmpBrainSegmentation.nii.gz) - to cortex and noncortex segments, by converting 1-labels to 0 and - 4-labels to 3 with the relabel_volume() function - (the latter is to include deep-gray matter with noncortical tissues). - 4. Combine FreeSurfer and ANTs segmentation volumes to obtain a single - cortex (2) and noncortex (3) segmentation file using the function - combine_2labels_in_2volumes(). This function takes the union of - cortex voxels from the segmentations, the union of the noncortex - voxels from the segmentations, and overwrites intersecting cortex - and noncortex voxels with noncortex (3) labels. - ANTs tends to include more cortical gray matter at the periphery of - the brain than Freesurfer, and FreeSurfer tends to include more white - matter that extends deep into gyral folds than ANTs, so the above - attempts to remedy their differences by overlaying ANTs cortical gray - with FreeSurfer white matter. - 5. Optional, see Step 2 below: - Fill segmented cortex with cortex labels and noncortex with - noncortex labels using the PropagateLabelsThroughMask() function - (which calls ImageMath ... PropagateLabelsThroughMask in ANTs). - The labels can be initialized using FreeSurfer (e.g. wmparc.mgz) - or ANTs (by applying the nonlinear inverse transform generated by - antsCorticalThickness.sh to labels in the Atropos template space). - [Note: Any further labeling steps may be applied, such as - overwriting cerebrum with intersecting cerebellum labels.] - - Steps :: - - 1. Extract noncortex and cortex. - 2. Either mask labels with cortex or fill cortex with labels. - 3. Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 - to better represent the contours of the boundaries of the cortex. - 4. Extract outer and inner boundary voxels of the cortex, - by eroding 1 (resampled) voxel for cortex voxels (2) bordering - the outside of the brain (0) and bordering noncortex (3). - 5. Estimate middle cortical surface area by the average volume - of the outer and inner boundary voxels of the cortex. - 6. Compute the volume of a labeled region of cortex. - 7. Estimate the thickness of the labeled cortical region as the - volume of the labeled region (#6) divided by the surface area (#5). - Parameters ---------- segmented_file : string @@ -239,10 +194,8 @@ def thickinthehead(segmented_file, labeled_file, cortex_value=2, label indices names : list of strings label names - resize : bool - resize (2x) segmented_file for more accurate thickness estimates? propagate : bool - propagate labels through cortex? + propagate labels through cortex (or mask labels with cortex)? output_dir : string output directory save_table : bool @@ -261,7 +214,7 @@ def thickinthehead(segmented_file, labeled_file, cortex_value=2, Examples -------- - >>> # Example simply using ants segmentation and labels: + >>> # Example simply using ants segmentation and labels vs. 
hybrid segmentation:
     >>> import os
     >>> import numpy as np
     >>> from mindboggle.shapes.volume_shapes import thickinthehead
@@ -280,7 +233,6 @@
     >>> labels.remove(1033)
     >>> labels.remove(2033)
     >>> names = []
-    >>> resize = True
     >>> propagate = False
     >>> output_dir = ''
     >>> save_table = True
@@ -291,13 +243,10 @@
     >>> label_volume_thickness, output_table = thickinthehead(segmented_file,
     ...     labeled_file, cortex_value, noncortex_value, labels, names,
-    ...     resize, propagate, output_dir, save_table, output_table, verbose) # doctest: +SKIP
+    ...     propagate, output_dir, save_table, output_table, verbose) # doctest: +SKIP
     >>> [np.int(x) for x in label_volume_thickness[0][0:10]] # doctest: +SKIP
-    [1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011 1012]
     >>> [np.float("{0:.{1}f}".format(x, 5)) for x in label_volume_thickness[1][0:5]] # doctest: +SKIP
-    [3136.99383, 7206.98582, 3257.99359, 1950.99616, 12458.97549]
     >>> [np.float("{0:.{1}f}".format(x, 5)) for x in label_volume_thickness[2][0:5]] # doctest: +SKIP
-    [3.8639, 3.69637, 2.56334, 4.09336, 4.52592]

     """
     import os
@@ -329,7 +278,7 @@
         else:
             output_table = os.path.join(os.getcwd(),
                                         'thickinthehead_for_each_label.csv')
-        fid = open(output_table, 'w')
+        fid = open(output_table, 'w', encoding='utf-8')
         if names:
             fid.write("name, ID, thickness (thickinthehead)\n")
         else:
@@ -337,20 +286,13 @@
     else:
         output_table = ''

-    # ------------------------------------------------------------------------
-    # ants command paths:
-    # ------------------------------------------------------------------------
-    ants_thresh = 'ThresholdImage'
-    ants_math = 'ImageMath'
-    ants_resample = 'ResampleImageBySpacing'
-
     # ------------------------------------------------------------------------
     # Extract noncortex and cortex:
     # ------------------------------------------------------------------------
-    cmd = [ants_thresh, '3', segmented_file, noncortex,
+    cmd = ['ThresholdImage', '3', segmented_file, noncortex,
           str(noncortex_value), str(noncortex_value), '1 0']
     execute(cmd, 'os')
-    cmd = [ants_thresh, '3', segmented_file, cortex,
+    cmd = ['ThresholdImage', '3', segmented_file, cortex,
           str(cortex_value), str(cortex_value), '1 0']
     execute(cmd, 'os')

@@ -358,65 +300,47 @@
     # ------------------------------------------------------------------------
     # Either mask labels with cortex or fill cortex with labels:
     # ------------------------------------------------------------------------
     if propagate:
-        cmd = [ants_math, '3', cortex, 'PropagateLabelsThroughMask',
+        cmd = ['ImageMath', '3', cortex, 'PropagateLabelsThroughMask',
               cortex, labeled_file]
         execute(cmd, 'os')
     else:
-        cmd = [ants_math, '3', cortex, 'm', cortex, labeled_file]
+        cmd = ['ImageMath', '3', cortex, 'm', cortex, labeled_file]
         execute(cmd, 'os')

     # ------------------------------------------------------------------------
     # Load data and dimensions:
     # ------------------------------------------------------------------------
-    if resize:
-        rescale = 2.0
-    else:
-        rescale = 1.0
-    compute_real_volume = True
-    if compute_real_volume:
-        img = nb.load(cortex)
-        hdr = img.get_header()
-        vv_orig = np.prod(hdr.get_zooms())
-        vv = np.prod([x/rescale for x in hdr.get_zooms()])
-        cortex_data = img.get_data().ravel()
-    else:
-        vv = 1/rescale
-        cortex_data = 
nb.load(cortex).get_data().ravel() - - # ------------------------------------------------------------------------ - # Resample cortex and noncortex files from 1x1x1 to 0.5x0.5x0.5 - # to better represent the contours of the boundaries of the cortex: - # ------------------------------------------------------------------------ - if resize: - dims = ' '.join([str(1/rescale), str(1/rescale), str(1/rescale)]) - cmd = [ants_resample, '3', cortex, cortex, dims, '0 0 1'] - execute(cmd, 'os') - cmd = [ants_resample, '3', noncortex, noncortex, dims, '0 0 1'] - execute(cmd, 'os') + img = nb.load(cortex) + cortex_data = img.get_data().ravel() + voxsize = img.header.get_zooms() + voxvol = np.prod(voxsize) + voxarea = (voxsize[0] * voxsize[1] + \ + voxsize[0] * voxsize[2] + \ + voxsize[1] * voxsize[2]) / 3 # ------------------------------------------------------------------------ # Extract outer and inner boundary voxels of the cortex, - # by eroding 1 (resampled) voxel for cortex voxels (2) bordering - # the outside of the brain (0) and bordering noncortex (3): + # by eroding 1 voxel for cortex voxels (=2) bordering + # the outside of the brain (=0) and bordering noncortex (=3): # ------------------------------------------------------------------------ - cmd = [ants_math, '3', inner_edge, 'MD', noncortex, '1'] + cmd = ['ImageMath', '3', inner_edge, 'MD', noncortex, '1'] execute(cmd, 'os') - cmd = [ants_math, '3', inner_edge, 'm', cortex, inner_edge] + cmd = ['ImageMath', '3', inner_edge, 'm', cortex, inner_edge] execute(cmd, 'os') if use_outer_edge: - cmd = [ants_thresh, '3', cortex, outer_edge, '1 10000 1 0'] + cmd = ['ThresholdImage', '3', cortex, outer_edge, '1 10000 1 0'] execute(cmd, 'os') - cmd = [ants_math, '3', outer_edge, 'ME', outer_edge, '1'] + cmd = ['ImageMath', '3', outer_edge, 'ME', outer_edge, '1'] execute(cmd, 'os') - cmd = [ants_thresh, '3', outer_edge, outer_edge, '1 1 0 1'] + cmd = ['ThresholdImage', '3', outer_edge, outer_edge, '1 1 0 1'] execute(cmd, 'os') - cmd = [ants_math, '3', outer_edge, 'm', cortex, outer_edge] + cmd = ['ImageMath', '3', outer_edge, 'm', cortex, outer_edge] execute(cmd, 'os') - cmd = [ants_thresh, '3', inner_edge, temp, '1 10000 1 0'] + cmd = ['ThresholdImage', '3', inner_edge, temp, '1 10000 1 0'] execute(cmd, 'os') - cmd = [ants_thresh, '3', temp, temp, '1 1 0 1'] + cmd = ['ThresholdImage', '3', temp, temp, '1 1 0 1'] execute(cmd, 'os') - cmd = [ants_math, '3', outer_edge, 'm', temp, outer_edge] + cmd = ['ImageMath', '3', outer_edge, 'm', temp, outer_edge] execute(cmd, 'os') # ------------------------------------------------------------------------ @@ -440,23 +364,26 @@ def thickinthehead(segmented_file, labeled_file, cortex_value=2, name = names[ilabel] # -------------------------------------------------------------------- - # Compute thickness as a ratio of label volume and edge volume: - # - Estimate middle cortical surface area by the average volume + # Compute thickness as a ratio of label volume and layer surface area: + # - Estimate middle cortical surface area by the average area # of the outer and inner boundary voxels of the cortex. + # - Surface area is roughly estimated as the average face area + # of a voxel times the number of voxels. # - Compute the volume of a labeled region of cortex. # - Estimate the thickness of the labeled cortical region as the - # volume of the labeled region divided by the surface area. + # volume of the labeled region divided by the middle surface area. 
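+        #   (Illustrative arithmetic, assuming isotropic 1 mm voxels so that
+        #   voxvol = 1.0 mm^3 and voxarea = 1.0 mm^2: a label with 3000
+        #   cortex voxels and an average of 1000 inner/outer boundary voxels
+        #   gives thickness = 3000.0 / 1000.0 = 3.0 mm.)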
# -------------------------------------------------------------------- - label_cortex_volume = vv_orig * len(np.where(cortex_data==label)[0]) - label_inner_edge_volume = vv * len(np.where(inner_edge_data==label)[0]) - if label_inner_edge_volume: + label_cortex_volume = voxvol * len(np.where(cortex_data==label)[0]) + label_inner_edge_area = voxarea * \ + len(np.where(inner_edge_data==label)[0]) + if label_inner_edge_area: if use_outer_edge: - label_outer_edge_volume = \ - vv * len(np.where(outer_edge_data==label)[0]) - label_area = (label_inner_edge_volume + - label_outer_edge_volume) / 2.0 + label_outer_edge_area = \ + voxarea * len(np.where(outer_edge_data==label)[0]) + label_area = (label_inner_edge_area + + label_outer_edge_area) / 2.0 else: - label_area = label_inner_edge_volume + label_area = label_inner_edge_area thickness = label_cortex_volume / label_area label_volume_thickness[ilabel, 1] = label_cortex_volume label_volume_thickness[ilabel, 2] = thickness diff --git a/mindboggle/shapes/zernike/pipelines.py b/mindboggle/shapes/zernike/pipelines.py index 22571d7ac..5ad980ddf 100644 --- a/mindboggle/shapes/zernike/pipelines.py +++ b/mindboggle/shapes/zernike/pipelines.py @@ -2,7 +2,7 @@ import numpy as np import scipy -from scipy.misc import (factorial, +from scipy.special import (factorial, comb as nchoosek, ) diff --git a/mindboggle/thirdparty/ants.py b/mindboggle/thirdparty/ants.py index d80b90435..884cab928 100644 --- a/mindboggle/thirdparty/ants.py +++ b/mindboggle/thirdparty/ants.py @@ -77,10 +77,10 @@ def antsApplyTransformsToPoints(points, transform_files, # Write points (x,y,z,1) to a .csv file: # ------------------------------------------------------------------------ points_file = os.path.join(os.getcwd(), 'points.csv') - fid = open(points_file, 'w') + fid = open(points_file, 'w', encoding='utf-8') fid.write('x,y,z,t\n') fid.close() - fid = open(points_file, 'a') + fid = open(points_file, 'a', encoding='utf-8') for point in points: string_of_zeros = (4 - len(point)) * ',0' fid.write(','.join([str(x) for x in point]) + string_of_zeros + '\n') diff --git a/mindboggle/thirdparty/vtkviewer.py b/mindboggle/thirdparty/vtkviewer.py old mode 100755 new mode 100644 diff --git a/mindboggle/version.py b/mindboggle/version.py new file mode 100644 index 000000000..bb5a33cd7 --- /dev/null +++ b/mindboggle/version.py @@ -0,0 +1 @@ +__version__ = '1.3.8' diff --git a/mindboggle/x/test_zernike/compat/__init__.py b/mindboggle/x/test_zernike/compat/__init__.py index b1f83a43a..0ffebddc7 100644 --- a/mindboggle/x/test_zernike/compat/__init__.py +++ b/mindboggle/x/test_zernike/compat/__init__.py @@ -1,7 +1,7 @@ import numpy import numpy.linalg import scipy -import scipy.misc +import scipy.special import time #import profilehooks @@ -62,7 +62,7 @@ def rng(self,*args) : elif len(args) == 2 : return range(args[0],args[1]+1) elif len(args) == 3 : return range(args[0],args[1]+1,args[2]) else : raise Exception() - def factorial(self,*args,**dargs) : return scipy.misc.factorial(*args,**dargs) + def factorial(self,*args,**dargs) : return scipy.special.factorial(*args,**dargs) def tic(self) : self.tic_time = time.time() def toc(self) : @@ -81,7 +81,7 @@ def cat(self,axis,*args) : if axis >= ndim : args = tuple([ numpy.expand_dims(a,axis) for a in args ]) return numpy.concatenate(args,axis=axis) def sqrt(self,*args,**dargs) : return scipy.sqrt(*args,**dargs) - def nchoosek(self,*args,**dargs) : return scipy.misc.comb(*args,**dargs) + def nchoosek(self,*args,**dargs) : return 
scipy.special.comb(*args,**dargs) def floor(self,*args,**dargs) : return numpy.floor(*args,**dargs).astype(int) def conj(self,*args,**dargs) : return numpy.conj(*args,**dargs) def sum(self,*args,**dargs) : return numpy.sum(*args,**dargs) diff --git a/mindboggle/x/test_zernike/multiproc/__init__.py b/mindboggle/x/test_zernike/multiproc/__init__.py index a6d3502ad..264207f36 100644 --- a/mindboggle/x/test_zernike/multiproc/__init__.py +++ b/mindboggle/x/test_zernike/multiproc/__init__.py @@ -1,7 +1,7 @@ import numpy import numpy.linalg import scipy -import scipy.misc +import scipy.special import time #import profilehooks import copy_reg @@ -58,7 +58,7 @@ def rng(self,*args) : def rng_prod(self,*args,**dargs) : args = tuple([ self.rng(*a) for a in args ]) return itertools.product(*args,**dargs) - def factorial(self,*args,**dargs) : return scipy.misc.factorial(*args,**dargs) + def factorial(self,*args,**dargs) : return scipy.special.factorial(*args,**dargs) def tic(self) : self.tic_time = time.time() def toc(self) : @@ -76,7 +76,7 @@ def cat(self,axis,*args) : if axis >= ndim : args = tuple([ numpy.expand_dims(a,axis) for a in args ]) return numpy.concatenate(args,axis=axis) def sqrt(self,*args,**dargs) : return scipy.sqrt(*args,**dargs) - def nchoosek(self,*args,**dargs) : return scipy.misc.comb(*args,**dargs) + def nchoosek(self,*args,**dargs) : return scipy.special.comb(*args,**dargs) def floor(self,*args,**dargs) : return numpy.floor(*args,**dargs).astype(int) def conj(self,*args,**dargs) : return numpy.conj(*args,**dargs) def sum(self,*args,**dargs) : return numpy.sum(*args,**dargs) diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index 3825b7439..4932bc152 --- a/setup.py +++ b/setup.py @@ -18,8 +18,6 @@ if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info', 'egg_info', 'easy_install', )).intersection(sys.argv)) > 0: - # setup_egg imports setuptools setup, thus monkeypatching distguts. - # import setup_egg pass from distutils.core import setup @@ -27,20 +25,6 @@ ver_file = pjoin(os.getcwd(), 'info.py') exec(open(ver_file).read()) -# Do dependency checking -#package_check('numpy', NUMPY_MIN_VERSION) - -# extra_setuptools_args = {} -# if 'setuptools' in sys.modules: -# extra_setuptools_args = dict( -# tests_require=['nose'], -# test_suite='nose.collector', -# zip_safe=False, -# extras_require = dict( -# doc='Sphinx>=0.3', -# test='nose>=0.10.1') -# ) - def main(**extra_args): setup(name=NAME, maintainer=MAINTAINER, @@ -55,7 +39,7 @@ def main(**extra_args): author_email=AUTHOR_EMAIL, platforms=PLATFORMS, version=VERSION, - #requires=REQUIRES, + requires=REQUIRES, provides=PROVIDES, packages=['mindboggle', 'mindboggle.data', @@ -69,12 +53,11 @@ def main(**extra_args): package_data={'mindboggle': [pjoin('data', '*.nii.gz'), pjoin('data', '*.txt')]}, scripts=[pjoin('mindboggle', 'mindboggle'), - pjoin('mindboggle', 'mindboggle123')], + pjoin('mindboggle', 'mindboggle123'), + pjoin('colors_script', 'calc_colormap.py'), + pjoin('colors_script', 'convert_to_mipav_lut.py')], **extra_args ) if __name__ == "__main__": main() - #main(**extra_setuptools_args) - - diff --git a/vtk_cpp_tools/CMakeLists.txt b/vtk_cpp_tools/CMakeLists.txt index 49e02b438..11917be9c 100644 --- a/vtk_cpp_tools/CMakeLists.txt +++ b/vtk_cpp_tools/CMakeLists.txt @@ -5,7 +5,7 @@ project(mindboggle_surfaces) ## "cmake .. 
-DVTK_DIR:STRING=/Users/arno/anaconda/lib/cmake/vtk-7.0" #set(VTK_DIR "/Users/arno/anaconda/lib/cmake/vtk-7.0") #find_package(VTK REQUIRED NO_MODULE) -find_package(VTK 7.0 COMPONENTS vtkInteractionStyle vtkRenderingFreeType vtkRenderingOpenGL2 vtkRenderingVolumeOpenGL2 vtkIOLegacy vtkIOMINC vtkIOGeometry vtkImagingStencil vtkImagingMorphological vtkFiltersModeling NO_MODULE) +find_package(VTK 8.1 COMPONENTS vtkInteractionStyle vtkRenderingFreeType vtkRenderingOpenGL2 vtkRenderingVolumeOpenGL2 vtkIOLegacy vtkIOMINC vtkIOGeometry vtkImagingStencil vtkImagingMorphological vtkFiltersModeling NO_MODULE) include(${VTK_USE_FILE}) #if(COMMAND CMAKE_POLICY) diff --git a/vtk_cpp_tools/MeshAnalyser.cpp b/vtk_cpp_tools/MeshAnalyser.cpp old mode 100755 new mode 100644 index f71f769ea..f935ea633 --- a/vtk_cpp_tools/MeshAnalyser.cpp +++ b/vtk_cpp_tools/MeshAnalyser.cpp @@ -537,6 +537,8 @@ void MeshAnalyser::WriteIntoFile(char* fileName, char* prop) else if(strcmp("euclideanDepth",prop)==0) this->mesh->GetPointData()->SetScalars(this->euclideanDepth); else if(strcmp("curv",prop)==0) this->mesh->GetPointData()->SetScalars(this->curv); else if(strcmp("gCurv",prop)==0) this->mesh->GetPointData()->SetScalars(this->gCurv); + else if(strcmp("curv1",prop)==0) this->mesh->GetPointData()->SetScalars(this->curv1); + else if(strcmp("curv2",prop)==0) this->mesh->GetPointData()->SetScalars(this->curv2); else if(strcmp("test",prop)==0) this->mesh->GetPointData()->SetScalars(this->test); else if(strcmp("surf",prop)==0) this->mesh->GetPointData()->SetScalars(this->pointSurf); else if(strcmp("1color",prop)==0) diff --git a/vtk_cpp_tools/MeshAnalyser.h b/vtk_cpp_tools/MeshAnalyser.h old mode 100755 new mode 100644
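A note on the new table-collation helpers added to mindboggle/mio/tables.py
above: column names are built by short_name(), which abbreviates each slash-
or underscore-separated word below a subject's tables/ directory to its first
letter. A minimal usage sketch follows; the subject IDs, output path, and
file name are hypothetical, and only short_name and collate_participant_tables
come from this patch ::

    >>> from mindboggle.mio.tables import short_name, collate_participant_tables
    >>> short_name('/out/mindboggled/sub-01/tables/left_cortical_surface/fundus_shapes.csv')
    'lcsfs'
    >>> subject_ids = ['sub-01', 'sub-02']  # hypothetical subject IDs
    >>> base_dir = '/out/mindboggled'       # hypothetical mindboggled directory
    >>> dft = collate_participant_tables(subject_ids, base_dir)  # doctest: +SKIP
    >>> list(dft.index) == subject_ids  # one row per subject  # doctest: +SKIP
    True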