libbart-devel/.github/workflows/c-cpp.yml

name: C/C++ CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v2
    - name: update
      run: sudo apt-get install -q gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
    - name: make all
      run: make all
    - name: make utest
      run: make utest
    - name: make test
      run: make test
    - name: make pythontest
      run: |
        sudo apt-get install -q python3 python3-numpy
        make pythontest

libbart-devel/.gitignore.main

# autogenerated .gitignore files
.gitignore

# dependency files
*.d

# object files
*.o

# Windows executables
*.exe

# mac debug files
*.dSYM

# python compiled files
*.pyc
python/.ipynb_checkpoints/

# temporary files
*.swp
*~

# Mac file
.DS_Store

# local Makefile
Makefile.local

# version string
src/misc/version.inc

# noise simulations
save/nsv/*.dat

# fftw wisdoms
save/fftw/*.fftw

# ctags
tags
GTAGS
GSYMS
GRTAGS
GPATH

# autogenerated documentation
doc/html
doc/latex
doc/dx
doc/commands.txt

# test files
tests/test-*

# clangd cache
.cache/*
compile_commands.json

# vscode
.vscode/*

libbart-devel/.gitlab-ci.yml

variables:
  OMP_NUM_THREADS: "1"
  # mpirun in docker has a problem with its default transfer mechanism,
  # so we disable it:
  OMPI_MCA_btl_vader_single_copy_mechanism: "none"
  # make DEBUG_DWARF the default
  DEBUG: "1"
  DEBUG_DWARF: "1"
  # utests give a backtrace + abort on the first error
  BART_UTEST_ABORT: "1"

image: registry.gitlab.tugraz.at/ibi/reproducibility/reproducibility_testing/ibi_cuda_all_deps

default:
  # make builds interruptible by default
  interruptible: true
  before_script:
    # - apt-get update -qq && apt-get install -y -qq
    - chown -R ibi .
stages: - build - test1 - test2 Build_NoDEBUG: stage: build script: - sudo -E -u ibi DEBUG=0 DEBUG_DWARF=0 WERROR=1 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ expire_in: 45 minutes Build: stage: build script: - sudo -E -u ibi WERROR=1 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_riscv: stage: build tags: - riscv image: registry.gitlab.tugraz.at/ibi/reproducibility/reproducibility_testing/ibi_riscv script: - sudo -E -u ibi make -j 4 all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 180 minutes Build_Clang: stage: build script: - sudo -E -u ibi CC=clang-16 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_Clang_GPU: stage: build script: - sudo -E -u ibi CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_Static: stage: build script: # - apt-get update -qq && apt-get install -y libgfortran-12-dev - sudo -E -u ibi SLINK=1 make artifacts: paths: - bart expire_in: 45 minutes Build_Shared: stage: build script: - sudo -E -u ibi make libbart.so artifacts: paths: - libbart.so expire_in: 45 minutes Build_Shared_GPU: stage: build script: - sudo -E -u ibi CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make libbart.so artifacts: paths: - libbart.so expire_in: 45 minutes Build_UBSan: stage: build script: - sudo -E -u ibi UBSAN=1 ASAN=1 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_GPU: stage: build script: - sudo -E -u ibi CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_MPI_GPU: stage: build script: - sudo -E -u ibi MPI=1 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_MPI: stage: build script: - sudo -E -u ibi MPI=1 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_LTO: stage: build script: - sudo -E -u ibi CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make artifacts: paths: - bart - "./lib/*.a" - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes Build_Analyzer: stage: build script: - sudo -E -u ibi CFLAGS="-fanalyzer -Werror" DEBUG=0 DEBUG_DWARF=0 make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ expire_in: 45 minutes allow_failure: true Build_Tensorflow: stage: build script: # Already part of the Docker image # - wget --no-verbose https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz # - mkdir tensorflow_dir && tar -C tensorflow_dir -xvzf libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz - sudo -E -u ibi TENSORFLOW=1 TENSORFLOW_BASE=/tensorflow_dir/ CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all artifacts: paths: - bart - "./lib/*.a" - ./commands/ - "./src/*.o" - ./src/misc/version.inc expire_in: 45 minutes UTest: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 WERROR=1 make 
utest needs: [Build] dependencies: - Build UTest_riscv: stage: test1 tags: - riscv image: registry.gitlab.tugraz.at/ibi/reproducibility/reproducibility_testing/ibi_riscv script: - sudo -E -u ibi AUTOCLEAN=0 make -j 4 utest needs: [Build_riscv] dependencies: - Build_riscv UTest_Clang: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 CC=clang-16 make utest needs: [Build_Clang] dependencies: - Build_Clang UTest_Clang_GPU: stage: test1 script: - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi - sudo -E -u ibi AUTOCLEAN=0 CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make utest_gpu needs: [Build_Clang_GPU] dependencies: - Build_Clang_GPU UTest_Valgrind: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 UTESTLEAK=1 make utest needs: [Build] dependencies: - Build UTest_UBSan: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 UBSAN=1 ASAN=1 make utest needs: [Build_UBSan] dependencies: - Build_UBSan UTest_GPU: stage: test1 script: - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi - sudo -E -u ibi AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make utest_gpu needs: [Build_GPU] dependencies: - Build_GPU UTest_MPI: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 MPI=1 make utest needs: [Build_MPI] dependencies: - Build_MPI UTest_Tensorflow: stage: test1 script: # Already part of the Docker image # - wget --no-verbose https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz # - mkdir tensorflow_dir && tar -C tensorflow_dir -xvzf libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz - sudo -E -u ibi AUTOCLEAN=0 TENSORFLOW=1 TENSORFLOW_BASE=/tensorflow_dir/ CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make utest needs: [Build_Tensorflow] dependencies: - Build_Tensorflow UTest_LTO: stage: test1 script: - sudo -E -u ibi AUTOCLEAN=0 CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make utest needs: [Build_LTO] dependencies: - Build_LTO IntTest: stage: test2 script: - sudo -E -u ibi AUTOCLEAN=0 AGUE_TWIX_REF=/reference_data/twix/ WERROR=1 make test testslow testague needs: [Build] dependencies: - Build IntTest_riscv: stage: test2 tags: - riscv image: registry.gitlab.tugraz.at/ibi/reproducibility/reproducibility_testing/ibi_riscv script: - sudo -E -u ibi AUTOCLEAN=0 AGUE_TWIX_REF=/reference_data/twix/ make -j 4 test testague needs: [Build_riscv] dependencies: - Build_riscv IntTest_Python: stage: test2 script: - sudo -E -u ibi AUTOCLEAN=0 make pythontest needs: [Build] dependencies: - Build allow_failure: true IntTest_GPU: stage: test2 script: - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi - sudo -E -u ibi AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make gputest needs: [Build_GPU] dependencies: - Build_GPU IntTest_Clang: stage: test2 script: - sudo -E -u ibi AUTOCLEAN=0 CC=clang-16 make test needs: [Build_Clang] dependencies: - Build_Clang IntTest_Clang_GPU: stage: test2 script: - if ! 
nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi - sudo -E -u ibi AUTOCLEAN=0 CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make gputest needs: [Build_Clang_GPU] dependencies: - Build_Clang_GPU IntTest_UBSan: stage: test2 script: - sudo -E -u ibi AUTOCLEAN=0 UBSAN=1 ASAN=1 DEBUG=1 UBSAN_OPTIONS=print_stacktrace=1 make test needs: [Build_UBSan] dependencies: - Build_UBSan IntTest_MPI: stage: test2 script: - sudo -E -u ibi AUTOCLEAN=0 MPI=1 make testslow needs: [Build_MPI] dependencies: - Build_MPI IntTest_MPI_GPU: stage: test2 script: - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi - sudo -E -u ibi AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" MPI=1 make gputest needs: [Build_MPI_GPU] dependencies: - Build_MPI_GPU #IntTest_LTO: # stage: test2 # script: # - CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make test # needs: [Build_LTO] # dependencies: # - Build_LTO libbart-devel/ACKNOWLEDGEMENTS000066400000000000000000000017301463460177700160670ustar00rootroot00000000000000 We want to acknowledge the following persons for supporting BART by contributing source code, testing, feedback, data, bug reports, etc. (alphabetical) Marcus T. Alley Michael Anderson Jakob Asslaender Dara Bahri Yaël Balbastre Moritz Blumenthal Pim Borman Soumick Chatterjee Joseph Y. Cheng Nguyen Damien Sofia Dimoudi Philipp Ehses Alexander Fyrdahl Siddharth Iyer Hans Johnson Tamás Hakkel Martin Heide Christian Holme Yuxin Hu Gregory R. Lee Evan G. Levine Gilad Liberman Max Litster Tim Loderhose Michael Lustig Martin Krämer Sidharth Kumar Lyu Mengye Damien Nguyen Frank Ong Bernhard Rapp Melvin Robinson Volkert Roeloffs William A. Romero Sebastian Rosenzweig Philip Schaten Nick Scholand Jasper Schoormans Efrat Shimron David Smith Martin Strunz Jonathan I. Tamir Michelle Tamir (logo) Zhengguo Tan Johannes Töger Christian Tönnes Aurélien Trotier Martin Uecker Shreyas S. Vasanawala Sana Vaziri Marten Veldmann Patrick Virtue Xiaoqing Wang Simon Yeung Tao Zhang libbart-devel/LICENSE000066400000000000000000000031461463460177700146220ustar00rootroot00000000000000Copyright (c) 2013-2018. The Regents of the University of California. Copyright (c) 2013-2024. BART Developer Team and Contributors. Copyright (c) 2012. Intel Corporation. (src/lapacke/) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. libbart-devel/Makefile000066400000000000000000000541261463460177700152610ustar00rootroot00000000000000# Copyright 2013-2015. The Regents of the University of California. # Copyright 2015-2022. Martin Uecker # Copyright 2022-2024. Institute of Biomedical Imaging, TU Graz. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # silent make #MAKEFLAGS += --silent # auto clean on makefile updates AUTOCLEAN?=1 # clear out all implicit rules MAKEFLAGS += --no-builtin-rules # clear out some variables by hand, as we cannot use -R, --no-builtin-variables without recursive make # but only undefine them if they come from their default values define undef_builtin ifeq ($(origin $(1)),default) undefine $(1) endif endef $(eval $(foreach VAR,CC CXX CPP LD ARFLAGS ,$(eval $(call undef_builtin,$(VAR))))) # use for parallel make AR=./ar_lock.sh # Paths here = $(realpath $(dir $(lastword $(MAKEFILE_LIST)))) root := $(here) srcdir = $(root)/src libdir = $(root)/lib bindir = $(root)/bin export LOCKDIR?=${libdir} export BART_TOOLBOX_PATH=$(root) MAKEFILES = $(wildcard $(root)/Makefiles/Makefile.*) ALLMAKEFILES = $(root)/Makefile $(wildcard $(root)/Makefile.* $(root)/*.mk $(root)/rules/*.mk $(root)/Makefiles/Makefile.*) -include Makefile.$(NNAME) -include Makefile.local -include $(MAKEFILES) # some operations might still be non deterministic NON_DETERMINISTIC?=0 # allow blas calls within omp regions (fails on Debian 9, openblas) BLAS_THREADSAFE?=0 # use for ppc64le HPC MPI?=0 OPENBLAS?=0 MKL?=0 CUDA?=0 CUDNN?=0 ACML?=0 OMP?=1 SLINK?=0 DEBUG?=0 UBSAN?=0 ASAN?=0 FFTWTHREADS?=1 SCALAPACK?=0 ISMRMRD?=0 TENSORFLOW?=0 NOEXEC_STACK?=0 PARALLEL?=0 PARALLEL_NJOBS?= FORTRAN?=1 PNG?=1 DEBUG_DWARF?=0 WERROR?=0 LOG_BACKEND?=0 LOG_SIEMENS_BACKEND?=0 LOG_ORCHESTRA_BACKEND?=0 LOG_GADGETRON_BACKEND?=0 DESTDIR ?= / PREFIX ?= usr/local/ BUILDTYPE = Linux UNAME = $(shell uname -s) MNAME = $(shell uname -m) NNAME = $(shell uname -n) MYLINK=ln ifeq ($(UNAME),Darwin) BUILDTYPE = MacOSX MYLINK = ln -s endif ifeq ($(BUILDTYPE), MacOSX) MACPORTS ?= 1 endif ifeq ($(BUILDTYPE), Linux) # as the defaults changed on most Linux distributions # explicitly specify non-deterministic archives to not break make ARFLAGS ?= rsU else ARFLAGS ?= rs endif ifeq ($(UNAME),Cygwin) BUILDTYPE = Cygwin NOLAPACKE ?= 1 endif ifeq ($(UNAME),CYGWIN_NT-10.0) BUILDTYPE = Cygwin NOLAPACKE ?= 1 endif ifneq (,$(findstring MSYS,$(UNAME))) BUILDTYPE = MSYS #LDFLAGS += -lucrtbase # support for %F, %T formatting codes in strftime() #LDFLAGS += -static-libgomp NOLAPACKE ?= 1 SLINK = 1 endif # Automatic dependency generation DEPFILE = $(*D)/.$(*F).d DEPFLAG = -MMD -MF $(DEPFILE) ALLDEPS = $(shell find $(srcdir) utests -name ".*.d") # Compilation flags ifeq ($(DEBUG_DWARF),1) DEBUG=1 endif ifneq ($(DEBUG),1) OPT = -O2 else OPT = -Og endif #OPT += -ffp-contract=off CPPFLAGS ?= -Wall -Wextra CFLAGS ?= $(OPT) -Wmissing-prototypes -Wincompatible-pointer-types -Wsign-conversion CXXFLAGS ?= $(OPT) ifeq 
($(BUILDTYPE), MacOSX) CC ?= gcc-mp-12 else CC ?= gcc ifneq ($(BUILDTYPE), MSYS) # for symbols in backtraces LDFLAGS += -rdynamic endif endif # for debug backtraces ifeq ($(DEBUG_DWARF),1) LIBS += -ldw -lunwind CPPFLAGS += -DUSE_DWARF endif ifeq ($(WERROR),1) CFLAGS += -Werror endif ifeq ($(MNAME),riscv64) CFLAGS+=-ffp-contract=off endif # openblas ifeq ($(BUILDTYPE), MSYS) BLAS_BASE ?= /mingw64/include/OpenBLAS/ else ifneq ($(BUILDTYPE), MacOSX) BLAS_BASE ?= /usr/ else ifeq ($(MACPORTS),1) BLAS_BASE ?= /opt/local/ CPPFLAGS += -DUSE_MACPORTS endif BLAS_BASE ?= /usr/local/opt/openblas/ endif endif ifeq ($(BUILDTYPE), Linux) ifneq ($(OPENBLAS), 1) ifneq (,$(findstring Red Hat,$(shell gcc --version))) CPPFLAGS+=-I/usr/include/lapacke/ LDFLAGS+=-L/usr/lib64/atlas -ltatlas endif endif endif # cuda CUDA_BASE ?= /usr/ CUDA_LIB ?= lib CUDNN_BASE ?= $(CUDA_BASE) CUDNN_LIB ?= lib64 # tensorflow TENSORFLOW_BASE ?= /usr/local/ # acml ACML_BASE ?= /usr/local/acml/acml4.4.0/gfortran64_mp/ # mkl MKL_BASE ?= /opt/intel/mkl/lib/intel64/ # fftw ifneq ($(BUILDTYPE), MacOSX) FFTW_BASE ?= /usr/ else FFTW_BASE ?= /opt/local/ endif # Matlab MATLAB_BASE ?= /usr/local/matlab/ # ISMRM ISMRM_BASE ?= /usr/local/ismrmrd/ # Main build targets # TBASE=show slice crop resize join transpose squeeze flatten zeros ones flip circshift extract repmat bitmask reshape version delta copy casorati vec poly index multicfl TFLP=scale invert conj fmac saxpy sdot spow cpyphs creal carg normalize cdf97 pattern nrmse mip avg cabs zexp calc TNUM=fft fftmod fftshift noise bench threshold conv rss filter nlmeans mandelbrot wavelet window var std fftrot roistat pol2mask conway morphop TRECO=pics pocsense sqpics itsense nlinv moba nufft nufftbase rof tgv ictv sake wave lrmatrix estdims estshift estdelay wavepsf wshfl rtnlinv mobafit grog TCALIB=ecalib ecaltwo caldir walsh cc ccapply rovir calmat svd estvar whiten rmfreq ssa bin psf TMRI=homodyne poisson twixread fakeksp looklocker upat fovshift TSIM=phantom traj signal epg sim TIO=toimg TNN=reconet nnet onehotenc measure mnist tensorflow nlinvnet MODULES = -lnum -lmisc -lnum -lmisc ifeq ($(BUILDTYPE), MSYS) MODULES += -lwin endif MODULES_pics = -lgrecon -lsense -liter -llinops -lwavelet -llowrank -lnoncart -lnn -lnlops MODULES_sqpics = -lsense -liter -llinops -lwavelet -llowrank -lnoncart -llinops MODULES_pocsense = -lsense -liter -llinops -lwavelet MODULES_nlinv = -lnoir -liter -lnlops -llinops -lnoncart MODULES_rtnlinv = -lnoir -liter -lnlops -llinops -lnoncart MODULES_moba = -lmoba -lnoir -lnn -lnlops -llinops -lwavelet -lnoncart -lsimu -lgrecon -llowrank -llinops -liter -lnn MODULES_mobafit = -lmoba -lnlops -llinops -lsimu -liter -lnoir MODULES_bpsense = -lsense -lnoncart -liter -llinops -lwavelet MODULES_itsense = -liter -llinops MODULES_ecalib = -lcalib -llinops MODULES_ecaltwo = -lcalib -llinops MODULES_estdelay = -lcalib MODULES_caldir = -lcalib MODULES_walsh = -lcalib MODULES_calmat = -lcalib MODULES_cc = -lcalib -llinops MODULES_ccapply = -lcalib -llinops MODULES_estvar = -lcalib MODULES_nufft = -lnoncart -liter -llinops MODULES_rof = -liter -llinops MODULES_tgv = -liter -llinops MODULES_ictv = -liter -llinops MODULES_bench = -lwavelet -llinops MODULES_phantom = -lsimu -lgeom MODULES_bart = -lbox -lgrecon -lsense -lnoir -liter -llinops -lwavelet -llowrank -lnoncart -lcalib -lsimu -lsake -lnlops -lnetworks -lnoir -lnn -liter -lmoba -lgeom -lnn -lnlops MODULES_sake = -lsake MODULES_traj = -lnoncart MODULES_wave = -liter -lwavelet -llinops -llowrank MODULES_threshold = -llowrank 
-liter -llinops -lwavelet MODULES_fakeksp = -lsense -llinops MODULES_lrmatrix = -llowrank -liter -llinops -lnlops MODULES_estdims = MODULES_ismrmrd = -lismrm MODULES_wavelet = -llinops -lwavelet MODULES_wshfl = -lgrecon -lsense -liter -llinops -lwavelet -llowrank -lnoncart -lnlops -lnn -lnlops MODULES_ssa = -lcalib MODULES_bin = -lcalib MODULES_signal = -lsimu MODULES_pol2mask = -lgeom MODULES_epg = -lsimu MODULES_reconet = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter MODULES_mnist = -lnetworks -lnn -lnlops -llinops -liter MODULES_nnet = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter MODULES_tensorflow = -lnn -lnlops -llinops -liter MODULES_measure = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter MODULES_onehotenc = -lnn MODULES_sim = -lsimu MODULES_morphop = -lnlops -llinops -lgeom MODULES_psf = -lnoncart -llinops MODULES_nlinvnet = -lnetworks -lnoir -liter -lnn -lnlops -llinops -lnoncart -lgrecon -lnetworks -lsense -liter -llinops -lwavelet -llowrank -lnoncart -lnlops -lnn MODULES_grog = -lcalib GCCVERSION11 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 11) GCCVERSION14 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 14) # clang ifeq ($(findstring clang,$(CC)),clang) CFLAGS += -fblocks LDFLAGS += -lBlocksRuntime ifeq ($(DEBUG_DWARF),1) CFLAGS += -gdwarf -gdwarf-aranges endif # Make complains if $(error ...) is indented by tab: ifeq ($(MPI),1) $(error ERROR MPI is not support with clang, please compile with gcc) endif else # only add if not clang, as it doesn't understand this: ifeq ($(GCCVERSION14), 1) CFLAGS += -Wuseless-cast -Wno-c23-extensions -Wjump-misses-init else ifeq ($(GCCVERSION11), 1) CFLAGS += -Wno-vla-parameter -Wno-nonnull -Wno-maybe-uninitialized endif endif endif CXX ?= g++ LINKER ?= $(CC) ifeq ($(ISMRMRD),1) TMRI += ismrmrd MODULES_bart += -lismrm endif ifeq ($(NOLAPACKE),1) CPPFLAGS += -DNOLAPACKE MODULES += -llapacke endif ifeq ($(TENSORFLOW),1) CPPFLAGS += -DTENSORFLOW -I$(TENSORFLOW_BASE)/include LIBS += -L$(TENSORFLOW_BASE)/lib -Wl,-rpath $(TENSORFLOW_BASE)/lib -ltensorflow_framework -ltensorflow endif XTARGETS += $(TBASE) $(TFLP) $(TNUM) $(TIO) $(TRECO) $(TCALIB) $(TMRI) $(TSIM) $(TNN) XTARGETS:=$(sort $(XTARGETS)) # CTARGETS: command targets, that are in the commands/ subdir CTARGETS = $(addprefix commands/, $(XTARGETS)) ifeq ($(DEBUG),1) CPPFLAGS += -g CFLAGS += -g NVCCFLAGS += -g endif ifeq ($(UBSAN),1) CFLAGS += -fsanitize=undefined,bounds-strict -fno-sanitize-recover=all ifeq ($(DEBUG),0) CFLAGS += -fsanitize-undefined-trap-on-error endif endif ifeq ($(ASAN),1) CFLAGS += -fsanitize=address endif ifeq ($(NOEXEC_STACK),1) CPPFLAGS += -DNOEXEC_STACK endif ifeq ($(PARALLEL),1) MAKEFLAGS += -j$(PARALLEL_NJOBS) endif CPPFLAGS += $(DEPFLAG) -iquote $(srcdir)/ CFLAGS += -std=gnu11 CXXFLAGS += -std=c++14 default: bart .gitignore -include $(ALLDEPS) # cuda NVCC?=$(CUDA_BASE)/bin/nvcc ifeq ($(CUDA),1) CUDA_H := -I$(CUDA_BASE)/include CPPFLAGS += -DUSE_CUDA $(CUDA_H) ifeq ($(CUDNN),1) CUDNN_H := -I$(CUDNN_BASE)/include CPPFLAGS += -DUSE_CUDNN $(CUDNN_H) endif ifeq ($(BUILDTYPE), MacOSX) CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -lcufft -lcudart -lcublas -m64 -lstdc++ else ifeq ($(CUDNN),1) CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -L$(CUDNN_BASE)/$(CUDNN_LIB) -lcudnn -lcufft -lcudart -lcublas -lstdc++ -Wl,-rpath $(CUDA_BASE)/$(CUDA_LIB) else CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -lcufft -lcudart -lcublas -lstdc++ -Wl,-rpath $(CUDA_BASE)/$(CUDA_LIB) endif endif else CUDA_H := CUDA_L := endif # sm_20 no longer supported in CUDA 9 
GPUARCH_FLAGS ?= CUDA_CC ?= $(CC) NVCCFLAGS += -DUSE_CUDA -Xcompiler -fPIC -O2 $(GPUARCH_FLAGS) -I$(srcdir)/ -m64 -ccbin $(CUDA_CC) #NVCCFLAGS = -Xcompiler -fPIC -Xcompiler -fopenmp -O2 -I$(srcdir)/ %.o: %.cu $(NVCC) $(NVCCFLAGS) -c $^ -o $@ $(NVCC) $(NVCCFLAGS) -M $^ -o $(DEPFILE) # OpenMP ifeq ($(OMP),1) ifneq ($(BUILDTYPE), MacOSX) CFLAGS += -fopenmp CXXFLAGS += -fopenmp NVCCFLAGS += -Xcompiler -fopenmp else LDFLAGS += "-L/usr/local/opt/libomp/lib" -lomp CPPFLAGS += "-I/usr/local/opt/libomp/include" -Xclang -fopenmp endif else CFLAGS += -Wno-unknown-pragmas CXXFLAGS += -Wno-unknown-pragmas endif # Message Passing Interface ifeq ($(MPI),1) CFLAGS += -DUSE_MPI CC = mpicc endif # BLAS/LAPACK ifeq ($(SCALAPACK),1) BLAS_L := -lopenblas -lscalapack CPPFLAGS += -DUSE_OPENBLAS CFLAGS += -DUSE_OPENBLAS else ifeq ($(ACML),1) BLAS_H := -I$(ACML_BASE)/include BLAS_L := -L$(ACML_BASE)/lib -lgfortran -lacml_mp -Wl,-rpath $(ACML_BASE)/lib CPPFLAGS += -DUSE_ACML else ifeq ($(BUILDTYPE), MSYS) BLAS_H := -I$(BLAS_BASE) else BLAS_H := -I$(BLAS_BASE)/include endif ifeq ($(BUILDTYPE), MacOSX) BLAS_L := -L$(BLAS_BASE)/lib -lopenblas else ifeq ($(BUILDTYPE), MSYS) BLAS_L := -L/mingw64/lib -lopenblas else BLAS_L := -Wl,-rpath $(BLAS_BASE)/lib -L$(BLAS_BASE)/lib ifeq ($(NOLAPACKE),1) BLAS_L += -llapack -lblas CPPFLAGS += -Isrc/lapacke else ifeq ($(OPENBLAS), 1) ifeq ($(FORTRAN), 0) BLAS_L += -lopenblas else BLAS_L += -llapacke -lopenblas endif CPPFLAGS += -DUSE_OPENBLAS CFLAGS += -DUSE_OPENBLAS else BLAS_L += -llapacke -lblas endif endif endif endif endif endif ifeq ($(MKL),1) BLAS_H := -I$(MKL_BASE)/include BLAS_L := -L$(MKL_BASE)/lib/intel64 -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core CPPFLAGS += -DUSE_MKL -DMKL_Complex8="complex float" -DMKL_Complex16="complex double" CFLAGS += -DUSE_MKL -DMKL_Complex8="complex float" -DMKL_Complex16="complex double" endif ifeq ($(BLAS_THREADSAFE),1) CPPFLAGS += -DBLAS_THREADSAFE CFLAGS += -DBLAS_THREADSAFE endif ifeq ($(NON_DETERMINISTIC),1) CPPFLAGS += -DNON_DETERMINISTIC CFLAGS += -DNON_DETERMINISTIC NVCCFLAGS += -DNON_DETERMINISTIC endif CPPFLAGS += $(FFTW_H) $(BLAS_H) # librt ifeq ($(BUILDTYPE), MacOSX) LIBRT := else LIBRT := -lrt endif # png ifeq ($(PNG), 0) PNG_L := CFLAGS += -DNO_PNG CPPFLAGS += -DNO_PNG else PNG_L := -lpng endif ifeq ($(SLINK),1) PNG_L += -lz ifeq ($(DEBUG_DWARF),1) LIBS += -lelf -lz -llzma -lbz2 endif endif ifeq ($(LINKER),icc) PNG_L += -lz endif # fftw FFTW_H := -I$(FFTW_BASE)/include/ FFTW_L := -Wl,-rpath $(FFTW_BASE)/lib -L$(FFTW_BASE)/lib -lfftw3f ifeq ($(FFTWTHREADS),1) ifneq ($(BUILDTYPE), MSYS) FFTW_L += -lfftw3f_threads CPPFLAGS += -DFFTWTHREADS endif endif # Matlab MATLAB_H := -I$(MATLAB_BASE)/extern/include MATLAB_L := -Wl,-rpath $(MATLAB_BASE)/bin/glnxa64 -L$(MATLAB_BASE)/bin/glnxa64 -lmat -lmx -lm -lstdc++ # ISMRM ifeq ($(ISMRMRD),1) ISMRM_H := -I$(ISMRM_BASE)/include ISMRM_L := -L$(ISMRM_BASE)/lib -lismrmrd ISMRM_H += -I /usr/include/hdf5/serial/ else ISMRM_H := ISMRM_L := endif # Logging backends ifeq ($(LOG_BACKEND),1) CPPFLAGS += -DUSE_LOG_BACKEND ifeq ($(LOG_SIEMENS_BACKEND),1) miscextracxxsrcs += $(srcdir)/misc/UTrace.cc endif ifeq ($(LOG_ORCHESTRA_BACKEND),1) miscextracxxsrcs += $(srcdir)/misc/Orchestra.cc endif endif ifeq ($(ISMRMRD),1) miscextracxxsrcs += $(srcdir)/ismrm/xml_wrapper.cc CPPFLAGS += $(ISMRM_H) LIBS += -lstdc++ endif # change for static linking ifeq ($(SLINK),1) ifeq ($(SCALAPACK),1) BLAS_L += -lgfortran -lquadmath else # work around fortran problems with static linking LDFLAGS += -static -Wl,--whole-archive 
-lpthread -Wl,--no-whole-archive -Wl,--allow-multiple-definition ifneq ($(BUILDTYPE), MSYS) LIBS += -lmvec BLAS_L += -llapack -lblas endif BLAS_L += -lgfortran -lquadmath endif endif # Modules .LIBPATTERNS := lib%.a vpath %.a lib vpath % commands/ boxextrasrcs := $(XTARGETS:%=src/%.c) define alib $(1)srcs := $(wildcard $(srcdir)/$(1)/*.c) $(1)cudasrcs := $(wildcard $(srcdir)/$(1)/*.cu) $(1)objs := $$($(1)srcs:.c=.o) $(1)objs += $$($(1)extrasrcs:.c=.o) $(1)objs += $$($(1)extracxxsrcs:.cc=.o) ifeq ($(CUDA),1) $(1)objs += $$($(1)cudasrcs:.cu=.o) endif .INTERMEDIATE: $$($(1)objs) lib/lib$(1).a: lib$(1).a($$($(1)objs)) endef ALIBS = misc num grecon sense noir iter linops wavelet lowrank noncart calib simu sake nlops moba lapacke box geom networks nn ifeq ($(ISMRMRD),1) ALIBS += ismrm endif ifeq ($(BUILDTYPE), MSYS) ALIBS += win endif $(eval $(foreach t,$(ALIBS),$(eval $(call alib,$(t))))) # additional rules for lib misc $(shell $(root)/rules/update_version.sh) $(srcdir)/misc/version.o: $(srcdir)/misc/version.inc # additional rules for lib ismrm lib/libismrm.a: CPPFLAGS += $(ISMRM_H) # additional rules for lib box lib/libbox.a: CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h # lib calib UTARGETS += test_grog MODULES_test_grog += -lcalib -lnoncart -lsimu -lgeom # lib linop UTARGETS += test_linop_matrix test_linop test_padding MODULES_test_linop += -llinops MODULES_test_linop_matrix += -llinops MODULES_test_padding += -llinops # lib lowrank UTARGETS += test_batchsvd MODULES_test_batchsvd = -llowrank # lib misc UTARGETS += test_pattern test_types test_misc test_memcfl test_tree # lib moba UTARGETS += test_moba MODULES_test_moba += -lmoba -lnoir -llowrank -lwavelet -liter -lnlops -llinops -lsimu # lib nlop UTARGETS += test_nlop test_nlop_jacobian MODULES_test_nlop += -lnlops -lnoncart -llinops -liter MODULES_test_nlop_jacobian += -lnlops -llinops # lib noncart UTARGETS += test_nufft test_fib MODULES_test_nufft += -lnoncart -llinops MODULES_test_fib += -lnoncart # lib num UTARGETS += test_multind test_flpmath test_splines test_linalg test_polynom test_window test_conv test_ode test_nlmeans test_rand test_matexp UTARGETS += test_blas test_mdfft test_ops test_ops_p test_flpmath2 test_convcorr test_specfun test_qform test_fft test_gaussians ifeq ($(MPI),1) UTARGETS += test_mpi test_mpi_multind test_mpi_flpmath test_mpi_fft endif UTARGETS_GPU += test_cudafft test_cuda_flpmath test_cuda_flpmath2 test_cuda_gpukrnls test_cuda_convcorr test_cuda_multind test_cuda_shuffle test_cuda_memcache_clear test_cuda_rand # lib simu UTARGETS += test_ode_bloch test_ode_simu test_biot_savart test_signals test_epg test_pulse MODULES_test_ode_bloch += -lsimu MODULES_test_ode_simu += -lsimu MODULES_test_biot_savart += -lsimu MODULES_test_signals += -lsimu MODULES_test_epg += -lsimu MODULES_test_pulse += -lsimu # lib geom UTARGETS += test_geom MODULES_test_geom += -lgeom # lib iter UTARGETS += test_iter test_prox test_prox2 MODULES_test_iter += -liter -lnlops -llinops MODULES_test_prox += -liter -llinops MODULES_test_prox2 += -liter -llinops -lnlops # lib nn ifeq ($(TENSORFLOW),1) UTARGETS += test_nn_tf MODULES_test_nn_tf += -lnn -lnlops -llinops endif UTARGETS += test_nn_ops test_nn MODULES_test_nn_ops += -lnn -lnlops -llinops -liter MODULES_test_nn += -lnn -lnlops -llinops -liter .gitignore: .gitignore.main Makefile* @echo '# AUTOGENERATED. DO NOT EDIT. 
(are you looking for .gitignore.main ?)' > .gitignore cat .gitignore.main >> .gitignore @echo /bart >> .gitignore @echo $(patsubst %, /%, $(CTARGETS) $(UTARGETS) $(UTARGETS_GPU)) | tr ' ' '\n' >> .gitignore doc/commands.txt: bart ./rules/update_commands.sh ./bart doc/commands.txt $(XTARGETS) .PHONY: doxygen doxygen: makedoc.sh doxyconfig bart ./makedoc.sh all: .gitignore $(CTARGETS) bart # special targets $(CTARGETS): CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h bart: CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h mat2cfl: $(srcdir)/mat2cfl.c -lnum -lmisc $(CC) $(CFLAGS) $(MATLAB_H) -omat2cfl $+ $(MATLAB_L) $(CUDA_L) # implicit rules %.o: %.c $(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $< %.o: %.cc $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $< ifeq ($(PARALLEL),1) (%): % $(AR) $(ARFLAGS) $@ $% else (%): % $(AR) $(ARFLAGS) $@ $% endif .SECONDEXPANSION: $(CTARGETS): commands/% : src/main.c $(srcdir)/%.o $$(MODULES_%) $(MODULES) $(LINKER) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$(@F) -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) .SECONDEXPANSION: bart: % : src/main.c $(srcdir)/%.o $$(MODULES_%) $(MODULES) ifeq ($(SHARED),1) $(LINKER) $(LDFLAGS) -shared $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$@ -o bart.o $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) else $(LINKER) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$(@F) -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) endif # rm $(srcdir)/$@.o UTESTS=$(shell $(root)/utests/utests-collect.sh ./utests/$@.c) .SECONDEXPANSION: $(UTARGETS): % : utests/utest.c utests/%.o $$(MODULES_%) $(MODULES) $(CC) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -DUTESTS="$(UTESTS)" -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(LIBS) -lm $(LIBRT) UTESTS_GPU=$(shell $(root)/utests/utests_gpu-collect.sh ./utests/$@.c) .SECONDEXPANSION: $(UTARGETS_GPU): % : utests/utest.c utests/%.o $$(MODULES_%) $(MODULES) $(CC) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -DUTESTS="$(UTESTS_GPU)" -DUTEST_GPU -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(LIBS) -lm $(LIBRT) # linker script version - does not work on MacOS X # $(CC) $(LDFLAGS) -Wl,-Tutests/utests.ld $(CFLAGS) -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) -lm -rt # automatic tests # system tests ROOTDIR=$(root) TOOLDIR=$(root)/commands TESTS_DIR=$(root)/tests TESTS_TMP=$(TESTS_DIR)/tmp/$$$$ TESTS_OUT=$(TESTS_DIR)/out include $(root)/tests/*.mk ifeq ($(BUILDTYPE), MSYS) TMP_TESTS := $(TESTS) NOT_SUPPORTED=tests/test-io tests/test-io2 tests/test-join-append tests/test-join-append-one tests/test-whiten TESTS = $(filter-out $(NOT_SUPPORTED),$(TMP_TESTS)) endif test: ${TESTS} testslow: ${TESTS_SLOW} testague: ${TESTS_AGUE} # test importing *.dat-files specified in tests/twixread.mk gputest: ${TESTS_GPU} pythontest: ${TESTS_PYTHON} # unit tests UTEST_RUN= ifeq ($(MPI),1) # only cfl files allowed with MPI UTARGETS:=$(filter-out test_memcfl ,$(UTARGETS)) UTEST_RUN=mpirun -n 3 endif ifeq ($(UTESTLEAK),1) # we blacklist some targets because valgrind crashes (blas related) UTARGETS:=$(filter-out test_flpmath test_blas,$(UTARGETS)) UTEST_RUN=valgrind --quiet --leak-check=full --error-exitcode=1 valgrind --suppressions=./valgrind.supp --log-file=/dev/null endif .PHONY: utests-all utest utests_gpu-all utest_gpu utests-all: $(UTARGETS) ./utests/utests_run.sh "CPU" "$(UTEST_RUN)" $(UTARGETS) utest: utests-all @echo ALL CPU UNIT TESTS PASSED. 
utests_gpu-all: $(UTARGETS_GPU) ./utests/utests_run.sh "GPU" "$(UTEST_RUN)" $(UTARGETS_GPU) utest_gpu: utests_gpu-all @echo ALL GPU UNIT TESTS PASSED. .PHONY: clean clean: rm -f `find $(srcdir) -name "*.o"` rm -f $(root)/utests/*.o rm -f $(patsubst %, %, $(UTARGETS)) rm -f $(patsubst %, %, $(UTARGETS_GPU)) rm -f $(libdir)/.*.lock .PHONY: allclean allclean: clean rm -f $(libdir)/*.a $(ALLDEPS) rm -f bart rm -f $(patsubst commands/%, %, $(CTARGETS)) rm -f $(CTARGETS) rm -f $(srcdir)/misc/version.inc rm -rf $(root)/tests/tmp/*/ rm -rf $(root)/stests/tmp/*/ rm -rf $(root)/doc/dx rm -f $(root)/doc/commands.txt rm -f $(root)/save/fftw/*.fftw rm -f $(root)/save/nsv/*.dat touch isclean .PHONY: distclean distclean: allclean -include isclean isclean: $(ALLMAKEFILES) ifeq ($(AUTOCLEAN),1) @echo "CONFIGURATION MODIFIED. RUNNING FULL REBUILD." touch isclean $(MAKE) allclean || rm isclean else ifneq ($(MAKECMDGOALS),allclean) @echo "CONFIGURATION MODIFIED." endif endif # shared library .PHONY: shared-lib shared-lib: make allclean CFLAGS="-fPIC $(OPT) -Wmissing-prototypes" make gcc -shared -fopenmp -o libbart.so src/bart.o -Wl,-whole-archive lib/lib*.a -Wl,-no-whole-archive -Wl,-Bdynamic $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm -lrt make allclean libbart.so: shared-lib .PHONY: install install: bart install -d $(DESTDIR)/$(PREFIX)/bin/ install bart $(DESTDIR)/$(PREFIX)/bin/ install -d $(DESTDIR)/$(PREFIX)/share/doc/bart/ install $(root)/doc/*.txt $(root)/README $(DESTDIR)/$(PREFIX)/share/doc/bart/ install -d $(DESTDIR)/$(PREFIX)/lib/bart/commands/ # generate release tar balls (identical to github) %.tar.gz: git archive --prefix=bart-$(patsubst bart-%.tar.gz,%,$@)/ -o $@ v$(patsubst bart-%.tar.gz,%,$@) # symbol table bart.syms: bart rules/make_symbol_table.sh bart bart.syms libbart-devel/Makefiles/000077500000000000000000000000001463460177700155115ustar00rootroot00000000000000libbart-devel/Makefiles/README.md000066400000000000000000000017251463460177700167750ustar00rootroot00000000000000### Custom Makefiles Put custom Makefiles here, to be included in the standard Makefile. The build will automatically include the following files in this directory matching the expansion `Makefile.*` Example custom Makefile for modifying build: ```bash ## Makefile.local # Makefile for my local build DEBUG = 1 # Parallel make PARALLEL ?= 1 # GPU CUDA=0 CC=clang OMP=0 # Paths FFTW_BASE := /opt/local/ MATLAB_BASE := /Applications/MATLAB_R2016a.app CUDA_BASE = /usr/local/cuda/ BLAS_BASE := /opt/local ``` Example Makefile and library rules for adding a custom program: ```bash ## Makefiles/Makefile.sum # Compile my custom program, src/sum.c, which relies on # my custom library, lib/libsum.a MODULES_sum = -lsum MODULES_bart += -lsum XTARGETS += sum ``` ```bash ### rules/sum.mk # Build my custom library with files under src/sum/ sumsrcs := $(wildcard $(srcdir)/sum/*.c) sumobjs := $(sumsrcs:.c=.o) .INTERMEDIATE: $(sumobjs) lib/libsum.a: libsum.a($(sumobjs)) ``` libbart-devel/README000066400000000000000000000342411463460177700144750ustar00rootroot00000000000000 0. License ========== See LICENSE file for licensing information. ------------------------------------------------------------------------------- The tools in this software implement various reconstruction algorithms for Magnetic Resonance Imaging. The software is intended for research use only and NOT FOR DIAGNOSTIC USE. It comes without any warranty (see LICENSE for details). Please cite the corresponding articles when using these tools. 
Some references can be found at the end of this file. The source code might
provide more detailed references, e.g. for specific iterative algorithms.


1. Help
=======

Please direct all questions or comments to the public mailing list:

	mrirecon@lists.eecs.berkeley.edu

	https://lists.eecs.berkeley.edu/sympa/info/mrirecon


Note: This list has a public archive! Please do not send
any confidential information.


Updates and further information can be found here:

	http://mrirecon.github.io/bart/


2. Installation
===============

2.1. Packages
-------------

The easiest way to install BART on Debian or Ubuntu is to install
the BART package:

	$ sudo apt-get install bart bart-view

You can also try the package built with CUDA support:

	$ sudo apt-get install bart-cuda bart-view

Note, if you need Matlab / Python integration, want to use BART on Mac,
Windows, or other Linux distributions, or if you need a very recent
version of BART, you may need to compile BART yourself. In this case,
please follow the instructions below.

The recommended way to use BART on Windows is with the Windows Subsystem
for Linux (WSL2), which is available for Windows 10. Once you have
installed WSL2, you can follow the steps for Linux given above to install
the BART packages, or compile BART yourself as described below.

2.2. Prerequisites
------------------

GCC compiler, the FFTW library, and optionally CUDA
(see the Makefile to turn options on or off).
The minimum GCC version supported is 11. It should also be possible
to use the clang compiler.

The software can be used in combination with Matlab or Octave.

There is limited support for reading Cartesian data encoded with
the ISMRM Raw Data format when linking with the ISMRMRD library
(http://ismrmrd.sourceforge.net/).

In the following, the symbol '$' indicates a shell prompt.
Do not type '$' when entering commands.

For more build information, check doc/building.txt

### 2.2.1. Linux

The software tools should run on any recent Linux distribution.

To install the required libraries on Debian and Ubuntu run:

	$ sudo apt-get install gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran

	(optional)
	$ sudo apt-get install octave

	(optional)
	install version 0.5.2 of the ISMRMRD library

To install the required libraries on Red Hat / CentOS run:

	$ sudo yum install atlas-devel fftw3-devel libpng-devel lapack-devel

It may be required to install support for software collections (for CentOS):

	$ sudo yum install centos-release-scl devtoolset-8

To enable gcc 8 and start a bash shell run:

	$ scl enable devtoolset-8 bash

### 2.2.2. Mac OS X

BART is supported on Intel-based and ARM-based Macs. Xcode is also
required. For ARM-based Macs, it is recommended to use gcc12 or higher.

Using MacPorts (http://www.macports.org/):

	$ sudo port install fftw-3-single
	$ sudo port install gcc12
	$ sudo port install libpng
	$ sudo port install openblas
	$ sudo port install flock
	$ sudo port install gmake

	(optional)
	$ sudo port install octave

	(optional)
	install version 0.5.2 of the ISMRMRD library

Use gmake when building and select gcc as the compiler:

	$ CC=gcc-mp-12 gmake

Using HomeBrew (https://brew.sh):

	$ brew install --cask gcc-arm-embedded
	$ brew install libpng
	$ brew install fftw
	$ brew install openblas
	$ brew install gmake
	$ brew install llvm libomp

Use gmake when building, select gcc as the compiler, and turn off
the default setting for MACPORTS:

	$ CC=gcc MACPORTS=0 gmake

### 2.2.3. Windows

BART is supported through WSL 2, which is available on Windows 10.
The instructions are similar to installing on Linux.
Step-by-step instructions are available on the website at:

	https://mrirecon.github.io/bart/installation_windows.html

First follow the instructions for Debian/Ubuntu in Section 2.2.1 to
install the required libraries. Next follow the instructions in
Section 2.3 to download and compile BART.

2.2.3.1. Using BART with Matlab outside of WSL

To use BART outside of WSL, e.g. with Matlab, it is recommended to soft
link the bart binary to /usr/local/bin. Assuming bart is installed in
the ${BART_TOOLBOX_PATH} directory, execute the following command in WSL:

	$ sudo ln -s ${BART_TOOLBOX_PATH}/bart /usr/local/bin/bart

Outside of WSL, copy the files from ${BART_TOOLBOX_PATH}/matlab to a
local folder and add the folder to the Matlab path.


2.3. Downloading and Compilation
--------------------------------

If you are a git user, you can simply clone our public repository:

	$ git clone https://github.com/mrirecon/bart

Otherwise, please download the latest version as a zip file from GitHub:

	http://github.com/mrirecon/bart/releases/latest

and unpack it somewhere on your computer.

Open a terminal window and enter the bart directory (the top-level
directory with the Makefile in it). To build the reconstruction tools
type:

	$ make

If you have installed the ISMRMRD library version 0.5.2, you can also
build the ISMRM raw data import tool:

	$ make ismrmrd


2.4. Getting Started
--------------------

### 2.4.1. Organization

	.			main directory / built software tools
	Makefile		makefile
	Makefiles/		directory for custom makefiles
	matlab/			Matlab helper scripts
	python/			Python helper functions
	doc/			documentation
	pkg/			packaging for Fedora
	rules/			more build-related files
	scripts/		various helper scripts and examples
	src/			source code
	src/calib/		source code for sensitivity calibration
	src/sense/		source code for SENSE or ESPIRiT reconstruction
	src/noir/		source code for nonlinear inversion
	src/sake/		source code for SAKE reconstruction
	src/moba/		source code for model-based reconstruction
	src/networks/		source code for neural networks
	src/wavelet/		source code for wavelets
	src/dfwavelet/		source code for divergence-free wavelets
	src/lowrank/		source code for low-rank regularization
	src/simu/		source code for MRI simulation
	src/noncart/		source code for non-uniform FFT
	src/iter/		library of iterative algorithms
	src/linops/		library of linear operators
	src/nlops/		library of nonlinear operators
	src/nn/			library for neural networks
	src/geom/		library for geometric computations
	src/num/		base library with numerical functions
	src/misc/		miscellaneous (e.g. I/O)
	src/lapacke/		copy of a part of LAPACKE
	src/grecon/		helper functions for generic reconstructions
	src/ismrm/		support for ISMRM raw data format
	src/python/		support for Python
	src/win/		support for Windows
	tests/			system tests
	utests/			unit tests
	lib/			built software libraries

### 2.4.2. Terminal

When using the toolbox commands from a UNIX shell, it is recommended to
set the BART_TOOLBOX_PATH environment variable to the base directory and
to add the base directory to the PATH variable. You can do this by
running the following command:

	$ . startup.sh

Note: The dot or 'source' command is needed so that the variables
are imported into the current shell.

### 2.4.3. Matlab

You can set the BART_TOOLBOX_PATH and add the base directory to the
Matlab path by running the following command in the bart directory:

	>> startup

(Note: The '>>' indicates the Matlab prompt. Do not type '>>'
when entering commands.)

You can use Matlab to read and visualize/process files.
To write a data file 'xyz' from Matlab you can run:

	>> writecfl('xyz', A);

Note that the name 'xyz' is used without a filename extension. See below
for more information about the file format used in BART.

To read the data file 'xyz' back into Matlab use:

	>> A = readcfl('xyz');

To call a BART tool (e.g. ecalib) from Matlab, you can use the
'bart' command:

	>> sensitivities = bart('ecalib', kspace);

Download and unpack the examples which demonstrate interoperability
with Matlab. Go to the examples directory and run:

	>> examples

### 2.4.4. Python

You can set the BART_TOOLBOX_PATH to the base directory and start
Python interactively as follows:

	$ python3 -i startup.py

To avoid doing the above every time, it is recommended to update your
PYTHONPATH environment variable. For example, on Linux, assuming your
BART_TOOLBOX_PATH is set, add the line below to your .bashrc file.

	$ export PYTHONPATH="${BART_TOOLBOX_PATH}/python:$PYTHONPATH"

After doing so, we can simply import as needed.

	>>> from bart import bart
	>>> import cfl

You can use Python to read and visualize/process files.
To write a data file 'xyz' from Python you can run:

	>>> cfl.writecfl('xyz', A)

Note that the name 'xyz' is used without a filename extension. See below
for more information about the file format used in BART.

To read the data file 'xyz' back into Python use:

	>>> A = cfl.readcfl('xyz')

To call a BART tool (e.g. ecalib) from Python, you can use the
'bart' command:

	>>> sensitivities = bart(1, 'ecalib', kspace)

The bart function expects the following signature:

	>>> <outputs> = bart(<nargout>, <command>, <inputs>, ...)

To use BART in a script, please follow the steps in the startup.py file.


3. Data Format
==============

3.1. Generic
------------

The input and output datasets are each stored in a pair of files: one
header (*.hdr) and one raw data file (*.cfl). The header is a simple,
human-readable text file that describes the dimensions of the data.
The raw data file is a binary file containing a single contiguous block
of array data of the dimensions described in the header, stored in
column-major order (first index is sequential). The raw data file is
complex float (32 bit real + 32 bit imaginary, IEEE 754 binary32
little-endian).

Convenience methods to read and write our data files using Matlab may
be found in the matlab/ directory (readcfl.m and writecfl.m). Similar
methods for Python may be found in the python/ directory (cfl.py).
A minimal example of reading this format is sketched at the end of
this section.

3.2. Magnetic Resonance Imaging Data
------------------------------------

For MRI data and images, the dimensions are usually assigned in
the following order:

	0	readout
	1	phase-encoding dimension 1
	2	phase-encoding dimension 2
	3	receive channels
	4	ESPIRiT maps
	...
	...

(more dimensions are defined in src/misc/mri.h)

Undersampled data is stored with zeros in the unsampled positions.

3.3. Non-Cartesian Trajectories and Samples
-------------------------------------------

The k-space coordinates for each sample are stored along dimension 0,
which must have size equal to three. The unit of measurement is 1/FOV.
Dimension 1 stores the samples along a single readout window, while
dimension 2 may be used to differentiate between different lines
(e.g. radial spokes). Channel (3) and map (4) dimensions must not be
used (i.e. have size one), while other dimensions can be used as for
Cartesian data. Non-Cartesian samples are stored in a similar way as
trajectories, except that dimension 0 is not used. The channel
dimension can be used for different receiver coils as usual.
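To illustrate the generic format described above, the following is a
minimal sketch of a reader in Python, similar in spirit to what
python/cfl.py provides. It assumes the header lists the array
dimensions on the line following the '# Dimensions' marker, as written
by the BART tools:

	import numpy as np

	def read_cfl(name):
	    # parse dimensions from the text header; the line after the
	    # '# Dimensions' marker lists the size of each dimension
	    with open(name + ".hdr") as header:
	        header.readline()  # skip the '# Dimensions' marker line
	        dims = [int(d) for d in header.readline().split()]

	    # raw data: complex float (32 bit real + 32 bit imaginary),
	    # stored in column-major order (first index sequential)
	    data = np.fromfile(name + ".cfl", dtype=np.complex64)
	    return data.reshape(dims, order="F")

In practice, the provided readcfl.m/writecfl.m and cfl.py should be
preferred, as they handle all details of the format.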
4. Command-line Tools
=====================

All tools operate on the simple file format given above. Indices and
dimensions run from 0 to N-1. Sometimes a set of dimensions is given
as a bitmask where the lowest bit corresponds to the 0th dimension.

For example, an inverse Fourier transform of the first three dimensions
can be performed with the following command:

	$ bart fft -i 7 kspace volume

More information about each command can be found using the help
option '-h' or in the 'doc/commands.txt' file that can be generated
using 'make doc/commands.txt'.


5. Information for Contributors
===============================

Thank you for helping to improve BART! In order for us to be able to
accept your contribution, it has to be released under the BSD license
used by BART (see LICENSE file). By submitting patches to us, it is
understood that you agree to these terms and that you confirm that you
hold all necessary rights yourself or have permission from the
copyright holder. Please also add the name of the copyright holder and
the name and email of the author(s) to the copyright headers in all
new or changed files.


6. Troubleshooting
==================

6.1. Installation Problems
--------------------------

When problems occur after updating BART or changing build variables,
it may help to clean the build environment and to recompile BART:

	$ make allclean
	$ make

Make sure the PATH and BART_TOOLBOX_PATH environment variables are set
correctly. Sometimes, several versions of BART are installed and the
wrong version is used accidentally.

6.2. Reporting Problems
-----------------------

Please report problems to our mailing list and include the following
information (as applicable):

* What system you are using (Linux, Mac OS X, Windows) and whether you
  use the Matlab/Python wrappers.

* The output of the 'version' command:

	$ bart version -V

* The exact BART command-line that caused the problem.

* The specific error message.

* Information about the data files used when the problem occurred
  (please provide at least the dimensions of all input files).

6.3. Debugging
--------------

See 'doc/debugging.txt' for details.


7. References
=============

* Tamir JI, Ong F, Cheng JY, Uecker M, Lustig M,
  Generalized Magnetic Resonance Image Reconstruction using
  The Berkeley Advanced Reconstruction Toolbox,
  ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2016

* Uecker M, Ong F, Tamir JI, Bahri D, Virtue P, Cheng JY, Zhang T,
  Lustig M, Berkeley Advanced Reconstruction Toolbox,
  Annual Meeting ISMRM, Toronto 2015,
  In: Proc Intl Soc Mag Reson Med 23:2486

* Uecker M, Virtue P, Ong F, Murphy MJ, Alley MT, Vasanawala SS,
  Lustig M, Software Toolbox and Programming Library for Compressed
  Sensing and Parallel Imaging,
  ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2013

References related to implemented methods and algorithms can be found
in the file 'doc/references.txt'.

libbart-devel/README.md

BART: Toolbox for Computational Magnetic Resonance Imaging
==========================================================

[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.592960.svg)](https://doi.org/10.5281/zenodo.592960)

The Berkeley Advanced Reconstruction Toolbox (BART) is a free and
open-source image-reconstruction framework for Computational Magnetic
Resonance Imaging. The tools in this software implement various
reconstruction algorithms for Magnetic Resonance Imaging. The software
is intended for research use only and NOT FOR DIAGNOSTIC USE. It comes
without any warranty (see LICENSE for details).
For more information: https://mrirecon.github.io/bart/ Information for Contributors ---------------------------- Thank you for helping to improve BART! In order for us to be able to accept your contribution, it has to be released under the BSD license used by BART (see LICENSE file). By submitting patches to us it is understood that you agree to these terms and that you confirm that you hold all necessary rights yourself or have permission from the copyright holder. Please also add the name of the copyright holder to the copyright header in all new or changed files. libbart-devel/ar_lock.sh000077500000000000000000000006371463460177700155700ustar00rootroot00000000000000#!/bin/bash set -e if command -v flock > /dev/null ; then flock ${LOCKDIR}/.`basename $2`.lock -c "ar $*" exit 0 fi if command -v shlock > /dev/null ; then LOCK=/tmp/`basename $2`.lock trap 'rm -f ${LOCK} ; exit 1' 1 2 3 15 while true ; do if shlock -p $$ -f ${LOCK} ; then ar $* rm -rf ${LOCK} exit 0 else sleep 1 fi done fi echo "Error: no flock/shlock command!" exit 1 libbart-devel/build_webasm.sh000066400000000000000000000050501463460177700166020ustar00rootroot00000000000000emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free','_bart_version', '_calc_phantom', '_calc_bart', \ '_calc_circ', '_fftc','_ifftc','_num_init', '_pha_opts_defaults', '_memcfl_create', '_load_cfl', '_main_ecalib', '_main_pics', '_main_phantom', \ '_main_fft']" -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart.js \ ../fftw/lib/libfftw3f.a ../openblas/lib/libopenblas.a ../blocksruntime/libBlocksRuntime.a emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free','_bart_version', \ '_memcfl_create', '_load_cfl', '_memcfl_list_all', '_memcfl_unlink', \ '_main_avg', '_main_bench', '_main_bin', '_main_bitmask', '_main_cabs', '_main_caldir', '_main_calmat', '_main_carg', '_main_casorati', \ '_main_cc', '_main_ccapply', '_main_cdf97', '_main_circshift', '_main_conj', '_main_conv', '_main_conway', '_main_copy', '_main_cpyphs', \ '_main_creal', '_main_crop', '_main_delta', '_main_ecalib', '_main_ecaltwo', '_main_estdelay', '_main_estdims', '_main_estshift', \ '_main_estvar', '_main_extract', '_main_fakeksp', '_main_fft', '_main_fftmod', '_main_fftrot', '_main_fftshift', '_main_filter', \ '_main_flatten', '_main_flip', '_main_fmac', '_main_fovshift', '_main_homodyne', '_main_ictv', '_main_index', '_main_invert', \ '_main_itsense', '_main_join', '_main_looklocker', '_main_lrmatrix', '_main_mandelbrot', '_main_measure', '_main_mip', \ '_main_mnist', '_main_moba', '_main_mobafit', '_main_morphop', '_main_multicfl', '_main_nlinv', '_main_nnet', '_main_noise', '_main_normalize', \ '_main_nrmse', '_main_nufft', '_main_nufftbase', '_main_onehotenc', '_main_ones', '_main_pattern', '_main_phantom', '_main_pics', \ '_main_pocsense', '_main_poisson', '_main_pol2mask', '_main_poly', '_main_reconet', '_main_repmat', '_main_reshape', '_main_resize', \ '_main_rmfreq', '_main_rof', '_main_roistat', '_main_rss', '_main_rtnlinv', '_main_sake', '_main_saxpy', '_main_scale', '_main_sdot', '_main_show', \ '_main_signal', '_main_sim', '_main_slice', '_main_spow', '_main_sqpics', '_main_squeeze', '_main_ssa', '_main_std', '_main_svd', '_main_tgv', \ '_main_threshold', '_main_toimg', '_main_traj', '_main_transpose', '_main_twixread', '_main_upat', '_main_var', '_main_vec', '_main_version', \ '_main_walsh', '_main_wave', '_main_wavelet', '_main_wavepsf', '_main_whiten', '_main_window', '_main_wshfl', 
'_main_zeros', '_main_zexp' \ ]" -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart_cmd.js \ ../fftw/lib/libfftw3f.a ../openblas/lib/libopenblas.a ../blocksruntime/libBlocksRuntime.alibbart-devel/commands/000077500000000000000000000000001463460177700154125ustar00rootroot00000000000000libbart-devel/commands/.gitignore000066400000000000000000000001261463460177700174010ustar00rootroot00000000000000# Ignore everything in this directory * # Except this file !.gitignore # and sub !sub libbart-devel/commands/sub000077500000000000000000000006361463460177700161360ustar00rootroot00000000000000#!/bin/bash set -e if [ ! -f ${BART_TOOLBOX_PATH}/bart ] ; then if [ -f ${TOOLBOX_PATH}/bart ] ; then BART_TOOLBOX_PATH=${TOOLBOX_PATH} else echo "\$BART_TOOLBOX_PATH is not set correctly!" exit 1 fi fi if [ $# -lt 3 ] ; then echo "Usage: $0 " >&2 echo "Subtract from and save in " >&2 exit 1 fi ${BART_TOOLBOX_PATH}/bart saxpy -- -1. $1 $2 $3 libbart-devel/doc/000077500000000000000000000000001463460177700143565ustar00rootroot00000000000000libbart-devel/doc/applications.txt000066400000000000000000000140031463460177700176030ustar00rootroot00000000000000 (an incomplete list of papers using BART...) Hollingsworth KG, Higgins DM, McCallum M, Ward L, Coombs A, Straub V. Investigating the quantitative fidelity of prospectively undersampled chemical shift imaging in muscular dystrophy with compressed sensing and parallel imaging reconstruction. Magn Reson Med 2014; 72:1610-1619. Zhang T, Cheng JY, Potnick AG, Barth RA, Alley MT, Uecker M, Lustig M, Pauly JM, Vasanawala SS. Fast Pediatric 3D Free Breathing Abdominal Dynamic Contrast Enhanced MRI with a High Spatiotemporal Resolution, J Magn Reson Imaging 2015; 41:460-473. Addy NO, Ingle RR, Wu HH, Hu BS, Nishimura DG. High-resolution variable-density 3D cones coronary MRA. Magn Reson Med 2015; 74:614-621. Cheng JY, Zhang T, Ruangwattanapaisarn N, Alley MT, Uecker M, Pauly JM, Lustig M, Vasanawala SS. Free-Breathing Pediatric MRI with Nonrigid Motion Correction and Acceleration, J Magn Reson Imaging 2015; 42:407-420. Athalye V, Lustig M, Uecker M. Parallel Magnetic Resonance Imaging as Approximation in a Reproducing Kernel Hilbert Space, Inverse Problems 2015; 31:045008. Mann LW, Higgins DM, Peters CN, Cassidy S, Hodson KK, Coombs A, Taylor R, Hollingsworth KG. Accelerating MR Imaging Liver Steatosis Measurement Using Combined Compressed Sensing and Parallel Imaging: A Quantitative Evaluation, Radiology 2016; 278:245-256. Cheng JY, Hanneman K, Zhang T, Alley MT, Lai P, Tamir JI, Uecker M, Pauly JM, Lustig M, Vasanawala SS. Comprehensive Motion-Compensated Highly-Accelerated 4D Flow MRI with Ferumoxytol Enhancement for Pediatric Congenital Heart Disease. J Magn Reson Imaging 2016; 43:1355-1368. Tamir JI, Uecker M, Chen W, Lai P, Aleey MT, Vasanawala SS, Lustig M. T2-Shuffling: Sharp, Multi-Contrast, Volumetric Fast Spin-Echo Imaging. Magn Recon Med 2017; 77:180-195. Uecker M, Lustig M. Estimating Absolute-Phase Maps Using ESPIRiT and Virtual Conjugate Coils. Magn Reson Med 2017; 77:1201-1207. Cheng JY, Zhang T, Alley MT, Uecker M, Lustig M, Pauly JM, Vasanawala SS. Comprehensive Multi-Dimensional MRI for the Simultaneous Assessment of Cardiopulmonary Anatomy and Physiology. Scientific Reports 2017; 7:5330. Bao S, Tamir JI, Young JL, Tariq U, Uecker M, Lai P, Chen W, Lustig M, Vasanawala SS. Fast comprehensive single-sequence four-dimensional pediatric knee MRI with T2 shuffling. J Magn Reson Imaging 2017; 45:1700-1711. 
Mazzoli V, Schoormans J, Froeling M, Sprengers AM, Coolen BF, Verdonschot N, Strijkers GJ, Nederveen AJ. Accelerated 4D self‐gated MRI of tibiofemoral kinematics. NMR in Biomed 2017; 30:e3791. Moghari MH, Uecker M, Roujol S, Sabbagh M, Geva T, Powell AJ. Accelerated Whole-heart Magnetic Resonance Angiography Using a Variable-Density Poisson-Disc Undersampling Pattern and Compressed Sensing Reconstruction. Magn Reson Med 2018; 79:761-769. Peper ES, Strijkers GJ, Gazzola K, Potters WV, Motaal AG, Luirink IK, Hutten BA, Wiegman A, van Ooij P, van den Born B-JH, Nederveen AJ, Coolen BF. Regional assessment of carotid artery pulse wave velocity using compressed sensing accelerated high temporal resolution 2D CINE PC MRI. J Cardiovasc Magn Reson 2018; 20:1-12. Mazzoli V, Gottwald LM, Peper ES, Froeling M, Coolen BF, Verdonschot N, Sprengers AM, van Ooij P, Strijkers GJ, Nederveen AJ. Accelerated 4D phase contrast MRI in skeletal muscles contraction. Magn Reson Med 2018; 80:1799-1811. Rosenzweig S, Holme HCM, Wilke RN, Voit D, Frahm J, Uecker M. Simultaneous Multi-Slice Reconstruction Using Regularized Nonlinear Inversion: SMS-NLINV. Magn Reson Med 2018; 79:2057-2066. Lyu M, Barth M, Xie VB, Liu Y, Ma X, Feng Y, Wu EX. Robust SENSE reconstruction of simultaneous multislice EPI with low‐rank enhanced coil sensitivity calibration and slice‐dependent 2D Nyquist ghost correction. Magn Reson Med 2018; 80:1376-1390. Sanders J, Song H, Frank S, Bathala T, Venkatesan A, Anscher M, Tang C, Bruno T, Wei W, Ma J. Parallel imaging compressed sensing for accelerated imaging and improved SNR in MRI-based prostate brachytherapy post-implant dosimetry. Brachytherapy 2018; 17:816-824. Chen F, Taviani V, Tamir JI, Cheng JY, Zhang T, Song Q, Hargreaves BA, Pauly JM, Vasanawala SS. Self‐Calibrating Wave‐Encoded Variable‐Density Single‐Shot Fast Spin Echo Imaging. J Magn Reson Imaging 2018; 47:954-966. Roeloffs V, Rosenzweig S, Holme HCM, Uecker M, Frahm J. Frequency-modulated SSFP with radial sampling and subspace reconstruction: A time-efficient alternative to phase-cycled bSSFP. Magn Reson Med 2019; 81:1566-1579. de Jonge CS, Coolen BF, Peper ES, Motaal AG, Nio CY, Somers I, Strijkers GJ, Stoker J, Nederveen AJ. Evaluation of compressed sensing MRI for accelerated bowel motility imaging. European Radiology Experimental 2019; 3:1-12. Walheim J, Dillinger H, Kozerke S. Multipoint 5D flow cardiovascular magnetic resonance - accelerated cardiac- and respiratory-motion resolved mapping of mean and turbulent velocities. J Cardiovasc Magn Reson 2019; 21:42. Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M. Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH. J Cardiovasc Magn Reson 2019; in press. Hauptmann A, Arridge S, Lucka F, Muthurangu V, Steeden JA. Real‐time cardiovascular MR with spatio‐temporal artifact suppression using deep learning–proof of concept in congenital heart disease. Magn Reson Med 2019; 81:1143-1156. Tamir JI, Taviani V, Alley MT, Perkins BC, Hart L, O'Brien K, Wishah F, Sandberg JK, Anderson MJ, Turek JS, Willke TL, Lustig M, Vasanawala SS. Targeted rapid knee MRI exam using T2 shuffling. J Magn Reson Imaging 2019; 49: e195-e204. doi:10.1002/jmri.26600 Su Y, Anderson M, Tamir JI, Lustig M, Li K. Compressed Sensing MRI Reconstruction on Intel HARPv2. 2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM), San Diego, CA, USA, 2019, pp. 254-257.
doi: 10.1109/FCCM.2019.00041 Smith DS, Sengupta S, Smith SA, Welch EB. Trajectory optimized NUFFT: Faster non-Cartesian MRI reconstruction through prior knowledge and parallel architectures. Magn Reson Med 2019; 81:2064-2071. libbart-devel/doc/bart.1000066400000000000000000000004661463460177700153760ustar00rootroot00000000000000.TH BART 1 .SH NAME bart - Berkeley Advanced Reconstruction Toolbox .SH SYNOPSIS .B bart .IR command [\fB\-h\fR] ... .SH DESCRIPTION .B bart invokes a command from the Berkeley Advanced Reconstruction Toolbox. .SH AUTHOR BART Developer Team and Contributors. .SH SEE ALSO .B https://mrirecon.github.io/bart/ libbart-devel/doc/bitmasks.txt000066400000000000000000000012771463460177700167430ustar00rootroot00000000000000 A bitmask is a binary number where the individual bits are used to indicate something. For example, a bitmask is often used to select a subset of dimensions, e.g. if the FFT should be applied to dimensions 3 and 5 the corresponding bits at positions 3 and 5 are set in a bitmask: 876543210 (position of the bit) 000101000b (bitmask as binary number) In decimal this binary number is 40, so the command $ bart fft 40 input output would transform the dimensions 3 and 5 (counting from zero). The 'bitmask' command computes the required bitmask from the set of dimensions: $ bart bitmask 3 5 40 On the command-line both commands can also be combined: $ bart fft `bart bitmask 3 5` input output libbart-devel/doc/building.txt000066400000000000000000000115321463460177700167160ustar00rootroot00000000000000 0. Introduction BART has a build system based on GNU Make. The build system offers many features for users and developers: BART can be built on different architectures, with different compilers, and with various optional features. This makes it easy to use BART in different environments, on a laptop, a multi-GPU system, an HPC cluster, or in the cloud. The build system also supports running system and unit tests. To make developing more fun, the makefile is optimized for extremely fast builds. Using parallel builds, BART can be built from scratch in about five seconds. After changing a single source code file it is usually possible to rebuild the binary in less than a second. This is accomplished by automatically maintaining dependencies between object files and incrementally updating the binaries from objects stored in libraries. 1. Building BART 1.2. Main Build Targets 1.2.1. Default By default, the main 'bart' binary will be built with: make or make bart 1.2.2. Building Individual Commands Individual BART commands can be built as standalone binaries: make <command> All BART commands can be built with: make all Attention: If the BART_TOOLBOX_PATH is set, the 'bart' tool will call the standalone tool and not the built-in tool. This can be used to selectively update individual tools, but can also cause confusion. 1.2.3. Testing System and unit tests can be built and run with: make test make utest To run individual tests: make tests/test-pics-cs 1.2.4. Cleaning Up To clean up the working directory, run: make clean To also remove all built commands, run: make allclean 1.3. Libraries As a side effect of building the main 'bart' tool, static libraries are generated in 'lib/'. 2. Local Configuration The build can be configured by setting or adding variables. 2.1. Makefile.local It is recommended to put these variables into a file called 'Makefile.local' in the main BART directory. This file is then automatically included.
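For example, a minimal Makefile.local could look like this (an illustrative sketch; each of the options shown here is explained in the sections below):

	# Makefile.local
	PARALLEL=1
	CUDA=1
	CUDA_BASE=/usr/
	DEBUG=1
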
By keeping the local configuration in a separate file, local changes are not overwritten when BART is updated and do not cause conflicts when using a version control system. 2.2. Makefile.<machine> It is also possible to put machine-specific configuration variables in a Makefile.<machine> where <machine> is the name of the machine as returned by 'uname -n'. 2.3. Custom Makefiles directory Additional Makefiles can be included by placing them in the Makefiles directory. All files matching the expansion Makefiles/Makefile.* are automatically included in the build. See Makefiles/README.md for example files. 3. Build Options 3.1. Adding New BART Commands # add new tool (src/foo.c) to list of targets XTARGETS += foo # dependencies for foo MODULES_foo += -llowrank 3.2. Build Flags 3.2.1. Silent Builds Silent builds can be activated with the following option: MAKEFLAGS += --silent 3.2.2. Parallel Build Parallel builds can be activated with the following option: PARALLEL=1 3.3. Optional Features Some BART features are optional, because they depend on other libraries or on features which are not available everywhere. 3.3.1. CUDA Support for CUDA can be turned on. It may be necessary to also provide the base path of the CUDA installation. CUDA is supported starting with version 8, however, newer versions are recommended. CUDA=1 CUDA_BASE=/usr/ 3.3.2. OpenMP OpenMP can be turned off for compilers which do not support it properly (e.g. clang): OMP=0 3.3.3. FFTW Threads It is possible to turn off FFTW threads if the library is not available: FFTWTHREADS=0 3.3.4. ISMRM Raw Data Format If the ISMRMRD library is installed, preliminary support for the ISMRM raw data format can be activated: ISMRMRD=1 3.3.5. TensorFlow Integration with TensorFlow is possible using the C API of TensorFlow, which can be downloaded from here: https://www.tensorflow.org/install/lang_c See tensorflow.txt for more information on conventions used. TensorFlow support can then be activated with: TENSORFLOW=1 TENSORFLOW_BASE=/opt/tensorflow/ 3.3.6. MPI Support for MPI can be turned on. It is recommended to use Open MPI as the MPI implementation, as this enables automatic detection of CUDA-aware MPI. Moreover, BART reads environment variables exported by the Open MPI implementation of mpirun to automatically activate run-time support for MPI. MPI=1 3.4. Compiler 3.4.1. Different Compiler If different compilers or compiler versions are installed, it is possible to override the default compiler: CC = gcc-4.8 #CC = gcc-5 #CC = clang-3.5 3.4.2. Different CFLAGS Different CFLAGS can be set like this: CFLAGS= -g -O2 -ffast-math 3.4.3. Static Linking Static linking can be used to build binaries which do not depend on external shared libraries. This might be useful if BART is to be deployed on a different machine where it is difficult to install the required dependencies. SLINK=1 libbart-devel/doc/debugging.txt000066400000000000000000000015201463460177700170510ustar00rootroot00000000000000 Running a command in a debugger ------------------------------- This involves several (easy) steps: 1. Recompile BART with debugging information. Create a Makefile.local in the BART directory with the following line added: DEBUG=1 Then recompile with: make allclean make bart 2. Install the GNU debugger (gdb) 3. Run the failing BART command: gdb --args bart <command> <arguments ...> 4. Then type 'run' to start the process. If it crashes, you are back in the debugger. You can also type CTRL-C to interrupt it at any time.
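As a concrete instance of steps 3 and 4 (the 'pics' call and its file names are purely illustrative):

	gdb --args bart pics -l1 -r0.001 kspace sensitivities image
	(gdb) run
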
In the debugger: You can type 'bt' to get a backtrace, which is helpful to investigate a segmentation fault or similar. You can also call functions. For example, this can be used to save a multi-dimensional array from the debugger like this: (gdb) call dump_cfl("dbg_img", 16, dims, image) libbart-devel/doc/dimensions-and-strides.txt000066400000000000000000000037031463460177700215050ustar00rootroot00000000000000 BART is built around a library which defines many generic functions on multi-dimensional arrays. Most functions come in two flavours: A basic version (e.g. 'md_copy') which takes as input the dimensions for its arguments and an extended version (e.g. 'md_copy2') which also takes the strides for each argument. The basic versions assume strides for a column-major array which is contiguous in memory. A stride refers to the distance in memory between successive elements in an array. They are used to compute the position of an element in memory for a given multi-index by multiplying the index of each dimension with the corresponding stride and summing the results. For a regular array of complex floats continuously laid out in memory with dimensions 'dim[N]' the default strides are: str[0] = sizeof(complex float) str[1] = dim[0] * sizeof(complex float) str[2] = dim[0] * dim[1] * sizeof(complex float) ... There is one exception: For a dimension with length one, the corresponding stride is set to zero. These default strides are what the function 'md_calc_strides' computes and which are also used automatically whenever strides are not explicitly specified. Dimensions and strides can be manipulated to obtain different views of the same array without having to make a copy. For example, swapping dimensions and strides for two indices yields a transposed view of the array, an increased stride yields a view of a sub-array, a negative stride a view where the corresponding dimension is reversed (flipped), and a zero stride yields a view in which one dimension is replicated. Many basic functions such as 'md_transpose', 'md_flip', 'md_slice', 'md_copy_block' etc. are implemented in this way by setting up dimensions and strides and calling into the generic copy function 'md_copy2'. Even much more complicated operations can often be implemented in just a few lines of code. One example is building a Casorati matrix of overlapping blocks (see 'num/casorati.c').
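The following small C sketch illustrates the transposed view described above (illustrative code only; 'md_transpose' implements the general case in the library):

	#include <complex.h>

	#include "num/multind.h"

	// create a transposed copy of a 3x4 array by copying
	// through a view with swapped dimensions and strides
	void transpose_example(void)
	{
		enum { N = 2 };

		long idims[N] = { 3, 4 };
		long odims[N] = { 4, 3 };

		complex float* in = md_alloc(N, idims, CFL_SIZE);
		complex float* out = md_alloc(N, odims, CFL_SIZE);

		long istr[N];
		long ostr[N];
		md_calc_strides(N, istr, idims, CFL_SIZE);
		md_calc_strides(N, ostr, odims, CFL_SIZE);

		// swapping the input strides yields the transposed view
		long tstr[N] = { istr[1], istr[0] };

		md_copy2(N, odims, ostr, out, tstr, in, CFL_SIZE);

		md_free(in);
		md_free(out);
	}

Note that setting up the view itself touches no data; only the final 'md_copy2' moves memory.
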
libbart-devel/doc/distributed-architectures.txt000066400000000000000000000106721463460177700223110ustar00rootroot00000000000000Distributed architectures means using different compute entities. Nodes can be PCs, workstations, or nodes of a cluster. They can be connected by TCP/IP or other connections such as InfiniBand. An alternative term would be heterogeneous systems. BART uses the Message Passing Interface (MPI) [1] to work on distributed systems. # Basic Requirements 1.) Install (Open-)MPI on all nodes 2.) make sure the bart executable is working on all nodes (compiling on each node may be necessary, especially if different operating systems are used) 3.) build bart with MPI=1 4.) set up ssh connections between the nodes (requires an ssh server on each node as well) 5.) set up a file share between the nodes (for file reading/writing) 6.) run bart (with mpirun) and parallelization flags (bart -p) # BART command line interface (no mpi required) - bart -p <flags> [-s dim0 ... dimN] [-e maxdim0 ... maxdimN] [-S] tool - -l <flags>: specifies dimensions of looping which will be parallelized - -s: gives the start dimensions or (if -e is not given) the slice which should be processed - -e: gives the maximum for each dimension (imagine a for loop with i < maxdim) - Example 1: loop in dimension 13 from item 1 to 3 - bart -l $(bart bitmask 13) -s 1 -e 4 fft 3 example_file.ra - Example 2: process only slice 2 in a 3D stack - bart -l $(bart bitmask 3) -s 2 fft 1 example_file.ra - Example 3: loop in dimension 13 over all items (3 items required in this dimension for this example) - bart -l $(bart bitmask 13) -e 3 example_file.ra - Example 4: loop in dimensions 12 and 13 over items 2 to 3 and 3 to 5 - bart -l $(bart bitmask 12 13) -s 2:3 -e 3:5 fft 1 example_file.ra # Useful commands - [Setup ssh key]: ssh-keygen -t rsa -b 2024 -f ~/.ssh/mpi - generates an rsa key with bit length 2024 in the file ~/.ssh/mpi - [Copy ssh key]: ssh-copy-id <user>@<node> - copies the public key from the local machine to <node>; 'ssh-copy-id' has to be installed separately - Advantage: you cannot accidentally share your private key - [Setup file share over ssh]: sshfs <user>@<node>:<remote directory> <local mount point> - mounts <remote directory> from <node> at <local mount point>; Note: if ssh is set up correctly, you don't need the same users on each node - [Unmount file share over ssh]: fusermount -u <local mount point> - [Run bart with MPI]: 'mpirun -n <slots> --host <node>:<slots> -x <env variable> -wdir <working directory> <path to bart>/bart -p <flags> [-s dim0 ... dimN] [-e max0 ... maxN] [-S] tool : ...' - -n: specifies how many slots should be used, slots != processes - --host: IP address or, better, an ssh alias of the node; <slots> specifies how many slots are maximally available and has to correspond with -n - -x: if needed, set an environment variable (e.g. useful for DEBUG_LEVEL) - -wdir: specify the working directory, which could be different on each node - bart has to be the absolute path to the bart executable on each node if it is not the same everywhere or in the PATH - ':' separates different node configurations (basically repeat everything for each node with adapted parameters; if they are the same, you don't have to separate them, just use --hostfile <hostfile>) - [Setup symbolic link]: ln -s <target> <link name> - set up a symbolic link to create the same file structure on all nodes to avoid the need for '-wdir' # Useful files - ~/.ssh/config: "Host <alias> User <user> HostName <ip address> IdentityFile <path to private key>" - alternatively set up '/etc/hosts' to use names for the nodes # Troubleshooting - Q: Nothing happens at all after running 'mpirun' A: Check the mpi versions on each node, they should be the same or at least have the same major version ('mpirun --version') - Q: "No protocol specified": A: This is a warning/error from ssh. It means that no protocol for X is specified - Q: "ssh: connect to host <node> port 22: Connection refused" A: Most likely you have to install openssh-server on <node> - Q: "WARNING: Open MPI failed to TCP connect to a peer MPI process." A: Most likely you didn't set up ssh correctly, check each node connection again. You should be able to connect to each node and vice versa - Q: "/lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.33' /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.34'" - A: Most likely your nodes use different OS (distributions) with different versions of GLIBC. Compile bart on each node separately and use the absolute path to this binary in your call of 'mpirun'; different nodes can be separated by ':' [1] https://mpitutorial.com/ libbart-devel/doc/fft.txt000066400000000000000000000042311463460177700156760ustar00rootroot00000000000000 Centered FFT ------------ The center is N / 2 for even N and (N - 1) / 2 for odd N when counting from zero. Instead of using fftshift/ifftshift we usually use fftmod/ifftmod.
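For example, a centered forward FFT over the first three dimensions (bitmask 7) can be assembled from the uncentered transform on the command line (a sketch; the file names are illustrative, and 'bart fft' is centered by default with '-u' selecting the uncentered transform):

	$ bart fftmod 7 input tmp1
	$ bart fft -u 7 tmp1 tmp2
	$ bart fftmod 7 tmp2 output

This three-step sequence produces the same result as the centered 'bart fft 7 input output'.
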
While fftshift/ifftshift involves a cyclic copy of memory, fftmod applies a linear phase which has a similar effect. We prefer fftmod because we can usually merge this phase with other operations to reduce computation time. Though similar, there are some subtle differences which one has to keep in mind. The correct use of fftshift/ifftshift for a centered forward/inverse FFT is the following: forward: 1. ifftshift 2. fft 3. fftshift inverse: 1. ifftshift 2. ifft 3. fftshift In contrast, the correct use of fftmod/ifftmod for a centered forward/inverse FFT is this: forward: 1. fftmod 2. fft 3. fftmod inverse: 1. ifftmod 2. ifft 3. ifftmod If \xi_N is the N-th root of unity with smallest positive complex argument, the uncentered forward DFT of length N is: \hat f(k) = \sum_{x=0}^{N-1} \xi_N^{-xk} f(x) Shifting the center from index 0 to new index c yields the formula for the centered forward DFT of length N: \hat f_c(k) = \sum_{x=0}^{N-1} \xi_N^{-(x-c)(k-c)} f_c(x) Note that this corresponds to shifts in different directions for input and output. Expanding the exponent yields: (x-c)(k-c) = xk - xc - ck + c^2 Thus, the centered FFT can be implemented by multiplication with a linear phase before and after calling the uncentered FFT: \hat f_c(k) = \xi_N^{(k-c/2)c} \sum_{x=0}^{N-1} \xi_N^{-xk} \xi_N^{(x-c/2)c} f_c(x) Observe that this is the same linear phase applied to the input and output. Note that we distributed the additional phase \xi_N^{-c^2} evenly to both terms. If N is a multiple of four, then c^2 = N (N/4) and the additional phase term vanishes. Then \xi_N^{kc} and \xi_N^{xc} are simply the alternating sequence 1, -1, 1, -1, ... Because ifftmod applies the conjugate phase, this implies that it is the same as fftmod in this special case. If N is a multiple of two (but not of four), the additional phase is -pi. Evenly distributed, this yields a factor of '-i' (i the imaginary unit) on each side, i.e. fftmod applies -i, +i, -i, +i, ... For N odd the phase is more complicated. libbart-devel/doc/pics.txt000066400000000000000000000020721463460177700160560ustar00rootroot00000000000000 Returns the coil-combined images in image domain. For Cartesian imaging, 3D k-space (kz along z dim) must be provided. By default, pics assumes a 3D reconstruction. For a slice-by-slice 2D reconstruction, pics can be called in a loop. If no scaling factor is provided, pics will scale the data prior to reconstruction. The scaling factor is calculated using the center k-space region. This scaling will be undone before returning if the -S flag is provided, which may be important for computing quantitative parameters. Additional dimensions such as coils, maps, and time must follow the dimension order specified in mri.h, otherwise regularizers may be applied on the wrong dimensions and the forward SENSE operator may be inaccurate. The sampling mask is determined automatically from the provided k-space. For the sampling mask to be calculated correctly, missing samples must be exactly zero. Small values in k-space, perhaps from numerical errors, will cause the sampling mask (reflected in the logged acceleration factor) and the data consistency step to be incorrect. libbart-devel/doc/references.txt000066400000000000000000000414171463460177700172470ustar00rootroot00000000000000 - BART - Uecker M, Ong F, Tamir JI, Bahri D, Virtue P, Cheng JY, Zhang T, Lustig M. Berkeley Advanced Reconstruction Toolbox. Annual Meeting ISMRM, Toronto 2015, In: Proc Intl Soc Mag Reson Med 2015; 23:2486. Uecker M, Virtue P, Ong F, Murphy MJ, Alley MT, Vasanawala SS, Lustig M.
Software Toolbox and Programming Library for Compressed Sensing and Parallel Imaging, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2013. Tamir JI, Ong F, Cheng JY, Uecker M, Lustig M. Generalized Magnetic Resonance Image Reconstruction using The Berkeley Advanced Reconstruction Toolbox, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2016. Uecker M. Machine Learning Using the BART Toolbox - Implementation of a Deep Convolutional Neural Network for Denoising. Joint Annual Meeting ISMRM-ESMRMB, Paris 2018, In: Proc. Intl. Soc. Mag. Reson. Med. 2018; 26:2802. Blumenthal M. and Uecker M. Deep Deep Learning with BART. ISMRM Annual Meeting 2021, In: Proc. Intl. Soc. Mag. Reson. Med. 2021; 29:1754. Luo G, Blumenthal M, Uecker M. Using data-driven image priors for image reconstruction with BART. ISMRM Annual Meeting 2021, In: Proc. Intl. Soc. Mag. Reson. Med. 2021; 29:1756. Holme HCM and Uecker M. Reproducibility meets Software Testing: Automatic Tests of Reproducible Publications Using BART. ISMRM Annual Meeting 2021, In: Proc. Intl. Soc. Mag. Reson. Med. 2021; 29:3768. Scholand N, Schilling M, Heide M, Uecker M. Digital Reference Objects with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:3118. Blumenthal M, Holme HCM, Uecker M. Scaling nuFFT Memory-Overhead Down to Zero: Computational Trade-Offs and Memory-Efficient PICS-Reconstructions with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:4947. - reproducible publications using BART - Uecker M, Lustig M. Estimating Absolute-Phase Maps Using ESPIRiT and Virtual Conjugate Coils. Magn Reson Med 2017; 77:1201-1207. https://github.com/mrirecon/vcc-espirit Rosenzweig S, Holme HCM, Wilke RN, Voit D, Frahm J, Uecker M. Simultaneous Multi-Slice Reconstruction Using Regularized Nonlinear Inversion: SMS-NLINV. Magn Reson Med 2018; 79:2057-2066. https://github.com/mrirecon/sms-nlinv Rosenzweig S, Holme HCM, Uecker M. Simple Auto-Calibrated Gradient Delay Estimation From Few Spokes Using Radial Intersections (RING). Magn Reson Med 2019; 81:1898-1906. https://github.com/mrirecon/ring Holme HCM, Rosenzweig S, Ong F, Wilke RN, Lustig M, Uecker M. ENLIVE: An Efficient Nonlinear Method for Calibrationless and Robust Parallel Imaging. Scientific Reports 2019; 9:3034. https://github.com/mrirecon/enlive Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M. Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH cardiovascular magnetic resonance. J Cardioviasc Magn Reson 2019; 21:60. https://github.com/mrirecon/myocardial-t1-mapping Rosenzweig S, Scholand N, Holme HCM, Uecker M. Cardiac and Respiratory Self-Gating in Radial MRI using an Adapted Singular Spectrum Analysis (SSA-FARY). IEEE Trans Med Imag 2020; 39:3029-3041. https://github.com/mrirecon/SSA-FARY Wang X, Rosenzweig S, Scholand N, Holme HCM, Uecker M. Model-based Reconstruction for Simultaneous Multi-slice T1 Mapping using Single-shot Inversion-recovery Radial FLASH. Magn Reson Med 2021; 85:1258-1271. https://github.com/mrirecon/sms-t1-mapping Wang X, Tan Z, Scholand N, Roeloffs V, Uecker M. Physics-based Reconstruction Methods for Magnetic Resonance Imaging. Philos. Trans. R. Soc. A. 2021; 379:20200196. https://github.com/mrirecon/physics-recon Wang X, Rosenzweig S, Roeloffs V, Blumenthal M, Scholand N, Tan Z, Holme HCM, Unterberg-Buchwald C, Hinkel R, Uecker M. Free-breathing myocardial T1 mapping using inversion-recovery radial FLASH and motion-resolved model-based reconstruction. 
Magn Reson Med 2023; 89;1368-1384. https://github.com/mrirecon/motion-resolved-myocardial-T1-mapping Scholand N, Wang X, Roeloffs V, Rosenzweig S, Uecker M. Quantitative MRI by nonlinear inversion of the Bloch equations. Magn Reson Med 2023; 90:520-538. https://github.com/mrirecon/bloch-moba - sensitivity-encoded parallel imaging - (commands: itsense, pocsense, bpsense, pics) Ra JB and Rim CY. Fast imaging using subencoding data sets from multiple detectors. Magn Reson Med 1993; 30:142-145. Pruessmann KP, Weiger M, Scheidegger MB, Boesiger P. SENSE: Sensitivity encoding for fast MRI. Magn Reson Med 1999; 42:952-962. Pruessmann KP, Weiger M, Boernert P, Boesiger P. Advances in sensitivity encoding with arbitrary k-space trajectories. Magn Reson Med 2001; 46:638-651. Samsonov AA, Kholmovski EG, Parker DL, Johnson CR. POCSENSE: POCS-based reconstruction for sensitivity encoded magnetic resonance imaging. Magn Reson Med 2004; 52:1397-1406. - implementation of the (non-uniform) FFT - (commands: fft, nufft, nufftbase, pics) O’Sullivan JD. A fast sinc function gridding algorithm for Fourier inversion in computer tomography. IEEE Trans Med Imaging 1985; 4:200-207. Jackson JI, Meyer CH, Nishimura DG, Macovski A. Selection of a convolution function for Fourier inversion using gridding. IEEE Trans Med Imaging 1991; 3:473-478. Wajer F and Pruessmann KP. Major speedup of reconstruction for sensitivity­encoding with arbitrary trajectories. Annual Meeting of the ISMRM, Glasgow 2001, In: Proc Intl Soc Mag Reson Med 2001; 9:767. Frigo M, Johnson SG. The Design and Implementation of FFTW3. Proc IEEE 2005; 93:216-231. Uecker M, Zhang S, Frahm J. Nonlinear Inverse Reconstruction for Real-time MRI of the Human Heart Using Undersampled Radial FLASH. Magn Reson Med 2010; 63:1456-1462. Ong F, Uecker M, Jiang W, Lustig M. Fast Non-Cartesian Reconstruction with Pruned Fast Fourier Transform. Annual Meeting ISMRM, Toronto 2015, In: Proc Intl Soc Mag Reson Med 2015; 23:3639. Ong F, Uecker M, Lustig M. Accelerating non-Cartesian MRI reconstruction convergence using k-space preconditioning. IEEE Trans Med Imag 2019; 39:1646-1654. Blumenthal M, Holme HCM, Uecker M. Scaling nuFFT Memory-Overhead Down to Zero: Computational Trade-Offs and Memory-Efficient PICS-Reconstructions with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:4947. - methods for sensitivity calibration - (commands: walsh, caldir, ecalib, ecaltwo) Walsh DO, Gmitro AF, Marcellin MW. Adaptive reconstruction of phased array MR imagery. Magn Reson Med 2000; 43:682-690. Griswold M, Walsh D, Heidemann R, Haase A, Jakob A. The Use of an Adaptive Reconstruction for Array Coil Sensitivity Mapping and Intensity Normalization Annual Meetig ISMRM, Honolulu 2002, In: Proc Intl Soc Mag Reson Med 2002; 10:2410. McKenzie CA, Yeh EN, Ohliger MA, Price MD, Sodickson DK. Self-calibrating parallel imaging with automatic coil sensitivity extraction. Magn Reson Med 2002; 47:529-538. Uecker M, Virtue P, Vasanawala SS, Lustig M. ESPIRiT Reconstruction Using Soft SENSE. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:127. Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M. ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE meets GRAPPA. Magn Reson Med 2014; 71:990-1001. - joint estimation: nonlinear inversion, calibrationless - (commands: nlinv, sake) Uecker M, Hohage T, Block KT, Frahm J. 
Image reconstruction by regularized nonlinear inversion-joint estimation of coil sensitivities and image content. Magn Reson Med 2008; 60:674-682. Bi Z, Uecker M, Jiang D, Lustig M, Ying K. Robust Low-rank Matrix Completion for sparse motion correction in auto calibration PI. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:2584. Shin PJ, Larson PEZ, Ohliger MA, Elad M, Pauly JM, Vigneron DB, Lustig M. Calibrationless Parallel Imaging Reconstruction Based on Structured Low-Rank Matrix Completion. Magn Reson Med 2014; 72:959-970. Holme HCM, Rosenzweig S, Ong F, Wilke RN, Lustig M, Uecker M. ENLIVE: An Efficient Nonlinear Method for Calibrationless and Robust Parallel Imaging. Scientific Reports 2019; 9:3034. - coil compression - (command: cc) Buehrer M, Pruessmann KP, Boesiger P, Kozerke S. Array compression for MRI with large coil arrays. Magn Reson Med 2007, 57:1131-1139. Huang F, Vijayakumar S, Li Y, Hertel S, Duensing GR. A software channel compression technique for faster reconstruction with many channels. Magn Reson Imaging 2008; 26:133-141. Zhang T, Pauly JM, Vasanawala SS, Lustig M. Coil compression for accelerated imaging with cartesian sampling. Magn Reson Med 2013; 69:571-582. Bahri D, Uecker M, Lustig M. ESPIRiT-Based Coil Compression for Cartesian Sampling. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:2657. - compressed sensing MRI - (commands: pocsense, pics) Block KT, Uecker M, Frahm J. Undersampled radial MRI with multiple coils. Iterative image reconstruction using a total variation constraint. Magn Reson Med 2007; 57:1086-1098. Lustig M, Donoho D, Pauly JM. Sparse MRI: The application of compressed sensing for rapid MR imaging. Magn Reson Med 2007; 58:1182-1195. Liu B, King K, Steckner M, Xie J, Sheng J, Ying L. Regularized sensitivity encoding (SENSE) reconstruction using Bregman iterations. Magn Reson Med 2009; 61:145-152. - non-linear model-based reconstruction - (commands: moba) Tong CY, Prato FS. A Novel Fast T1-Mapping Method. Magn Reson Imaging 1994; 4:701-708. McKenzie CA, Pereira RS, Prato FS, Chen Z, Drost DJ. Improved Contrast Agent Bolus Tracking Using T1 FARM. Magn Reson Med 1999; 41:429-435. Graff C, Li Z, Bilgin A, Altbach MI, Gmitro AF, Clarkson EW. Iterative T2 estimation from highly undersampled radial fast spin-echo data. ISMRM 2006; 14:925. Olafsson VT, Noll DC, Fessler JA. Fast joint reconstruction of dynamic and field maps in functional MRI. IEEE Trans Med Imag 2008; 27:1177–1188. Block KT, Uecker M, Frahm J. Model-Based Iterative Reconstruction for Radial Fast Spin-Echo MRI. IEEE Trans Med Imag 2009; 28:1759-1769. Wang X, Roeloffs V, Klosowski J, Tan Z, Voit D, Uecker M, Frahm J. Model-based T1 Mapping with Sparsity Constraints Using Single-Shot Inversion-Recovery Radial FLASH. Magn Reson Med 2018; 79:730-740. Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M. Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH cardiovascular magnetic resonance. J Cardioviasc Magn Reson 2019; 21:60. Tan Z, Voit D, Kollmeier J, Uecker M, Frahm J. Dynamic Water/Fat Separation and B0 Inhomogeneity Mapping -- Joint Estimation Using Undersampled Triple-Echo Multi-Spoke Radial FLASH. Magn Reson Med 2019; 82:1000-1011. Wang X, Rosenzweig S, Scholand N, Holme HCM, Uecker M. Model-based Reconstruction for Simultaneous Multi-slice T1 Mapping using Single-shot Inversion-recovery Radial FLASH. Magn Reson Med 2021; 85:1258-1271. 
Wang X, Tan Z, Scholand N, Roeloffs V, Uecker M. Physics-based Reconstruction Methods for Magnetic Resonance Imaging. Philos. Trans. R. Soc. A. 2021; 379:20200196. Tan Z, Unterberg-Buchwald C, Blumenthal M, Scholand N, Schaten P, Holme HCM, Wang X, Raddatz D, Uecker M. Free-Breathing Liver Fat, R∗2 and B0 Field Mapping Using Multi-Echo Radial FLASH and Regularized Model-based Reconstruction. IEEE Trans Med Imag 2023; 42:1374-1387. Wang X, Rosenzweig S, Roeloffs V, Blumenthal M, Scholand N, Tan Z, Holme HCM, Unterberg-Buchwald C, Hinkel R, Uecker M. Free-breathing myocardial T1 mapping using inversion-recovery radial FLASH and motion-resolved model-based reconstruction. Magn Reson Med 2023; 89;1368-1384. Scholand N, Wang X, Roeloffs V, Rosenzweig S, Uecker M. Quantitative MRI by nonlinear inversion of the Bloch equations. Magn Reson Med 2023; 90:520-538. - subspace reconstruction - (commands: pics, signal, svd) Liang Z, Spatiotemporal Imaging with partially separable functions. 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro, 2007; 988-991. Petzschner FH, Ponce IP, Blaimer M, Jakob PM, Breuer FA. Fast MR parameter mapping using k‐t principal component analysis. Magn Reson Med 2011; 66;706-716. Mani M, Jacob M, Magnotta V, Zhong J. Fast iterative algorithm for the reconstruction of multishot non-cartesian diffusion data. Magn Reson Med 2015; 74:1086–1094. Tamir JI, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging. Magn Reson Med 2017; 77:180-195. - sparsity transforms, variational penalties, regularization - (commands: cdf97, rof, tgv, lrmatrix, pocsense, pics) Rudin LI, Osher S, Fatemi E. Nonlinear total variation based noise removal algorithms, Physica D: Nonlinear Phenomena 1992; 60:259-268. Figueiredo MAT and Nowak RD. An EM algorithm for wavelet-based image restoration. IEEE Trans Image Process 2003; 12:906-916. Ong F, Uecker M, Tariq U, Hsiao A, Alley MT, Vasanawala SS, Lustig M. Robust 4D Flow Denoising using Divergence-free Wavelet Transform, Magn Reson Med 2015; 73:828-842. Ong F, Lustig M. Beyond low rank + sparse: Multi-scale low rank matrix decomposition, IEEE J Sel Topics Signal Process 2016; 10:672-687. Bredies K, Kunisch K, Pock T. Total generalized variation. SIAM Journal on Imaging Sciences 2010; 3:492-526. Luo G, Zhao N, Jiang W, Hui ES, Cao P. MRI reconstruction using deep Bayesian estimation. Magn Reson Med 2020; 84:2246-2261. Knoll F, Bredies K, Pock T, Stollberger R. Second order total generalized variation (TGV) for MRI. Magn Reson Med 2010; 65:480-491. - sampling schemes - (commands: traj, poisson, wave, wavepsf) Winkelmann S, Schaeffter T, Koehler T, Eggers H, Doessel O. An optimal radial profile order based on the Golden Ratio for time-resolved MRI. IEEE Trans Med Imaging 2007; 26:68-76. Lustig M, Alley M, Vasanawala S, Donoho DL, Pauly JM. L1 SPIR-iT: Autocalibrating Parallel Imaging Compressed Sensing Annual Meeting ISMRM, Honolulu 2009, In: Proc Intl Soc Mag Reson Med 2009; 17:379. Bilgic B, Gagoski BA, Cauley SF, Fan AP, Polimeni JR, Grant PE, Wald LL, Setsompop K. Wave-CAIPI for highly accelerated 3D imaging. Magn Reson Med 2014; 73:2152-2162. Wundrak S, Paul J, Ulrici J, Hell E, Geibel M-A, Bernhardt P, Rottbauer W, Rasche V. Golden ratio sparse MRI using tiny golden angles. Magn Reson Med 2016; 75:2372-2378. - trajectory correction - (commands: estdelay) Block KT, Uecker M. Simple Method for Adaptive Gradient-Delay Compensation in Radial MRI. 
Annual Meeting ISMRM, Montreal 2011, In: Proc. Intl. Soc. Mag. Reson. Med 2011; 19:2816. Rosenzweig S, Holme HCM, Uecker M. Simple Auto-Calibrated Gradient Delay Estimation From Few Spokes Using Radial Intersections (RING). Magn Reson Med 2019; 81:1898-1906. - acceleration with graphical processing units - (commands: pocsense, nufft, pics, nlinv) Uecker M, Zhang S, Frahm J. Nonlinear Inverse Reconstruction for Real-time MRI of the Human Heart Using Undersampled Radial FLASH. Magn Reson Med 2010; 63:1456-1462. Murphy M, Alley M, Demmel J, Keutzer K, Vasanawala S, Lustig M. Fast ℓ1-SPIRiT Compressed Sensing Parallel Imaging MRI: Scalable Parallel Implementation and Clinically Feasible Runtime. IEEE Trans Med Imaging 2012; 31:1250-1262. - numerical phantoms and signals - (commands: phantom, signal) Shepp LA, Logan BF. The Fourier reconstruction of a head section. IEEE T Nucl Sci 1974; 21:21-43. Koay CG, Sarlls JE, Özarslan E. Three-Dimensional Analytical Magnetic Resonance Imaging Phantom in the Fourier Domain. Magn Reson Med 2007; 58:430-436. Guerquin-Kern M, Lejeune L, Pruessmann KP, M Unser M, Realistic Analytical Phantoms for Parallel Magnetic Resonance Imaging. IEEE Trans Med Imaging 2012; 31:626-636. Look DC, Locker DR. Time Saving in Measurement of NMR and EPR Relaxation Times. Review of Scientific Instruments 1970; 41:250. Schmitt P, Griswold MA, Jakob PM, Kotas M, Gulani V, Flentje M, Haase A. Inversion recovery TrueFISP: Quantification of T1, T2, and spin density. Magn Reson Med 2004; 51:661-667. Hamilton G, Yokoo T, Bydder M, Cruite I, Schroeder ME, Sirlin CB, Middleton MS. In vivo characterization of the liver fat 1H MR spectrum. NMR Biomed 2011; 24:784-790. Assländer J, Novikov DS, Lattanzi R, Sodickson DK, Cloos MA. Hybrid-state free precession in nuclear magnetic resonance. Communications Physics. 2019; 2:73. Scholand N, Schilling M, Heide M, Uecker M. Digital Reference Objects with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:3118. - machine learning - (command: mnist, nnet, reconet) Hammernik K, Klatzer T, Kobler E, Recht MP, Sodickson DK, Pock T, Knoll F. Learning a variational network for reconstruction of accelerated MRI data. Magn Reson Med 2018; 79:3055-3071. Aggarwal HK, Mani MP, Jacob M. MoDL: Model-Based Deep Learning Architecture for Inverse Problems. IEEE Trans Med Imaging 2019; 38:394--405. Pock T and Sabach S. Inertial Proximal Alternating Linearized Minimization (iPALM) for Nonconvex and Nonsmooth Problems. SIAM J Imaging Sci 2016; 9:1756--1787. Kingma DP and Ba J. Adam: A Method for Stochastic Optimization. arXiv preprint 2014, arXiv:1412.6980 libbart-devel/doc/resize.txt000066400000000000000000000007371463460177700164270ustar00rootroot00000000000000bart resize help ------------------- If the input looks like this: [[1 2 3] [4 5 6]] bart resize -c 0 6 1 5 input output [[0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 1. 2. 3. 0.] [0. 4. 5. 6. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] bart resize 0 6 1 5 input output [[1. 2. 3. 0. 0.] [4. 5. 6. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] bart resize -c 0 1 1 2 input output [[4. 5.]] bart resize 0 1 1 2 input output [[1. 2.]] libbart-devel/doc/style.txt000066400000000000000000000117161463460177700162650ustar00rootroot00000000000000 1. Language The main language is C11 with commonly used GNU extensions (gnu11) as supported by the GCC and clang compilers. 1.1. Standard types: The complex types are the standard types introduced with ISO C99. 
#include <complex.h> complex float complex double In headers we use _Complex without including the standard header for complex types for compatibility with C++. Similarly, we use the standard boolean type. #include <stdbool.h> bool x = true; 1.2. Arrays Large multi-dimensional arrays should use our md_* functions. Small arrays should use (variable-length) arrays to increase type-safety. Pointer arithmetic should be avoided. float kernel[N][3]; complex float* image = md_alloc(N, dims, CFL_SIZE); In headers, we use the __VLA(x) macro for compatibility with C++ when this is possible. 1.3. GNU Extensions: Some extensions are commonly supported by compilers and useful. statement expressions ({ }) __typeof__ const array parameters 1.4. Type safety void* and casts should be used only when necessary. Functions must have a prototype. Variable-length arrays are preferred over basic pointers. Macros can often be made type-safe, e.g. using the TYPE_CHECK macro. structs should be defined in the source (.c) file whenever possible to enforce modularization ("opaque pointers"). 2. Coding Style Coding style rules are meant as guidelines. It is OK to deviate from them in situations where this helps to make the code easier to understand. 2.1. Indentation Indentation uses a single tab. A tab is considered 8 characters. White space errors (white space after the last visible character of a line) should be avoided. Labels should be indented one tab less than the actual code. pragmas should start at the beginning of a line. The "omp" in OpenMP pragmas should be aligned to the affected statement by use of tabs after the initial "pragma". Lines should not exceed 80 to 100 characters. 2.2. Expressions There should be no space after the opening or before the closing bracket. There should be a single space before and after any operator except for prefix and postfix operators. Subexpressions should be enclosed in brackets and not rely on operator precedence for correct evaluation order. int i = (3 + x++) * 2; If there is a constant involved in a comparison the constant should be on the left side. if (0. == x) return false; The type of the controlling expression used in if statements or loops should be boolean. Pointers and integers should not be implicitly compared to NULL or zero. if (NULL == foo_ptr) foo_ptr = init_foo_ptr(); 2.3. Statement groups. Opening curly brace is on the next line for functions and on the same line for if, for, while, and switch statements. In the latter case there should be an empty line afterwards. In case only a single statement follows an if, for, or while statement, the statement block can be omitted - but for if-else pairs only if it can be omitted on both sides. There should be no empty line before the closing bracket. if (0. == x) { ... } if (0. == x) y = 3; Statements should be grouped in logical blocks by a single empty line. In particular, declarations, definitions (with initialization) should be separated from other statements. Memory allocation and deallocation should usually be separated. Multiple declarators in a single declaration should be avoided. void compute_bar(complex float* bar) { complex float* foo = md_alloc(); compute_foo(foo); compute_bar_from_foo(bar, foo); md_free(foo); } 2.4. Comments: Comments should be used to document the API, explain complicated code and algorithms, and give required background information. Comments are not meant to explain things a competent programmer should know by reading the code. Good: // gram-schmidt algorithm for (...) { for (..) { ...
} } Bad: // initialize foo int foo = 3; // config struct struct foo_conf_s conf = ... // allocate memory complex float* x = md_alloc(...); 2.5. Variable and Function Names Function and variable names should be lower case and use '_' as separator between components. Names should be meaningful but not unnecessarily long. If possible, use self-explanatory variable names. Except for loop indices, where one-letter variable names can be used. float norm = calc_frobenius_norm(image); This is preferable to adding comments: // calculate frobenous norm float n = clc_frbn(i); On the other hand, for frequently used functions a short name is appropriate. For example, we use md_fmac() instead of multidim_fused_multiply_accumulate(). Locally used loop indices can be single character letters: i, j, k 2.6. Includes System headers should be included first, followed by headers from other modules, headers from the same module and finally the header belonging to the current file. Include guards should use the following style __NAME_H where NAME is the basename of the header file. libbart-devel/doc/tensorflow.txt000066400000000000000000000057311463460177700173270ustar00rootroot00000000000000 TensorFlow graphs can be wrapped in an nlop (non-linear operator) to be called in BART. TensorFlow v1 graphs and TensorFlow v2 SavedModels are supported. In python/bart_tf.py, we provide wrapping functions to export TensorFlow graphs. 1. Tensor data types and shapes nlops work with single precision complex floats. From the TensorFlow side, we support the tf.complex64 and tf.float32 data types. Tensors with data type tf.float32 must have a 2 in the last dimension to stack real and imaginary part. BART will create an nlop ignoring this dimension. BART uses Fortran ordering for dimensions. Hence, the nlop will have flipped dimensions compared to the TensorFlow graph. 2. Naming Conventions The inputs of the TensorFlow graph should be named "input_0", "input_1", ... The outputs of the TensorFlow graph should be named "output_0", "output_1", ... TensorFlow v1 graphs are exported respecting the names for inputs/outputs which are assigned by the user. TensorFlow v2 SavedModels assign new names for the saved inputs/outputs, usually of the form: input_0 -> input_0 serving_default_input_0 output_0 -> StatefulPartitionedCall The names can be inspected using the "saved_model_cli" tool provided by TensorFlow. To provide the mapping to BART, include a file named "bart_config.dat" in the SavedModel directory containing the mapping in the following structure: # ArgumentNameMapping serving_default input_0 serving_default_input_0 0 grad_ys_0 serving_default_grad_ys_0 0 grad_0_0 StatefulPartitionedCall 0 output_0 StatefulPartitionedCall 1 Here "serving_default" is the signature and the integer in the last column is the index of the operation. 3. Automatic Differentiation We provide three methods to use TensorFlow automatic differentiation in BART: 3.1 Backpropagation For each output o provide an input "grad_ys_o" and for each combination of outputs o and inputs i provide the gradient with name "grad_i_o". The shape of "grad_ys_o" must equal the shape of "output_o" and the shape of "grad_i_o" must equal the shape of "input_i". 3.2 Jacobian The forward path can directly output the complete Jacobian matrix. For this, all dimensions of the input and output must either be equal or one of them must be one. We assume that the operator and hence the Jacobian is block diagonal.
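To make the naming conventions from section 2 concrete, a TensorFlow v2 SavedModel could be exported as follows (a minimal sketch using only standard TensorFlow APIs; the model, shapes, and path are hypothetical placeholders):

	import tensorflow as tf

	class Model(tf.Module):

	    @tf.function(input_signature=[tf.TensorSpec([1, 32, 32], tf.complex64, name="input_0")])
	    def __call__(self, input_0):
	        # toy operation standing in for a real network; BART will
	        # see the dimensions in flipped (Fortran) order
	        return {"output_0": 2. * input_0}

	model = Model()
	tf.saved_model.save(model, "./tf_model",
	                    signatures=model.__call__.get_concrete_function())

	# inspect the names TensorFlow assigned to the saved inputs/outputs:
	#   saved_model_cli show --dir ./tf_model --all
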
3.2.1 Holomorphic Functions The output of the Jacobian should be named "jacobian_0_0" (only one input and one output is supported). Please note that the Jacobian computed by TensorFlow (tape.jacobian) is the complex conjugate of the actual Jacobian! 3.2.2 Non-Holomorphic Functions For non-holomorphic functions, the Jacobian of the real-valued function can be provided with the name "jacobian_real_0_0". This Jacobian should have (TensorFlow)-dimensions [ ... , 2, 2 ] where the 2x2 matrix contains the real-valued derivatives: [ ... , 0, 0 ]: d real / d real [ ... , 0, 1 ]: d real / d imag [ ... , 1, 0 ]: d imag / d real [ ... , 1, 1 ]: d imag / d imag libbart-devel/doc/webasm.txt000066400000000000000000000076501463460177700164030ustar00rootroot00000000000000A short guide on how I compiled BART to WebAssembly: 1. Download (tested versions): - emscripten (3.1.41) - OpenBLAS source (0.3.23) - libfftw3 source (3.3.10) - BlocksRuntime source (from git) - bart source 2. Prepare: - use emsdk to install and activate the latest emscripten. 3. Compile OpenBLAS: - compiling this is a bit troublesome, for me it only works on a Linux system and not on Windows with WSL. - a few months ago OpenBLAS got support for emscripten so just use this make command to compile: make CC=emcc HOSTCC=gcc TARGET=RISCV64_GENERIC NOFORTRAN=1 USE_THREAD=0 - install the results outside of the standard places in /usr/! e.g. somewhere in your home directory make CC=emcc HOSTCC=gcc TARGET=RISCV64_GENERIC NOFORTRAN=1 USE_THREAD=0 PREFIX=$HOME/openblas install 4. Compile libfftw3: - again: set the install prefix outside of /usr/! - enable float and disable fortran emconfigure ./configure --prefix=$HOME/fftw/ --disable-fortran --enable-float emmake make make install 5. Compile BlocksRuntime: CC=emcc AR=emar RANLIB=emranlib ./buildlib - run the install script with sudo or: - open ./installlib in a text editor and delete lines 85 to 89, then run it without sudo sudo prefix=$HOME/blocksruntime ./installlib 6. Compile bart: - create a Makefile.local in folder Makefiles - content should be this: """ FFTW_BASE=$HOME/fftw BLAS_BASE=$HOME/openblas CC=emcc CFLAGS+=-fblocks OPENBLAS=1 FORTRAN=0 FFTWTHREADS=0 USE_THREAD=1 SHARED=1 PNG=0 OMP=0 TENSORFLOW=0 DEBUG=0 MKL=0 ACML=0 CUDA=0 CUDNN=0 ISMRMRD=0 """ - don't add libBlocksRuntime.a to LDFLAGS! clang will crash. - then compile; no need for emmake, just make: make - Troubleshooting: the compilation will fail if /usr/include is included (-I/usr/include). - Check if you have set all base directories outside of /usr or disabled them (e.g. ACML) 7. Include into your WebAssembly project: - copy the bart.o file to where you want it - add it to the emcc call as an input file e.g.: emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free', \ '_calc_phantom', '_calc_bart', '_calc_circ', '_fftc','_ifftc', '_num_init', '_pha_opts_defaults', \ '_memcfl_create', '_load_cfl', '_main_ecalib', '_main_pics', '_main_phantom', '_main_fft']" \ -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart.js /home/ich/fftw/lib/libfftw3f.a \ /home/ich/openblas/lib/libopenblas.a /home/ich/blocksruntime/libBlocksRuntime.a - the script build_webasm.sh builds the files needed for the included web examples - important to include all libfftw3.a, libopenblas.a and also libBlocksRuntime.a (here clang doesn't crash) - all functions, variables you want to access in the js files have to be exported.
- you definitely have to include '__Block_object_dispose' even if you don't call this function in your JS code! - to allocate and free memory include '_malloc' and '_free' - In the example given above, all functions needed for the small web example are exported; the second line contains names to call the bart functions directly, the third line uses the command line functions. - The "_" in front of the name is mandatory. The C function calc_phantom is called "_calc_phantom" in JS - MAXIMUM_MEMORY=4GB raises the amount of RAM the browser is allowed to use, ALLOW_MEMORY_GROWTH=1 allows dynamic allocation. - if you want to call a JS function from C, use -s EXPORTED_RUNTIME_METHODS="['addFunction', 'removeFunction']" -s RESERVED_FUNCTION_POINTERS=2 - Module.addFunction adds a callback and returns the C pointer. - set the reserved function pointers to the number of functions you need at the same time. - Results in two files: a.out.js and a.out.wasm (can be changed to anything with -o filename.js but the .js at the end is important) - Both have to be in the same directory on the web server libbart-devel/doxyconfig000066400000000000000000002342221463460177700157120ustar00rootroot00000000000000# Doxyfile 1.8.3.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "Berkeley Advanced Reconstruction Toolbox (BART)" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. #PROJECT_NUMBER = 0.3 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "" # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY = doc/dx/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. 
Note that you specify absolute paths here, but also # relative paths, which will be relative from the directory where doxygen is # started. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. 
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding
# "class=itcl::class" will allow you to use the command class in the
# itcl::class meaning.
TCL_SUBST =

# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = YES

# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
# sources only. Doxygen will then generate output that is more tailored for
# Java. For instance, namespaces will be presented as packages, qualified
# scopes will look different, etc.
OPTIMIZE_OUTPUT_JAVA = NO

# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources only. Doxygen will then generate output that is more tailored for
# Fortran.
OPTIMIZE_FOR_FORTRAN = NO

# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for
# VHDL.
OPTIMIZE_OUTPUT_VHDL = NO

# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension,
# and language is one of the parsers supported by doxygen: IDL, Java,
# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, and VHDL.
# For instance to make doxygen treat .inc files as Fortran files (default
# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
# that for custom extensions you also need to set FILE_PATTERNS otherwise the
# files are not read by doxygen.
EXTENSION_MAPPING =
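
# A hypothetical example (not set in this configuration): the CUDA sources
# matched by the *.cu entry in FILE_PATTERNS could be parsed with the C++
# parser.
#
# EXTENSION_MAPPING = cu=C++
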
# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
# comments according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you
# can mix doxygen, HTML, and XML commands with Markdown formatting.
# Disable only in case of backward compatibility issues.
MARKDOWN_SUPPORT = YES

# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
AUTOLINK_SUPPORT = YES

# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
BUILTIN_STL_SUPPORT = NO

# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
CPP_CLI_SUPPORT = NO

# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
# Doxygen will parse them like normal C++ but will assume all classes use
# public instead of private inheritance when no explicit protection keyword is
# present.
SIP_SUPPORT = NO

# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES (the
# default) will make doxygen replace the get and set methods by a property in
# the documentation. This will only work if the methods are indeed getting or
# setting a simple type. If this is not the case, or you want to show the
# methods anyway, you should set this option to NO.
IDL_PROPERTY_SUPPORT = YES

# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES, then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
DISTRIBUTE_GROUP_DOC = NO

# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
# the same type (for instance a group of public functions) to be put as a
# subgroup of that type (e.g. under the Public Functions section). Set it to
# NO to prevent subgrouping. Alternatively, this can be done per class using
# the \nosubgrouping command.
SUBGROUPING = YES

# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
# unions are shown inside the group in which they are included (e.g. using
# @ingroup) instead of on a separate page (for HTML and Man pages) or
# section (for LaTeX and RTF).
INLINE_GROUPED_CLASSES = NO

# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
# unions with only public data fields will be shown inline in the
# documentation of the scope in which they are defined (i.e. file, namespace,
# or group documentation), provided this scope is documented. If set to NO
# (the default), structs, classes, and unions are shown on a separate page
# (for HTML and Man pages) or section (for LaTeX and RTF).
INLINE_SIMPLE_STRUCTS = NO

# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
# is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically
# be useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
TYPEDEF_HIDES_STRUCT = NO

# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to
# determine which symbols to keep in memory and which to flush to disk.
# When the cache is full, less often used symbols will be written to disk.
# For small to medium size projects (<1000 input files) the default value is
# probably good enough. For larger projects a too small cache size can cause
# doxygen to be busy swapping symbols to and from disk most of the time
# causing a significant performance penalty.
# If the system has enough physical memory, increasing the cache will improve
# the performance by keeping more symbols in memory. Note that the value works
# on a logarithmic scale so increasing the size by one will roughly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
SYMBOL_CACHE_SIZE = 0

# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
# their name and scope. Since this can be an expensive process and often the
# same symbol appears multiple times in the code, doxygen keeps a cache of
# pre-resolved symbols. If the cache is too small doxygen will become slower.
# If the cache is too large, memory is wasted. The cache size is given by this
# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
LOOKUP_CACHE_SIZE = 0
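
# Worked example of the cache-size formula above (an illustration, not a
# recommendation): a value of 3 would give a cache of 2^(16+3) = 2^19 =
# 524288 symbols, eight times the default of 2^16 = 65536. The same
# arithmetic applies to both SYMBOL_CACHE_SIZE and LOOKUP_CACHE_SIZE.
#
# SYMBOL_CACHE_SIZE = 3
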
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------

# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available.
# Private class members and static file members will be hidden unless
# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
EXTRACT_ALL = YES

# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
EXTRACT_PRIVATE = NO

# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
# scope will be included in the documentation.
EXTRACT_PACKAGE = NO

# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
EXTRACT_STATIC = NO

# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
# defined locally in source files will be included in the documentation.
# If set to NO only classes defined in header files are included.
EXTRACT_LOCAL_CLASSES = YES

# This flag is only useful for Objective-C code. When set to YES local
# methods, which are defined in the implementation section but not in
# the interface are included in the documentation.
# If set to NO (the default) only methods in the interface are included.
EXTRACT_LOCAL_METHODS = NO

# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base
# name of the file that contains the anonymous namespace. By default
# anonymous namespaces are hidden.
EXTRACT_ANON_NSPACES = NO

# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
# If set to NO (the default) these members will be included in the
# various overviews, but no documentation section is generated.
# This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_MEMBERS = NO

# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy.
# If set to NO (the default) these classes will be included in the various
# overviews. This option has no effect if EXTRACT_ALL is enabled.
HIDE_UNDOC_CLASSES = NO

# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
# friend (class|struct|union) declarations.
# If set to NO (the default) these declarations will be included in the
# documentation.
HIDE_FRIEND_COMPOUNDS = NO

# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
# documentation blocks found inside the body of a function.
# If set to NO (the default) these blocks will be appended to the
# function's detailed documentation block.
HIDE_IN_BODY_DOCS = NO

# The INTERNAL_DOCS tag determines if documentation
# that is typed after a \internal command is included. If the tag is set
# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.
INTERNAL_DOCS = NO

# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
# file names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names.
# Windows and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = NO

# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
# will show members with their full class and namespace scopes in the
# documentation. If set to YES the scope will be hidden.
HIDE_SCOPE_NAMES = NO

# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put a list of the files that are included by a file in the documentation
# of that file.
SHOW_INCLUDE_FILES = YES

# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
# will list include files with double quotes in the documentation
# rather than with sharp brackets.
FORCE_LOCAL_INCLUDES = NO

# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.
INLINE_INFO = YES

# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
# will sort the (detailed) documentation of file and class members
# alphabetically by member name. If set to NO the members will appear in
# declaration order.
SORT_MEMBER_DOCS = YES

# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = NO

# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
# will sort the (brief and detailed) documentation of class members so that
# constructors and destructors are listed first. If set to NO (the default)
# the constructors will appear in the respective orders defined by
# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
SORT_MEMBERS_CTORS_1ST = NO

# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.
SORT_GROUP_NAMES = NO

# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
# sorted by fully-qualified names, including namespaces. If set to
# NO (the default), the class list will be sorted only by class name,
# not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the
# alphabetical list.
SORT_BY_SCOPE_NAME = NO

# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
# do proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function even
# if there is only one candidate or it is obvious which candidate to choose
# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
# will still accept a match between prototype and implementation in such cases.
STRICT_PROTO_MATCHING = NO

# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.
GENERATE_TODOLIST = YES

# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.
GENERATE_TESTLIST = YES

# The GENERATE_BUGLIST tag can be used to enable (YES) or
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.
GENERATE_BUGLIST = YES

# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
GENERATE_DEPRECATEDLIST = YES

# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if section-label ... \endif
# and \cond section-label ... \endcond blocks.
ENABLED_SECTIONS =

# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
# the initial value of a variable or macro consists of for it to appear in
# the documentation. If the initializer consists of more lines than specified
# here it will be hidden. Use a value of 0 to hide initializers completely.
# The appearance of the initializer of individual variables and macros in the
# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.
MAX_INITIALIZER_LINES = 30

# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
SHOW_USED_FILES = YES

# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
SHOW_FILES = YES

# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
# Namespaces page.
# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.
SHOW_NAMESPACES = YES

# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically
# from the version control system). Doxygen will invoke the program by
# executing (via popen()) the command <command> <input-file>, where <command>
# is the value of the FILE_VERSION_FILTER tag, and <input-file> is the name of
# an input file provided by doxygen. Whatever the program writes to standard
# output is used as the file version. See the manual for examples.
FILE_VERSION_FILTER =

# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option.
# You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.
LAYOUT_FILE =

# The CITE_BIB_FILES tag can be used to specify one or more bib files
# containing the references data. This must be a list of .bib files. The
# .bib extension is automatically appended if omitted. Using this command
# requires the bibtex tool to be installed. See also
# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
# feature you need bibtex and perl available in the search path. Do not use
# file names with spaces, bibtex cannot handle them.
CITE_BIB_FILES =
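
# A hypothetical example (no .bib file ships with this configuration):
#
# CITE_BIB_FILES = doc/references.bib
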
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------

# The QUIET tag can be used to turn on/off the messages that are generated
# by doxygen. Possible values are YES and NO. If left blank NO is used.
QUIET = NO

# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated by doxygen. Possible values are YES and NO. If left blank
# NO is used.
WARNINGS = YES

# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
# automatically be disabled.
WARN_IF_UNDOCUMENTED = YES

# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some
# parameters in a documented function, or documenting parameters that
# don't exist or using markup commands wrongly.
WARN_IF_DOC_ERROR = YES

# The WARN_NO_PARAMDOC option can be enabled to get warnings for
# functions that are documented, but have no documentation for their parameters
# or return value. If set to NO (the default) doxygen will only warn about
# wrong or incomplete parameter documentation, but not about the absence of
# documentation.
WARN_NO_PARAMDOC = NO

# The WARN_FORMAT tag determines the format of the warning messages that
# doxygen can produce. The string should contain the $file, $line, and $text
# tags, which will be replaced by the file and line number from which the
# warning originated and the warning text. Optionally the format may contain
# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)
WARN_FORMAT = "$file:$line: $text"

# The WARN_LOGFILE tag can be used to specify a file to which warning
# and error messages should be written. If left blank the output is written
# to stderr.
WARN_LOGFILE =

#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------

# The INPUT tag can be used to specify the files and/or directories that
# contain documented source files. You may enter file names like "myfile.cpp"
# or directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT =

# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
# also the default input encoding. Doxygen uses libiconv (or the iconv built
# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
# the list of possible encodings.
INPUT_ENCODING = UTF-8

# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank the following patterns are tested:
# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl
FILE_PATTERNS = *.c *.cc *.cxx *.cpp *.c++ *.cu *.h *.py *.m

# The RECURSIVE tag can be used to specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.
# If left blank NO is used.
RECURSIVE = YES
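
# For illustration only: INPUT is left blank above, so doxygen scans the
# directory it is run from. A hypothetical restriction to the C sources
# would look like this:
#
# INPUT = src
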
# The EXCLUDE tag can be used to specify files and/or directories that should
# be excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT
# tag.
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE = src/spirit-1.0 src/spirit-2.0 src/butterfly

# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
EXCLUDE_SYMLINKS = NO

# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories. Note that the wildcards are matched
# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*
EXCLUDE_PATTERNS =

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
EXCLUDE_SYMBOLS =

# The EXAMPLE_PATH tag can be used to specify one or more files or
# directories that contain example code fragments that are included (see
# the \include command).
EXAMPLE_PATH =

# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.
EXAMPLE_PATTERNS =

# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
# commands irrespective of the value of the RECURSIVE tag.
# Possible values are YES and NO. If left blank NO is used.
EXAMPLE_RECURSIVE = NO

# The IMAGE_PATH tag can be used to specify one or more files or
# directories that contain images that are included in the documentation (see
# the \image command).
IMAGE_PATH =

# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command <filter> <input-file>, where <filter>
# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
# input file. Doxygen will then use the output that the filter program writes
# to standard output.
# If FILTER_PATTERNS is specified, this tag will be ignored.
INPUT_FILTER =

# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis.
# Doxygen will compare the file name with each pattern and apply the
# filter if there is a match.
# The filters are a list of the form:
# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
# info on how filters are used. If FILTER_PATTERNS is empty or if
# none of the patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =

# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
# files to browse (i.e. when SOURCE_BROWSER is set to YES).
FILTER_SOURCE_FILES = NO

# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
# and it is also possible to disable source filtering for a specific pattern
# using *.ext= (so without naming a filter). This option only has effect when
# FILTER_SOURCE_FILES is enabled.
FILTER_SOURCE_PATTERNS =
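
# A hypothetical example of a per-pattern filter as described above (the
# script doc/py_filter does not exist in this repository):
#
# FILTER_PATTERNS = *.py=doc/py_filter
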
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
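
# A hypothetical example (assuming a Markdown README were part of the input):
#
# USE_MDFILE_AS_MAINPAGE = README.md
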
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------

# If the SOURCE_BROWSER tag is set to YES then a list of source files will
# be generated. Documented entities will be cross-referenced with these
# sources.
# Note: To get rid of all source code in the generated output, make sure also
# VERBATIM_HEADERS is set to NO.
SOURCE_BROWSER = NO

# Setting the INLINE_SOURCES tag to YES will include the body
# of functions and classes directly in the documentation.
INLINE_SOURCES = NO

# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C, C++ and Fortran comments will always remain visible.
STRIP_CODE_COMMENTS = YES

# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
# functions referencing it will be listed.
REFERENCED_BY_RELATION = NO

# If the REFERENCES_RELATION tag is set to YES
# then for each documented function all documented entities
# called/used by that function will be listed.
REFERENCES_RELATION = NO

# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
# link to the source code.
# Otherwise they will link to the documentation.
REFERENCES_LINK_SOURCE = YES

# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen's
# built-in source browser. The htags tool is part of GNU's global source
# tagging system (see http://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.
USE_HTAGS = NO

# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
VERBATIM_HEADERS = YES

#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------

# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
# of all compounds will be generated. Enable this if the project
# contains a lot of classes, structs, unions or interfaces.
ALPHABETICAL_INDEX = YES

# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 5

# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
# should be ignored while generating the index headers.
IGNORE_PREFIX =

#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------

# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES

# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `html' will be used as the default path.
HTML_OUTPUT = html

# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
# doxygen will generate files with .html extension.
HTML_FILE_EXTENSION = .html

# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
# standard header. Note that when using a custom header you are responsible
# for the proper inclusion of any scripts and style sheets that doxygen
# needs, which is dependent on the configuration options used.
# It is advised to generate a default header using "doxygen -w html
# header.html footer.html stylesheet.css YourConfigFile" and then modify
# that header. Note that the header is subject to change so you typically
# have to redo this when upgrading to a newer version of doxygen or when
# changing the value of configuration settings such as GENERATE_TREEVIEW!
HTML_HEADER =

# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER =

# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
# fine-tune the look of the HTML output. If left blank doxygen will
# generate a default style sheet. Note that it is recommended to use
# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
# tag will in the future become obsolete.
HTML_STYLESHEET =

# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
# user-defined cascading style sheet that is included after the standard
# style sheets created by doxygen. Using this option one can overrule
# certain style aspects. This is preferred over using HTML_STYLESHEET
# since it does not replace the standard style sheet and is therefore more
# robust against future updates. Doxygen will copy the style sheet file to
# the output directory.
HTML_EXTRA_STYLESHEET =

# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
HTML_EXTRA_FILES =

# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the style sheet and background images
# according to this color. Hue is specified as an angle on a colorwheel,
# see http://en.wikipedia.org/wiki/Hue for more information.
# For instance the value 0 represents red, 60 is yellow, 120 is green,
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.
HTML_COLORSTYLE_HUE = 220

# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
# grayscales only. A value of 255 will produce the most vivid colors.
HTML_COLORSTYLE_SAT = 100

# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
# the luminance component of the colors in the HTML output. Values below
# 100 gradually make the output lighter, whereas values above 100 make
# the output darker. The value divided by 100 is the actual gamma applied,
# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
# and 100 does not change the gamma.
HTML_COLORSTYLE_GAMMA = 80
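
# Worked example of the three color tags above (an illustration only): the
# configured values give a blue-based scheme (hue 220) at moderate saturation
# with a gamma of 80/100 = 0.8. A hypothetical grayscale scheme would keep
# the gamma and drop the saturation to zero:
#
# HTML_COLORSTYLE_SAT = 0
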
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated
# HTML page will contain the date and time when the page was generated.
# Setting this to NO can help when comparing the output of multiple runs.
HTML_TIMESTAMP = YES

# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
HTML_DYNAMIC_SECTIONS = NO

# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
# entries shown in the various tree structured indices initially; the user
# can expand and collapse entries dynamically later on. Doxygen will expand
# the tree to such a level that at most the specified number of entries are
# visible (unless a fully collapsed tree already exceeds this amount).
# So setting the number of entries to 1 will produce a fully collapsed tree by
# default. 0 is a special value representing an infinite number of entries
# and will result in a fully expanded tree by default.
HTML_INDEX_NUM_ENTRIES = 100

# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
# To create a documentation set, doxygen will generate a Makefile in the
# HTML output directory. Running make will produce the docset in that
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
GENERATE_DOCSET = NO

# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
# feed. A documentation feed provides an umbrella under which multiple
# documentation sets from a single provider (such as a company or product
# suite) can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"

# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
# should uniquely identify the documentation set bundle. This should be a
# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.
DOCSET_BUNDLE_ID = org.doxygen.Project

# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
# identify the documentation publisher. This should be a reverse domain-name
# style string, e.g. com.mycompany.MyDocSet.documentation.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher

# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
DOCSET_PUBLISHER_NAME = Publisher

# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.
GENERATE_HTMLHELP = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
# be used to specify the file name of the resulting .chm file. You
# can add a path in front of the file if the result should not be
# written to the html output directory.
CHM_FILE =

# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
# be used to specify the location (absolute path including file name) of
# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.
HHC_LOCATION =

# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
# controls if a separate .chi index file is generated (YES) or that
# it should be included in the master .chm file (NO).
GENERATE_CHI = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.
CHM_INDEX_ENCODING =

# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.
BINARY_TOC = NO

# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.
TOC_EXPAND = NO

# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.
GENERATE_QHP = NO

# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.
QCH_FILE =

# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project

# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when
# generating Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc

# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
QHP_CUST_FILTER_NAME =

# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
QHP_CUST_FILTER_ATTRS =

# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. For more information please see
# http://doc.trolltech.com/qthelpproject.html#filter-attributes
QHP_SECT_FILTER_ATTRS =

# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.
QHG_LOCATION =

# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
# will be generated, which together with the HTML files, form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as
# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.
GENERATE_ECLIPSEHELP = NO

# A unique identifier for the eclipse help plugin.
# When installing the plugin the directory name containing the HTML and XML
# files should also have this name.
ECLIPSE_DOC_ID = org.doxygen.Project

# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
# at top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it. Since the tabs have the same information as the
# navigation tree you can set this option to YES if you already set
# GENERATE_TREEVIEW to YES.
DISABLE_INDEX = NO

# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
# Since the tree basically has the same information as the tab index you
# could consider setting DISABLE_INDEX to YES when enabling this option.
GENERATE_TREEVIEW = NO

# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# (range [0,1..20]) that doxygen will group on one line in the generated HTML
# documentation. Note that a value of 0 will completely suppress the enum
# values from appearing in the overview section.
ENUM_VALUES_PER_LINE = 4

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
TREEVIEW_WIDTH = 250

# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.
EXT_LINKS_IN_WINDOW = NO

# Use this tag to change the font size of Latex formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.
FORMULA_FONTSIZE = 10

# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are
# not supported properly for IE 6.0, but are supported on all modern browsers.
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.
FORMULA_TRANSPARENT = YES

# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want the formulas to look prettier in the
# HTML output. When enabled you may also need to install MathJax separately
# and configure the path to it using the MATHJAX_RELPATH option.
USE_MATHJAX = NO

# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML)
# and SVG. The default value is HTML-CSS, which is slower, but has the best
# compatibility.
MATHJAX_FORMAT = HTML-CSS

# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script.
# For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to
# the MathJax Content Delivery Network so you can quickly see the result
# without installing MathJax.
# However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest

# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering.
MATHJAX_EXTENSIONS =
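
# For illustration only: with a local MathJax copy unpacked next to the HTML
# output directory (a hypothetical layout), the CDN reference above could be
# replaced by:
#
# USE_MATHJAX = YES
# MATHJAX_RELPATH = ../mathjax
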
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
# and DHTML and should work on any modern browser. Note that when using
# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
# (GENERATE_DOCSET) there is already a search function so this one should
# typically be disabled. For large projects the javascript based search engine
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
SEARCHENGINE = YES

# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript.
# There are two flavours of web server based search depending on the
# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script
# for searching and an index file used by the script. When EXTERNAL_SEARCH is
# enabled the indexing and searching needs to be provided by external tools.
# See the manual for details.
SERVER_BASED_SEARCH = NO

# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain
# the search results. Doxygen ships with an example indexer (doxyindexer) and
# search engine (doxysearch.cgi) which are based on the open source search
# engine library Xapian. See the manual for configuration details.
EXTERNAL_SEARCH = NO

# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
# Doxygen ships with an example search engine (doxysearch) which is based on
# the open source search engine library Xapian. See the manual for
# configuration details.
SEARCHENGINE_URL =

# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
SEARCHDATA_FILE = searchdata.xml

# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
EXTERNAL_SEARCH_ID =

# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
# to a relative location where the documentation can be found.
# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
EXTRA_SEARCH_MAPPINGS =

#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------

# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = YES

# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex

# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
# Note that when enabling USE_PDFLATEX this option is only used for
# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.
LATEX_CMD_NAME = latex

# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex

# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO

# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4

# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
EXTRA_PACKAGES =
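
# A hypothetical example pulling additional math support into the generated
# LaTeX (neither package is required by this configuration):
#
# EXTRA_PACKAGES = amsmath amssymb
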
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
LATEX_HEADER =

# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
# the generated latex document. The footer should contain everything after
# the last chapter. If it is left blank doxygen will generate a
# standard footer. Notice: only use this tag if you know what you are doing!
LATEX_FOOTER =

# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = YES

# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = YES

# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO

# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = NO

# If LATEX_SOURCE_CODE is set to YES then doxygen will include
# source code with syntax highlighting in the LaTeX output.
# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.
LATEX_SOURCE_CODE = NO

# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
# http://en.wikipedia.org/wiki/BibTeX for more info.
LATEX_BIB_STYLE = plain

#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------

# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.
GENERATE_RTF = NO

# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.
RTF_OUTPUT = rtf

# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
# RTF documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_RTF = NO

# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
# will contain hyperlink fields. The RTF file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using WORD or other
# programs which support those fields.
# Note: wordpad (write) and others do not support links.
RTF_HYPERLINKS = NO

# Load style sheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
RTF_STYLESHEET_FILE =

# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.
RTF_EXTENSIONS_FILE =

#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------

# If the GENERATE_MAN tag is set to YES Doxygen will
# generate man pages
GENERATE_MAN = NO

# The MAN_OUTPUT tag is used to specify where the man pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.
MAN_OUTPUT = man

# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3

# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = NO

#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------

# If the GENERATE_XML tag is set to YES Doxygen will
# generate an XML file that captures the structure of
# the code including all documentation.
GENERATE_XML = NO

# The XML_OUTPUT tag is used to specify where the XML pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `xml' will be used as the default path.
XML_OUTPUT = xml

# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_SCHEMA =

# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
XML_DTD =

# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.
XML_PROGRAMLISTING = YES

#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------

# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
# generate an AutoGen Definitions (see autogen.sf.net) file
# that captures the structure of the code including all
# documentation. Note that this feature is still experimental
# and incomplete at the moment.
GENERATE_AUTOGEN_DEF = NO

#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------

# If the GENERATE_PERLMOD tag is set to YES Doxygen will
# generate a Perl module file that captures the structure of
# the code including all documentation. Note that this
# feature is still experimental and incomplete at the
# moment.
GENERATE_PERLMOD = NO

# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.
PERLMOD_LATEX = NO

# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
# nicely formatted so it can be parsed by a human reader. This is useful
# if you want to understand what is going on. On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.
PERLMOD_PRETTY = YES

# The names of the make variables in the generated doxyrules.make file
# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.
PERLMOD_MAKEVAR_PREFIX =

#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------

# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES

# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = NO

# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO

# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
# pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES

# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.
INCLUDE_PATH =

# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
INCLUDE_FILE_PATTERNS =

# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED =
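
# A hypothetical example (not set here): pre-defining a macro so that
# conditionally compiled code shows up in the documentation.
#
# PREDEFINED = USE_CUDA=1
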
Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # manageable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. 
Note that if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libbart-devel/genctags000077500000000000000000000001251463460177700153300ustar00rootroot00000000000000#!/bin/bash ctags --langmap=c++:+.cu --extra=+f `find . -regex '.*\.[ch]u*' -print` libbart-devel/git-version.sh000077500000000000000000000006131463460177700164160ustar00rootroot00000000000000#!/bin/bash if test -d ${GIT_DIR:-.git} -o -f .git then GDOUT=`git describe --abbrev=7 --match "v*" --dirty 2>&1` if [[ $? -eq 0 ]]; then echo ${GDOUT} git describe --abbrev=7 --match "v*" | cut -f1 -d'-' > version.txt else if git diff --quiet --exit-code then cat version.txt else var=`cat version.txt` echo ${var}-dirty fi fi else cat version.txt fi libbart-devel/lib/000077500000000000000000000000001463460177700143575ustar00rootroot00000000000000libbart-devel/lib/.gitignore000066400000000000000000000001071463460177700163450ustar00rootroot00000000000000# Ignore everything in this directory * # Except this file !.gitignore libbart-devel/makedoc.sh000077500000000000000000000001271463460177700155530ustar00rootroot00000000000000#!/bin/bash ( cat doxyconfig ; echo "PROJECT_NUMBER=$(cat version.txt)" ) | doxygen - libbart-devel/matlab/000077500000000000000000000000001463460177700150515ustar00rootroot00000000000000libbart-devel/matlab/bart.m000066400000000000000000000075701463460177700161640ustar00rootroot00000000000000function [varargout] = bart(cmd, varargin) % BART Call BART command from Matlab. % [varargout] = BART(cmd, varargin) to run given bart command (cmd) using the % data arrays/matrices passed as varargin. % % [A, B] = BART('command', X, Y) call command with inputs X Y and outputs A B % % To output a list of available bart commands simply run "bart".
To % output the help for a specific bart command type "bart <command> -h". % % Parameters: % cmd: Command to run as string (including non-data parameters) % varargin: Data arrays/matrices used as input % % Example: % bart traj -h % [reco] = bart('nufft -i traj', data) call nufft with inputs data and outputs reco % % Authors: % 2014-2016 Martin Uecker % 2018 (Edited for WSL) Soumick Chatterjee % 2020 Martin Krämer % 2022 Jon Tamir % Check input variables if nargin==0 || isempty(cmd) fprintf('Usage: bart <command> <arguments...>\n\n'); cmd = ''; end % Check bart toolbox path [bart_path, isWSL] = get_bart_path(); if isempty(bart_path) error('BART path not detected.'); end % Clear the LD_LIBRARY_PATH environment variable (to work around a bug in Matlab). % Store original library path to be restored later. if ismac==1 libPath = getenv('DYLD_LIBRARY_PATH'); setenv('DYLD_LIBRARY_PATH', ''); else libPath = getenv('LD_LIBRARY_PATH'); setenv('LD_LIBRARY_PATH', ''); end % Strip string arguments that were passed as varargin strArgsInd = cellfun(@ischar,varargin); strArgs = varargin(strArgsInd); dataArgs = varargin(~strArgsInd); if (~isempty(strArgs)) % append to cmd cmd = sprintf('%s %s', cmd, sprintf('%s ', strArgs{:})); cmd(end) = []; end % Root path for temporary file name = tempname; % Files used for input in = cell(1, length(dataArgs)); for iFile = 1:length(dataArgs) in{iFile} = strcat(name, 'in', num2str(iFile)); writecfl(in{iFile}, dataArgs{iFile}); end in_str = sprintf(' %s', in{:}); % Files used for output out = cell(1, nargout); for iFile = 1:nargout out{iFile} = strcat(name, 'out', num2str(iFile)); end out_str = sprintf(' %s', out{:}); % Run bart if ispc % running windows? if isWSL % For WSL and modify paths in_strWSL = wslPathCorrection(in_str); out_strWSL = wslPathCorrection(out_str); final_strWSL = ['wsl ', bart_path, '/bart ', cmd, ' ', in_strWSL, ' ', out_strWSL]; ERR = system(final_strWSL); else % For cygwin use bash and modify paths ERR = system(['bash.exe --login -c ', ... strrep(bart_path, filesep, '/'), ... '"', '/bart ', strrep(cmd, filesep, '/'), ' ', ... strrep(in_str, filesep, '/'), ... ' ', strrep(out_str, filesep, '/'), '"']); end else ERR = system([bart_path, '/bart ', cmd, ' ', in_str, ' ', out_str]); end % Remove input files for iFile = 1:length(in) if (exist(strcat(in{iFile}, '.cfl'),'file')) delete(strcat(in{iFile}, '.cfl')); end if (exist(strcat(in{iFile}, '.hdr'),'file')) delete(strcat(in{iFile}, '.hdr')); end end % Remove output files for iFile = 1:length(out) if ERR == 0 varargout{iFile} = readcfl(out{iFile}); end if (exist(strcat(out{iFile}, '.cfl'),'file')) delete(strcat(out{iFile}, '.cfl')); end if (exist(strcat(out{iFile}, '.hdr'),'file')) delete(strcat(out{iFile}, '.hdr')); end end % Restore Library Path to its original value if (~isempty(libPath)) if ismac==1 setenv('DYLD_LIBRARY_PATH', libPath); else setenv('LD_LIBRARY_PATH', libPath); end end % Check if running BART was successful if (ERR~=0) && (~isempty(cmd)) error('command exited with an error'); end end libbart-devel/matlab/get_bart_path.m000066400000000000000000000015661463460177700200420ustar00rootroot00000000000000function [bart_path, isWSL] = get_bart_path() % GET_BART_PATH Get the BART path for Matlab. % [bart_path, isWSL] = get_bart_path() will return the bart path as seen by Matlab, % and optionally will return whether WSL was detected.
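%
% Example (a minimal illustrative sketch; the returned values depend on
% the local installation):
%   [bart_path, isWSL] = get_bart_path();
%   if isempty(bart_path), error('BART not found'); end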
% % Authors: % 2022 Jon Tamir % Check bart toolbox path bart_path = getenv('BART_TOOLBOX_PATH'); isWSL = false; if isempty(bart_path) if ~isempty(getenv('TOOLBOX_PATH')) bart_path = getenv('TOOLBOX_PATH'); elseif exist('/usr/local/bin/bart', 'file') bart_path = '/usr/local/bin'; elseif exist('/usr/bin/bart', 'file') bart_path = '/usr/bin'; else % Try to execute bart inside wsl, if it works, then it returns status 0 [bartstatus, ~] = system('wsl bart version -V'); if bartstatus==0 [~, bart_path] = system('wsl dirname $(which bart)'); bart_path = strip(bart_path); isWSL = true; end end end end libbart-devel/matlab/readcfl.m000066400000000000000000000022341463460177700166300ustar00rootroot00000000000000function data = readcfl(filenameBase) %READCFL Read complex data from file. % READCFL(filenameBase) read in reconstruction data stored in filenameBase.cfl % (complex float) based on dimensions stored in filenameBase.hdr. % % Parameters: % filenameBase: path and filename of cfl file (without extension) % % Written to edit data with the Berkeley Advanced Reconstruction Toolbox (BART). % % Copyright 2016. CBClab, Maastricht University. % 2016 Tim Loderhose (t.loderhose@student.maastrichtuniversity.nl) dims = readReconHeader(filenameBase); filename = strcat(filenameBase,'.cfl'); fid = fopen(filename); data_r_i = fread(fid, prod([2 dims]), '*float32'); data_r_i = reshape(data_r_i, [2 dims]); data = complex(zeros(dims,'single'),0); data(:) = complex(data_r_i(1,:),data_r_i(2,:)); fclose(fid); end function dims = readReconHeader(filenameBase) filename = strcat(filenameBase,'.hdr'); fid = fopen(filename); line = getNextLine(fid); dims = str2num(line); fclose(fid); end function line = getNextLine(fid) line = fgetl(fid); while(line(1) == '#') line = fgetl(fid); end end libbart-devel/matlab/test_bart.m000066400000000000000000000072631463460177700172260ustar00rootroot00000000000000function test_bart() %TEST_BART Runs a unit test for the MATLAB BART wrapper. % TEST_BART() can be used to test if the BART toolbox is properly set-up % and if changes/additions made to the MATLAB wrapper break any core % functionality of the MATLAB wrapper. % % Copyright 2020. 
Martin Krämer (Martin.Kraemer@uni-jena.de) % 2022 Jon Tamir %% Test setup testLog = []; capture = false; tolFloat = 1e-7; %% Test1: Environment variable bartPath = get_bart_path() testAssert(~isempty(bartPath), 'BART path'); %% Test2: Write/Read cfl file = tempname; data = rand(32,24,16); testRun('writecfl(file, data)','Write cfl file'); dataRead = testRun('readcfl(file)','Read cfl file', 1); testAssert(~any(reshape(abs(data-dataRead{1}),[],1) > tolFloat), 'Data consistency cfl file'); if (exist(strcat(file,'.cfl'),'file')) delete(strcat(file,'.cfl')) end %% Test3: Run bart with various parameters testRun('bart', 'Wrapper (without parameter)'); testRun('bart traj -h', 'Wrapper (method help)'); phantom = testRun("bart('phantom')", "Wrapper (No input, no parameter)", 1); testAssert(~isempty(phantom{1}), "Wrapper (No input, no parameter) - check output"); phantom = testRun("bart('phantom -3')", "Wrapper (No input)", 1); testAssert(~isempty(phantom{1}), "Wrapper (No input) - check output"); phantom_kSpace = testRun("bart('fft -u 3', varargin{1})", "Wrapper (One input, one parameter)", 1, phantom{1}); testAssert(~isempty(phantom_kSpace{1}), "Wrapper (One input, one parameter) - check output"); %% Check final test score failCount = sum(cellfun(@(x)(~x),testLog(:,2))); if (failCount == 0) fprintf('\nTEST Result: All Tests Passed!\n\n'); else fprintf(2, '\nTEST Result: %i Tests Failed!\n\n', failCount); end %% Helper functions function [Result] = testRun(Command, Name, OutCount, varargin) if (nargin < 3) OutCount = []; Result = []; end fprintf('TEST [%s] - running "%s" ', Name, Command); status = false; try % when capture is true the command is run via eval and its output % goes to the console, otherwise evalc is used to swallow the output if (capture) fprintf('\n'); if (isempty(OutCount)) eval(Command); else % to actually capture and return the output we have to % pre-initialize the results cell array with the % pre-defined number of outputs to capture Result = cell(OutCount); [Result{:}] = eval(Command); end else if (isempty(OutCount)) evalc(Command); else Result = cell(OutCount); [~, Result{:}] = evalc(Command); end end status = true; catch end testLog = cat(1, testLog, {Name, status}); fprintf(2 - status, '>> %s\n', testStatusToString(status)); end function testAssert(Condition, Name) fprintf('TEST [%s] ', Name); testLog = cat(1, testLog, {Name, Condition}); fprintf(2 - Condition, '>> %s\n', testStatusToString(Condition)); end function [StatusString] = testStatusToString(Status) if (Status) StatusString = 'Passed'; else StatusString = 'Failed'; end end function printLog(log) for iLog = 1:size(log,1) fprintf('%s: %s\n', log{iLog, 1}, log{iLog, 2}); end end end libbart-devel/matlab/writecfl.m000066400000000000000000000023771463460177700170550ustar00rootroot00000000000000function writecfl(filenameBase,data) %WRITECFL Write complex data to file. % WRITECFL(filenameBase, data) writes reconstruction data to % filenameBase.cfl (complex float) and its dimensions to filenameBase.hdr. % % Written to edit data with the Berkeley Advanced Reconstruction Toolbox (BART). % % Parameters: % filenameBase: path and filename of cfl file (without extension) % data: array/matrix to be written % % Copyright 2013. Joseph Y Cheng. % Copyright 2016. CBClab, Maastricht University. % 2012 Joseph Y Cheng (jycheng@mrsrl.stanford.edu). % 2016 Tim Loderhose (t.loderhose@student.maastrichtuniversity.nl).
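%
% Example (a minimal illustrative sketch; writes mydata.cfl and
% mydata.hdr to the current directory):
%   data = complex(randn(32,32,'single'), randn(32,32,'single'));
%   writecfl('mydata', data);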
dims = size(data); writeReconHeader(filenameBase,dims); filename = strcat(filenameBase,'.cfl'); fid = fopen(filename,'w'); data = data(:); fwrite(fid,[real(data)'; imag(data)'],'float32'); fclose(fid); end function writeReconHeader(filenameBase,dims) filename = strcat(filenameBase,'.hdr'); fid = fopen(filename,'w'); fprintf(fid,'# Dimensions\n'); for N=1:length(dims) fprintf(fid,'%d ',dims(N)); end if length(dims) < 5 for N=1:(5-length(dims)) fprintf(fid,'1 '); end end fprintf(fid,'\n'); fclose(fid); end libbart-devel/matlab/wslPathCorrection.m000066400000000000000000000007161463460177700207050ustar00rootroot00000000000000%Soumick Chatterjee function [outData] = wslPathCorrection(inData) outData=inData; for i = 'a':'z' %Replace drive letters with /mnt/ outData=strrep(outData,[i,':'],['/mnt/',i]); %if drive letter is supplied in lowercase outData=strrep(outData,[upper(i),':'],['/mnt/',i]); %if drive letter is supplied as uppercase end outData = strrep(outData, '\', '/'); %Change windows filesep to linux filesep end libbart-devel/msys_setup.sh000066400000000000000000000007731463460177700163670ustar00rootroot00000000000000#!/bin/bash pacman --sync --noconfirm --needed base-devel mingw-w64-x86_64-toolchain git mingw-w64-x86_64-fftw mingw-w64-x86_64-openblas mingw-w64-x86_64-libpng echo "Installing /usr/lib/librt.a" CURRENT_PATH=$(pwd) cd / curl https://repo.msys2.org/msys/x86_64/msys2-runtime-devel-3.2.0-3-x86_64.pkg.tar.zst | tar -I zstd -x usr/lib/librt.a cd $CURRENT_PATH GCC_PATH="/mingw64/bin" if [ -d "$GCC_PATH" ] && [[ ":$PATH:" != *":$GCC_PATH:"* ]]; then echo "export PATH=$GCC_PATH:\$PATH" >> ~/.bashrc fi libbart-devel/pkg/000077500000000000000000000000001463460177700143725ustar00rootroot00000000000000libbart-devel/pkg/rpm/000077500000000000000000000000001463460177700151705ustar00rootroot00000000000000libbart-devel/pkg/rpm/bart.spec000066400000000000000000000043661463460177700170050ustar00rootroot00000000000000Name: bart Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: tools for computational magnetic resonance imaging License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_repo_vcs }}} Source0: {{{ git_archive path=. source_name=bart dir_name=bart }}} %if 0%{?rhel} == 07 BuildRequires: fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel, devtoolset-7-toolchain, devtoolset-7-libatomic-devel %else BuildRequires: gcc, make, fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel %endif Requires: fftw, lapack, openblas, atlas, libpng %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. It consists of a programming library and a toolbox of command-line programs. The library provides common operations on multi-dimensional arrays, Fourier and wavelet transforms, as well as generic implementations of iterative optimization algorithms. The command-line tools provide direct access to basic operations on multi-dimensional arrays as well as efficient implementations of many calibration and reconstruction algorithms for parallel imaging and compressed sensing. # I suppose the binary shouldn't contain debug symbols by default %global debug_package %{nil} %prep {{{ git_setup_macro dir_name=bart }}} # transfer .git-version information from rpkg-macro-expansion time to build time echo {{{ bart_git_version }}} > version.txt %build %if 0%{?rhel} == 07 . 
/opt/rh/devtoolset-7/enable %endif export LDFLAGS="$LDFLAGS -Wl,--no-as-needed" make PARALLEL=1 make doc/commands.txt %install rm -rf $RPM_BUILD_ROOT export make PREFIX=usr DESTDIR=%{buildroot} install mkdir -p %{buildroot}/usr/share/bash-completion/completions/ install scripts/bart_completion.sh %{buildroot}/usr/share/bash-completion/completions/ install -D doc/bart.1 %{buildroot}/%{_mandir}/man1/bart.1 %files /usr/bin/bart %license LICENSE %{_mandir}/man1/bart.1* %doc /usr/share/doc/bart/README /usr/share/doc/bart/commands.txt {{{ bart_static_docs }}} /usr/share/bash-completion/completions/bart_completion.sh %changelog {{{ git_dir_changelog }}} libbart-devel/pkg/rpm/libbart-dev.install000066400000000000000000000022031463460177700207500ustar00rootroot00000000000000lib/libmisc.a usr/lib/bart src/misc/debug.h usr/include/bart/misc/ src/misc/mmio.h usr/include/bart/misc/ src/misc/version.h usr/include/bart/misc/ src/misc/cppwrap.h usr/include/bart/misc/ src/misc/misc.h usr/include/bart/misc/ src/misc/types.h usr/include/bart/misc/ src/misc/png.h usr/include/bart/misc/ src/misc/opts.h usr/include/bart/misc/ src/misc/nested.h usr/include/bart/misc/ lib/libnum.a usr/lib/bart src/num/multind.h usr/include/bart/num/ src/num/flpmath.h usr/include/bart/num/ src/num/fft.h usr/include/bart/num/ src/num/init.h usr/include/bart/num/ src/num/iovec.h usr/include/bart/num/ src/num/ops.h usr/include/bart/num/ lib/liblinops.a usr/lib/bart src/linops/linop.h usr/include/bart/linops/ src/linops/someops.h usr/include/bart/linops/ src/linops/grad.h usr/include/bart/linops/ lib/libiter.a usr/lib/bart src/iter/iter.h usr/include/bart/iter/ src/iter/iter2.h usr/include/bart/iter/ src/iter/prox.h usr/include/bart/iter/ src/iter/thresh.h usr/include/bart/iter/ src/iter/lsqr.h usr/include/bart/iter/ lib/libwavelet.a usr/lib/bart src/wavelet/wavelet.h usr/include/bart/wavelet/ src/wavelet/wavthresh.h usr/include/bart/wavelet/ libbart-devel/pkg/rpm/libbart-devel.spec000066400000000000000000000024331463460177700205620ustar00rootroot00000000000000Name: libbart-devel Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: Development files for BART License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_dir_vcs }}} Source0: {{{ git_archive path=. source_name=libbart-devel dir_name=libbart-devel }}} BuildRequires: gcc, make, fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. This package provides headers and static libraries. 
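# Illustrative (untested) compile line against the installed headers and
# static libraries; the exact library set and link order depend on which
# parts of BART are used (myrecon.c is a hypothetical user program):
#   cc -I/usr/include/bart myrecon.c -L/usr/lib/bart -llinops -lnum -lmisc -lm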
# I suppose the binary shouldn't contain debug symbols by default %global debug_package %{nil} %prep {{{ git_setup_macro dir_name=libbart-devel }}} %build export LDFLAGS="$LDFLAGS -Wl,--no-as-needed" make PARALLEL=1 %install rm -rf $RPM_BUILD_ROOT while read line; do src=$(cut -d' ' -f1 <<<"$line") dst=%{buildroot}/$(cut -d' ' -f2 <<<"$line") install -d "$dst" install "$src" "$dst" done < pkg/rpm/libbart-dev.install # ^ Contents of https://salsa.debian.org/med-team/bart/-/blob/master/debian/libbart-dev.install %files /usr/include/bart/ /usr/lib/bart/ %license LICENSE %changelog {{{ git_dir_changelog }}} libbart-devel/pkg/rpm/octave-bart.spec000066400000000000000000000031011463460177700202460ustar00rootroot00000000000000%global octpkg bart Name: octave-%{octpkg} Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: Octave bindings for BART License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_dir_vcs }}} Source0: {{{ git_archive path=. source_name=octave-bart dir_name=octave-bart }}} BuildArch: noarch BuildRequires: octave-devel Requires: bart, octave Requires(post): octave Requires(postun): octave %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. This package provides Octave bindings for BART. %prep {{{ git_setup_macro dir_name=octave-bart }}} # files that belong inside an octave pkg according to https://octave.org/doc/v4.4.0/Creating-Packages.html mkdir matlab/inst mv matlab/*.m matlab/inst cp LICENSE matlab/COPYING cat > matlab/DESCRIPTION < /dev/null 2>&1 if [[ $? -ne 0 ]]; then release=$(($(./git-version.sh | cut -d'-' -f2) + 1)) date=$(date '+%Y%m%d') commit=$(git rev-parse --short=7 HEAD) release=$release"."$date"git"$commit else release=1 fi echo -n $release } function bart_git_version() { ./git-version.sh | tr -d '\n' } function bart_static_docs() { find doc/ -type f -name "*.txt" | sed "s/^doc\/\(.*\)\$/\/usr\/share\/doc\/bart\/\1/g" } libbart-devel/python/000077500000000000000000000000001463460177700151325ustar00rootroot00000000000000libbart-devel/python/bart.py000066400000000000000000000120311463460177700164310ustar00rootroot00000000000000# Copyright 2016. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. 
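#
# Illustrative usage (a minimal sketch, assuming BART is installed and this
# module plus cfl.py are on the import path; command strings follow the
# usual BART CLI syntax):
#
#   from bart import bart
#
#   ksp = bart(1, 'phantom -k -x 64')   # k-space of a numerical phantom
#   img = bart(1, 'fft -iu 3', ksp)     # unitary inverse FFT along dims 0 and 1
#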
# # Authors: # 2016 Siddharth Iyer # 2018 Soumick Chatterjee , WSL Support import subprocess as sp import tempfile as tmp import cfl import os from wslsupport import PathCorrection def bart(nargout, cmd, *args, **kwargs): if type(nargout) != int or nargout < 0: print("Usage: bart(<nargout>, <command>, <arguments...>)") return try: bart_path = os.environ['BART_TOOLBOX_PATH'] except: bart_path = None # support old environment variable: if bart_path is None: try: bart_path = os.environ['TOOLBOX_PATH'] except: bart_path = None isWSL = False if not bart_path: if os.path.isfile('/usr/local/bin/bart'): bart_path = '/usr/local/bin' elif os.path.isfile('/usr/bin/bart'): bart_path = '/usr/bin' else: bartstatus = os.system('wsl bart version -V') if bartstatus==0: bart_path = '/usr/bin' isWSL = True else: raise Exception('Environment variable BART_TOOLBOX_PATH is not set.') name = tmp.NamedTemporaryFile().name nargin = len(args) infiles = [name + 'in' + str(idx) for idx in range(nargin)] for idx in range(nargin): cfl.writecfl(infiles[idx], args[idx]) args_kw = [("--" if len(kw)>1 else "-") + kw for kw in kwargs] infiles_kw = [name + 'in' + kw for kw in kwargs] for idx, kw in enumerate(kwargs): cfl.writecfl(infiles_kw[idx], kwargs[kw]) outfiles = [name + 'out' + str(idx) for idx in range(nargout)] cmd = cmd.split(" ") if os.name =='nt': if isWSL: #For WSL and modify paths infiles = [PathCorrection(item) for item in infiles] infiles_kw = [PathCorrection(item) for item in infiles_kw] outfiles = [PathCorrection(item) for item in outfiles] cmd = [PathCorrection(item) for item in cmd] args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = ['wsl', 'bart', *cmd, *args_infiles_kw, *infiles, *outfiles] else: #For cygwin use bash and modify paths infiles = [item.replace(os.path.sep, '/') for item in infiles] infiles_kw = [item.replace(os.path.sep, '/') for item in infiles_kw] outfiles = [item.replace(os.path.sep, '/') for item in outfiles] cmd = [item.replace(os.path.sep, '/') for item in cmd] args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = ['bash.exe', '--login', '-c', os.path.join(bart_path, 'bart'), *cmd, *args_infiles_kw, *infiles, *outfiles] #TODO: Test with cygwin, this is just translation from matlab code else: args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = [os.path.join(bart_path, 'bart'), *cmd, *args_infiles_kw, *infiles, *outfiles] # run bart command ERR, stdout, stderr = execute_cmd(shell_cmd) # store error code, stdout and stderr in function attributes for outside access # this makes it possible to access these variables from outside the function (e.g "print(bart.ERR)") bart.ERR, bart.stdout, bart.stderr = ERR, stdout, stderr for elm in infiles: if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') for elm in infiles_kw: if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') output = [] for idx in range(nargout): elm = outfiles[idx] if not ERR: output.append(cfl.readcfl(elm)) if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') if ERR: print(f"Command exited with error code {ERR}.") return if nargout == 0: return elif nargout == 1: return output[0] else: return output def execute_cmd(cmd): """ Execute a command in a shell. Print and catch the output.
""" errcode = 0 stdout = "" stderr = "" # remove empty strings from cmd cmd = [item for item in cmd if len(item)] # execute cmd proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True) # print to stdout for stdout_line in iter(proc.stdout.readline, ""): stdout += stdout_line print(stdout_line, end="") proc.stdout.close() # in case of error, print to stderr errcode = proc.wait() if errcode: stderr = "".join(proc.stderr.readlines()) print(stderr) proc.stderr.close() return errcode, stdout, stderr libbart-devel/python/bart_tf.py000066400000000000000000000241111463460177700171240ustar00rootroot00000000000000# Copyright 2022. Uecker Lab. University Center Göttingen. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # Moritz Blumenthal import os import numpy as np import cfl import tensorflow as tf2 try: import tensorflow.compat.v1 as tf1 except ImportError: import tensorflow as tf1 pass def tf2_export_module(model, dims, path, trace_complex=True): class BartWrapper(tf2.Module): def __init__(self, model, dims, vars_as_input = True, name=None): super(BartWrapper, self).__init__(name=name) self.model = model self.trace_complex = trace_complex self.dims_bart = [1] * 16 self.dims_tf = [1] * (len(dims) + 1) if not(trace_complex): self.dims_bart = self.dims_bart + [2] self.dims_tf = self.dims_tf + [2] for i in range(len(dims)): self.dims_bart[len(self.dims_bart) - 2 - i] = dims[i] self.dims_tf[len(self.dims_tf) - 2 - i] = dims[i] self.model(np.zeros(self.dims_tf, np.float32)) #run model ones to initialize weights else : for i in range(len(dims)): self.dims_bart[len(self.dims_bart) - 2 - i] = dims[i] self.dims_tf[len(self.dims_tf) - 2 - i] = dims[i] self.model(np.zeros(self.dims_tf, np.complex64)) #run model ones to initialize weights self.dims_tf[0] = -1 self.dims_bart[0] = -1 self.trace_complex = trace_complex if vars_as_input: self.vars = model.variables else: self.vars = [] self.vars_rtoc = [] # variables for which a 0 imaginary part is stacked for var in self.vars: self.vars_rtoc.append(2 != var.shape[-1]) self.sig = {} self.add_concrete_function() @tf2.function def __call__(self, input, weights, grad_in): for i in range(len(weights)): wgh = weights[i] if (self.vars_rtoc)[i]: slc = [ slice(None, None, None) ]* len(wgh.shape) slc[-1] = 0 wgh=wgh[tuple(slc)] self.model.variables[i].assign(wgh) with tf2.GradientTape(persistent=True) as g: g.watch(input) print("Tracing TensorFlow model with dims: {}".format(input)) res = tf2.reshape(input, self.dims_tf) outr = self.model(res) out = tf2.reshape(outr, self.dims_bart) result = {} result["output_0"] = out result["grad_0_0"] = g.gradient(out, input, grad_in) for i, input in enumerate(self.model.variables, 1): result["grad_{}_0".format(i)] = g.gradient(out, input, grad_in) if self.vars_rtoc[i - 1]: tmp = result["grad_{}_0".format(i)] result["grad_{}_0".format(i)] = tf2.stack([tmp, tf2.zeros_like(tmp)], axis = len(tmp.shape)) return result def add_concrete_function(self, name=None): dims = self.dims_bart.copy() dims[0] = None if (self.trace_complex): signature_input = tf2.TensorSpec(shape=dims, dtype=tf2.complex64, name="input_0") signature_grad_ys = tf2.TensorSpec(shape=dims, dtype=tf2.complex64, name="grad_ys_0") else: signature_input = tf2.TensorSpec(shape=dims, dtype=tf2.float32, name="input_0") signature_grad_ys = tf2.TensorSpec(shape=dims, dtype=tf2.float32, name="grad_ys_0") signature_weight = [] for i, var in enumerate(self.model.variables, 1): 
if self.vars_rtoc[i - 1]: signature_weight.append(tf2.TensorSpec(shape=list(var.shape)+[2], dtype=tf2.float32, name="input_{}".format(i))) else: signature_weight.append(tf2.TensorSpec(shape=var.shape, dtype=tf2.float32, name="input_{}".format(i))) if name is None: name = "serving_default" self.sig[name] = self.__call__.get_concrete_function(signature_input, signature_weight, signature_grad_ys) def save_variables(self, path): weights = [] for i, var in enumerate(self.variables): if (self.vars_rtoc[i]): weights.append(var.numpy().astype(np.complex64)) else: weights.append(np.empty(var.shape[:-1], dtype=np.complex64)) slc = [ slice(None, None, None) ] * len(var.shape) slc[-1] = 0 weights[-1].real = var.numpy()[tuple(slc)] slc[-1] = 1 weights[-1].imag = var.numpy()[tuple(slc)] if 0 == len(weights[-1].shape): weights[-1] = weights[-1].reshape([1]) weights[-1] = np.transpose(weights[-1]) if (0 < len(weights)): cfl.writemulticfl(path, weights) def save(self, path): tf2.saved_model.save(self, path, signatures=self.sig) self.save_variables(path+"/bart_initial_weights") from tensorflow.python.tools import saved_model_utils meta_graph_def = saved_model_utils.get_meta_graph_def(path, "serve") with open(path + "/bart_config.dat", 'w') as f: for signature in list(self.sig): inputs = meta_graph_def.signature_def[signature].inputs outputs = meta_graph_def.signature_def[signature].outputs f.write('# ArgumentNameMapping\n') f.write('{}\n'.format(signature)) for bart_name in list(inputs): f.write("{} {} {}\n".format(bart_name, inputs[bart_name].name.split(":")[0], inputs[bart_name].name.split(":")[1])) for bart_name in list(outputs): f.write("{} {} {}\n".format(bart_name, outputs[bart_name].name.split(":")[0], outputs[bart_name].name.split(":")[1])) BartWrapper(model, dims).save(path) class TensorMap: def __init__(self, tensor, name, enforce_real = False): if isinstance(tensor, TensorMap): self.tensor = tensor.tensor else: self.tensor = tensor self.name = name if (self.tensor.shape[-1] != 2) and (self.tensor.dtype == tf1.float32): self.type = "REAL" else: self.type = "COMPLEX" if isinstance(tensor, TensorMap): self.type = tensor.type if enforce_real: self.type = "REAL" def export(self): n = self.tensor.name return "{} {} {} {}".format(self.name, n.split(":")[0], n.split(":")[1], self.type) def tf1_export_tensor_mapping(path, name, mapping, signature="serving_default"): with open(path + "/" + name + ".map", 'w') as f: f.write('# ArgumentNameMapping\n') f.write('{}\n'.format(signature)) for map in mapping: f.write('{}\n'.format(map.export())) def tf1_op_exists(graph, name): try: graph.get_operation_by_name(name) return True except KeyError: return False def tf1_find_tensors(graph, inputs, outputs): if inputs is None: II = 0 inputs = [] while tf1_op_exists(graph, "input_"+str(II)): inputs.append(graph.get_tensor_by_name("input_{}:0".format(II))) II += 1 if outputs is None: OO = 0 outputs = [] while tf1_op_exists(graph, "output_"+str(OO)): outputs.append(graph.get_tensor_by_name("output_{}:0".format(OO))) OO += 1 for i in range(len(inputs)): inputs[i] = TensorMap(inputs[i], "input_"+str(i)) for i in range(len(outputs)): outputs[i] = TensorMap(outputs[i], "output_"+str(i)) return inputs, outputs def tf1_graph_attach_gradients(graph, inputs, outputs): grad_tensors=[] for o, out in enumerate(outputs): with graph.as_default(): gy = tf1.placeholder(out.tensor.dtype, shape=out.tensor.shape, name='grad_ys_'+ str(o)) grad_tensors.append(TensorMap(gy, 'grad_ys_'+ str(o), out.type == "REAL")) for i, inp in 
enumerate(inputs): for o, out in enumerate(outputs): name = 'grad_{}_{}'.format(i, o) with graph.as_default(): grad = tf1.gradients(out.tensor, inp.tensor, grad_tensors[o].tensor) grad = tf1.reshape(grad, tf1.shape(inp.tensor), name='grad_{}_{}'.format(i, o)) grad_tensors.append(TensorMap(grad, name, inp.type == "REAL")) return grad_tensors def tf1_export_graph(path, graph = None, session=None, inputs=None, outputs=None, name=None, attach_gradients=True): if graph is None: graph = tf1.get_default_graph() if name is None: name = os.path.basename(os.path.normpath(path)) inputs, outputs = tf1_find_tensors(graph, inputs, outputs) mappings = [] if attach_gradients: mappings = tf1_graph_attach_gradients(graph, inputs, outputs) mappings += inputs mappings += outputs tf1.train.write_graph(graph, path, name+'.pb', False) if session is not None: saver = tf1.train.Saver() saver.save(session, os.path.join(path, name)) else: if (tf1_op_exists(graph, "save/restore_all")): print("WARNING: No weights are stored with the graph!\nWARNING: BART probably will not be able to load the graph.") tf1_export_tensor_mapping(path, name, mappings) def tf1_convert_model(model_path, path, name): sess = tf1.Session() saver = tf1.train.Saver() saver.restore(sess, model_path) tf1_export_graph(path, graph=sess.graph, session=sess, name=name) libbart-devel/python/bartview.py000077500000000000000000000325061463460177700173340ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2015 Frank Ong from __future__ import division import operator import numpy as np import sys import matplotlib import matplotlib.pyplot as plt from matplotlib.widgets import Slider, Button, RadioButtons from functools import partial import time import threading import os.path class DiscreteSlider(Slider): """A matplotlib slider widget with discrete steps.""" def __init__(self, *args, **kwargs): self.previous_val = kwargs['valinit'] Slider.__init__(self, *args, **kwargs) def set_val(self, val): discrete_val = round(val) xy = self.poly.xy xy[2] = discrete_val, 1 xy[3] = discrete_val, 0 self.poly.xy = xy self.valtext.set_text(self.valfmt % discrete_val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if self.previous_val!=discrete_val: self.previous_val = discrete_val if not self.eventson: return for cid, func in self.observers.iteritems(): func(discrete_val) class BartView(object): def __init__(self, cflname): matplotlib.rcParams['toolbar'] = 'None' #matplotlib.rcParams['font.size'] = 6 # Read data self.cflname = sys.argv[1] self.im = self.readcfl(self.cflname) self.im_unsqueeze_shape = np.where( np.array(self.im.shape) > 1 )[0] self.im = self.im.squeeze() t1 = time.clock() # Reorder image self.Ndims = len( self.im.shape ) self.order = np.r_[:self.Ndims] self.im_ordered = self.im self.order3 = np.array([0,1,1]) # Slice image self.slice_num = np.zeros( self.Ndims, dtype = 'int' ); self.im_shape = self.im_ordered.shape self.im_slice = self.im_ordered[ (slice(None), slice(None)) + tuple(self.slice_num[2:]) ] # Create figure self.fig = plt.figure(facecolor='black', figsize=(9,6)) #self.fig = plt.figure(facecolor='black', figsize=(6,4)) self.fig.subplots_adjust( left=0.0 , bottom=0.0 , right=1.0 , top=1 - 0.25) self.fig.canvas.set_window_title(self.cflname) # Show image self.immax = np.max(abs(self.im)) self.l = plt.imshow(
abs(self.im_slice) , cmap = "gray", vmin=0, vmax=self.immax) self.ax = plt.gca() self.asp = self.im_ordered.shape[1] / self.im_ordered.shape[0] self.aspect = 1 self.ax.set_aspect( 1 ) plt.axis('off') radios = [] buttons = [] sliders = [] # Create Radio Buttons for X Y dimensions dims = self.im_unsqueeze_shape[ self.order ].astype(str) for i in xrange(0,len(dims)): dims[i] = "Dim " + dims[i] oboxx_ax = plt.axes( [0, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) oboxx_ax.set_xticks([]); oboxx_ax.set_yticks([]); orderx_ax = plt.axes( [0, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) orderx_radio = RadioButtons( orderx_ax, dims, activecolor = 'SteelBlue', active = 0 ) orderx_ax.text(0.5,1.05, 'Up/Down', horizontalalignment = 'center') radios.append( orderx_radio ) orderx_radio.on_clicked( self.update_orderx ) oboxy_ax = plt.axes( [0.1, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) oboxy_ax.set_xticks([]); oboxy_ax.set_yticks([]); ordery_ax = plt.axes( [0.1, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) ordery_radio = RadioButtons( ordery_ax, dims, activecolor = 'SteelBlue', active = 1 ) ordery_ax.text(0.5,1.05, 'Left/Right', horizontalalignment = 'center') radios.append( ordery_radio ) ordery_radio.on_clicked( self.update_ordery ) # Create Radio buttons for mosaic self.mosaic_valid = False mbox_ax = plt.axes( [0.2, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) mbox_ax.set_xticks([]); mbox_ax.set_yticks([]); mosaic_ax = plt.axes( [0.2, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) mosaic_radio = RadioButtons( mosaic_ax, dims, activecolor = 'SteelBlue', active = 1 ) mosaic_ax.text(0.5,1.05, 'Mosaic', horizontalalignment = 'center') radios.append( mosaic_radio ) mosaic_radio.on_clicked( self.update_mosaic ) # Create flip buttons self.flipx = 1; flipx_ax = plt.axes( [0.3, 1 - 0.09, 0.1, 0.09] ) flipx_button = Button( flipx_ax, 'Flip\nUp/Down', color='gainsboro' ) flipx_button.on_clicked(self.update_flipx); self.flipy = 1; flipy_ax = plt.axes( [0.3, 1 - 0.18, 0.1, 0.09] ) flipy_button = Button( flipy_ax, 'Flip\nLeft/Right', color='gainsboro' ) flipy_button.on_clicked(self.update_flipy); # Create Refresh buttons refresh_ax = plt.axes( [0.4, 1 - 0.09, 0.1, 0.09] ) refresh_button = Button( refresh_ax, 'Refresh', color='gainsboro' ) refresh_button.on_clicked(self.update_refresh); # Create Save button save_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) save_button = Button( save_ax, 'Export to\nPNG', color='gainsboro' ) save_button.on_clicked(self.save); # Create dynamic refresh radio button #self.drefresh = threading.Event() #drefresh_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) #drefresh_button = Button( drefresh_ax, 'Dynamic\nRefresh', color='gainsboro' ) #drefresh_button.on_clicked(self.update_drefresh); # Create Magnitude/phase radio button self.magnitude = True mag_ax = plt.axes( [0.5, 1 - 0.18, 0.1, 0.18], axisbg = 'gainsboro' ) mag_radio = RadioButtons( mag_ax, ('Mag','Phase') , activecolor = 'SteelBlue', active = 0 ) radios.append( mag_radio ) mag_radio.on_clicked( self.update_magnitude ) sbox_ax = plt.axes( [0.6, 1 - 0.18, 0.5, 0.18], axisbg='gainsboro') sbox_ax.set_xticks([]) sbox_ax.set_yticks([]) # Create aspect sliders aspect_ax = plt.axes( [0.65, 1 - 0.09 + 0.02, 0.1, 0.02], axisbg = 'white' ) aspect_slider = Slider( aspect_ax, "", 0.25, 4, valinit=1, color='SteelBlue') aspect_ax.text( 4 / 2,1.5, 'Aspect Ratio', horizontalalignment = 'center') sliders.append( aspect_slider ) aspect_slider.on_changed( self.update_aspect ) # Create contrast sliders self.vmin = 0 vmin_ax = plt.axes( [0.83, 1 - 0.09 + 
0.02, 0.1, 0.02], axisbg = 'white' ) vmin_slider = Slider( vmin_ax, "", 0, 1, valinit=0, color='SteelBlue') vmin_ax.text(0.5,1.5, 'Contrast Min', horizontalalignment = 'center') sliders.append( vmin_slider ) vmin_slider.on_changed( self.update_vmin ) self.vmax = 1 vmax_ax = plt.axes( [0.83, 1 - 0.18 + 0.02, 0.1, 0.02], axisbg = 'white' ) vmax_slider = Slider( vmax_ax, "", 0, 1, valinit=1, color='SteelBlue') vmax_ax.text(0.5,1.5, 'Contrast Max', horizontalalignment = 'center') sliders.append( vmax_slider ) vmax_slider.on_changed( self.update_vmax ) # Create sliders for choosing slices box_ax = plt.axes( [0, 1 - 0.25, 1, 0.07], axisbg='gainsboro') box_ax.set_xticks([]) box_ax.set_yticks([]) slider_thick = 0.02 slider_start = 0.1 ax = [] for d in np.r_[:self.Ndims]: slice_ax = plt.axes( [0.01 + 1 / self.Ndims * d, 1 - 0.24, 0.8 / self.Ndims, slider_thick] , axisbg='white') slice_slider = DiscreteSlider( slice_ax, "", 0, self.im_shape[d]-1, valinit=self.slice_num[d],valfmt='%i', color='SteelBlue') slice_ax.text( (self.im_shape[d]-1)/2,1.5, 'Dim %d Slice' % self.im_unsqueeze_shape[d], horizontalalignment = 'center' ) sliders.append(slice_slider); slice_slider.on_changed( partial( self.update_slice, d ) ) plt.show() def readcfl(self, name): h = open(name + ".hdr", "r") h.readline() # skip l = h.readline() dims = [int(i) for i in l.split( )] n = reduce(operator.mul, dims, 1) h.close() return np.memmap( name + ".cfl", dtype = np.complex64, mode='r', shape=tuple(dims), order='F' ) def save( self, event ): extent = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted()) num = 0 fname = self.cflname + '_' + str(num) + '.png' while( os.path.isfile(fname) ): num += 1 fname = self.cflname + '_' + str(num) + '.png' self.fig.savefig(fname, bbox_inches=extent) def update_flipx( self, event ): self.flipx *= -1 self.update_image() def update_flipy( self, event ): self.flipy *= -1 self.update_image() def update_refresh( self, event ): self.update_image() def dynamic_refresh( self ): while( self.drefresh.is_set() ): self.update_image() def update_drefresh( self, event ): if ( self.drefresh.is_set() ): self.drefresh.clear() else: self.drefresh.set() th = threading.Thread( target = self.dynamic_refresh ) th.start() def update_aspect( self, aspect ): self.aspect = aspect self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * aspect ) def update_vmax( self, vmax ): self.vmax = max(self.vmin, vmax) self.l.set_clim( vmax = self.vmax * self.immax ); def update_vmin( self, vmin ): self.vmin = min(self.vmax,vmin) self.l.set_clim( vmin = self.vmin * self.immax ); def update_magnitude( self, l ): self.magnitude = ( l == 'Mag' ) if (self.magnitude): self.l.set_cmap('gray') else: self.l.set_cmap('hsv') self.update_image() def update_orderx( self, l ): l = int(l[4:]) self.order3[0] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordery( self, l ): l = int(l[4:]) self.order3[1] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordered_image(self): self.mosaic_valid = len( self.order3[:3] ) == len( set( self.order3[:3] ) ) self.order_valid = len( self.order3[:2] ) == len( set( self.order3[:2] ) ); if ( self.mosaic_valid ): self.order[:3] = self.order3[:3] order_remain = np.r_[:self.Ndims] for t in np.r_[:3]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[3:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * 
self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() elif ( self.order_valid ): self.order[:2] = self.order3[:2] order_remain = np.r_[:self.Ndims] for t in np.r_[:2]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[2:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() def update_image( self ): self.immax = np.max(abs(self.im)) self.l.set_clim( vmin = self.vmin * self.immax , vmax = self.vmax * self.immax ); if ( self.mosaic_valid ): im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy), slice(None)) + tuple(self.slice_num[self.order[3:]])] im_slice = self.mosaic( im_slice ) else: im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy)) + tuple(self.slice_num[self.order[2:]]) ] if self.magnitude: self.l.set_data( abs(im_slice) ) else: self.l.set_data( (np.angle(im_slice) + np.pi) / (2 * np.pi) ) self.fig.canvas.draw() def update_slice( self, d, s ): self.slice_num[d] = int(round(s)) self.update_image() def mosaic( self, im ): im = im.squeeze() (x, y, z) = im.shape z2 = int( np.ceil( z ** 0.5 ) ) z = int( z2 ** 2 ) im = np.pad( im, [(0,0), (0,0), (0, z - im.shape[2] )], mode='constant') im = im.reshape( (x, y * z, 1), order = 'F' ) im = im.transpose( (1, 2, 0) ) im = im.reshape( (y * z2 , z2, x), order = 'F' ) im = im.transpose( (2, 1, 0) ) im = im.reshape( (x * z2, y * z2), order = 'F' ) return im def update_mosaic( self, l ): l = int(l[4:]) self.order3[2] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() if __name__ == "__main__": # Error if more than 1 argument if (len(sys.argv) != 2): print "BartView: multidimensional image viewer for cfl" print "Usage: bview cflname" exit() BartView( sys.argv[1] ) libbart-devel/python/bartview3.py000077500000000000000000000327671463460177700174340ustar00rootroot00000000000000#!/usr/bin/python3 # # Copyright 2017. Massachusetts Institute of Technology. # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. 
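#
# Illustrative usage (a minimal sketch; expects the file pair
# myimage.cfl/myimage.hdr to exist):
#
#   python3 bartview3.py myimage
#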
# # Authors: # 2015 Frank Ong # 2017 Siddharth Iyer import operator import numpy as np import sys import matplotlib import matplotlib.pyplot as plt from matplotlib.widgets import Slider, Button, RadioButtons from functools import partial, reduce import time import threading import os.path class DiscreteSlider(Slider): """A matplotlib slider widget with discrete steps.""" def __init__(self, *args, **kwargs): self.previous_val = kwargs['valinit'] Slider.__init__(self, *args, **kwargs) def set_val(self, val): discrete_val = round(val) xy = self.poly.xy xy[2] = discrete_val, 1 xy[3] = discrete_val, 0 self.poly.xy = xy self.valtext.set_text(self.valfmt % discrete_val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if self.previous_val!=discrete_val: self.previous_val = discrete_val if not self.eventson: return for cid, func in self.observers.items(): func(discrete_val) class BartView(object): def __init__(self, cflname): matplotlib.rcParams['toolbar'] = 'None' #matplotlib.rcParams['font.size'] = 6 # Read data self.cflname = sys.argv[1] self.im = self.readcfl(self.cflname) self.im_unsqueeze_shape = np.where( np.array(self.im.shape) > 1 )[0] self.im = self.im.squeeze() if sys.version_info.major==3 and sys.version_info.minor < 8: t1 = time.clock() # Reorder image self.Ndims = len( self.im.shape ) self.order = np.r_[:self.Ndims] self.im_ordered = self.im self.order3 = np.array([0,1,1]) # Slice image self.slice_num = np.zeros( self.Ndims, dtype = 'int' ); self.im_shape = self.im_ordered.shape self.im_slice = self.im_ordered[ (slice(None), slice(None)) + tuple(self.slice_num[2:]) ] # Create figure self.fig = plt.figure(facecolor='black', figsize=(9,6)) #self.fig = plt.figure(facecolor='black', figsize=(6,4)) self.fig.subplots_adjust( left=0.0 , bottom=0.0 , right=1.0 , top=1 - 0.25) self.fig.canvas.set_window_title(self.cflname) # Show image self.immax = np.max(abs(self.im)) self.l = plt.imshow( abs(self.im_slice) , cmap = "gray", vmin=0, vmax=self.immax) self.ax = plt.gca() self.asp = self.im_ordered.shape[1] / self.im_ordered.shape[0] self.aspect = 1 self.ax.set_aspect( 1 ) plt.axis('off') radios = [] buttons = [] sliders = [] # Create Radio Buttons for X Y dimensions dims = self.im_unsqueeze_shape[ self.order ].astype(str) for i in range(0,len(dims)): dims[i] = "Dim " + dims[i] oboxx_ax = plt.axes( [0, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) oboxx_ax.set_xticks([]); oboxx_ax.set_yticks([]); orderx_ax = plt.axes( [0, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) orderx_radio = RadioButtons( orderx_ax, dims, activecolor = 'SteelBlue', active = 0 ) orderx_ax.text(0.5,1.05, 'Up/Down', horizontalalignment = 'center') radios.append( orderx_radio ) orderx_radio.on_clicked( self.update_orderx ) oboxy_ax = plt.axes( [0.1, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) oboxy_ax.set_xticks([]); oboxy_ax.set_yticks([]); ordery_ax = plt.axes( [0.1, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) ordery_radio = RadioButtons( ordery_ax, dims, activecolor = 'SteelBlue', active = 1 ) ordery_ax.text(0.5,1.05, 'Left/Right', horizontalalignment = 'center') radios.append( ordery_radio ) ordery_radio.on_clicked( self.update_ordery ) # Create Radio buttons for mosaic self.mosaic_valid = False mbox_ax = plt.axes( [0.2, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) mbox_ax.set_xticks([]); mbox_ax.set_yticks([]); mosaic_ax = plt.axes( [0.2, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) mosaic_radio = RadioButtons( mosaic_ax, dims, activecolor = 'SteelBlue', active = 1 ) mosaic_ax.text(0.5,1.05, 
'Mosaic', horizontalalignment = 'center') radios.append( mosaic_radio ) mosaic_radio.on_clicked( self.update_mosaic ) # Create flip buttons self.flipx = 1; flipx_ax = plt.axes( [0.3, 1 - 0.09, 0.1, 0.09] ) flipx_button = Button( flipx_ax, 'Flip\nUp/Down', color='gainsboro' ) flipx_button.on_clicked(self.update_flipx); self.flipy = 1; flipy_ax = plt.axes( [0.3, 1 - 0.18, 0.1, 0.09] ) flipy_button = Button( flipy_ax, 'Flip\nLeft/Right', color='gainsboro' ) flipy_button.on_clicked(self.update_flipy); # Create Refresh buttons refresh_ax = plt.axes( [0.4, 1 - 0.09, 0.1, 0.09] ) refresh_button = Button( refresh_ax, 'Refresh', color='gainsboro' ) refresh_button.on_clicked(self.update_refresh); # Create Save button save_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) save_button = Button( save_ax, 'Export to\nPNG', color='gainsboro' ) save_button.on_clicked(self.save); # Create dynamic refresh radio button #self.drefresh = threading.Event() #drefresh_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) #drefresh_button = Button( drefresh_ax, 'Dynamic\nRefresh', color='gainsboro' ) #drefresh_button.on_clicked(self.update_drefresh); # Create Magnitude/phase radio button self.magnitude = True mag_ax = plt.axes( [0.5, 1 - 0.18, 0.1, 0.18], facecolor = 'gainsboro' ) mag_radio = RadioButtons( mag_ax, ('Mag','Phase') , activecolor = 'SteelBlue', active = 0 ) radios.append( mag_radio ) mag_radio.on_clicked( self.update_magnitude ) sbox_ax = plt.axes( [0.6, 1 - 0.18, 0.5, 0.18], facecolor='gainsboro') sbox_ax.set_xticks([]) sbox_ax.set_yticks([]) # Create aspect sliders aspect_ax = plt.axes( [0.65, 1 - 0.09 + 0.02, 0.1, 0.02], facecolor = 'white' ) aspect_slider = Slider( aspect_ax, "", 0.25, 4, valinit=1, color='SteelBlue') aspect_ax.text( 4 / 2,1.5, 'Aspect Ratio', horizontalalignment = 'center') sliders.append( aspect_slider ) aspect_slider.on_changed( self.update_aspect ) # Create contrast sliders self.vmin = 0 vmin_ax = plt.axes( [0.83, 1 - 0.09 + 0.02, 0.1, 0.02], facecolor = 'white' ) vmin_slider = Slider( vmin_ax, "", 0, 1, valinit=0, color='SteelBlue') vmin_ax.text(0.5,1.5, 'Contrast Min', horizontalalignment = 'center') sliders.append( vmin_slider ) vmin_slider.on_changed( self.update_vmin ) self.vmax = 1 vmax_ax = plt.axes( [0.83, 1 - 0.18 + 0.02, 0.1, 0.02], facecolor = 'white' ) vmax_slider = Slider( vmax_ax, "", 0, 1, valinit=1, color='SteelBlue') vmax_ax.text(0.5,1.5, 'Contrast Max', horizontalalignment = 'center') sliders.append( vmax_slider ) vmax_slider.on_changed( self.update_vmax ) # Create sliders for choosing slices box_ax = plt.axes( [0, 1 - 0.25, 1, 0.07], facecolor='gainsboro') box_ax.set_xticks([]) box_ax.set_yticks([]) slider_thick = 0.02 slider_start = 0.1 ax = [] for d in np.r_[:self.Ndims]: slice_ax = plt.axes( [0.01 + 1 / self.Ndims * d, 1 - 0.24, 0.8 / self.Ndims, slider_thick] , facecolor='white') slice_slider = DiscreteSlider( slice_ax, "", 0, self.im_shape[d]-1, valinit=self.slice_num[d],valfmt='%i', color='SteelBlue') slice_ax.text( (self.im_shape[d]-1)/2,1.5, 'Dim %d Slice' % self.im_unsqueeze_shape[d], horizontalalignment = 'center' ) sliders.append(slice_slider); slice_slider.on_changed( partial( self.update_slice, d ) ) plt.show() def readcfl(self, name): h = open(name + ".hdr", "r") h.readline() # skip l = h.readline() dims = [int(i) for i in l.split( )] n = reduce(operator.mul, dims, 1) h.close() return np.memmap( name + ".cfl", dtype = np.complex64, mode='r', shape=tuple(dims), order='F' ) def save( self, event ): extent = 
self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted()) num = 0 fname = self.cflname + '_' + str(num) + '.png' while( os.path.isfile(fname) ): num += 1 fname = self.cflname + '_' + str(num) + '.png' self.fig.savefig(fname, bbox_inches=extent) def update_flipx( self, event ): self.flipx *= -1 self.update_image() def update_flipy( self, event ): self.flipy *= -1 self.update_image() def update_refresh( self, event ): self.update_image() def dynamic_refresh( self ): while( self.drefresh.is_set() ): self.update_image() def update_drefresh( self, event ): if ( self.drefresh.is_set() ): self.drefresh.clear() else: self.drefresh.set() th = threading.Thread( target = self.dynamic_refresh ) th.start() def update_aspect( self, aspect ): self.aspect = aspect self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * aspect ) def update_vmax( self, vmax ): self.vmax = max(self.vmin, vmax) self.l.set_clim( vmax = self.vmax * self.immax ); def update_vmin( self, vmin ): self.vmin = min(self.vmax,vmin) self.l.set_clim( vmin = self.vmin * self.immax ); def update_magnitude( self, l ): self.magnitude = ( l == 'Mag' ) if (self.magnitude): self.l.set_cmap('gray') else: self.l.set_cmap('hsv') self.update_image() def update_orderx( self, l ): l = int(l[4:]) self.order3[0] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordery( self, l ): l = int(l[4:]) self.order3[1] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordered_image(self): self.mosaic_valid = len( self.order3[:3] ) == len( set( self.order3[:3] ) ) self.order_valid = len( self.order3[:2] ) == len( set( self.order3[:2] ) ); if ( self.mosaic_valid ): self.order[:3] = self.order3[:3] order_remain = np.r_[:self.Ndims] for t in np.r_[:3]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[3:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() elif ( self.order_valid ): self.order[:2] = self.order3[:2] order_remain = np.r_[:self.Ndims] for t in np.r_[:2]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[2:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() def update_image( self ): self.immax = np.max(abs(self.im)) self.l.set_clim( vmin = self.vmin * self.immax , vmax = self.vmax * self.immax ); if ( self.mosaic_valid ): im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy), slice(None)) + tuple(self.slice_num[self.order[3:]])] im_slice = self.mosaic( im_slice ) else: im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy)) + tuple(self.slice_num[self.order[2:]]) ] if self.magnitude: self.l.set_data( abs(im_slice) ) else: self.l.set_data( (np.angle(im_slice) + np.pi) / (2 * np.pi) ) self.fig.canvas.draw() def update_slice( self, d, s ): self.slice_num[d] = int(round(s)) self.update_image() def mosaic( self, im ): im = im.squeeze() (x, y, z) = im.shape z2 = int( np.ceil( z ** 0.5 ) ) z = int( z2 ** 2 ) im = np.pad( im, [(0,0), (0,0), (0, z - im.shape[2] )], mode='constant') im = im.reshape( (x, y * z, 1), order = 'F' ) im = im.transpose( (1, 2, 0) ) im = im.reshape( (y * z2 , z2, x), order = 'F' ) im = im.transpose( (2, 1, 0) ) im = im.reshape( (x * z2, y * 
z2), order = 'F' ) return im def update_mosaic( self, l ): l = int(l[4:]) self.order3[2] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() if __name__ == "__main__": # Error if more than 1 argument if (len(sys.argv) != 2): print("BartView: multidimensional image viewer for cfl") print("Usage: bview cflname") exit() BartView( sys.argv[1] ) libbart-devel/python/cfl.py000066400000000000000000000066651463460177700162650ustar00rootroot00000000000000# Copyright 2013-2015. The Regents of the University of California. # Copyright 2021. Uecker Lab. University Center Göttingen. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2013 Martin Uecker # 2015 Jonathan Tamir from __future__ import print_function from __future__ import with_statement import numpy as np import mmap import os def readcfl(name): # get dims from .hdr with open(name + ".hdr", "rt") as h: h.readline() # skip l = h.readline() dims = [int(i) for i in l.split()] # remove singleton dimensions from the end n = np.prod(dims) dims_prod = np.cumprod(dims) dims = dims[:np.searchsorted(dims_prod, n)+1] # load data and reshape into dims with open(name + ".cfl", "rb") as d: a = np.fromfile(d, dtype=np.complex64, count=n); return a.reshape(dims, order='F') # column-major def readmulticfl(name): # get dims from .hdr with open(name + ".hdr", "rt") as h: lines = h.read().splitlines() index_dim = 1 + lines.index('# Dimensions') total_size = int(lines[index_dim]) index_sizes = 1 + lines.index('# SizesDimensions') sizes = [int(i) for i in lines[index_sizes].split()] index_dims = 1 + lines.index('# MultiDimensions') with open(name + ".cfl", "rb") as d: a = np.fromfile(d, dtype=np.complex64, count=total_size) offset = 0 result = [] for i in range(len(sizes)): dims = ([int(i) for i in lines[index_dims + i].split()]) n = np.prod(dims) result.append(a[offset:offset+n].reshape(dims, order='F')) offset += n if total_size != offset: print("Error") return result def writecfl(name, array): with open(name + ".hdr", "wt") as h: h.write('# Dimensions\n') for i in (array.shape): h.write("%d " % i) h.write('\n') size = np.prod(array.shape) * np.dtype(np.complex64).itemsize with open(name + ".cfl", "a+b") as d: os.ftruncate(d.fileno(), size) mm = mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) if array.dtype != np.complex64: array = array.astype(np.complex64) mm.write(np.ascontiguousarray(array.T)) mm.close() #with mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) as mm: # mm.write(array.astype(np.complex64).tobytes(order='F')) def writemulticfl(name, arrays): size = 0 dims = [] for array in arrays: size += array.size dims.append(array.shape) with open(name + ".hdr", "wt") as h: h.write('# Dimensions\n') h.write("%d\n" % size) h.write('# SizesDimensions\n') for dim in dims: h.write("%d " % len(dim)) h.write('\n') h.write('# MultiDimensions\n') for dim in dims: for i in dim: h.write("%d " % i) h.write('\n') size = size * np.dtype(np.complex64).itemsize with open(name + ".cfl", "a+b") as d: os.ftruncate(d.fileno(), size) mm = mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) for array in arrays: if array.dtype != np.complex64: array = array.astype(np.complex64) mm.write(np.ascontiguousarray(array.T)) mm.close() libbart-devel/python/splines_from_svg.py000077500000000000000000000345411463460177700210750ustar00rootroot00000000000000#!/usr/bin/python # -- coding: utf-8 -- """ Copyright 
2022. Uecker Lab, University Medical Center Goettingen. Authors: 2022 Martin Schilling (martin.schilling@med.uni-goettingen.de) 2022 Nick Scholand (scholand@tugraz.at) DESCRIPTION : This script takes an SVG file as an input, analyses the paths of objects, which can consist of horizontal, vertical, diagonal and cubic spline transformations, splits these transformations up into cubic Hermite splines and creates a CFL file for use with the BART phantom command line tool. """ import os import numpy as np import argparse import sys if 'BART_TOOLBOX_PATH' in os.environ and os.path.exists(os.environ['BART_TOOLBOX_PATH']): sys.path.insert(0, os.path.join(os.environ['BART_TOOLBOX_PATH'], 'python')) elif 'TOOLBOX_PATH' in os.environ and os.path.exists(os.environ['TOOLBOX_PATH']): sys.path.insert(0, os.path.join(os.environ['TOOLBOX_PATH'], 'python')) else: raise RuntimeError("BART_TOOLBOX_PATH is not set correctly!") import cfl DBLEVEL = 0 def read_svg(svg_input, scale_flag=True): """ Reads a given svg file to extract parameters of paths. :param str svg_input: File path to input svg. :param bool scale_coords: Boolean for scaling coordinates :returns: List of lists for paths. Element: [object_id, color, transforms] :rtype: list """ paths_list = [] prev_key_list, points_list = [], [] readout = False with open (svg_input, 'rt', encoding="utf8", errors='ignore') as input: for line in input: if "" in line: readout = False prev_key_list.append(prev_keys) points_list.append(points) paths_list.append([object_id, color]) input.close() if scale_flag: scale_coords(points_list, center=[0,0], norm=1.8) for num,(k,p) in enumerate(zip(prev_key_list,points_list)): if 0 != len(k): transforms = get_transforms(k,p) paths_list[num].append(transforms) else: paths_list[num].append([]) return paths_list def scale_coords(coords_list, center=[187.5, 125], norm=350): """ Scale coordinates to a given center with a maximal norm. :param list coords_list: List of lists of coordinates. Each list belongs to a series of control points. :param list center: Coordinates of new center :param int norm: Maximal value for width and height """ x_min = coords_list[0][0][0] x_max = coords_list[0][0][0] y_min = coords_list[0][0][1] y_max = coords_list[0][0][1] # determine maximal and minimal x- and y-values for cs in coords_list: for c in cs: x_max = c[0] if c[0] > x_max else x_max x_min = c[0] if c[0] < x_min else x_min y_max = c[1] if c[1] > y_max else y_max y_min = c[1] if c[1] < y_min else y_min # transfer values to new center x_trans = (x_max + x_min) / 2 y_trans = (y_max + y_min) / 2 # normalization factor of coordinates as ratio of norm and max([width,height]) norm_factor = norm / max([np.abs(x_max-x_min), np.abs(y_max-y_min)]) for cs in coords_list: for c in cs: c[0] = (c[0] - x_trans) * norm_factor + center[0] c[1] = (c[1] - y_trans) * norm_factor + center[1] def try_float(string): # Function for trying a string for conversion to float. try: f = float(string) return True except ValueError: return False def analyse_d_string(d_string): """ Analyse string of 'd' argument of path. The function returns the signal transform parameters and a list of coordinates of the control points. :param str d_string: Complete string contained in the 'd' parameter of a path. 
:returns: tuple(transform_keys, coordinates) WHERE list transform_keys is list of lower case signal characters for transformations list coordinates is list of absolute coordinates of control points """ content = d_string.split() prev_key = None points = [] transf_keys = [] cspline = [] count = 0 x_origin, y_origin = 0, 0 for num,section in enumerate(content): if len(section.split(",")) > 1 or try_float(section): # keys before coordinates signal new transformation, # lower case for relative, upper case for absolute coordinates special_keys = ['c','C','m','M', 'l', 'L'] # deal with exception, that 'm'/'M' keys may be followed by diagonal # transformation ('l' key) without explicit key if len(section.split(",")) > 1 and prev_key not in special_keys: prev_key = "l" if "c" == prev_key or "C" == prev_key: count += 1 if 3 == count: cspline.append([cspline[1][0]+cspline[1][0]-cspline[0][0],cspline[1][1]+cspline[1][1]-cspline[0][1]]) if "c" == prev_key: # relative reference point x_origin += float(content[num].split(",")[0]) y_origin += float(content[num].split(",")[1]) if "C" == prev_key: # absolute reference point x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) # append intermediate control points points.append([cspline[0][0], cspline[0][1]]) transf_keys.append(prev_key) points.append([cspline[1][0], cspline[1][1]]) transf_keys.append(prev_key) count = 0 cspline = [] else: if "c" == prev_key: cspline.append([x_origin+float(content[num].split(",")[0]), y_origin+float(content[num].split(",")[1])]) if "C" == prev_key: cspline.append([float(content[num].split(",")[0]), float(content[num].split(",")[1])]) else: count = 0 # start of path if "m" == prev_key or "M" == prev_key: x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) # horizontal transformation if "h" == prev_key: x_origin += float(content[num]) if "H" == prev_key: x_origin = float(content[num]) # vertical transformation if "v" == prev_key: y_origin += float(content[num]) if "V" == prev_key: y_origin = float(content[num]) # diagonal transformation if "l" == prev_key: x_origin += float(content[num].split(",")[0]) y_origin += float(content[num].split(",")[1]) if "L" == prev_key: x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) if 0 == count: points.append([x_origin, y_origin]) transf_keys.append(prev_key.lower()) if 'M' == prev_key: prev_key = 'L' if 'm' == prev_key: prev_key = 'l' else: prev_key = section return transf_keys, points def controlpoints2cspline(bezier_points): """ Translate four input control points into a cubic Hermite spline format suitable for BART. :param list bezier_points: List of four control points in format [p1,p2,p3,p4] with p_i=[x_i,y_i] :returns: Parameters for cubic Hermite spline [x_parameters, y_parameters] :rtype: list """ bezier_cspline = [[1,-3,0,0],[0,3,0,0],[0,0,0,-3],[0,0,1,3]] bezier_x = [p[0] for p in bezier_points] bezier_y = [p[1] for p in bezier_points] bezier = [bezier_x, bezier_y] cspline = [[0,0] for i in range(4)] for num, c in enumerate(bezier): for i in range(4): for j in range(4): cspline[i][num] += bezier_cspline[j][i] * bezier[num][j] cspline_x = [p[0] for p in cspline] cspline_y = [p[1] for p in cspline] return [cspline_x, cspline_y] def get_transforms(keys, points): """ Create separate transformations from given lists of keys and coordinates. The transformations have the form [[x_transforms],[y_transforms]] in the cubic Hermite spline format. 
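Example (derived from the code below): a horizontal segment with key 'h' from a previous point [x0, y0] to [x1, y0] is emitted as [[x0, 0., x1, -0.], [y0, 0., y0, -0.]], i.e. the two end points with zero tangents. Non-trivial cubic segments are converted by controlpoints2cspline(), which maps the Bezier control points b0..b3 of each coordinate to the Hermite parameters [b0, 3*(b1 - b0), b3, 3*(b3 - b2)].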
:param list keys: List of signal characters for path transformations [key1, key2, ...] :param list points: List of coordinates [[x1,y1], [x2,y2], ...] :returns: Transformations in cubic Hermite spline format :rtype: list """ transforms = [] for num,(k,p) in enumerate(zip(keys,points)): if 'h' == k: transforms.append([[points[num-1][0],0.,p[0],-0.],[p[1],0.,p[1],-0.]]) if 'v' == k: transforms.append([[p[0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) if 'l' == k: transforms.append([[points[num-1][0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) if num+1 < len(points) and 'c' == k: # non-trivial B-spline if 'c' == keys[num-1] and 'c' == keys[num+1]: keys[num+1] = None transforms.append(controlpoints2cspline(points[num-2:num+2])) # trivial B-spline elif 'c' != keys[num-1] and 'c' != keys[num+1]: transforms.append([[points[num-1][0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) return transforms def format_transforms(transforms, object_id, filename, output_file): """ Format transforms for insertion into /bart/src/geom/logo.c :param list transform: List of transformations in cubic Hermite spline format :param list object_id: List of object ids for indexing transformations :param str filename: Name of struct :param str output_file: File path to output text file """ total_transforms = sum([len(t) for t in transforms]) with open (output_file, 'w', encoding="utf8", errors='ignore') as output: output.write("//Replace in bart/src/geom/logo.c > bart_logo and adjust bart/src/geom/logo.h\n\n") output.write("const double "+filename+"["+str(total_transforms)+"][2][4] = {\n") for num, transform in enumerate(transforms): output.write("\t//"+str(object_id[num])+"\n") for enum,t in enumerate(transform): x_string = str(t[0][0])+", "+str(t[0][1])+", "+str(t[0][2])+", "+str(t[0][3]) y_string = str(t[1][0])+", "+str(t[1][1])+", "+str(t[1][2])+", "+str(t[1][3]) # current implementation in BART, likely to change to x_string, y_string in the future output.write("\t{ { "+y_string+" }, { "+x_string+" } },\n") output.write("};\n") def transform2polystruct(transforms, id_color, output_file): """ Create a polystruct for a given set of transformations and append it to output file. Can replace code in bart/src/simu/phantom.c > calc_bart :param list transforms: List of transformations in [[x_transforms],[y_transforms]] format :param list id_color: List of fill colors of individual objects :param str output_file: File path to output text file """ total_transforms = sum([len(t) for t in transforms]) with open (output_file, 'a', encoding="utf8", errors='ignore') as output: output.write("\tint N = "+str(total_transforms)+";\n") output.write("\tdouble points[N * 11][2];\n") output.write("\n") output.write("\tstruct poly poly = {\n\t\tkspace,\n\t\tcoeff,\n\t\tpopts->large_sens,\n\t\t"+str(len(transforms))+",\n\t\t&(struct poly1[]){\n") array_position = 0 for num, transform in enumerate(transforms): output.write("\t\t\t{ "+str(len(transform)*11)+" , "+str(id_color[num])+", ARRAY_SLICE(points, "+str(array_position*11)+", "+str((array_position+len(transform))*11) +") },\n") array_position += len(transform) output.write("\t\t}\n") output.write("\t};") output.close() def assign_color_id(colors): """ Extract color IDs from hex colors :param list colors: List of strings representing the objects colors :returns: List of Integers representing the objects colors as integer (> 0 !) 
IDs :rtype: list """ color_values, color_counts = np.unique(colors, return_counts=True) id_color = [list(color_values).index(i)+1 for i in colors] return id_color # Save geometry data in numpy array # coord -> [segment, cp_set:[x,y], cp_coord] with control points (cp) # meta -> [path index, number of segments, color of path] def save2cfl(new_transforms, new_colors, cfl_output): coord = [] meta = [] ind_path = 0 for sub_array in new_transforms: ind_seg = 0 for path in sub_array: path_array = np.array(path) coord.append(path_array) ind_seg += 1 meta.append(np.array([ind_path, ind_seg, new_colors[ind_path]])) ind_path += 1 coord = np.array(coord) meta = np.array(meta) if (2 <= DBLEVEL): print("Coord Dims:") print(np.shape(coord)) print("Meta Dims:") print(np.shape(meta)) print("Meta:") print(meta) cfl.writemulticfl(cfl_output, np.array([coord, meta], dtype=object)) def main(svg_input, text_output, output): """ Extract parameters of paths from SVG file and write code block into txt file, which is suitable for bart/src/simu/shepplogan.c > calc_bart and bart/src/geom/logo.c. :param str svg_input: File path to input SVG file :param str cfl: File path to output cfl file. Default: .{cfl,hdr} """ if (text_output): text_filename = output+".txt" path_objects = read_svg(svg_input) object_ids = [obj[0] for obj in path_objects] colors = [obj[1] for obj in path_objects] transforms = [obj[2] for obj in path_objects] # Sort paths by color (=: grey value in provided SVG file) id_color = assign_color_id(colors) new_colors = sorted(id_color) new_ids = [id for color, id in sorted(zip(id_color,object_ids))] new_transforms = [trans for color, trans in sorted(zip(id_color,transforms))] color_values, color_counts = np.unique(new_colors, return_counts=True) if (2 <= DBLEVEL): print("Distribution of colors:") print("Value:\t", color_values) print("Number:\t", color_counts) save2cfl(new_transforms, new_colors, output) if (1 <= DBLEVEL): print("Created files:") print(output+".{cfl,hdr}") if (text_output): format_transforms(new_transforms, new_ids, output, text_filename) transform2polystruct(new_transforms, new_colors, text_filename) if (1 <= DBLEVEL): print(output+".txt") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Script to extract control points of cubic Hermite splines from SVG file to CFL format.") parser.add_argument('input', type=str, help="Input SVG file") parser.add_argument('output', type=str, help="Output CFL filename") parser.add_argument('-d', '--db', default=-1, type=int, help="Specify debug value for additional information [default: 0]") # Internal option for more complicated objects with multi component paths (example: BRAIN geometry) # Requires manual tuning and is therefore hidden for simplicity parser.add_argument('-t', action='store_true', help=argparse.SUPPRESS) args = parser.parse_args() if ("BART_DEBUG_LEVEL" in os.environ): if (-1 != args.db): print("A local BART_DEBUG_LEVEL variable exists! 
It will be overwritten by -d input!\n") DBLEVEL = int(os.environ["BART_DEBUG_LEVEL"]) elif ("DEBUG_LEVEL" in os.environ): DBLEVEL = int(os.environ["DEBUG_LEVEL"]) if (-1 != args.db): DBLEVEL = args.db main(args.input, args.t, args.output) libbart-devel/python/wslsupport.py000066400000000000000000000006401463460177700177460ustar00rootroot00000000000000import string import os def PathCorrection(inData): outData=inData for i in string.ascii_lowercase: #Replace drive letters with /mnt/ outData=outData.replace(i+':','/mnt/'+i) #if drive letter is supplied in lowercase outData=outData.replace(i.upper()+':','/mnt/'+i) #if drive letter is supplied as uppercase outData=outData.replace(os.path.sep, '/') #Change windows filesep to linux filesep return outData libbart-devel/rules/000077500000000000000000000000001463460177700147435ustar00rootroot00000000000000libbart-devel/rules/make_symbol_table.sh000077500000000000000000000001311463460177700207460ustar00rootroot00000000000000#!/bin/bash EXEC=$1 OUT=$2 nm --defined-only ${EXEC} | cut -c11-16,19- | sort > ${OUT} libbart-devel/rules/update_commands.sh000077500000000000000000000005421463460177700204460ustar00rootroot00000000000000#!/bin/bash set -e BINDIR=$(dirname $0) BART_EXE=$1 shift OUTPUT=$1 shift XTARGETS=$@ TEST_FILE_COMMANDS=$(mktemp) echo "AUTOGENERATED. DO NOT EDIT." > ${TEST_FILE_COMMANDS} for cmd in ${XTARGETS} ; do printf "\n\n--%s--\n\n" $cmd ; ${BART_EXE} $cmd -h ; done >> ${TEST_FILE_COMMANDS} ${BINDIR}/update_if_changed.sh ${TEST_FILE_COMMANDS} ${OUTPUT} libbart-devel/rules/update_if_changed.sh000077500000000000000000000000561463460177700207140ustar00rootroot00000000000000#!/bin/bash cmp -s $1 $2 || mv $1 $2 rm -f $1 libbart-devel/rules/update_version.sh000077500000000000000000000002041463460177700203250ustar00rootroot00000000000000#!/bin/bash echo 'VERSION('`./git-version.sh`')' > version.new.$$ ./rules/update_if_changed.sh version.new.$$ src/misc/version.inc libbart-devel/save/000077500000000000000000000000001463460177700145475ustar00rootroot00000000000000libbart-devel/save/fftw/000077500000000000000000000000001463460177700155155ustar00rootroot00000000000000libbart-devel/save/fftw/README.txt000066400000000000000000000000221463460177700172050ustar00rootroot00000000000000Saves FFT wisdom. libbart-devel/save/nsv/000077500000000000000000000000001463460177700153555ustar00rootroot00000000000000libbart-devel/save/nsv/README.txt000066400000000000000000000000731463460177700170530ustar00rootroot00000000000000This folder is to save the simulations done by estvar/nsv. 
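Note on the cfl module above: a CFL dataset is a pair of files, a plain-text ".hdr" listing the array dimensions and a raw ".cfl" holding the complex-float samples in column-major (Fortran) order. A minimal round-trip sketch (the file name "example" is illustrative):

    import numpy as np
    import cfl   # python/cfl.py from this repository

    a = np.arange(24, dtype=np.complex64).reshape(2, 3, 4)
    cfl.writecfl("example", a)    # creates example.hdr and example.cfl
    b = cfl.readcfl("example")    # reads the data back; trailing singleton
                                  # dimensions are dropped
    assert np.array_equal(a, b)

Likewise, wslsupport.PathCorrection above maps drive letters such as "C:" to "/mnt/c" and rewrites path separators, so that Windows paths can be handed to a BART running inside WSL.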
libbart-devel/scripts/000077500000000000000000000000001463460177700153005ustar00rootroot00000000000000libbart-devel/scripts/bart_completion.sh000066400000000000000000000014721463460177700210210ustar00rootroot00000000000000# bart parameter-completion function _bart() { local cur=${COMP_WORDS[$COMP_CWORD]} if [ $COMP_CWORD -eq 1 ] ; then local CMDS=$(bart | tail -n +2) COMPREPLY=($(compgen -W "$CMDS" -- "$cur")); else local bcmd=${COMP_WORDS[1]} case $cur in -*) COMPREPLY=($(bart ${bcmd} -h | grep -o -E "^${cur}\w*")) ;; *) case $bcmd in twixread) COMPREPLY=($(compgen -o plusdirs -f -X '!*.dat' -- ${cur})) ;; *) local CFLS=$(compgen -o plusdirs -f -X '!*.hdr' -- ${cur}) local COOS=$(compgen -o plusdirs -f -X '!*.coo' -- ${cur}); local RAS=$(compgen -o plusdirs -f -X '!*.ra' -- ${cur}); local suffix=".hdr" COMPREPLY=($(for i in ${CFLS} ${COOS} ${RAS}; do echo ${i%$suffix} ; done)) ;; esac ;; esac fi return 0 } complete -o filenames -F _bart bart ./bart libbart-devel/scripts/espirit_econ.sh000066400000000000000000000041121463460177700203150ustar00rootroot00000000000000#!/bin/bash # Copyright 2018. Martin Uecker. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2018 Martin Uecker # # Memory-saving ESPIRiT # set -e LOGFILE=/dev/stdout title=$(cat <<- EOF ESPIRiT-ECON EOF ) helpstr=$(cat <<- EOF -l logfile -h help EOF ) usage="Usage: $0 [-h] " echo "$title" echo while getopts "hl:" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" input=$(readlink -f "$1") output=$(readlink -f "$2") if [ ! -e $input.cfl ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e $TOOLBOX_PATH/bart ] ; then echo "\$TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { XX=$(bart show -d0 $input) YY=$(bart show -d1 $input) ZZ=$(bart show -d2 $input) DIM=2 # To decouple along another dimension: # 1. change DIM # 2. replace ZZ below # 3. change the ecaltwo command bart ecalib -1 $input eon # zero-pad bart fft $(bart bitmask ${DIM}) eon eon_fft bart resize -c ${DIM} ${ZZ} eon_fft eon_fft2 bart fft -i $(bart bitmask ${DIM}) eon_fft2 eon for i in `seq -w 0 $(($ZZ - 1))` ; do bart slice ${DIM} $i eon sl bart ecaltwo ${XX} ${YY} 1 sl sens-$i.coo done # # join slices back together bart join ${DIM} sens-*.coo $output } > $LOGFILE exit 0 libbart-devel/scripts/grasp.sh000066400000000000000000000130121463460177700167450ustar00rootroot00000000000000#!/bin/bash # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2015 Martin Uecker # # Compressed sensing parallel imaging reconstruction with temporal # total-variation regularization for Siemens radial VIBE sequence # with golden-angle sampling (GRASP). 
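#
# Example call (file names illustrative; the values shown are the defaults):
#
#   ./grasp.sh -s 21 -r 0.05 -p 4 -t 4 meas.dat recon
#
# i.e. 21 spokes per frame, regularization lambda = 0.05, and at most
# 4 slices processed in parallel with 4 threads per slice.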
# set -e # default settings export SPOKES=21 export SKIP=0 export CALIB=400 export ITER=30 export REG=0.05 SCALE=0.6 LOGFILE=/dev/stdout MAXPROC=4 MAXTHREADS=4 title=$(cat <<- EOF (BART-)GRASP v0.3 (Berkeley Advanced Reconstruction Toolbox) --- EXPERIMENTAL --- FOR RESEARCH USE ONLY --- EOF ) helpstr=$(cat <<- EOF Compressed sensing parallel imaging reconstruction with temporal total-variation regularization for Siemens radial VIBE sequence with golden-angle sampling (GRASP). This script requires the Berkeley Advanced Reconstruction Toolbox version 0.2.09. (later versions may also work). -s spokes number of spokes per frame -r lambda regularization parameter -p maxproc max. number of slices processed in parallel -t maxthreads max. number of threads per slice -l logfile -h help EOF ) usage="Usage: $0 [-h] [-s spokes] [-r lambda] " echo "$title" echo while getopts "hl:s:p:t:r:" opt; do case $opt in s) SPOKES=$OPTARG ;; r) REG=$OPTARG ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; p) MAXPROC=$OPTARG ;; t) MAXTHREADS=$OPTARG ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" input=$(readlink -f "$1") output=$(readlink -f "$2") if [ ! -e $input ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e $TOOLBOX_PATH/bart ] ; then echo "\$TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { # read TWIX file bart twixread -A $input grasp export READ=$(bart show -d0 grasp) export COILS=$(bart show -d3 grasp) export PHASES=$(($(bart show -d1 grasp) / $SPOKES)) export OMP_NUM_THREADS=$((MAXPROC * $MAXTHREADS)) # zero-pad #flip $(bitmask 2) grasp grasp2 #resize 2 64 grasp2 grasp #circshift 2 10 grasp grasp2 #fft -u $(bitmask 2) grasp2 grasp_hybrid #rm grasp.* grasp2.* # inverse FFT along 3rd dimension bart fft -i -u $(bart bitmask 2) grasp grasp_hybrid rm grasp.cfl grasp.hdr SLICES=$(bart show -d2 grasp_hybrid) # create trajectory with 400 spokes and 2x oversampling bart traj -G -x$READ -y$CALIB r bart scale $SCALE r rcalib # create trajectory with 2064 spokes and 2x oversampling bart traj -G -x$READ -y$(($SPOKES * $PHASES)) r bart scale $SCALE r r2 # split off time dimension into index 10 bart reshape $(bart bitmask 2 10) $SPOKES $PHASES r2 rfull # number of threads per slice export OMP_NUM_THREADS=$MAXTHREADS calib_slice() { # extract slice bart slice 2 $1 grasp_hybrid grasp1-$1 # extract first $CALIB spokes bart extract 1 $(($SKIP + 0)) $(($SKIP + $CALIB)) grasp1-$1 grasp2-$1 # reshape dimensions bart reshape $(bart bitmask 0 1 2 3) 1 $READ $CALIB $COILS grasp2-$1 grasp3-$1 # apply inverse nufft to first $CALIB spokes bart nufft -i -t rcalib grasp3-$1 img-$1.coo } recon_slice() { # extract sensitivities for slice bart slice 2 $1 sens sens-$1 # extract spokes and split-off time dim bart extract 1 $(($SKIP + 0)) $(($SKIP + $SPOKES * $PHASES)) grasp1-$1 grasp2-$1 bart reshape $(bart bitmask 1 2) $SPOKES $PHASES grasp2-$1 grasp1-$1 # move time dimensions to dim 10 and reshape bart 
transpose 2 10 grasp1-$1 grasp2-$1 bart reshape $(bart bitmask 0 1 2) 1 $READ $SPOKES grasp2-$1 grasp1-$1 rm grasp2-$1.cfl grasp2-$1.hdr # reconstruction with tv penality along dimension 10 # old (v0.2.08): # pics -S -d5 -lv -u10. -r$REG -R$(bitmask 10) -i$ITER -t rfull grasp1-$1 sens-$1 i-$1.coo # new (v0.2.09): bart pics -S -d5 -u10. -RT:$(bart bitmask 10):0:$REG -i$ITER -t rfull grasp1-$1 sens-$1 i-$1.coo # clean up temp files rm *-$1.cfl *-$1.hdr } export -f calib_slice export -f recon_slice # loop over slices seq -w 0 $(($SLICES - 1)) | xargs -I {} -P $MAXPROC bash -c "calib_slice {}" # transform back to k-space and compute sensitivities bart join 2 img-*.coo img bart fft -u $(bart bitmask 0 1 2) img ksp #ecalib -S -c0.8 -m1 -r20 ksp sens # transpose because we already support off-center calibration region # in dim 0 but here we might have it in 2 bart transpose 0 2 ksp ksp2 bart ecalib -S -c0.8 -m1 -r20 ksp2 sens2 bart transpose 0 2 sens2 sens # loop over slices seq -w 0 $(($SLICES - 1)) | xargs -I {} -P $MAXPROC bash -c "recon_slice {}" #echo 20 | xargs -i --max-procs=$MAXPROC bash -c "recon_slice {}" # join slices back together bart join 2 i-*.coo $output # generate dicoms #for s in $(seq -w 0 $(($SLICES - 1))) ; do # for p in $(seq -w 0 $(($PHASES - 1))) ; do # bart slice 10 $p i-$s.coo i-$p-$s.coo # bart toimg i-$p-$s.coo $output.series$p.slice$s.dcm # done #done } > $LOGFILE exit 0 libbart-devel/scripts/kspace_precond.sh000077500000000000000000000051501463460177700206200ustar00rootroot00000000000000#!/bin/bash # Copyright 2022. TU Graz. Institute of Biomedical Imaging. # Author: Moritz Blumenthal # # F. Ong, M. Uecker and M. Lustig, Accelerating Non-Cartesian # MRI Reconstruction Convergence Using k-Space Preconditioning # IEEE TMI, 2020 39:1646-1654 # helpstr=$(cat <<- EOF Compute k-space preconditioner P such that ||P^2 AA^H - 1|| is minimal Note the square in the definition. The preconditioner can be used directly as wights in PICS. contains ones with image dimensions -B file subspace basis -g use GPU -h help EOF ) usage="Usage: $0 [-h] [-g] [-B ] " GPU="" BASIS="" while getopts "hgB:" opt; do case $opt in g) GPU="-g" ;; B) BASIS=$(readlink -f "$OPTARG") ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 3 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" 
>&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" ones=$(readlink -f "$1") traj=$(readlink -f "$2") prec=$(readlink -f "$3") WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR X=$(bart show -d0 $ones) Y=$(bart show -d1 $ones) Z=$(bart show -d2 $ones) s1=$((X*Y*Z)) if [[ 1 != $X ]] ; then X=$((2*X)); fi if [[ 1 != $Y ]] ; then Y=$((2*Y)); fi if [[ 1 != $Z ]] ; then Z=$((2*Z)); fi s2=$((X*Y*Z)) s3=$(echo "$s1*e(-1.5*l($s2))"|bc -l) bart fmac -C -s7 $ones $ones mps_norm2 bart scale $s3 mps_norm2 scale ksp_dims="1" for i in $(seq 15); do ksp_dims+=" $(bart show -d$i $traj)" done if [[ "$BASIS" != "" ]] ; then bart fmac -C -s$(bart bitmask 6) $BASIS $BASIS bas_scale bart fmac scale bas_scale scale2 bart copy scale2 scale COE=$(bart show -d6 $BASIS) bart transpose 6 7 $BASIS basis_r bart fmac -C $BASIS basis_r basis_2 bart reshape $(bart bitmask 6 7) $((COE*COE)) 1 basis_2 basis BASIS="-B basis" fi bart ones 16 $ksp_dims ksp bart scale 2 $traj traj2 bart nufft $BASIS -P --lowmem --no-precomp -a $GPU -x$X:$Y:$Z traj2 ksp psf bart resize -c 0 $X 1 $Y 2 $Z $ones ones_os bart fft -u 7 ones_os ones_ksp1 bart fmac -C ones_ksp1 ones_ksp1 ones_ksp bart fft -u -i 7 ones_ksp ones_img bart fmac psf ones_img psf_mul bart nufft $BASIS -P --lowmem --no-precomp $GPU traj2 psf_mul pre_inv bart creal pre_inv pre_inv_real bart invert pre_inv_real pre_real bart fmac pre_real scale pre_sqr bart spow -- 0.5 pre_sqr $prec libbart-devel/scripts/life.sh000066400000000000000000000023631463460177700165570ustar00rootroot00000000000000#!/bin/bash if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" #bart vec 0 0 1 0 v1 #bart vec 0 0 0 1 v2 #bart vec 0 1 1 1 v3 #bart join 1 v1 v2 v3 v #bart vec 0 0 1 1 v1 #bart vec 0 1 1 0 v2 #bart vec 0 0 1 0 v3 #bart join 1 v1 v2 v3 v #bart vec 0 1 1 1 0 1 v1 #bart vec 0 1 0 0 0 0 v2 #bart vec 0 0 0 0 1 1 v3 #bart vec 0 0 1 1 0 1 v4 #bart vec 0 1 0 1 0 1 v5 #bart join 1 v1 v2 v3 v4 v5 v #bart resize -c 0 300 1 300 v o #bart conway -n3000 o x bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v0 bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 v1 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v2 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v3 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v4 bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v5 bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 v6 bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v7 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v8 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v9 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 va bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 vb bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 vc bart join 1 v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 va vb vc v bart resize -c 0 50 1 50 v o bart conway -n3 o x libbart-devel/scripts/octview.m000077500000000000000000000004031463460177700171360ustar00rootroot00000000000000#! /usr/bin/octave -qf addpath(strcat(getenv("BART_TOOLBOX_PATH"), "/matlab")); addpath(strcat(getenv("TOOLBOX_PATH"), "/matlab")); % support old enviroment variable arg_list = argv(); data = squeeze(readcfl(arg_list{1})); imshow3(abs(data), []); pause; libbart-devel/scripts/phantom.sh000077500000000000000000000140771463460177700173160ustar00rootroot00000000000000#!/bin/bash # Copyright 2022. TU Graz. Institute of Biomedical Imaging. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. 
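#
# Example call (output name illustrative): simulate the default NIST
# geometry in k-space with 8 simulated coils on the default radial
# trajectory:
#
#   ./phantom.sh -k -s 8 phantom_ksp
#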
# # Author: # 2022 Nick Scholand # # Creation of digital reference object. set -e LOGFILE=/dev/stdout KSPACE=false SENS=1 ROT_ANGLE=0 ROT_STEPS=1 GEOM=NIST title=$(cat <<- EOF Digital Reference Object EOF ) helpstr=$(cat <<- EOF -S \t\t Diagnostic Sonar geometry (NIST phantom is default) -k \t\t simulate in k-space -a d \t\t angle of rotation -r d \t\t number of rotation steps -s d \t\t number of simulated coils -t \t define custom trajectory file -l \t\t logfile -h \t\t help Please adjust simulation parameters inside the script. EOF ) usage="Usage: $0 [-h] [-k] [-r d] [-s d] [-t ] " echo "$title" echo while getopts "hSka:r:s:t:l:" opt; do case $opt in h) echo "$usage" echo echo -e "$helpstr" exit 0 ;; S) GEOM=SONAR ;; k) KSPACE=true ;; a) ROT_ANGLE=$OPTARG ;; r) ROT_STEPS=$OPTARG ;; s) SENS=$OPTARG ;; t) TRAJ=$(readlink -f "$OPTARG") ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# != 1 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" output=$(readlink -f "$1") # Tests for usefull input if [ ! -z "${TRAJ}" ] && [ "$KSPACE" = false ]; then echo "Trajectory only works in k-space domain. Please add [-k]!" >&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { case $GEOM in NIST) echo "NIST Phantom Geometry" echo "T2 Sphere of Model 130" echo "Relaxation Paramters for 3 T" echo "" ## Relaxation parameters for T2 Sphere of NIST phantom at 3 T (Model 130) ## Stupic, KF, Ainslie, M, Boss, MA, et al. ## A standard system phantom for magnetic resonance imaging. ## Magn Reson Med. 2021; 86: 1194– 1211. https://doi.org/10.1002/mrm.28779 T1=(3 2.48 2.173 1.907 1.604 1.332 1.044 0.802 0.609 0.458 0.337 0.244 0.177 0.127 0.091) T2=(1 0.581 0.404 0.278 0.191 0.133 0.097 0.064 0.046 0.032 0.023 0.016 0.011 0.008 0.006) ;; SONAR) echo "Diagnostic Sonar Phantom Geometry" echo "Eurospin II" echo "Gels: 3, 4, 7, 10, 14, and 16" echo "" ## Relaxation parameters for Diagnostic Sonar phantom ## Eurospin II, gel nos 3, 4, 7, 10, 14, and 16) ## T1 from reference measurements in ## Wang, X., Roeloffs, V., Klosowski, J., Tan, Z., Voit, D., Uecker, M. and Frahm, J. (2018), ## Model-based T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH. ## Magn. Reson. Med, 79: 730-740. https://doi.org/10.1002/mrm.26726 ## T2 from ## T. J. Sumpf, A. Petrovic, M. Uecker, F. Knoll and J. Frahm, ## Fast T2 Mapping With Improved Accuracy Using Undersampled Spin-Echo MRI and Model-Based Reconstructions With a Generating Function ## IEEE Transactions on Medical Imaging, vol. 33, no. 12, pp. 2213-2222, Dec. 2014, doi: 10.1109/TMI.2014.2333370. 
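#
# (The T1/T2 arrays give relaxation times in seconds, one entry per
# simulated tube; cf. the per-tube log output in the simulation loop
# below.)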
T1=(3 0.311 0.458 0.633 0.805 1.1158 1.441 3) T2=(1 0.046 0.081 0.101 0.132 0.138 0.166 1) ;; *) echo -n "Unknown geometry!\n" exit 1 ;; esac # Simulation Parameters # Run `bart sim --seq h` for more details SEQ=IR-FLASH # Sequence Type TR=0.0034 # Repetition Time [s] TE=0.0021 # Echo Time [s] REP=600 # Number of repetitions IPL=0.01 # Inversion Pulse Length [s] ISP=0.005 # Inversion Spoiler Gradient Length [s] PPL=0 # Preparation Pulse Length [s] TRF=0.001 # Pulse Duration [s] FA=6 # Flip Angle [degree] BWTP=4 # Bandwidth-Time-Product OFF=0 # Off-Resonance [rad/s] SLGRAD=0 # Slice Selection Gradient Strength [T/m] SLTHICK=0 # Thickness of Simulated Slice [m] NSPINS=1 # Number of Simulated Spins # Run Simulation for i in `seq 0 $((${#T1[@]}-1))`; do echo -e "Tube $i\t T1: ${T1[$i]} s,\tT2[$i]: ${T2[$i]} s" bart sim --ODE \ --seq $SEQ,TR=$TR,TE=$TE,Nrep=$REP,ipl=$IPL,isp=$ISP,ppl=$PPL,Trf=$TRF,FA=$FA,BWTP=$BWTP,off=$OFF,sl-grad=$SLGRAD,slice-thickness=$SLTHICK,Nspins=$NSPINS \ -1 ${T1[$i]}:${T1[$i]}:1 -2 ${T2[$i]}:${T2[$i]}:1 \ _simu$(printf "%02d" $i) done # Join individual simulations bart join 7 $(ls _simu*.cfl | sed -e 's/\.cfl//') simu # Join simulations in a single dimension (-> 6) bart reshape $(bart bitmask 6 7) ${#T1[@]} 1 simu simu2 # Create Geometry if [ -z "${TRAJ}" ]; then if $KSPACE; then # Create default trajectory DIM=192 SPOKES=$((DIM-1)) bart traj -x $DIM -y $SPOKES traj bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE -t traj geom else bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE geom fi else if $KSPACE; then bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE -k -t ${TRAJ} geom else bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE geom fi fi # Combine simulated signal and geometry bart fmac -s $(bart bitmask 6) geom simu2 $output } > $LOGFILE [ -d $WORKDIR ] && rm -rf $WORKDIR exit 0 libbart-devel/scripts/profile.sh000066400000000000000000000024051463460177700172750ustar00rootroot00000000000000#!/bin/sh set -e usage="Usage: $0 " helpstr=$(cat <<- EOF Postprocess debugging output from BART to extract profiling information and to translate pointer values to symbol names. -h help EOF ) while getopts "h" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi in=$(readlink -f "$1") out=$(readlink -f "$2") if [ ! -e $input ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR nm --defined-only "$BART_TOOLBOX_PATH"/bart | cut -c11-16,19- | sort > bart.syms cat $in | grep "^TRACE" \ | grep " 0x" \ | cut -c7-23,25-31,34- \ | sort -k3 \ | join -11 -23 bart.syms - \ | cut -c8- \ | sort -k2 > $out libbart-devel/scripts/radial_dcf.sh000066400000000000000000000021641463460177700177070ustar00rootroot00000000000000#!/bin/bash if [ ! 
-e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" # oversampled radial trajectory bart traj -r -y55 -x256 traj_tmp bart scale 0.5 traj_tmp traj # simulate k-space bart phantom -t traj ksp # compute Ram-Lak filter bart rss 1 traj ramlak # apply to data bart fmac ksp ramlak ksp_filt # adjoint nufft bart nufft -a traj ksp img bart nufft -a traj ksp_filt img_filt # grid and degrid ones bart ones 3 1 256 55 ones bart nufft -a traj ones dens_tmp bart nufft traj dens_tmp density # sqrt bart spow -- -1. density dcf # inv sqrt bart spow -- -0.5 density sqdcf # adjoint nufft bart fmac dcf ksp ksp_filt2 bart nufft -a traj ksp_filt2 img_filt2 # one channel all ones sensititty bart ones 3 256 256 1 sens # without dcf bart pics -i30 -t traj ksp sens img_pics_i30 bart pics -i3 -t traj ksp sens img_pics_i3 # with dcf bart pics -i30 -t traj -p sqdcf ksp sens img_pics_dcf_i30 bart pics -i3 -t traj -p sqdcf ksp sens img_pics_dcf_i3 libbart-devel/scripts/rovir.sh000077500000000000000000000046121463460177700170030ustar00rootroot00000000000000#!/bin/bash # Copyright 2023. TU Graz. Institute of Biomedical Imaging. # Author: Moritz Blumenthal # # Kim, D, Cauley, SF, Nayak, KS, Leahy, RM, Haldar, JP. # Region-optimized virtual (ROVir) coils: Localization and/or # suppression of spatial regions using sensor-domain beamforming. # Magn Reson Med. 2021; 86: 197–212. # set -eu helpstr=$(cat <<- EOF Compute coil compression following the ROVir method. Signal to be compressed Mask (1/0) for region of interest to be optimized for. Defines also low resolution image. Compressed signal or coefficient matrix -p N compress to N virtual channels -t file trajectory -B file subspace basis -M output coefficients -g use GPU -h help EOF ) usage="Usage: $0 [-h] [-g] [-t ][-B ] " CC="" GPU="" TRAJ="" BASIS="" COEFFS=0 while getopts "hgB:t:Mp:" opt; do case $opt in g) GPU=" -g" ;; t) TRAJ=$(readlink -f "$OPTARG") ;; p) CC=" -p $OPTARG" ;; M) COEFFS=1 ;; B) BASIS="-B $(readlink -f "$OPTARG")" ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -ne 4 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" 
>&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" sig=$(readlink -f "$1") pos=$(readlink -f "$2") neg=$(readlink -f "$3") out=$(readlink -f "$4") WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR if [ -z "$TRAJ" ] ; then DIMS="0 $(bart show -d 0 $pos) 1 $(bart show -d 1 $pos) 2 $(bart show -d 2 $pos)" bart resize -c $DIMS $sig res bart fft -i 7 res img bart fmac img $pos pos bart fmac img $neg neg else DIMS="$(bart show -d 0 $pos):$(bart show -d 1 $pos):$(bart show -d 2 $pos)" bart nufftbase $DIMS $TRAJ pat bart nufft $BASIS -p pat -i -x$DIMS $TRAJ $sig cim bart fmac cim $pos ipos bart fmac cim $neg ineg bart nufft $BASIS -ppat $TRAJ ipos pos bart nufft $BASIS -ppat $TRAJ ineg neg fi bart rovir pos neg compress if [[ "$COEFFS" -eq 1 ]]; then bart copy compress $out else bart ccapply $CC $sig compress $out fi libbart-devel/scripts/rtnlinv.m000066400000000000000000000026441463460177700171600ustar00rootroot00000000000000% 2015, Martin Uecker % % Example script to use BART for the initial preprocessing % (gridding) which is required - but not included - in the % original Matlab RT-NLINV example. The example is for a % single frame, but this should also work in a similar way % for the RT-NLINV2 code which reconstructs a time-series % of images from highly undersampled data using temporal % regularization. % % Links to the Matlab code can be found here: % http://www.eecs.berkeley.edu/~uecker/toolbox.html % % References: % % Uecker M et al., Nonlinear Inverse Reconstruction for Real-time MRI % of the Human Heart Using Undersampled Radial FLASH, % MRM 63:1456-1462 (2010) % % Uecker M et al., Real-time magnetic resonance imaging at 20 ms % resolution, NMR in Biomedicine 23: 986-994 (2010) % % data set is included in the IRGNTV example A = load('radial_cardiac_25_projections.mat'); % re-format trajectory for BART t = zeros(3, 256, 25); t(1,:,:) = real(A.k) * 384.; t(2,:,:) = imag(A.k) * 384.; % use adjoint nufft to interpolate data onto Cartesia grid adj = bart('nufft -d384:384:1 -a ', t, reshape(A.rawdata, [1 256 25 12])); % compute point-spread function psf = bart('nufft -d384:384:1 -a ', t, ones(1, 256, 25)); % transform back to k-space adjk = bart('fft -u 7', adj); psfk = bart('fft -u 7', psf); % use nlinv from RT-NLINV (nlinv2) matlab package R = nlinv(squeeze(adjk), squeeze(psfk) * 1., 9, 'noncart'); libbart-devel/src/000077500000000000000000000000001463460177700144005ustar00rootroot00000000000000libbart-devel/src/avg.c000066400000000000000000000025751463460177700153320ustar00rootroot00000000000000/* Copyright 2014-2016. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. 
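 *
 * Example call (illustrative): average along dimension 3,
 *
 *	bart avg $(bart bitmask 3) input output
 *
 * or pass -w to compute a weighted average instead.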
* * Authors: * 2014 Frank Ong */ #include #include "num/multind.h" #include "num/flpmath.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/opts.h" #ifndef DIMS #define DIMS 16 #endif static const char help_str[] = "Calculates (weighted) average along dimensions specified by bitmask."; int main_avg(int argc, char* argv[argc]) { unsigned long flags = 0; const char* in_file = NULL; const char* out_file = NULL; struct arg_s args[] = { ARG_ULONG(true, &flags, "bitmask"), ARG_INFILE(true, &in_file, "input"), ARG_OUTFILE(true, &out_file, "output"), }; bool wavg = false; const struct opt_s opts[] = { OPT_SET('w', &wavg, "weighted average"), }; cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts); num_init(); int N = DIMS; long idims[N]; complex float* data = load_cfl(in_file, N, idims); long odims[N]; md_select_dims(N, ~flags, odims, idims); complex float* out = create_cfl(out_file, N, odims); (wavg ? md_zwavg : md_zavg)(N, idims, flags, out, data); unmap_cfl(N, idims, data); unmap_cfl(N, odims, out); return 0; } libbart-devel/src/bart.c000066400000000000000000000243601463460177700155010ustar00rootroot00000000000000/* Copyright 2015. The Regents of the University of California. * Copyright 2015-2021. Martin Uecker. * Copyright 2018. Damien Nguyen. * Copyright 2023. Institute of Biomedical Imaging. TU Graz. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. */ #include #include #include #include #include #include #include #include #ifdef _WIN32 #include "win/fmemopen.h" #include "win/basename_patch.h" #endif #include "misc/io.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "misc/version.h" #include "misc/debug.h" #include "misc/cppmap.h" #include "num/mpi_ops.h" #include "num/multind.h" #ifdef USE_MPI #include #endif #ifdef _OPENMP #include #endif #ifdef USE_CUDA #include "num/gpuops.h" #endif #ifdef USE_LOCAL_FFTW #include "fftw3_local.h" #define MANGLE(name) local_ ## name #else #include #define MANGLE(name) name #endif #include "main.h" // also check in commands/ subdir at the bart exe location #define CHECK_EXE_COMMANDS #ifndef DIMS #define DIMS 16 #endif extern FILE* bart_output; // src/misc.c static void bart_exit_cleanup(void) { if (NULL != command_line) XFREE(command_line); io_memory_cleanup(); opt_free_strdup(); #ifdef FFTWTHREADS MANGLE(fftwf_cleanup_threads)(); #endif #ifdef USE_CUDA cuda_memcache_clear(); #endif } typedef int (main_fun_t)(int argc, char* argv[]); struct { main_fun_t* main_fun; const char* name; } dispatch_table[] = { #define DENTRY(x) { main_ ## x, # x }, MAP(DENTRY, MAIN_LIST) #undef DENTRY { NULL, NULL } }; static const char help_str[] = "BART. command line flags"; static void usage(void) { printf("BART. Available commands are:"); for (int i = 0; NULL != dispatch_table[i].name; i++) { if (0 == i % 6) printf("\n"); printf("%-12s", dispatch_table[i].name); } printf("\n"); } static int bart_exit(int err_no, const char* exit_msg) { if (0 != err_no) { if (NULL != exit_msg) debug_printf(DP_ERROR, "%s\n", exit_msg); #ifdef USE_MPI MPI_Abort(MPI_COMM_WORLD, err_no); #endif } return err_no; } static void parse_bart_opts(int* argcp, char*** argvp) { int omp_threads = 1; unsigned long flags = 0; unsigned long pflags = 0; long param_start[DIMS] = { [0 ... DIMS - 1] = -1 }; long param_end[DIMS] = { [0 ... 
DIMS - 1] = -1 }; const char* ref_file = NULL; bool use_mpi = false; bool version = false; struct arg_s args[] = { }; struct opt_s opts[] = { OPTL_ULONG('l', "loop", &(flags), "flag", "Flag to specify dimensions for looping"), OPTL_ULONG('p', "parallel-loop", &(pflags), "flag", "Flag to specify dimensions for looping and activate parallelization"), OPTL_VECN('s', "start", param_start, "Start index of range for looping (default: 0)"), OPTL_VECN('e', "end", param_end, "End index of range for looping (default: start + 1)"), OPTL_INT('t', "threads", &omp_threads, "nthreads", "Set threads for parallelization"), OPTL_INFILE('r', "ref-file", &ref_file, "", "Obtain loop size from reference file"), OPTL_SET('M', "mpi", &use_mpi, "Initialize MPI"), OPT_SET('S', &mpi_shared_files, "Maps files from each rank (requires shared files system)"), OPTL_SET(0, "version", &version, "print version"), }; int next_arg = options(argcp, *argvp, "", help_str, ARRAY_SIZE(opts), opts, ARRAY_SIZE(args), args, true); if (version) debug_printf(DP_INFO, "%s\n", bart_version); *argcp -= next_arg; *argvp += next_arg; if (0 != flags && 0 != pflags && flags != pflags) error("Inconsistent use of -p and -l!\n"); flags |= pflags; if (1 == omp_threads && 0 != pflags) omp_threads = 0; const char* ompi_str; if (NULL != (ompi_str = getenv("OMPI_COMM_WORLD_SIZE"))) { unsigned long mpi_ranks = strtoul(ompi_str, NULL, 10); if (1 < mpi_ranks) use_mpi = true; } if (use_mpi) init_mpi(argcp, argvp); if (NULL != ref_file) { long ref_dims[DIMS]; const void* tmp = load_cfl(ref_file, DIMS, ref_dims); unmap_cfl(DIMS, ref_dims, tmp); assert(-1 == param_end[0]); for (int i =0, ip = 0; i < DIMS; i++) if (MD_IS_SET(flags, i)) param_end[ip++] = ref_dims[i]; } opt_free_strdup(); int nstart = 0; int nend = 0; for(; nstart < DIMS && -1 != param_start[nstart]; nstart++); for(; nend < DIMS && -1 != param_end[nend]; nend++); if (0 != nstart && bitcount(flags) != nstart) error("Size of start values does not coincide with number of selected flags!\n"); if (0 != nend && bitcount(flags) != nend) error("Size of start values does not coincide with number of selected flags!\n"); if (0 == nstart) for (int i = 0; i < bitcount(flags); i++) param_start[i] = 0; if (0 == nend) for (int i = 0; i < bitcount(flags); i++) param_end[i] = param_start[i] + 1; long offs_size[DIMS] = { [0 ... DIMS - 1] = 0 }; long loop_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; for (int i = 0, j = 0; i < DIMS; ++i) { if (MD_IS_SET(flags, i)) { offs_size[i] = param_start[j]; loop_dims[i] = param_end[j] - param_start[j]; j++; } } #ifdef _OPENMP if (0 == omp_threads) { if (NULL == getenv("OMP_NUM_THREADS")) omp_set_num_threads(omp_get_num_procs()); omp_threads = omp_get_max_threads(); } #endif omp_threads = MAX(omp_threads, 1); omp_threads = MIN(omp_threads, md_calc_size(DIMS, loop_dims)); if (1 < mpi_get_num_procs()) omp_threads = 1; init_cfl_loop_desc(DIMS, loop_dims, offs_size, flags, omp_threads, 0); } static int batch_wrapper(main_fun_t* dispatch_func, int argc, char *argv[argc], long pos) { char* thread_argv[argc + 1]; char* thread_argv_save[argc]; for(int m = 0; m < argc; m++) { thread_argv[m] = strdup(argv[m]); thread_argv_save[m] = thread_argv[m]; } thread_argv[argc] = NULL; set_cfl_loop_index(pos); int ret = (*dispatch_func)(argc, thread_argv); io_memory_cleanup(); for(int m = 0; m < argc; ++m) free(thread_argv_save[m]); return ret; } int main_bart(int argc, char* argv[argc]) { char* bn = basename(argv[0]); // only skip over initial bart or bart.exe. calling "bart bart" is an error. 
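	// (Consequently a tool can also be invoked through a symlink named
	// after it: basename(argv[0]) then matches an entry of dispatch_table
	// directly, below.)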
if (0 == strcmp(bn, "bart") || 0 == strcmp(bn, "bart.exe")) { if (1 == argc) { usage(); return -1; } // This advances argv to behind the bart options parse_bart_opts(&argc, &argv); bn = basename(argv[0]); } main_fun_t* dispatch_func = NULL; for (int i = 0; NULL != dispatch_table[i].name; i++) if (0 == strcmp(bn, dispatch_table[i].name)) dispatch_func = dispatch_table[i].main_fun; bool builtin_found = (NULL != dispatch_func); if (builtin_found) { debug_printf(DP_DEBUG3, "Builtin found: %s\n", bn); unsigned int v[5]; version_parse(v, bart_version); if (0 != v[4]) debug_printf(DP_WARN, "BART version %s is not reproducible.\n", bart_version); int final_ret = 0; if (cfl_loop_omp()) { // gomp does only use a thread pool for non-nested parallelism! // Threads are spwaned dynamically with a performance penality for md_functions, // if we have an outer parallel region even if it is inactive. #ifdef USE_CUDA cuda_set_stream_level(); #endif #pragma omp parallel num_threads(cfl_loop_num_workers()) { long start = cfl_loop_worker_id(); long total = cfl_loop_desc_total(); long workers = cfl_loop_num_workers(); for (long i = start; ((i < total) && (0 == final_ret)); i += workers) { int ret = batch_wrapper(dispatch_func, argc, argv, i); if (0 != ret) { #pragma omp critical (main_end_condition) final_ret = ret; bart_exit(ret, "Tool exited with error"); } } } } else { long start = cfl_loop_worker_id(); long total = cfl_loop_desc_total(); long workers = cfl_loop_num_workers(); mpi_signoff_proc(cfl_loop_desc_active() && (mpi_get_rank() >= total)); for (long i = start; ((i < total) && (0 == final_ret)); i += workers) { int ret = batch_wrapper(dispatch_func, argc, argv, i); int tag = ((((i + workers) < total) || (0 != ret)) ? 1 : 0); mpi_signoff_proc(cfl_loop_desc_active() && (0 == tag)); if (0 != ret) { final_ret = ret; bart_exit(ret, "Tool exited with error"); } } } deinit_mpi(); bart_exit_cleanup(); return final_ret; } else { // could not find any builtin // try to find something in commands debug_printf(DP_DEBUG3, "No builtin found: %s\n", argv[0]); #ifdef CHECK_EXE_COMMANDS // also check PATH_TO_BART/../commands/: char exe_loc[1024] = {0}; ssize_t exe_loc_size = ARRAY_SIZE(exe_loc); ssize_t rl = readlink("/proc/self/exe", exe_loc, (size_t)exe_loc_size); char* exe_dir = NULL; if ((-1 != rl) && (exe_loc_size != rl)) { // readlink returned without error and did not truncate exe_dir = dirname(exe_loc); // no need to check for NULL, as in that case, we skip it in the loop below } #endif const char* tpath[] = { #ifdef CHECK_EXE_COMMANDS exe_dir, #endif getenv("BART_TOOLBOX_PATH"), getenv("TOOLBOX_PATH"), // support old environment variable "/usr/local/lib/bart/", "/usr/lib/bart/", }; for (int i = 0; i < (int)ARRAY_SIZE(tpath); i++) { if (NULL == tpath[i]) continue; size_t len = strlen(tpath[i]) + strlen(bn) + 10 + 1; // extra space for /commands/ and null-terminator char (*cmd)[len] = xmalloc(sizeof *cmd); int r = snprintf(*cmd, len, "%s/commands/%s", tpath[i], bn); if (r >= (int)len) { error("Commandline too long\n"); return bart_exit(1, NULL); // not really needed, error calls abort() } debug_printf(DP_DEBUG3, "Trying: %s\n", cmd); if (-1 == execv(*cmd, argv)) { if (ENOENT != errno) { error("Executing bart command failed\n"); return bart_exit(1, NULL); // not really needed, error calls abort() } } else { assert(0); // unreachable } xfree(cmd); } fprintf(stderr, "Unknown bart command: \"%s\".\n", bn); return bart_exit(-1, NULL); } } int bart_command(int len, char* buf, int argc, char* argv[]) { int save = 
	int save = debug_level;

	if (NULL != buf) {

		buf[0] = '\0';
		bart_output = fmemopen(buf, (size_t)len, "w");
	}

	int ret = error_catcher(main_bart, argc, argv);

	bart_exit_cleanup();

	debug_level = save;

	if (NULL != bart_output) {
#ifdef _WIN32
		rewind(bart_output);
		fread(buf, 1, len, bart_output);
#endif
		fclose(bart_output);	// write final nul
		bart_output = NULL;
	}

	return ret;
}
libbart-devel/src/bart_embed_api.h000066400000000000000000000117041463460177700174710ustar00rootroot00000000000000
#ifndef BART_API_H_INCLUDED
#define BART_API_H_INCLUDED

#ifdef __cplusplus
extern "C" {
#endif

//! BART's current debug level
extern int debug_level;

//! Load the content of some in-memory CFL
/*!
 * This function will load the data from some named in-memory CFL and return
 * its data.
 * The dimensions array will be modified to match those of the CFL.
 *
 * \param name		Name used to refer to the in-memory CFL
 * \param D		Size of the dimensions array (should be < 16)
 * \param dimensions	Array holding the dimensions of the data
 *			(will be modified)
 *
 * \return Pointer to the data or NULL if no matching in-memory CFL file
 *	was found
 */
void* load_mem_cfl(const char* name, unsigned int D, long dimensions[]);

//! Register some memory into the list of in-memory CFL files
/*!
 * This function handles data that was allocated using the C malloc(...) function.
 * It takes *ownership* of the data and will free it using free(...)
 *
 * \param name		Name which will be used to refer to the created in-mem CFL
 * \param D		Size of the dimensions array (should be < 16)
 * \param dimensions	Array holding the dimensions of the data
 * \param ptr		Pointer to the data
 *
 * \note The underlying data type of ptr is assumed to be complex floats
 *	(complex float or _Complex float)
 *
 * \warning Calling this function on data allocated with new[] will result
 *	in undefined behaviour!
 *
 * \warning Be aware that if MEMONLY_CFL is not defined, names that do not
 *	end with the '.mem' extension will be unreachable by user code
 */
void register_mem_cfl_malloc(const char* name, unsigned int D, const long dimensions[], void* ptr);

//! Register some memory into the list of in-memory CFL files
/*!
 * This function handles data that was allocated using the C++ new[] operator.
 * It takes *ownership* of the data and will free it using delete[]
 *
 * \param name		Name which will be used to refer to the created in-mem CFL
 * \param D		Size of the dimensions array (should be < 16)
 * \param dimensions	Array holding the dimensions of the data
 * \param ptr		Pointer to the data
 *
 * \note The underlying data type of ptr is assumed to be complex floats
 *	(complex float or _Complex float)
 *
 * \warning Calling this function on data allocated with malloc will
 *	result in undefined behaviour!
 *
 * \warning Be aware that if MEMONLY_CFL is not defined, names that do not
 *	end with the '.mem' extension will be unreachable by user code
 */
void register_mem_cfl_new(const char* name, unsigned int D, const long dimensions[], void* ptr);

//! Register some memory into the list of in-memory CFL files
/*!
 * This function handles data that was allocated by the user and whose
 * lifetime the user wishes to keep control of.
 * It does *not* take ownership of the data.
 *
 * \param name		Name which will be used to refer to the created in-mem CFL
 * \param D		Size of the dimensions array (should be < 16)
 * \param dimensions	Array holding the dimensions of the data
 * \param ptr		Pointer to the data
 *
 * \note The underlying data type of ptr is assumed to be complex floats
 *	(complex float or _Complex float)
 *
 * \warning Be aware that if MEMONLY_CFL is not defined, names that do not
 *	end with the '.mem' extension will be unreachable by user code
 */
void register_mem_cfl_non_managed(const char* name, unsigned int D, const long dims[], void* ptr);

//! BART's main function
/*!
 * This function will execute the BART command specified in argv[0]
 *
 * If applicable, the output of the BART command will be returned into
 * out. This applies to:
 *	- bitmask
 *	- estdims
 *	- estvar
 *	- nrmse
 *	- sdot
 *	- show
 *	- version
 *
 * If out is not NULL, outputs of the above commands are redirected to out
 *
 * \param len	Size of the out buffer
 * \param out	Should be either NULL or point to a valid array of characters
 * \param argc	Same as for the main function
 * \param argv	Same as for the main function
 *
 * \warning Be aware that if MEMONLY_CFL is not defined, names that do not
 *	end with the '.mem' extension will be unreachable by user code
 */
int bart_command(int len, char* out, int argc, char* argv[]);
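/*
 * Illustrative sketch of typical embedding usage (names and sizes are
 * examples only, not part of this API; assumes 'data' is a caller-owned
 * buffer of 128 * 128 complex floats):
 *
 *	long dims[2] = { 128, 128 };
 *	register_mem_cfl_non_managed("input.mem", 2, dims, data);
 *
 *	char* cmd_argv[] = { "fft", "3", "input.mem", "output.mem" };
 *	int ret = bart_command(0, NULL, 4, cmd_argv);
 *
 *	long odims[2];
 *	_Complex float* out = load_mem_cfl("output.mem", 2, odims);
 *
 *	// ... use 'out', then release all in-memory CFLs:
 *	deallocate_all_mem_cfl();
 */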
//! Deallocate any memory CFLs
/*!
 * \note It is safe to call this function multiple times.
 */
void deallocate_all_mem_cfl();

#ifdef __cplusplus
}
#endif

#endif	//BART_API_H_INCLUDED
libbart-devel/src/bbox.c000066400000000000000000000000601463460177700154720ustar00rootroot00000000000000
#define main_bart main_bbox
#include "bart.c"
libbart-devel/src/bench.c000066400000000000000000000353411463460177700156310ustar00rootroot00000000000000
/* Copyright 2014. The Regents of the University of California.
 * Copyright 2015-2018. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2014-2018 Martin Uecker
 * 2014 Jonathan Tamir
 */

#include <stdbool.h>
#include <stdlib.h>
#include <complex.h>
#include <stdio.h>
#include <assert.h>

#include "num/multind.h"
#include "num/flpmath.h"
#include "num/rand.h"
#include "num/init.h"
#include "num/ops_p.h"
#include "num/mdfft.h"
#include "num/fft.h"

#include "wavelet/wavthresh.h"

#include "misc/debug.h"
#include "misc/misc.h"
#include "misc/mmio.h"
#include "misc/opts.h"

#define DIMS 8

static double bench_generic_copy(long dims[DIMS])
{
	long strs[DIMS];
	md_calc_strides(DIMS, strs, dims, CFL_SIZE);

	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);

	md_gaussian_rand(DIMS, dims, x);

	double tic = timestamp();

	md_copy2(DIMS, dims, strs, y, strs, x, CFL_SIZE);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_generic_matrix_multiply(long dims[DIMS])
{
	long dimsX[DIMS];
	long dimsY[DIMS];
	long dimsZ[DIMS];

#if 1
	md_select_dims(DIMS, 2 * 3 + 17, dimsX, dims);	// 1 110 1
	md_select_dims(DIMS, 2 * 6 + 17, dimsY, dims);	// 1 011 1
	md_select_dims(DIMS, 2 * 5 + 17, dimsZ, dims);	// 1 101 1
#else
	md_select_dims(DIMS, 2 * 5 + 17, dimsZ, dims);	// 1 101 1
	md_select_dims(DIMS, 2 * 3 + 17, dimsY, dims);	// 1 110 1
	md_select_dims(DIMS, 2 * 6 + 17, dimsX, dims);	// 1 011 1
#endif

	complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE);
	complex float* z = md_alloc(DIMS, dimsZ, CFL_SIZE);

	md_gaussian_rand(DIMS, dimsX, x);
	md_gaussian_rand(DIMS, dimsY, y);

	double tic = timestamp();

	md_ztenmul(DIMS, dimsZ, z, dimsX, x, dimsY, y);

	double toc = timestamp();

	md_free(x);
	md_free(y);
	md_free(z);

	return toc - tic;
}
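/*
 * A note on the flag values above (explanatory, not from the original source):
 * md_select_dims() keeps the dimensions whose bits are set and reduces all
 * others to size 1. E.g. 2 * 3 + 17 == 23 == 0b10111 keeps dims {0, 1, 2, 4},
 * so X carries dims 1 and 2, Y carries dims 2 and 3, and Z carries dims 1
 * and 3: md_ztenmul() contracts the shared dim 2 (a matrix-matrix product),
 * batched over dim 4 where it is larger than 1.
 */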
static double bench_generic_add(long dims[DIMS], unsigned long flags, bool forloop)
{
	long dimsX[DIMS];
	long dimsY[DIMS];
	long dimsC[DIMS];

	md_select_dims(DIMS, flags, dimsX, dims);
	md_select_dims(DIMS, ~flags, dimsC, dims);
	md_select_dims(DIMS, ~0UL, dimsY, dims);

	long strsX[DIMS];
	long strsY[DIMS];

	md_calc_strides(DIMS, strsX, dimsX, CFL_SIZE);
	md_calc_strides(DIMS, strsY, dimsY, CFL_SIZE);

	complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE);

	md_gaussian_rand(DIMS, dimsX, x);
	md_gaussian_rand(DIMS, dimsY, y);

	long L = md_calc_size(DIMS, dimsC);
	long T = md_calc_size(DIMS, dimsX);

	double tic = timestamp();

	if (forloop) {

		for (long i = 0; i < L; i++) {

			for (long j = 0; j < T; j++)
				y[i + j * L] += x[j];
		}

	} else {

		md_zaxpy2(DIMS, dims, strsY, y, 1., strsX, x);
	}

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_generic_sum(long dims[DIMS], unsigned long flags, bool forloop)
{
	long dimsX[DIMS];
	long dimsY[DIMS];
	long dimsC[DIMS];

	md_select_dims(DIMS, ~0UL, dimsX, dims);
	md_select_dims(DIMS, flags, dimsY, dims);
	md_select_dims(DIMS, ~flags, dimsC, dims);

	long strsX[DIMS];
	long strsY[DIMS];

	md_calc_strides(DIMS, strsX, dimsX, CFL_SIZE);
	md_calc_strides(DIMS, strsY, dimsY, CFL_SIZE);

	complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE);

	md_gaussian_rand(DIMS, dimsX, x);
	md_clear(DIMS, dimsY, y, CFL_SIZE);

	long L = md_calc_size(DIMS, dimsC);
	long T = md_calc_size(DIMS, dimsY);

	double tic = timestamp();

	if (forloop) {

		for (long i = 0; i < L; i++) {

			for (long j = 0; j < T; j++)
				y[j] = y[j] + x[i + j * L];
		}

	} else {

		md_zaxpy2(DIMS, dims, strsY, y, 1., strsX, x);
	}

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}
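/*
 * Explanatory note (not from the original source): in the md_zaxpy2() calls
 * above, BART assigns stride 0 to dimensions of size 1, so the flag-selected
 * array is virtually broadcast over the full dims. With the zero strides on
 * the input x (bench_generic_add) the same x is added to every slice of y;
 * with the zero strides on the output y (bench_generic_sum) the call
 * accumulates all slices of x into y, i.e. a reduction.
 */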
static double bench_copy1(long scale)
{
	long dims[DIMS] = { 1, 128 * scale, 128 * scale, 1, 1, 16, 1, 16 };
	return bench_generic_copy(dims);
}

static double bench_copy2(long scale)
{
	long dims[DIMS] = { 262144 * scale, 16, 1, 1, 1, 1, 1, 1 };
	return bench_generic_copy(dims);
}

static double bench_matrix_mult(long scale)
{
	long dims[DIMS] = { 1, 256 * scale, 256 * scale, 256 * scale, 1, 1, 1, 1 };
	return bench_generic_matrix_multiply(dims);
}

static double bench_batch_matmul1(long scale)
{
	long dims[DIMS] = { 30000 * scale, 8, 8, 8, 1, 1, 1, 1 };
	return bench_generic_matrix_multiply(dims);
}

static double bench_batch_matmul2(long scale)
{
	long dims[DIMS] = { 1, 8, 8, 8, 30000 * scale, 1, 1, 1 };
	return bench_generic_matrix_multiply(dims);
}

static double bench_tall_matmul1(long scale)
{
	long dims[DIMS] = { 1, 8, 8, 100000 * scale, 1, 1, 1, 1 };
	return bench_generic_matrix_multiply(dims);
}

static double bench_tall_matmul2(long scale)
{
	long dims[DIMS] = { 1, 100000 * scale, 8, 8, 1, 1, 1, 1 };
	return bench_generic_matrix_multiply(dims);
}

static double bench_add(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_add(dims, MD_BIT(2), false);
}

static double bench_addf(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_add(dims, MD_BIT(2), true);
}

static double bench_add2(long scale)
{
	long dims[DIMS] = { 50 * scale, 1, 65536 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_add(dims, MD_BIT(0), false);
}

static double bench_sum2(long scale)
{
	long dims[DIMS] = { 50 * scale, 1, 65536 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_sum(dims, MD_BIT(0), false);
}

static double bench_sum(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_sum(dims, MD_BIT(2), false);
}

static double bench_sumf(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_sum(dims, MD_BIT(2), true);
}

static double bench_zmul(long scale)
{
	long dimsx[DIMS] = { 256, 256, 1, 1, 90 * scale, 1, 1, 1 };
	long dimsy[DIMS] = { 256, 256, 1, 1, 1, 1, 1, 1 };
	long dimsz[DIMS] = { 1, 1, 1, 1, 90 * scale, 1, 1, 1 };

	complex float* x = md_alloc(DIMS, dimsx, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dimsy, CFL_SIZE);
	complex float* z = md_alloc(DIMS, dimsz, CFL_SIZE);

	md_gaussian_rand(DIMS, dimsy, y);
	md_gaussian_rand(DIMS, dimsz, z);

	long strsx[DIMS];
	long strsy[DIMS];
	long strsz[DIMS];

	md_calc_strides(DIMS, strsx, dimsx, CFL_SIZE);
	md_calc_strides(DIMS, strsy, dimsy, CFL_SIZE);
	md_calc_strides(DIMS, strsz, dimsz, CFL_SIZE);

	double tic = timestamp();

	md_zmul2(DIMS, dimsx, strsx, x, strsy, y, strsz, z);

	double toc = timestamp();

	md_free(x);
	md_free(y);
	md_free(z);

	return toc - tic;
}

static double bench_transpose(long scale)
{
	long dims[DIMS] = { 2000 * scale, 2000 * scale, 1, 1, 1, 1, 1, 1 };

	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);

	md_gaussian_rand(DIMS, dims, x);
	md_clear(DIMS, dims, y, CFL_SIZE);

	double tic = timestamp();

	md_transpose(DIMS, 0, 1, dims, y, dims, x, CFL_SIZE);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_resize(long scale)
{
	long dimsX[DIMS] = { 2000 * scale, 1000 * scale, 1, 1, 1, 1, 1, 1 };
	long dimsY[DIMS] = { 1000 * scale, 2000 * scale, 1, 1, 1, 1, 1, 1 };

	complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE);

	md_gaussian_rand(DIMS, dimsX, x);
	md_clear(DIMS, dimsY, y, CFL_SIZE);

	double tic = timestamp();

	md_resize(DIMS, dimsY, y, dimsX, x, CFL_SIZE);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_norm(int s, long scale)
{
	long dims[DIMS] = { 256 * scale, 256 * scale, 1, 16, 1, 1, 1, 1 };
#if 0
	complex float* x = md_alloc_gpu(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc_gpu(DIMS, dims, CFL_SIZE);
#else
	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);
#endif

	md_gaussian_rand(DIMS, dims, x);
	md_gaussian_rand(DIMS, dims, y);

	double tic = timestamp();

	switch (s) {

	case 0:
		md_zscalar(DIMS, dims, x, y);
		break;

	case 1:
		md_zscalar_real(DIMS, dims, x, y);
		break;

	case 2:
		md_znorm(DIMS, dims, x);
		break;

	case 3:
		md_z1norm(DIMS, dims, x);
		break;
	}

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_zscalar(long scale)
{
	return bench_norm(0, scale);
}

static double bench_zscalar_real(long scale)
{
	return bench_norm(1, scale);
}

static double bench_znorm(long scale)
{
	return bench_norm(2, scale);
}

static double bench_zl1norm(long scale)
{
	return bench_norm(3, scale);
}

static double bench_wavelet(long scale)
{
	long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 16, 1, 1, 1 };
	long minsize[DIMS] = { [0 ... DIMS - 1] = 1 };

	minsize[0] = MIN(dims[0], 16);
	minsize[1] = MIN(dims[1], 16);
	minsize[2] = MIN(dims[2], 16);

	const struct operator_p_s* p = prox_wavelet_thresh_create(DIMS, dims, 6, 0u, WAVELET_DAU2, minsize, 1.1, true);

	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	md_gaussian_rand(DIMS, dims, x);

	double tic = timestamp();

	operator_p_apply(p, 0.98, DIMS, dims, x, DIMS, dims, x);

	double toc = timestamp();

	md_free(x);
	operator_p_free(p);

	return toc - tic;
}

static double bench_generic_mdfft(long dims[DIMS], unsigned long flags)
{
	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);

	md_gaussian_rand(DIMS, dims, x);

	double tic = timestamp();

	md_fft(DIMS, dims, flags, 0u, y, x);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_mdfft(long scale)
{
	long dims[DIMS] = { 1, 128 * scale, 128 * scale, 1, 1, 4, 1, 4 };
	return bench_generic_mdfft(dims, 6ul);
}

static double bench_generic_fft(long dims[DIMS], unsigned long flags)
{
	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);

	md_gaussian_rand(DIMS, dims, x);

	double tic = timestamp();

	fft(DIMS, dims, flags, y, x);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_fft(long scale)
{
	long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 8 };
	return bench_generic_fft(dims, 6ul);
}

static double bench_generic_fftmod(long dims[DIMS], unsigned long flags)
{
	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);
	complex float* y = md_alloc(DIMS, dims, CFL_SIZE);

	md_gaussian_rand(DIMS, dims, x);

	double tic = timestamp();

	fftmod(DIMS, dims, flags, y, x);

	double toc = timestamp();

	md_free(x);
	md_free(y);

	return toc - tic;
}

static double bench_fftmod(long scale)
{
	long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 16 };
	return bench_generic_fftmod(dims, 6ul);
}

static double bench_generic_expand(int typ, long scale)
{
	long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 16 };

	complex float* x = md_alloc(DIMS, dims, CFL_SIZE);

	double tic = timestamp();

	switch (typ) {

	case 0:
		md_zfill(DIMS, dims, x, 1.);
		break;

	case 1:
		md_zsmul(DIMS, dims, x, x, 1.);
		break;

	default:
		assert(0);
	}

	double toc = timestamp();

	md_free(x);

	return toc - tic;
}

static double bench_zfill(long scale)
{
	return bench_generic_expand(0, scale);
}

static double bench_zsmul(long scale)
{
	return bench_generic_expand(1, scale);
}

enum bench_indices { REPETITION_IND, SCALE_IND, THREADS_IND, TESTS_IND, BENCH_DIMS };

typedef double (*bench_fun)(long scale);

static void do_test(const long dims[BENCH_DIMS], complex float* out, long scale,
			bench_fun fun, const char* str)
{
	printf("%30.30s |", str);

	int N = (int)dims[REPETITION_IND];

	double sum = 0.;
	double min = 1.E10;
	double max = 0.;

	for (int i = 0; i < N; i++) {

		double dt = fun(scale);

		sum += dt;
		min = MIN(dt, min);
		max = MAX(dt, max);

		printf(" %3.4f", (float)dt);
		fflush(stdout);

		assert(0 == REPETITION_IND);
		out[i] = dt;
	}

	printf(" | Avg: %3.4f Max: %3.4f Min: %3.4f\n", (float)(sum / N), max, min);
}

const struct benchmark_s {

	bench_fun fun;
	const char* str;

} benchmarks[] = {
	{ bench_add,		"add (md_zaxpy)" },
	{ bench_add2,		"add (md_zaxpy), contiguous" },
	{ bench_addf,		"add (for loop)" },
	{ bench_sum,		"sum (md_zaxpy)" },
	{ bench_sum2,		"sum (md_zaxpy), contiguous" },
	{ bench_sumf,		"sum (for loop)" },
	{ bench_zmul,		"complex mult. (md_zmul2)" },
	{ bench_transpose,	"complex transpose" },
	{ bench_resize,		"complex resize" },
	{ bench_matrix_mult,	"complex matrix multiply" },
	{ bench_batch_matmul1,	"batch matrix multiply 1" },
	{ bench_batch_matmul2,	"batch matrix multiply 2" },
	{ bench_tall_matmul1,	"tall matrix multiply 1" },
	{ bench_tall_matmul2,	"tall matrix multiply 2" },
	{ bench_zscalar,	"complex dot product" },
	{ bench_zscalar,	"complex dot product" },
	{ bench_zscalar_real,	"real complex dot product" },
	{ bench_znorm,		"l2 norm" },
	{ bench_zl1norm,	"l1 norm" },
	{ bench_copy1,		"copy 1" },
	{ bench_copy2,		"copy 2" },
	{ bench_zfill,		"complex fill" },
	{ bench_zsmul,		"complex scalar multiplication" },
	{ bench_wavelet,	"wavelet soft thresh" },
	{ bench_mdfft,		"(MD-)FFT" },
	{ bench_fft,		"FFT" },
	{ bench_fftmod,		"fftmod" },
};
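// Note (explanatory): main_bench() below selects entries of benchmarks[] by bit
// position of the -s flag, e.g. "bart bench -s 3" runs only the first two entries.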
static const char help_str[] = "Performs a series of micro-benchmarks.";

int main_bench(int argc, char* argv[argc])
{
	const char* out_file = NULL;

	struct arg_s args[] = {

		ARG_OUTFILE(false, &out_file, "output"),
	};

	bool threads = false;
	bool scaling = false;
	long flags = ~0l;

	const struct opt_s opts[] = {

		OPT_SET('T', &threads, "varying number of threads"),
		OPT_SET('S', &scaling, "varying problem size"),
		OPT_LONG('s', &flags, "flags", "select benchmarks"),
	};

	cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts);

	long dims[BENCH_DIMS] = MD_INIT_ARRAY(BENCH_DIMS, 1);
	long strs[BENCH_DIMS];
	long pos[BENCH_DIMS] = { 0 };

	dims[REPETITION_IND] = 5;
	dims[THREADS_IND] = threads ? 8 : 1;
	dims[SCALE_IND] = scaling ? 5 : 1;
	dims[TESTS_IND] = sizeof(benchmarks) / sizeof(benchmarks[0]);

	md_calc_strides(BENCH_DIMS, strs, dims, CFL_SIZE);

	bool outp = (NULL != out_file);

	complex float* out = (outp ? create_cfl : anon_cfl)(out_file, BENCH_DIMS, dims);

	num_init();

	md_clear(BENCH_DIMS, dims, out, CFL_SIZE);

	do {
		if (!(flags & (1 << pos[TESTS_IND])))
			continue;

		if (threads) {

			num_set_num_threads((int)pos[THREADS_IND] + 1);
			debug_printf(DP_INFO, "%02d threads. ", pos[THREADS_IND] + 1);
		}

		do_test(dims, &MD_ACCESS(BENCH_DIMS, strs, pos, out), pos[SCALE_IND] + 1,
				benchmarks[pos[TESTS_IND]].fun, benchmarks[pos[TESTS_IND]].str);

	} while (md_next(BENCH_DIMS, dims, ~MD_BIT(REPETITION_IND), pos));

	unmap_cfl(BENCH_DIMS, dims, out);

	return 0;
}
libbart-devel/src/bin.c000066400000000000000000000222111463460177700153120ustar00rootroot00000000000000
/* Copyright 2020. Uecker Lab. University Medical Center Göttingen.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2018-2020 Sebastian Rosenzweig
 * 2020 Martin Uecker
 */

#include <stdbool.h>
#include <complex.h>
#include <math.h>
#include <stdio.h>

#include "num/multind.h"
#include "num/init.h"
#include "num/flpmath.h"
#include "num/filter.h"

#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/opts.h"
#include "misc/debug.h"

#include "calib/bin.h"

/* Reorder binning: [-o]
 * --------------------
 *
 * Input a 1D file with <labels> at the dimension that you want to reorder according to the label order.
 *
 *
 * Label binning: [-l long]
 * ------------------------
 *
 * Bin a dimension according to the label-file <labels>.
 * The label file must be 1D and the dimension of the