diff --git a/.travis.yml b/.travis.yml index ed1bca620..f512b11ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,69 +1,127 @@ language: python -python: -- '3.6' -- '3.7-dev' - -git: - depth: 200 -sudo: required -dist: trusty +matrix: + include: + - name: Bionic python 3.7 + os: linux + dist: bionic + python: 3.7 + env: + - REPO=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + - OS=linux-64 + - name: trusty python 3.6 + os: linux + dist: trusty + python: 3.6 + env: + - REPO=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + - OS=linux-64 + - name: xenial python 3.7 + os: linux + dist: xenial + python: 3.7 + env: + - REPO=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + - OS=linux-64 + - name: xenial python 3.6 + os: linux + dist: xenial + python: 3.6 + env: + - REPO=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + - OS=linux-64 + - name: osx python 3.7 + os: osx + language: generic + env: + - REPO=https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + - TRAVIS_PYTHON_VERSION=3.7 + - OS=osx-64 + - name: osx python 3.6 + os: osx + language: generic + env: + - REPO=https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + - TRAVIS_PYTHON_VERSION=3.6 + - OS=osx-64 before_install: - - sudo apt-get update - - ldd --version - - gcc --version - - export START=$(pwd) +- gcc --version +- export START=$(pwd) install: -- if [[ "$TRAVIS_PYTHON_VERSION" == "3.7-dev" ]]; then export VADD="py37"; else export VADD="py36"; fi -- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - -O miniconda.sh +- wget "$REPO" -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no -- conda install conda-build -- conda update -q conda -- conda update -n root conda-build -- conda config --set anaconda_upload no - conda config --append channels 
conda-forge - conda config --append channels tofuproject - conda info -a -- if [[ "$TRAVIS_PYTHON_VERSION" == "3.7-dev" ]]; then export THIS_PY_VERSION="3.7"; - else THIS_PY_VERSION=$TRAVIS_PYTHON_VERSION; - fi -- conda install -q python=$THIS_PY_VERSION conda-build anaconda-client nose - nose-timer coverage codecov +- conda install -q python="$TRAVIS_PYTHON_VERSION" conda-verify nose nose-timer coverage + codecov - export REV=$(python -c "import _updateversion as up; out=up.updateversion(); print(out)") - export VERSION=$(echo $REV | tr - .) - echo $REV -- conda build conda_recipe -- export PKG_DIR=$HOME/miniconda/conda-bld/linux-64/ -- conda install tofu --use-local +- pip install -e ".[dev]" script: - cd $HOME -- nosetests tofu.tests --nocapture -v --with-id --with-timer --with-coverage - --cover-package=tofu +- nosetests tofu.tests --nocapture -v --with-id --with-timer --with-coverage --cover-package=tofu after_success: - codecov - chmod +x $START/anaconda_upload.sh - echo $TRAVIS_TAG before_deploy: +- echo "BEFORE DEPLOY START........" - ls $START - cd $START +- conda config --set anaconda_upload no +- conda install anaconda-client conda-build +- conda build conda_recipe +- export PKG_REAL=$(conda build . --output | tail -1) +- echo "BEFORE DEPLOY END.........." 
deploy: - - provider: script - user: "ToFuProject" - script: $START/anaconda_upload.sh + - provider: pypi + distributions: sdist + user: __token__ + skip_existing: true on: tags: true - all_branches: true - skip_cleanup: true + branch: deploy-test + server: https://test.pypi.org/legacy/ + password: + secure: xfVFuoz9YYNChzmT8DC9y+8eH6zdFkfoy3B51uqy8b+vhJNzCzLay4F0uSHvhHy6iYorM6UQKr6soC4D7n3PhmnFOTX/cgLtd/p4gBWGYZF6yXacvw+UHKMshgbAhn2sEynxdSAqdAlNttMI8jsUu9RhbzGiv1l5zSNnFWF4Zsly02G68UnztxIGoz8AYTRW2N2oQhGrl/ryj/YG4mSRKjled6BzK7kNoJUqLGl12DqdMMTEmdJ9NHBXgK3Dv0ya17ReFz3TcxE/4+Yc38NwSR4Ia2EvVSMtyIaccQ1uSrXwW8JQOMn+9CmDWZVUMDD2bzKYbm2WGGM9Fh8WrHnwlWRujoLDofhYEK0Cus11gULFF+J88XucOJlyJNrHP6TWxdSVVoQfwWr2ABqZIvilsvHpF+sjDLqomTNHdi+BbzP2koRv0nJb9K1W24bjPLtSK8+plX7suv7gdBNwlsJ+dPLDM87v4+jGHGthQ6P4X2guTMHZm1PU0PSPB9LCbENCN1uktLLhkgx7gZ42Ag+Jwiu02ENkChLaEB4WpPb9mjLnomu5LDYXFGtPJ/uLMOi3VCXyda0LrzqDhXYT3Cg4hvXySwJcgMYSXalfTxnTm9oouePiEXDbK+XwjMP9mjC5CeMg3SaFFTywqaTH0WUqiOBUJ6H3Gsm0sB15Tj4lNKQ= + - provider: pypi + distributions: bdist_wheel + user: __token__ + skip_existing: true + on: + condition: $OS = osx-64 + tags: true + branch: deploy-test + server: https://test.pypi.org/legacy/ + password: + secure: xfVFuoz9YYNChzmT8DC9y+8eH6zdFkfoy3B51uqy8b+vhJNzCzLay4F0uSHvhHy6iYorM6UQKr6soC4D7n3PhmnFOTX/cgLtd/p4gBWGYZF6yXacvw+UHKMshgbAhn2sEynxdSAqdAlNttMI8jsUu9RhbzGiv1l5zSNnFWF4Zsly02G68UnztxIGoz8AYTRW2N2oQhGrl/ryj/YG4mSRKjled6BzK7kNoJUqLGl12DqdMMTEmdJ9NHBXgK3Dv0ya17ReFz3TcxE/4+Yc38NwSR4Ia2EvVSMtyIaccQ1uSrXwW8JQOMn+9CmDWZVUMDD2bzKYbm2WGGM9Fh8WrHnwlWRujoLDofhYEK0Cus11gULFF+J88XucOJlyJNrHP6TWxdSVVoQfwWr2ABqZIvilsvHpF+sjDLqomTNHdi+BbzP2koRv0nJb9K1W24bjPLtSK8+plX7suv7gdBNwlsJ+dPLDM87v4+jGHGthQ6P4X2guTMHZm1PU0PSPB9LCbENCN1uktLLhkgx7gZ42Ag+Jwiu02ENkChLaEB4WpPb9mjLnomu5LDYXFGtPJ/uLMOi3VCXyda0LrzqDhXYT3Cg4hvXySwJcgMYSXalfTxnTm9oouePiEXDbK+XwjMP9mjC5CeMg3SaFFTywqaTH0WUqiOBUJ6H3Gsm0sB15Tj4lNKQ= - provider: pypi + distributions: sdist user: "Didou09" - distributions: "sdist" - skip_cleanup: true 
skip_existing: true on: tags: true - all_branches: true + branch: master password: secure: JNEDTDJVx/2fXNfHntNQ99iDRNuQ4uB3y+DBWVIBycCT95+UCb36YPtKzmruEk/UUS29Xgq4IYCGdfCSWE9smKqG8tV1PcHiw705m+AzcpKy77YtzbVECFBxqY4W36O2pHrkwEUzP/7acjFwNsnUFzArqEzsBJ+KdLaa4OPHJXCh30GA0GyqlrXYbBKG+DA9hX5vtsGo4C6w9noALYF3fS7pKPiI6ipKFnAlzGgHQ7Ke0uQME8N3IAFhmh+Z5xMtIIDWxlnqv+KszdG4DIaGV/W6NIJNAbRhzkqUd+Chu6LoPAd/XkHDTeirR/MBkNUc5UcRJxRnP9rUTRo1gCO/buTYuNRgFkMvqhV5a033+x9edWgtUiKNJIMPLXOxe0RJvc5GWji+Co77HtHxRmGRM2rnYqWMtZeYZlFbUdvHu/8jf0d6I8jyUgAoJYdlMA2u/ipENP3S6by4epE9qycUPXiIVh6r3DZbf3vPTMFvTZYAjBrA0NOzihv1xgcXwemmNUFOQSpe0io4UcFxtS9lLMo+30UMQjCHSnbEVM3zSlZmbMOKpkVOlKlt8Lz5NxwVgWtu9FuW2pGukLtE8AWbqvY9urXAPZCQqZlOIklIjJQIqOITnuw9LEV09cgvPHXfdvNni3ldbMlIQ89zryM6dYvhYryTiEZGK4JDR3wAKJA= + - provider: pypi + distributions: bdist_wheel + user: "Didou09" + skip_existing: true + on: + condition: $OS = osx-64 + tags: true + branch: master + password: + secure: JNEDTDJVx/2fXNfHntNQ99iDRNuQ4uB3y+DBWVIBycCT95+UCb36YPtKzmruEk/UUS29Xgq4IYCGdfCSWE9smKqG8tV1PcHiw705m+AzcpKy77YtzbVECFBxqY4W36O2pHrkwEUzP/7acjFwNsnUFzArqEzsBJ+KdLaa4OPHJXCh30GA0GyqlrXYbBKG+DA9hX5vtsGo4C6w9noALYF3fS7pKPiI6ipKFnAlzGgHQ7Ke0uQME8N3IAFhmh+Z5xMtIIDWxlnqv+KszdG4DIaGV/W6NIJNAbRhzkqUd+Chu6LoPAd/XkHDTeirR/MBkNUc5UcRJxRnP9rUTRo1gCO/buTYuNRgFkMvqhV5a033+x9edWgtUiKNJIMPLXOxe0RJvc5GWji+Co77HtHxRmGRM2rnYqWMtZeYZlFbUdvHu/8jf0d6I8jyUgAoJYdlMA2u/ipENP3S6by4epE9qycUPXiIVh6r3DZbf3vPTMFvTZYAjBrA0NOzihv1xgcXwemmNUFOQSpe0io4UcFxtS9lLMo+30UMQjCHSnbEVM3zSlZmbMOKpkVOlKlt8Lz5NxwVgWtu9FuW2pGukLtE8AWbqvY9urXAPZCQqZlOIklIjJQIqOITnuw9LEV09cgvPHXfdvNni3ldbMlIQ89zryM6dYvhYryTiEZGK4JDR3wAKJA= + - provider: script + user: "ToFuProject" + script: $START/anaconda_upload.sh + on: + tags: true + branch: master + skip_cleanup: true diff --git a/Notebooks/Cython_speedup_notes.ipynb b/Notebooks/Cython_speedup_notes.ipynb index bead1fb51..9635fdd7c 100644 --- a/Notebooks/Cython_speedup_notes.ipynb +++ b/Notebooks/Cython_speedup_notes.ipynb @@ -94,20 +94,20 @@ 
"output_type": "stream", "text": [ "For L = 1000\n", - "0.360000 μs, using the untyped_func\n", - "0.123000 μs, using the somewhat_typed_func\n", - "0.113000 μs, using the typed_func\n", - "0.029000 μs, using the inline_typed_func\n", + "0.798000 μs, using the untyped_func\n", + "0.027000 μs, using the somewhat_typed_func\n", + "0.017000 μs, using the typed_func\n", + "0.008000 μs, using the inline_typed_func\n", "For L = 10000\n", - "5.174000 μs, using the untyped_func\n", - "0.024000 μs, using the somewhat_typed_func\n", - "0.012000 μs, using the typed_func\n", - "0.015000 μs, using the inline_typed_func\n", + "8.871000 μs, using the untyped_func\n", + "0.027000 μs, using the somewhat_typed_func\n", + "0.014000 μs, using the typed_func\n", + "0.014000 μs, using the inline_typed_func\n", "For L = 100000\n", - "43.197000 μs, using the untyped_func\n", - "0.151000 μs, using the somewhat_typed_func\n", - "0.025000 μs, using the typed_func\n", - "0.024000 μs, using the inline_typed_func\n" + "81.221000 μs, using the untyped_func\n", + "0.148000 μs, using the somewhat_typed_func\n", + "0.010000 μs, using the typed_func\n", + "0.015000 μs, using the inline_typed_func\n" ] } ], @@ -200,7 +200,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -210,19 +210,19 @@ "\n", "-------- TESTS -------\n", "Running numpy buffers\n", - "0.020667 0.009333 0.098333 0.882333 6.571667 58.261333 670.627333 μs\n", + "0.010667 0.027000 0.108667 0.926667 11.027667 95.586333 1118.653333 μs\n", "Running cpython.array buffer\n", - "0.327333 0.090000 0.323333 0.742333 7.339333 70.054000 1853.809000 μs\n", + "0.074667 0.091667 0.351667 3.606667 25.803333 230.891000 2605.097000 μs\n", "Running cpython.array memoryview\n", - "0.890333 0.785000 1.215667 1.305667 6.270667 44.810333 538.815667 μs\n", + "0.325333 0.398667 1.079000 1.887667 7.430000 79.003667 1005.866000 μs\n", "Running cpython.array raw C type with trick\n", - "0.044000 
0.048667 0.380000 1.419333 10.037000 100.486667 2541.475333 μs\n", + "0.035000 0.041667 0.119667 0.787667 8.991000 93.519667 1401.235667 μs\n", "Running C pointers\n", - "0.006000 0.006333 0.023667 0.159000 2.129667 25.866667 363.967667 μs\n", + "0.005000 0.006667 0.057333 0.563333 5.081000 35.753000 887.650667 μs\n", "Running malloc memoryview\n", - "0.588667 0.630333 0.663667 1.224333 2.392333 23.987667 379.138667 μs\n", + "0.482333 1.043333 0.778000 1.127333 6.685000 53.277667 798.593333 μs\n", "Running argument memoryview\n", - "0.011667 0.016333 0.112000 0.722667 5.305000 45.683333 525.975333 μs\n" + "0.008000 0.019000 0.156000 1.217000 10.740000 96.713667 1305.644333 μs\n" ] } ], @@ -391,7 +391,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -402,9 +402,28 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "For L = 1000\n", + "10.211000 μs, using the sequential loop\n", + "58.846000 μs, using the parallel 1 loop\n", + "25.343000 μs, using the parallel 2 loop\n", + "For L = 10000\n", + "53.134000 μs, using the sequential loop\n", + "180.918000 μs, using the parallel 1 loop\n", + "49.014000 μs, using the parallel 2 loop\n", + "For L = 100000\n", + "827.005000 μs, using the sequential loop\n", + "601.007000 μs, using the parallel 1 loop\n", + "132.242000 μs, using the parallel 2 loop\n" + ] + } + ], "source": [ "%%cython --compile=-fopenmp --link-args=-fopenmp\n", "\n", @@ -650,7 +669,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.7.5" } }, "nbformat": 4, diff --git a/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.pdf b/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.pdf new file mode 100644 index 000000000..136140d9c Binary files /dev/null and 
b/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.pdf differ diff --git a/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.tex b/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.tex new file mode 100644 index 000000000..65fad2cb5 --- /dev/null +++ b/Notes_Upgrades/SpectroX2D/SpectroX2D_ConeCircleMagAxis.tex @@ -0,0 +1,133 @@ +\documentclass[a4paper,11pt,twoside,titlepage,openright]{book} + +\usepackage[english]{babel} +\usepackage{color} +\usepackage{graphicx} +\usepackage{amsmath} +\numberwithin{equation}{section} +\usepackage[margin=3cm]{geometry} +\usepackage{hyperref} +\usepackage{epsfig,amsfonts} +\usepackage{xcolor,import} + + +\pagestyle{plain} + +\newcommand{\ud}[1]{\underline{#1}} +\newcommand{\e}[1]{\underline{e}_{#1}} +\newcommand{\lt}{\left} +\newcommand{\rt}{\right} +\newcommand{\lra}{\Leftrightarrow} +\DeclareMathOperator{\n}{\underline{n}} +\DeclareMathOperator{\ex}{\underline{e}_x} +\DeclareMathOperator{\ey}{\underline{e}_y} +\DeclareMathOperator{\ez}{\underline{e}_z} +\DeclareMathOperator{\bragg}{\theta_{bragg}} +\DeclareMathOperator{\Z}{Z_C} +\DeclareMathOperator{\DD}{\cos(\theta)^2 - \sin(\psi)^2} +\newcommand{\wdg}{\wedge} +\newcommand{\hypot}[1]{\textbf{\textcolor{green}{#1}}} + + +\begin{document} + +\title{ToFu geometric tools\\ Intersection of a cone with a circle (magnetic axis)} +\author{Didier VEZINET} +\date{04.12.2019} +\maketitle + +\tableofcontents + +\chapter{Geometry} +\label{chap:Geometry} + +\section{Generic cone and plane} + +Let's consider a cartesian frame $(O, \ex, \ey, \ez)$. +Let's consider a half-cone defined by its axis $(S, \n)$ and half-opening $\alpha = \pi/2 - \bragg$. +The coordinates of $S$ are $(x_S, y_S, z_S)$. +The coordinates of $\n$ are $(n_x, n_y, n_z)$. +Let's consider a circle of axis $(O, \ez)$, of radius $R$, centered on $C$ or coordinates $(0, 0, \Z)$. + +Let's consider point $M$ of coordinates $(x, y, z)$ and $(R, \theta, z)$ belonging to both the cone and the circle. 
+ + +$$ +\lt\{ + \begin{array}{ll} + M \in circle & \lra \ud{OM} = \Z\ez + R(\cos(\theta)\ex + \sin(\theta)\ey)\\ + M \in cone & \lra \ud{SM}.\n = \cos(\alpha)\|\ud{SM}\| + \end{array} +\rt. +$$ + + + +\section{Intersection} + +If $M$ belongs to both the circle and the cone, then: + +$$ +\begin{array}{ll} + & \lt[\lt(\ud{SO}+\ud{OM}\rt).\n\rt]^2 = \cos^2(\alpha)\|\ud{SO} + \ud{OM}\|^2\\ + \lra & + \lt(\ud{SO}.\n\rt)^2 + \lt(\ud{OM}.\n\rt)^2 + 2\lt(\ud{SO}.\n\rt)\lt(\ud{OM}.\n\rt) + = \cos^2(\alpha)\lt[\|\ud{SO}\|^2 + \|\ud{OM}\|^2 + 2\ud{SO}.\ud{OM}\rt]\\ + \lra & + \lt(\ud{OM}.\n\rt)^2 + 2\lt(\ud{SO}.\n\rt)\lt(\ud{OM}.\n\rt) + - \cos^2(\alpha)\|\ud{OM}\|^2 - 2\cos^2(\alpha)\ud{SO}.\ud{OM} + A = 0 +\end{array} +$$ + +Where we have introduced $A = \lt(\ud{SO}.\n\rt)^2 - \cos^2(\alpha)\|\ud{SO}\|^2$ + +Now, we can write: +$$ +\lt\{ + \begin{array}{lll} + \|\ud{OM}\|^2 & = & \Z^2 + R^2\\ + \ud{OM}.\n & = & \Z n_z + R\cos(\theta)n_x + R\sin(\theta)n_y\\ + \lt(\ud{OM}.\n\rt)^2 & = & \lt(\Z n_z\rt)^2 + \lt(R\cos(\theta)n_x\rt)^2 + \lt(R\sin(\theta)n_y\rt)^2\\ + && + 2\Z R\cos(\theta)n_xn_Z + 2\Z R\sin(\theta)n_yn_z + 2R^2\cos(\theta)\sin(\theta)n_xn_y\\ + \ud{SO}.\ud{OM} & = & -\Z z_S - Rx_S\cos(\theta) - Ry_S\sin(\theta) + \end{array} +\rt. 
+$$ + +Hence: +$$ +\begin{array}{ll} + & \lt(\ud{OM}.\n\rt)^2 + 2\lt(\ud{SO}.\n\rt)\lt(\ud{OM}.\n\rt) + - \cos^2(\alpha)\|\ud{OM}\|^2 - 2\cos^2(\alpha)\ud{SO}.\ud{OM} + A\\ + = & + \lt(\Z n_z\rt)^2 + \lt(R\cos(\theta)n_x\rt)^2 + \lt(R\sin(\theta)n_y\rt)^2\\ + & + 2\Z R\cos(\theta)n_xn_Z + 2\Z R\sin(\theta)n_yn_z + 2R^2\cos(\theta)\sin(\theta)n_xn_y\\ + & +2\lt(\ud{SO}.\n\rt)\lt(\Z n_z + R\cos(\theta)n_x + R\sin(\theta)n_y\rt)\\ + & - \cos^2(\alpha)\Z^2 - \cos^2(\alpha)R^2\\ + & + 2\cos^2(\alpha)\lt(\Z z_S + Rx_S\cos(\theta) + Ry_S\sin(\theta)\rt) + A +\end{array} +$$ + + + + + + + +\section{Parametric equation} + +\subsection{From bragg angle and parameter to local cartesian coordinates} + + + + + + + +\appendix +\chapter{Appendices} + +\section{Section} +\subsection{Subsection} + +\end{document} diff --git a/Notes_Upgrades/meetings/nov_2019.md b/Notes_Upgrades/meetings/nov_2019.md index 8a0db0a8e..0e607d5c8 100644 --- a/Notes_Upgrades/meetings/nov_2019.md +++ b/Notes_Upgrades/meetings/nov_2019.md @@ -32,7 +32,7 @@ Summary of meeting 27/11/2019 - Documentation problems: - More and more users asking for doc (mostly on IMAS). - - docstring: + - docstring: Didier will update docstring of main user functions **Assignee**: Didier - website: @@ -41,7 +41,7 @@ Summary of meeting 27/11/2019 - add tutorial for reflexions (number and types) - 2d radiations with ITER data (best case) or fake data -> update the tutorial called Computing a camera image with custom emissivity - sinogram: to be added to an existing tutorial - - how to build cameras: add more comments on how to create a camera (in the 5 minutes to Tofu) what are the different parameters. Maybe separate camera 1D and camera 2D and go into details on how it works. + - how to build cameras: add more comments on how to create a camera (in the 5 minutes to Tofu) what are the different parameters. Maybe separate camera 1D and camera 2D and go into details on how it works. **Assignee**: Laura + Didier + Florian ? 
Deadline: December ? diff --git a/_updateversion.py b/_updateversion.py index 9ae7ad0cc..d948f779b 100644 --- a/_updateversion.py +++ b/_updateversion.py @@ -6,6 +6,7 @@ _HERE = os.path.abspath(os.path.dirname(__file__)) + def updateversion(path=_HERE): # Fetch version from git tags, and write to version.py # Also, when git is not available (PyPi package), use stored version.py @@ -13,13 +14,13 @@ def updateversion(path=_HERE): try: version_git = subprocess.check_output(["git", "describe"]).rstrip().decode() - except: - with open(version_py,'r') as fh: - version_git = fh.read().strip().split("=")[-1].replace("'",'') - version_git = version_git.lower().replace('v','') + except subprocess.CalledProcessError: + with open(version_py, 'r') as fh: + version_git = fh.read().strip().split("=")[-1].replace("'", '') + version_git = version_git.lower().replace('v', '').replace(' ', '') version_msg = "# Do not edit, pipeline versioning governed by git tags!" - with open(version_py,"w") as fh: + with open(version_py, "w") as fh: msg = "{0}__version__ = '{1}'{0}".format(os.linesep, version_git) fh.write(version_msg + msg) return version_git diff --git a/conda_recipe/conda_upload.sh b/conda_recipe/conda_upload.sh index d50ad2fb0..4fe6fa2b5 100644 --- a/conda_recipe/conda_upload.sh +++ b/conda_recipe/conda_upload.sh @@ -1,20 +1,6 @@ # Only need to change these two variables -PKG_NAME=tofu USER=ToFuProject -OS=linux-64 -#mkdir ~/conda-bld -#conda config --set anaconda_upload no -#conda update -n root conda-build -#conda config --append channels conda-forge -#conda config --append channels tofuproject -#export CONDA_BLD_PATH=~/conda-bld -#export VERSION=`date +%Y.%m.%d` -#export VERSION=$(head -n 1 version.txt) - -#conda build conda_recipe echo "Available conda packages:" -echo $(find $CONDA_BLD_PATH/$OS/ -type f -name $PKG_NAME*.tar.bz2) -PKG_REAL=$(find $CONDA_BLD_PATH/$OS/ -type f -name $PKG_NAME-$VERSION-$VADD*.tar.bz2) echo $PKG_REAL anaconda -t $CONDA_UPLOAD_TOKEN upload -u 
$USER -l main $PKG_REAL --force diff --git a/conda_recipe/meta.yaml b/conda_recipe/meta.yaml index caf994d59..2f0a3752c 100644 --- a/conda_recipe/meta.yaml +++ b/conda_recipe/meta.yaml @@ -4,16 +4,12 @@ package: source: git_url: https://github.com/ToFuProject/tofu.git - #git_branch: {{ environ['TRAVIS_BRANCH'] }} git_rev: {{ environ['REV'] }} - # "patches:" might be the answer for clang compilers ? build: script_env: - # - REV - - VERSION + - PKG_REAL - TRAVIS_BRANCH - - CONDA_BLD_PATH requirements: @@ -21,7 +17,7 @@ requirements: # here same as run, as we are using cython build: - python - - setuptools + - setuptools >=40.8.0 - setuptools_scm - numpy - scipy diff --git a/examples/tutorials/tuto_plot_custom_emissivity.py b/examples/tutorials/tuto_plot_custom_emissivity.py index 5959a37af..d6aecf05c 100644 --- a/examples/tutorials/tuto_plot_custom_emissivity.py +++ b/examples/tutorials/tuto_plot_custom_emissivity.py @@ -95,4 +95,4 @@ def project_to_2D(xyz): t=time_vector) sig.plot(ntMax=1) -plt.show(block=False) +plt.show(block=True) diff --git a/examples/tutorials/tuto_plot_gallery_fusion_machines.py b/examples/tutorials/tuto_plot_gallery_fusion_machines.py index 7c1359678..2ecb2261a 100644 --- a/examples/tutorials/tuto_plot_gallery_fusion_machines.py +++ b/examples/tutorials/tuto_plot_gallery_fusion_machines.py @@ -15,25 +15,25 @@ # `tofu` provides a geometry helper function that allows creating a # configuration with a single call. # -# Calling with empty arguments results in a default configuration. At the time -# of writing, this is ITER. -# By printing the `config` object, a text representation of its components is -# printed. This allows inspecting -# component names, number of sections, color or visibility. +# Some configurations are pre-defined, for example ITER's configuration. +# +# By printing the `config` object created, a text representation of its +# components is printed. 
This allows inspecting the component names, number +# of sections, color or visibility. -config = tf.geom.utils.create_config() # create default configuration +config = tf.geom.utils.create_config("ITER") # create ITER configuration print(config) ############################################################################### # To get a list of all available built-in configs, one has to know some details # about `tofu`. Configurations can be accessed by names (ITER, WEST, JET, etc). -print(tf.geom.utils._DCONFIG_TABLE.keys()) +print(tf.geom.utils.get_available_config()) ############################################################################### # With that being said, let's create a gallery of the "top 3" fusion machines # provided by `tofu` to accelerate diagnostic development. -for fusion_machine in ['ITER', 'WEST', 'JET']: +for fusion_machine in ['ITER', 'WEST', 'JET', 'NSTX', 'AUG']: config = tf.geom.utils.create_config(fusion_machine) config.plot() diff --git a/setup.py b/setup.py index 5f45f94c0..443fdd0f2 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,6 @@ from setuptools import setup, find_packages from setuptools import Extension # ... packages that need to be in pyproject.toml -import Cython as cth from Cython.Distutils import build_ext import numpy as np # ... @@ -36,6 +35,7 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger("tofu.setup") + class CleanCommand(Clean): description = "Remove build artifacts from the source tree" @@ -136,6 +136,7 @@ def check_for_openmp(cc_var): shutil.rmtree(tmpdir) return result + # ....... 
Using function if is_platform_windows: openmp_installed = False @@ -155,7 +156,7 @@ def updateversion(path=_HERE): try: version_git = subprocess.check_output(["git", "describe"]).rstrip().decode() - except Exception as err: + except subprocess.CalledProcessError: with open(version_py, 'r') as fh: version_git = fh.read().strip().split("=")[-1].replace("'", '') version_git = version_git.lower().replace('v', '') @@ -166,6 +167,7 @@ def updateversion(path=_HERE): fh.write(version_msg + msg) return version_git + def get_version_tofu(path=_HERE): # Try from git @@ -177,7 +179,6 @@ def get_version_tofu(path=_HERE): [ "git", "rev-parse", - "--symbolic-full-name", "--abbrev-ref", "HEAD", ] @@ -185,8 +186,8 @@ def get_version_tofu(path=_HERE): .rstrip() .decode() ) - if git_branch in ["master"]: - version_tofu = updateversion(os.path.join(path, "tofu")) + if git_branch in ["master", "deploy-test"]: + version_tofu = updateversion() else: isgit = False except Exception: @@ -198,14 +199,16 @@ def get_version_tofu(path=_HERE): with open(version_tofu, "r") as fh: version_tofu = fh.read().strip().split("=")[-1].replace("'", "") - version_tofu = version_tofu.lower().replace("v", "") + version_tofu = version_tofu.lower().replace("v", "").replace(" ", "") return version_tofu + version_tofu = get_version_tofu(path=_HERE) print("") print("Version for setup.py : ", version_tofu) print("") + # ============================================================================= # ============================================================================= diff --git a/tofu/__init__.py b/tofu/__init__.py index b2defb789..4ef97c747 100644 --- a/tofu/__init__.py +++ b/tofu/__init__.py @@ -91,7 +91,7 @@ lsubout = ['tofu.{0}'.format(ss) for ss in lsubout] msg = "\nThe following subpackages are not available:" msg += "\n - " + "\n - ".join(lsubout) - msg += "\n => see tofu.dsub[] for details." + msg += "\n => see print(tofu.dsub[]) for details." 
warnings.warn(msg) # ------------------------------------- diff --git a/tofu/data/_comp.py b/tofu/data/_comp.py index 77abb52cb..9af8e8117 100644 --- a/tofu/data/_comp.py +++ b/tofu/data/_comp.py @@ -413,6 +413,7 @@ def func(pts, vect=None, t=None, ntall=ntall, shapeval = list(pts.shape) shapeval[0] = ntall if t is None else t.size val = np.full(tuple(shapeval), fill_value) + if t is None: for ii in range(0,ntall): val[ii,...] = mplTriLinInterp(mpltri, @@ -431,6 +432,8 @@ def func(pts, vect=None, t=None, ntall=ntall, vquant[indtq[indtu[ii]], :], trifinder=trifind )(r, z).filled(fill_value) + if np.any(np.isnan(val)) and not np.isnan(fill_value): + val[np.isnan(val)] = fill_value return val, t # -------------------- @@ -462,6 +465,8 @@ def func(pts, vect=None, t=None, ntall=ntall, ind = indt == indtu[ii] val[ind, indok] = vquant[indtq[indtu[ii]], indpts[indok]] + if np.any(np.isnan(val)) and not np.isnan(fill_value): + val[np.isnan(val)] = fill_value return val, t @@ -517,14 +522,13 @@ def func(pts, vect=None, t=None, ntall=ntall, # interpolate 1d ind = indt == indtu[ii] val[ind, ...] 
= scpinterp.interp1d( - vr1[indtr1[indtu[ii]], :], + vr1[indtr1[ii], :], vquant[indtq[indtu[ii]], :], kind='linear', bounds_error=False, fill_value=fill_value )(np.asarray(vii)) val[np.isnan(val)] = fill_value - return val, t else: @@ -562,12 +566,16 @@ def func(pts, vect=None, t=None, ntall=ntall, # interpolate 1d ind = indt == indtu[ii] val[ind, indok] = scpinterp.interp1d( - vr1[indtr1[indtu[ii]], :], + vr1[indtr1[ii], :], vquant[indtq[indtu[ii]], :], kind='linear', bounds_error=False, fill_value=fill_value )(vr2[indtr2[ii], indpts[indok]]) + + # Double check nan in case vr2 is itself nan on parts of mesh + if np.any(np.isnan(val)) and not np.isnan(fill_value): + val[np.isnan(val)] = fill_value return val, t return func @@ -671,13 +679,13 @@ def func(pts, vect=None, t=None, ntall=ntall, tall=tall, tbinall=tbinall): # Get pts in (r,z,phi) - r, z = np.hypot(pts[0,:],pts[1,:]), pts[2,:] - phi = np.arctan2(pts[1,:],pts[0,:]) + r, z = np.hypot(pts[0, :], pts[1, :]), pts[2, :] + phi = np.arctan2(pts[1, :], pts[0, :]) # Deduce vect in (r,z,phi) - vR = np.cos(phi)*vect[0,:] + np.sin(phi)*vect[1,:] - vphi = -np.sin(phi)*vect[0,:] + np.cos(phi)*vect[1,:] - vZ = vect[2,:] + vR = np.cos(phi)*vect[0, :] + np.sin(phi)*vect[1, :] + vPhi = -np.sin(phi)*vect[0, :] + np.cos(phi)*vect[1, :] + vZ = vect[2, :] # Prepare output shapeval = list(pts.shape) @@ -688,6 +696,7 @@ def func(pts, vect=None, t=None, ntall=ntall, # Interpolate indpts = trifind(r, z) + indok = indpts > -1 if t is None: for ii in range(0,ntall): valR[ii, ...] = vq2dR[indtq[ii], indpts] @@ -696,9 +705,8 @@ def func(pts, vect=None, t=None, ntall=ntall, t = tall else: ntall, indt, indtu = plasma._get_indtu(t=t, tall=tall, - tbinall=tbinall, - idref1d=idref1d, - idref2d=idref2d)[1:] + tbinall=tbinall)[1:-2] + for ii in range(0, ntall): ind = indt == indtu[ii] valR[ind, ...] 
= vq2dR[indtq[indtu[ii]], indpts] diff --git a/tofu/data/_core.py b/tofu/data/_core.py index 8913c41cd..a4f45c47e 100644 --- a/tofu/data/_core.py +++ b/tofu/data/_core.py @@ -2026,7 +2026,11 @@ def _extract_common_params(obj0, obj1=None): @staticmethod def _recreatefromoperator(d0, other, opfunc): - if type(other) in [int,float,np.int64,np.float64]: + if type(other) in [int, float, np.int64, np.float64]: + data = opfunc(d0.data, other) + dcom = d0._extract_common_params(d0) + + elif isinstance(other, np.ndarray): data = opfunc(d0.data, other) dcom = d0._extract_common_params(d0) @@ -2515,6 +2519,9 @@ def trifind(r, z, indpts = indR*nZ + indZ else: indpts = indZ*nR + indR + indout = ((r < R[0]) | (r > R[-1]) + | (z < Z[0]) | (z > Z[-1])) + indpts[indout] = -1 return indpts dd[dk][k0]['R'] = R @@ -3677,11 +3684,15 @@ def _get_indtu(t=None, tall=None, tbinall=None, # Get indt (t with respect to tbinall) indt, indtu = None, None if t is not None: - indt = np.digitize(t, tbinall) - indtu = np.unique(indt) + if len(t) == len(tall) and np.allclose(t, tall): + indt = np.arange(0, tall.size) + indtu = indt + else: + indt = np.digitize(t, tbinall) + indtu = np.unique(indt) + # Update + tall = tall[indtu] - # Update - tall = tall[indtu] if idref1d is not None: assert indtr1 is not None indtr1 = indtr1[indtu] @@ -3731,7 +3742,7 @@ def _get_finterp(self, idquant=None, idref1d=None, idref2d=None, idq2dR=None, idq2dPhi=None, idq2dZ=None, interp_t=None, interp_space=None, - fill_value=np.nan, ani=False, Type=None): + fill_value=None, ani=False, Type=None): if interp_t is None: interp_t = 'nearest' @@ -3849,7 +3860,7 @@ def _checkformat_qr12RPZ(self, quant=None, ref1d=None, ref2d=None, def get_finterp2d(self, quant=None, ref1d=None, ref2d=None, q2dR=None, q2dPhi=None, q2dZ=None, interp_t=None, interp_space=None, - fill_value=np.nan, Type=None): + fill_value=None, Type=None): """ Return the function interpolating (X,Y,Z) pts on a 1d/2d profile Can be used as input for 
tf.geom.CamLOS1D/2D.calc_signal() @@ -3876,7 +3887,7 @@ def interp_pts2profile(self, pts=None, vect=None, t=None, quant=None, ref1d=None, ref2d=None, q2dR=None, q2dPhi=None, q2dZ=None, interp_t=None, interp_space=None, - fill_value=np.nan, Type=None): + fill_value=None, Type=None): """ Return the value of the desired profiles_1d quantity For the desired inputs points (pts): @@ -3907,8 +3918,23 @@ def interp_pts2profile(self, pts=None, vect=None, t=None, else: idmesh = [id_ for id_ in self._ddata[idref2d]['depend'] if self._dindref[id_]['group'] == 'mesh'][0] - pts = self.dmesh[idmesh]['data']['nodes'] - pts = np.array([pts[:,0], np.zeros((pts.shape[0],)), pts[:,1]]) + if self.dmesh[idmesh]['data']['type'] == 'rect': + if self.dmesh[idmesh]['data']['shapeRZ'] == ('R', 'Z'): + R = np.repeat(self.dmesh[idmesh]['data']['R'], + self.dmesh[idmesh]['data']['nZ']) + Z = np.tile(self.dmesh[idmesh]['data']['Z'], + self.dmesh[idmesh]['data']['nR']) + else: + R = np.tile(self.dmesh[idmesh]['data']['R'], + self.dmesh[idmesh]['data']['nZ']) + Z = np.repeat(self.dmesh[idmesh]['data']['Z'], + self.dmesh[idmesh]['data']['nR']) + pts = np.array( + [R, np.zeros((self.dmesh[idmesh]['data']['size'],)), Z]) + else: + pts = self.dmesh[idmesh]['data']['nodes'] + pts = np.array( + [pts[:, 0], np.zeros((pts.shape[0],)), pts[:, 1]]) pts = np.atleast_2d(pts) if pts.shape[0] != 3: @@ -3941,7 +3967,7 @@ def calc_signal_from_Cam(self, cam, t=None, quant=None, ref1d=None, ref2d=None, q2dR=None, q2dPhi=None, q2dZ=None, Brightness=True, interp_t=None, - interp_space=None, fill_value=np.nan, + interp_space=None, fill_value=None, res=0.005, DL=None, resMode='abs', method='sum', ind=None, out=object, plot=True, dataname=None, fs=None, dmargin=None, wintit=None, invert=True, diff --git a/tofu/data/_plot.py b/tofu/data/_plot.py index d99cb0abe..4101ba8d9 100644 --- a/tofu/data/_plot.py +++ b/tofu/data/_plot.py @@ -364,9 +364,9 @@ def _DataCam12D_plot(lData, key=None, nchMax=_nchMax, ntMax=_ntMax, c0 = 
[all([dd.dlabels[kk] == lData[0].dlabels[kk] for dd in lData[1:]]) for kk in ['t','X','data']] if not all(c0): - msg = "All Data objects must have the same:\n" + msg = "All Data objects do not have the same:\n" msg += " dlabels['t'], dlabels['X'] and dlabels['data'] !" - raise Exception(msg) + warnings.warn(msg) # --------- @@ -387,8 +387,9 @@ def _DataCam12D_plot(lData, key=None, nchMax=_nchMax, ntMax=_ntMax, # Check nch and X c0 = [dd.nch == lData[0].nch for dd in lData[1:]] if not all(c0): - msg = "All Data objects must have the same number of channels (self.nch)" - msg += "\nYou can set the indices of the channels with self.set_indch()" + msg = ("All Data objects must have the same nb. of channels\n" + + "\t- self.nch: {}\n".format([dd.nch for dd in lData]) + + "\n => use self.set_indch()") raise Exception(msg) nch = lData[0].nch diff --git a/tofu/geom/_GG.pyx b/tofu/geom/_GG.pyx index 7143e078f..47a2f401f 100644 --- a/tofu/geom/_GG.pyx +++ b/tofu/geom/_GG.pyx @@ -186,48 +186,67 @@ def CoordShift(Pts, In='(X,Y,Z)', Out='(R,Z)', CrossRef=None): ######################################################## -@cython.cdivision(True) -@cython.wraparound(False) -@cython.boundscheck(False) def Poly_isClockwise(np.ndarray[double,ndim=2] Poly): """ Assuming 2D closed Poly ! - TODO @LM : http://www.faqs.org/faqs/graphics/algorithms-faq/ - A slightly faster method is based on the observation that it isn't - necessary to compute the area. Find the lowest vertex (or, if - there is more than one vertex with the same lowest coordinate, - the rightmost of those vertices) and then take the cross product - of the edges fore and aft of it. Both methods are O(n) for n vertices, - but it does seem a waste to add up the total area when a single cross - product (of just the right edges) suffices. Code for this is - available at ftp://cs.smith.edu/pub/code/polyorient.C (2K). 
+ Find the lowest vertex (or, if there is more than one vertex with + the same lowest coordinate, the rightmost of those vertices) and then + take the cross product of the edges before and after it. + Both methods are O(n) for n vertices, but it does seem a waste to add up + the total area when a single cross product (of just the right edges) + suffices. Code for this is available at + ftp://cs.smith.edu/pub/code/polyorient.C (2K). """ cdef double res cdef double[:,::1] mv_poly = np.ascontiguousarray(Poly) cdef int npts = mv_poly.shape[1] - cdef double[::1] mvx = mv_poly[0,:] - cdef double[::1] mvy = mv_poly[1,:] - cdef int idmin = _bgt.find_ind_lowerright_corner(mvx, mvy, npts) - cdef int idm1 = (idmin - 1) % npts - cdef int idp1 = (idmin + 1) % npts + cdef int ndim = mv_poly.shape[0] + cdef double[::1] mvx + cdef double[::1] mvy + cdef int idmin + cdef int idm1 + cdef int idp1 + cdef str err_msg = "" + # Checking that Poly wasn't given in the shape (npts, ndim) + if ndim > npts: + mv_poly = np.ascontiguousarray(Poly.T) + npts = mv_poly.shape[1] + ndim = mv_poly.shape[0] + mvx = mv_poly[0,:] + mvy = mv_poly[1,:] + # Getting index of lower right corner and its neighbors + idmin = _bgt.find_ind_lowerright_corner(mvx, mvy, npts) + idm1 = idmin - 1 + idp1 = (idmin + 1) % npts + if idmin == 0 : + idm1 = npts - 2 + # Computing area of lower right triangle res = mvx[idm1] * (mvy[idmin] - mvy[idp1]) + \ mvx[idmin] * (mvy[idp1] - mvy[idm1]) + \ mvx[idp1] * (mvy[idm1] - mvy[idmin]) + if abs(res) < _VSMALL: + err_msg += ("In Poly_isClockwise : \n" + + " Found lowest right point at index : " + + str(idmin) + + ", of coordinates :" + str(mvx[idmin]) + + ", " + str(mvy[idmin]) + ".\n" + + " The two neighboring points are : " + + str(idm1) + " and " + str(idp1) + ".") + raise Exception(err_msg) # not working return res < 0. 
-def Poly_Order(np.ndarray[double,ndim=2] Poly, str order='C', Clock=False, - close=True, str layout='(cc,N)', - str layout_in=None, Test=True): +def format_poly(np.ndarray[double,ndim=2] poly, str order='C', Clock=False, + close=True, Test=True): """ - Return a polygon Poly as a np.ndarray formatted according to parameters + Return a polygon poly as a np.ndarray formatted according to parameters Parameters ---------- - Poly np.ndarray or list Input polygon under from of (cc,N) or - or tuple (N,cc) np.ndarray (where cc = 2 or 3, the - number of coordinates and N points), or - list or tuple of vertices + poly np.ndarray or list Input np.ndarray of shape (cc,N) + or tuple (where cc = 2 or 3, the number of + coordinates and N points), or + list or tuple of vertices of a polygon order str Flag indicating whether the output np.ndarray shall be C-contiguous ('C') or Fortran-contiguous ('F') @@ -240,9 +259,6 @@ def Poly_Order(np.ndarray[double,ndim=2] Poly, str order='C', Clock=False, cating whether the output array shall be closed (True, ie: last point==first point) or not closed (False) - layout str Flag indicating whether the output - np.ndarray shall be of shape '(cc,N)' - or '(N,cc)' Test bool Flag indicating whether the inputs should be tested for conformity, default: True @@ -251,40 +267,25 @@ def Poly_Order(np.ndarray[double,ndim=2] Poly, str order='C', Clock=False, poly np.ndarray Output formatted polygon """ if Test: - assert (2 in np.shape(Poly) or 3 in np.shape(Poly)), \ - "Arg Poly must contain the 2D or 3D coordinates of at least 3 points!" - assert max(np.shape(Poly))>=3, ("Arg Poly must contain the 2D or 3D", - " coordinates of at least 3 points!") + assert (poly.shape[0] == 2 or poly.shape[0] == 3), \ + ("Arg poly must contain the 2D or 3D coordinates of N points." 
+ " And be shaped in the form (dim, N).") + assert poly.shape[1]>=3, ("Arg poly must contain the 2D or 3D", + " coordinates of at least 3 points!") assert order.lower() in ['c','f'], "Arg order must be in ['c','f']!" assert type(Clock) is bool, "Arg Clock must be a bool!" assert type(close) is bool, "Arg close must be a bool!" - assert layout.lower() in ['(cc,n)','(n,cc)'], \ - "Arg layout must be in ['(cc,n)','(n,cc)']!" - assert layout_in is None or layout_in.lower() in ['(cc,n)','(n,cc)'],\ - "Arg layout_in must be None or in ['(cc,n)','(n,cc)']!" - - if np.shape(Poly)==(3,3): - assert not layout_in is None, \ - ("Could not resolve the input layout of Poly because shape==(3,3)", - " Please specify if input is in '(cc,n)' or '(n,cc)' format!") - poly = np.array(Poly).T if layout_in.lower()=='(n,cc)' \ - else np.array(Poly) - else: - poly = np.array(Poly).T if min(np.shape(Poly))==Poly.shape[1]\ - else np.array(Poly) + if not np.allclose(poly[:,0],poly[:,-1], atol=_VSMALL): poly = np.concatenate((poly,poly[:,0:1]),axis=1) if poly.shape[0]==2 and not Clock is None: - if not Clock==Poly_isClockwise(poly): - poly = poly[:,::-1] + try: + if not Clock==Poly_isClockwise(poly): + poly = poly[:,::-1] + except Exception as excp: + raise excp if not close: poly = poly[:,:-1] - if layout.lower()=='(n,cc)': - poly = poly.T - # TODO : @LM @DV > seems strange to me that we order all polys - # in order "(cc,n)" and just last minute we look at what's actually - # asked - # >> ok poly = np.ascontiguousarray(poly) if order.lower()=='c' \ else np.asfortranarray(poly) return poly @@ -2985,16 +2986,16 @@ def LOS_calc_signal(func, double[:,::1] ray_orig, double[:,::1] ray_vdir, res, val_2d = func(pts, t=t, vect=-usbis, **fkwdargs) else: val_2d = func(pts, t=t, **fkwdargs) + # Integrate if n_imode == 0: # "sum" integration mode # .. integrating function .......................................... 
- for ii in range(nlos): - if ii > 0: - sig[:,ii] = np.sum(val_2d[:, ind_arr[ii-1]:ind_arr[ii]], - axis=-1) * reseff_arr[ii] - else: - sig[:,0] = np.sum(val_2d[:, 0:ind_arr[0]], - axis=-1) * reseff_arr[0] + reseffs = np.copy(np.asarray(reseff_arr)) + indices = np.copy(np.asarray(ind_arr).astype(int)) + sig = np.asfortranarray(np.add.reduceat(val_2d, + np.r_[0, indices], + axis=-1) + * reseffs[None, :]) # Cleaning up... free(coeff_ptr[0]) free(coeff_ptr) diff --git a/tofu/geom/_basic_geom_tools.pxd b/tofu/geom/_basic_geom_tools.pxd index 99c5bb054..d61936605 100644 --- a/tofu/geom/_basic_geom_tools.pxd +++ b/tofu/geom/_basic_geom_tools.pxd @@ -1,6 +1,7 @@ # cython: boundscheck=False # cython: wraparound=False # cython: cdivision=True +# cython: initializedcheck=False # ################################################################################ # Utility functions for basic geometry : diff --git a/tofu/geom/_basic_geom_tools.pyx b/tofu/geom/_basic_geom_tools.pyx index edcb4f502..ba99e1dce 100644 --- a/tofu/geom/_basic_geom_tools.pyx +++ b/tofu/geom/_basic_geom_tools.pyx @@ -1,6 +1,8 @@ # cython: boundscheck=False # cython: wraparound=False # cython: cdivision=True +# cython: initializedcheck=False +# cimport cython from cython.parallel import prange diff --git a/tofu/geom/_comp.py b/tofu/geom/_comp.py index 8ddef84bf..5faaa396e 100644 --- a/tofu/geom/_comp.py +++ b/tofu/geom/_comp.py @@ -3,7 +3,6 @@ """ # Built-in -import sys import warnings # Common @@ -35,9 +34,11 @@ def _Struct_set_Poly( """ Compute geometrical attributes of a Struct object """ # Make Poly closed, counter-clockwise, with '(cc,N)' layout and arrayorder - Poly = _GG.Poly_Order( - Poly, order="C", Clock=False, close=True, layout="(cc,N)", Test=True - ) + try: + Poly = _GG.format_poly(Poly, order="C", Clock=False, close=True, + Test=True) + except Exception as excp: + print(excp) assert Poly.shape[0] == 2, "Arg Poly must be a 2D polygon !" 
fPfmt = np.ascontiguousarray if arrayorder == "C" else np.asfortranarray @@ -62,8 +63,13 @@ def _Struct_set_Poly( Vol, BaryV = None, None else: Vol, BaryV = _GG.Poly_VolAngTor(Poly) - msg = "Pb. with volume computation for Ves object of type 'Tor' !" - assert Vol > 0.0, msg + if Vol <= 0.0: + msg = ("Pb. with volume computation for Struct of type 'Tor' !\n" + + "\t- Vol = {}\n".format(Vol) + + "\t- Poly = {}\n\n".format(str(Poly)) + + " => Probably corrupted polygon\n" + + " => Please check polygon is not self-intersecting") + raise Exception(msg) # Compute the non-normalized vector of each side of the Poly Vect = np.diff(Poly, n=1, axis=1) @@ -75,12 +81,11 @@ def _Struct_set_Poly( Vin = Vin / np.hypot(Vin[0, :], Vin[1, :])[np.newaxis, :] Vin = fPfmt(Vin) - poly = _GG.Poly_Order( + poly = _GG.format_poly( Poly, order=arrayorder, Clock=Clock, close=False, - layout="(cc,N)", Test=True, ) diff --git a/tofu/geom/_comp_optics.py b/tofu/geom/_comp_optics.py index 4917835b5..3f31e95d1 100644 --- a/tofu/geom/_comp_optics.py +++ b/tofu/geom/_comp_optics.py @@ -106,25 +106,70 @@ def get_lamb_from_bragg(bragg, d, n=None): # Approximate solution # ############################################### -def get_approx_detector_rel(rcurve, bragg): +def get_approx_detector_rel(rcurve, bragg, tangent_to_rowland=None): + + if tangent_to_rowland is None: + tangent_to_rowland = True # distance crystal - det_center det_dist = rcurve*np.sin(bragg) # det_nout and det_e1 in (nout, e1, e2) (det_e2 = e2) - det_nout_rel = np.r_[np.sin(bragg), -np.cos(bragg), 0.] - det_ei_rel = np.r_[np.cos(bragg), np.sin(bragg), 0] - return det_dist, det_nout_rel, det_ei_rel + n_crystdet_rel = np.r_[-np.sin(bragg), np.cos(bragg), 0.] + if tangent_to_rowland: + bragg2 = 2.*bragg + det_nout_rel = np.r_[-np.cos(bragg2), -np.sin(bragg2), 0.] + det_ei_rel = np.r_[np.sin(bragg2), -np.cos(bragg2), 0.] 
+ else: + det_nout_rel = -n_crystdet_rel + det_ei_rel = np.r_[np.cos(bragg), np.sin(bragg), 0] + return det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel -def get_det_abs_from_rel(det_dist, det_nout_rel, det_ei_rel, - summit, nout, e1, e2): + +def get_det_abs_from_rel(det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel, + summit, nout, e1, e2, + ddist=None, di=None, dj=None, + dtheta=None, dpsi=None, tilt=None): + # Reference det_nout = (det_nout_rel[0]*nout + det_nout_rel[1]*e1 + det_nout_rel[2]*e2) det_ei = (det_ei_rel[0]*nout + det_ei_rel[1]*e1 + det_ei_rel[2]*e2) det_ej = np.cross(det_nout, det_ei) - det_cent = summit - det_dist*det_nout - return det_cent, det_nout, det_ei, det_ej + + # Apply translation of center (ddist, di, dj) + if ddist is None: + ddist = 0. + if di is None: + di = 0. + if dj is None: + dj = 0. + det_dist += ddist + + n_crystdet = (n_crystdet_rel[0]*nout + + n_crystdet_rel[1]*e1 + n_crystdet_rel[2]*e2) + det_cent = summit + det_dist*n_crystdet + di*det_ei + dj*det_ej + + # Apply angles on unit vectors with respect to themselves + if dtheta is None: + dtheta = 0. + if dpsi is None: + dpsi = 0. + if tilt is None: + tilt = 0. 
+ + # dtheta and dpsi + det_nout2 = ((np.cos(dpsi)*det_nout + + np.sin(dpsi)*det_ei)*np.cos(dtheta) + + np.sin(dtheta)*det_ej) + det_ei2 = (np.cos(dpsi)*det_ei - np.sin(dpsi)*det_nout) + det_ej2 = np.cross(det_nout2, det_ei2) + + # tilt + det_ei3 = np.cos(tilt)*det_ei2 + np.sin(tilt)*det_ej2 + det_ej3 = np.cross(det_nout2, det_ei3) + + return det_cent, det_nout2, det_ei3, det_ej3 # ############################################### @@ -144,6 +189,7 @@ def checkformat_vectang(Z, nn, frame_cent, frame_ang): return Z, nn, frame_cent, frame_ang + def get_e1e2_detectorplane(nn, nIn): e1 = np.cross(nn, nIn) e1n = np.linalg.norm(e1) @@ -155,6 +201,7 @@ def get_e1e2_detectorplane(nn, nIn): e2 = e2 / np.linalg.norm(e2) return e1, e2 + def calc_xixj_from_braggphi(summit, det_cent, det_nout, det_ei, det_ej, nout, e1, e2, bragg, phi): sp = (det_cent - summit) @@ -168,25 +215,32 @@ def calc_xixj_from_braggphi(summit, det_cent, det_nout, det_ei, det_ej, xj = np.sum((pts - det_cent[:, None])*det_ej[:, None], axis=0) return xi, xj -def calc_braggphi_from_xixj(xi, xj, det_cent, det_ei, det_ej, - summit, nin, e1, e2): - xi = xi[None, ...] - xj = xj[None, ...] - if xi.ndim == 1: - summit = summit[:, None] - det_cent = det_cent[:, None] - det_ei, det_ej = det_ei[:, None], det_ej[:, None] - nin, e1, e2 = nin[:, None], e1[:, None], e2[:, None] +def calc_braggphi_from_xixjpts(det_cent, det_ei, det_ej, + summit, nin, e1, e2, + xi=None, xj=None, pts=None): + + if pts is None: + xi = xi[None, ...] + xj = xj[None, ...] 
+ if xi.ndim == 1: + summit = summit[:, None] + det_cent = det_cent[:, None] + det_ei, det_ej = det_ei[:, None], det_ej[:, None] + nin, e1, e2 = nin[:, None], e1[:, None], e2[:, None] + else: + summit = summit[:, None, None] + det_cent = det_cent[:, None, None] + det_ei, det_ej = det_ei[:, None, None], det_ej[:, None, None] + nin, e1, e2 = (nin[:, None, None], + e1[:, None, None], e2[:, None, None]) + pts = det_cent + xi*det_ei + xj*det_ej else: + assert pts.ndim == 2 + pts = pts[:, :, None] summit = summit[:, None, None] - det_cent = det_cent[:, None, None] - det_ei, det_ej = det_ei[:, None, None], det_ej[:, None, None] nin, e1, e2 = nin[:, None, None], e1[:, None, None], e2[:, None, None] - - pts = det_cent + xi*det_ei + xj*det_ej - vect = pts - summit vect = vect / np.sqrt(np.sum(vect**2, axis=0))[None, ...] bragg = np.arcsin(np.sum(vect*nin, axis=0)) @@ -194,9 +248,82 @@ def calc_braggphi_from_xixj(xi, xj, det_cent, det_ei, det_ej, phi = np.arctan2(np.sum(vect*e2, axis=0), np.sum(vect*e1, axis=0)) return bragg, phi + def get_lambphifit(lamb, phi, nxi, nxj): lambD = lamb.max()-lamb.min() lambfit = lamb.min() +lambD*np.linspace(0, 1, nxi) phiD = phi.max() - phi.min() phifit = phi.min() + phiD*np.linspace(0, 1, nxj) return lambfit, phifit + + +# ############################################### +# From plasma pts +# ############################################### + +def calc_psidthetaphi_from_pts_lamb(pts, center, rcurve, + bragg, nlamb, npts, + nout, e1, e2, extenthalf, ntheta=None): + + if ntheta is None: + ntheta = 100 + + scaPCem = np.full((nlamb, npts), np.nan) + dtheta = np.full((nlamb, npts, ntheta), np.nan) + psi = np.full((nlamb, npts, ntheta), np.nan) + + # Get to scalar product + PC = center[:, None] - pts + PCnorm2 = np.sum(PC**2, axis=0) + cos2 = np.cos(bragg)**2 + deltaon4 = (rcurve**2*cos2[:, None]**2 + - (rcurve**2*cos2[:, None] + - PCnorm2[None, :]*np.sin(bragg)[:, None]**2)) + + # Get two relevant solutions + ind = deltaon4 >= 0. 
+ cos2 = np.repeat(cos2[:, None], npts, axis=1)[ind] + PCnorm = np.tile(np.sqrt(PCnorm2), (nlamb, 1))[ind] + sol1 = -rcurve*cos2 - np.sqrt(deltaon4[ind]) + sol2 = -rcurve*cos2 + np.sqrt(deltaon4[ind]) + # em is a unit vector and ... + ind1 = (np.abs(sol1) <= PCnorm) & (sol1 >= -rcurve) + ind2 = (np.abs(sol2) <= PCnorm) & (sol2 >= -rcurve) + assert not np.any(ind1 & ind2) + sol1 = sol1[ind1] + sol2 = sol2[ind2] + indn = ind.nonzero() + ind1 = [indn[0][ind1], indn[1][ind1]] + ind2 = [indn[0][ind2], indn[1][ind2]] + scaPCem[ind1[0], ind1[1]] = sol1 + scaPCem[ind2[0], ind2[1]] = sol2 + ind = ~np.isnan(scaPCem) + + # Get equation on PCem + X = np.sum(PC*nout[:, None], axis=0) + Y = np.sum(PC*e1[:, None], axis=0) + Z = np.sum(PC*e2[:, None], axis=0) + + scaPCem = np.repeat(scaPCem[..., None], ntheta, axis=-1) + ind = ~np.isnan(scaPCem) + XYnorm = np.repeat(np.repeat(np.sqrt(X**2 + Y**2)[None, :], + nlamb, axis=0)[..., None], + ntheta, axis=-1)[ind] + Z = np.repeat(np.repeat(Z[None, :], nlamb, axis=0)[..., None], + ntheta, axis=-1)[ind] + angextra = np.repeat( + np.repeat(np.arctan2(Y, X)[None, :], nlamb, axis=0)[..., None], + ntheta, axis=-1)[ind] + dtheta[ind] = np.repeat( + np.repeat(extenthalf[1]*np.linspace(-1, 1, ntheta)[None, :], + npts, axis=0)[None, ...], + nlamb, axis=0)[ind] + + psi[ind] = (np.arccos( + (scaPCem[ind] - Z*np.sin(dtheta[ind]))/(XYnorm*np.cos(dtheta[ind]))) + + angextra) + psi[ind] = np.arctan2(np.sin(psi[ind]), np.cos(psi[ind])) + indnan = (~ind) | (np.abs(psi) > extenthalf[0]) + psi[indnan] = np.nan + dtheta[indnan] = np.nan + return dtheta, psi, indnan diff --git a/tofu/geom/_core.py b/tofu/geom/_core.py index 123c9fd49..fc4c2c8bf 100644 --- a/tofu/geom/_core.py +++ b/tofu/geom/_core.py @@ -436,19 +436,23 @@ def _checkformat_inputs_dgeom( Poly = Poly.T # Elimininate any double identical point - ind = np.sum(np.diff(Poly, axis=1) ** 2, axis=0) < 1.0e-12 + ind = np.sum(np.diff(np.concatenate((Poly, Poly[:, 0:1]), axis=1), + axis=1) ** 2, axis=0) 
< 1.0e-12 if np.any(ind): npts = Poly.shape[1] + Poly = Poly[:, ~ind] msg = ( "%s instance: double identical points in Poly\n" % cls.__name__ ) msg += " => %s points removed\n" % ind.sum() msg += " => Poly goes from %s to %s points" % ( npts, - npts - ind.sum(), + Poly.shape[1], ) warnings.warn(msg) - Poly = Poly[:, ~ind] + ind = np.sum(np.diff(np.concatenate((Poly, Poly[:, 0:1]), axis=1), + axis=1) ** 2, axis=0) < 1.0e-12 + assert not np.any(ind), ind lC = [Lim is None, pos is None] if not any(lC): @@ -885,6 +889,51 @@ def dmisc(self): # public methods ########### + def get_summary( + self, + sep=" ", + line="-", + just="l", + table_sep=None, + verb=True, + return_=False, + ): + """ Summary description of the object content """ + + # ----------------------- + # Build detailed view + col0 = [ + "class", + "Name", + "SaveName", + "nP", + "noccur", + "mobile", + "color", + ] + ar0 = [ + self._Id.Cls, + self._Id.Name, + self._Id.SaveName, + str(self._dgeom["nP"]), + str(self._dgeom["noccur"]), + str(self._dgeom["mobile"]), + str(self._dmisc["color"])] + + return self._get_summary( + [ar0], + [col0], + sep=sep, + line=line, + table_sep=table_sep, + verb=verb, + return_=return_, + ) + + ########### + # public methods + ########### + def set_color(self, col): self._set_color(col) @@ -2154,8 +2203,8 @@ def _get_largs_dsino(): def _checkformat_inputs_Struct(self, struct, err=True): assert issubclass(struct.__class__, Struct) - C0 = struct.Id.Exp==self.Id.Exp - C1 = struct.Id.Type==self.Id.Type + C0 = struct.Id.Exp == self.Id.Exp + C1 = struct.Id.Type == self.Id.Type C2 = struct.Id.Name.isidentifier() C2 = C2 and '_' not in struct.Id.Name msgi = None @@ -3085,7 +3134,6 @@ def plot_phithetaproj_dist(self, refpt=None, ntheta=None, nphi=None, tit=tit, wintit=wintit, invertx=invertx, draw=draw) - def isInside(self, pts, In="(X,Y,Z)", log="any"): """ Return a 2D array of bool @@ -3679,7 +3727,7 @@ def _checkformat_inputs_dgeom(self, dgeom=None): if (isinstance(dgeom, 
self._dcases[k]['type']) and all([kk in dgeom.keys() # noqa for kk in self._dcases[k]['lk']]))] - if not len(lC)==1: + if not len(lC) == 1: lstr = [v['lk'] for v in self._dcases.values()] msg = "Arg dgeom must be either:\n" msg += " - dict with keys:\n" @@ -4092,10 +4140,16 @@ def _complete_dX12(self, dgeom): k = np.sum(DDb*(u - np.sqrt(sca2)*dgeom['u'][:, 1:]), axis=0) k = k / (1.0-sca2) - if k[0] > 0 and np.allclose(k, k[0], atol=1.e-3, rtol=1.e-6): + if k[0] > 0 and np.allclose(k, k[0], atol=1.e-3, + rtol=1.e-6): pinhole = dgeom['D'][:, 0] + k[0]*u[:, 0] dgeom['pinhole'] = pinhole + if np.any(np.isnan(dgeom['D'])): + msg = ("Some LOS have nan as starting point !\n" + + "The geometry may not be provided !") + raise Exception(msg) + # Test if all D are on a common plane or line va = dgeom["D"] - dgeom["D"][:, 0:1] @@ -4164,9 +4218,6 @@ def _complete_dX12(self, dgeom): dgeom["dX12"] = {} dgeom["dX12"].update({"nIn": nIn, "e1": e1, "e2": e2}) - if not self._is2D(): - return dgeom - # Test binning if dgeom["pinhole"] is not None: k1ref = -np.sum( @@ -4982,8 +5033,8 @@ def D(self): @property def u(self): - if (self._dgeom['u'] is not None - and self._dgeom['u'].shape[1] == self._dgeom['nRays']): + if self._dgeom['u'] is not None \ + and self._dgeom['u'].shape[1] == self._dgeom['nRays']: u = self._dgeom['u'] elif self.isPinhole: u = self._dgeom['pinhole'][:, None] - self._dgeom['D'] @@ -5076,7 +5127,7 @@ def _isLOS(cls): def _check_indch(self, ind, out=int): if ind is not None: ind = np.asarray(ind) - assert ind.ndim==1 + assert ind.ndim == 1 assert ind.dtype in [np.int64, np.bool_, np.long] if ind.dtype == np.bool_: assert ind.size == self.nRays @@ -5523,17 +5574,17 @@ def _kInOut_Isoflux_inputs_usr(self, lPoly, lVIn=None): if type(lPoly) is list: for ii in range(nPoly): # Check closed and anti-clockwise - if _GG.Poly_isClockwise(lPoly[ii]): - lPoly[ii] = lPoly[ii][:, ::-1] if not np.allclose(lPoly[ii][:, 0], lPoly[ii][:, -1]): lPoly[ii] = np.concatenate( (lPoly[ii], 
lPoly[ii][:, 0:1]), axis=-1 ) + try: + if _GG.Poly_isClockwise(lPoly[ii]): + lPoly[ii] = lPoly[ii][:, ::-1] + except Exception as excp: + print("For structure ", ii, " : ", excp) else: - for ii in range(nPoly): - # Check closed and anti-clockwise - if _GG.Poly_isClockwise(lPoly[ii]): - lPoly[ii] = lPoly[ii][:, ::-1] + # Check closed and anti-clockwise d = np.sum((lPoly[:, :, 0]-lPoly[:, :, -1])**2, axis=1) if np.allclose(d, 0.): pass @@ -5542,6 +5593,12 @@ def _kInOut_Isoflux_inputs_usr(self, lPoly, lVIn=None): else: msg = "All poly in lPoly should be closed or all non-closed!" raise Exception(msg) + for ii in range(nPoly): + try: + if _GG.Poly_isClockwise(lPoly[ii]): + lPoly[ii] = lPoly[ii][:, ::-1] + except Exception as excp: + print("For structure ", ii, " : ", excp) # Check lVIn if lVIn is None: @@ -5612,14 +5669,14 @@ def calc_kInkOut_Isoflux(self, lPoly, lVIn=None, Lim=None, # Compute intersections assert(self._method in ['ref', 'optimized']) - if self._method=='ref': + if self._method == 'ref': for ii in range(0, nPoly): largs, dkwd = self._kInOut_Isoflux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) out = _GG.SLOW_LOS_Calc_PInOut_VesStruct(*largs, **dkwd) # PIn, POut, kin, kout, VperpIn, vperp, IIn, indout = out[] kIn[ii, :], kOut[ii, :] = out[2], out[3] - elif self._method=="optimized": + elif self._method == "optimized": for ii in range(0, nPoly): largs, dkwd = self._kInOut_Isoflux_inputs([lPoly[ii]], lVIn=[lVIn[ii]]) @@ -5921,6 +5978,9 @@ def _calc_signal_postformat( if Brightness is False: if dataname is None: dataname = r"LOS-integral x Etendue" + if E is None or np.all(np.isnan(E)): + msg = "Cannot use etendue, it was not set properly !" 
+ raise Exception(msg) if t is None or len(t) == 1 or E.size == 1: sig = sig * E else: @@ -5980,7 +6040,7 @@ def calc_signal( reflections=True, coefs=None, ind=None, - out=object, + returnas=object, plot=True, dataname=None, fs=None, @@ -6041,7 +6101,7 @@ def calc_signal( # Format input indok, Ds, us, DL, E = self._calc_signal_preformat( - ind=ind, DL=DL, out=out, Brightness=Brightness + ind=ind, DL=DL, out=returnas, Brightness=Brightness ) if Ds is None: @@ -6135,19 +6195,9 @@ def calc_signal( # and interferometer) val = func(pts, t=t, vect=vect) # Integrate - if val.ndim == 2: - sig = np.full((val.shape[0], self.nRays), np.nan) - else: - sig = np.full((1, self.nRays), np.nan) - indpts = np.r_[0, indpts, pts.shape[1]] - for ii in range(0, self.nRays): - sig[:, ii] = ( - np.nansum( - val[:, indpts[ii]:indpts[ii + 1]], - axis=-1 - ) - * reseff[ii] - ) + sig = np.add.reduceat(val, np.r_[0, indpts], + axis=-1)*reseff[None, :] + # Format output return self._calc_signal_postformat( sig, @@ -6157,7 +6207,7 @@ def calc_signal( E=E, units=units, plot=plot, - out=out, + out=returnas, fs=fs, dmargin=dmargin, wintit=wintit, @@ -6170,7 +6220,7 @@ def calc_signal_from_Plasma2D( self, plasma2d, t=None, - newcalc=False, + newcalc=True, quant=None, ref1d=None, ref2d=None, @@ -6191,7 +6241,7 @@ def calc_signal_from_Plasma2D( reflections=True, coefs=None, ind=None, - out=object, + returnas=object, plot=True, dataname=None, fs=None, @@ -6205,7 +6255,7 @@ def calc_signal_from_Plasma2D( # Format input indok, Ds, us, DL, E = self._calc_signal_preformat( - ind=ind, out=out, t=t, Brightness=Brightness + ind=ind, out=returnas, t=t, Brightness=Brightness ) if Ds is None: @@ -6215,16 +6265,20 @@ def calc_signal_from_Plasma2D( if newcalc: # Get time vector - if t is None: + lc = [t is None, type(t) is str, type(t) is np.ndarray] + assert any(lc) + if lc[0]: out = plasma2d._checkformat_qr12RPZ( - quant=quant, - ref1d=ref1d, - ref2d=ref2d, - q2dR=q2dR, - q2dPhi=q2dPhi, - q2dZ=q2dZ, + 
quant=quant, + ref1d=ref1d, + ref2d=ref2d, + q2dR=q2dR, + q2dPhi=q2dPhi, + q2dZ=q2dZ, ) t = plasma2d._get_tcom(*out[:4])[0] + elif lc[1]: + t = plasma2d._ddata[t]['data'] else: t = np.atleast_1d(t).ravel() @@ -6322,7 +6376,9 @@ def funcbis(*args, **kwdargs): nbrep = np.r_[ indpts[0], np.diff(indpts), pts.shape[1] - indpts[-1] ] - vect = np.repeat(self.u, nbrep, axis=1) + vect = -np.repeat(self.u, nbrep, axis=1) + if fill_value is None: + fill_value = 0. # Get quantity values at ptsRZ # This is the slowest step (~3.8 s with res=0.02 @@ -6343,19 +6399,10 @@ def funcbis(*args, **kwdargs): fill_value=fill_value, ) - # Integrate - if val.ndim == 2: - sig = np.full((val.shape[0], self.nRays), np.nan) - else: - sig = np.full((1, self.nRays), np.nan) - - indpts = np.r_[0, indpts, pts.shape[1]] - for ii in range(0, self.nRays): - sig[:, ii] = ( - np.nansum(val[:, indpts[ii]:indpts[ii + 1]], - axis=-1) - * reseff[ii] - ) + # Integrate using ufunc reduceat for speed + # (cf. https://stackoverflow.com/questions/59079141) + sig = np.add.reduceat(val, np.r_[0, indpts], + axis=-1)*reseff[None, :] # Format output # this is the secod slowest step (~0.75 s) @@ -6367,7 +6414,7 @@ def funcbis(*args, **kwdargs): E=E, units=units, plot=plot, - out=out, + out=returnas, fs=fs, dmargin=dmargin, wintit=wintit, @@ -6728,21 +6775,29 @@ def get_summary( kout = self._dgeom["kOut"] indout = self._dgeom["indout"] lS = self._dconfig["Config"].lStruct + angles = np.arccos(-np.sum(self.u*self.dgeom['vperp'], axis=0)) # ar0 - col0 = ["nb. los", "av. length", "nb. touch"] + col0 = ["nb. los", "av. length", "min length", "max length", + "nb. touch", "av. 
angle", "min angle", "max angle"] ar0 = [ self.nRays, "{:.3f}".format(np.nanmean(kout)), + "{:.3f}".format(np.nanmin(kout)), + "{:.3f}".format(np.nanmax(kout)), np.unique(indout[0, :]).size, + "{:.2f}".format(np.nanmean(angles)), + "{:.2f}".format(np.nanmin(angles)), + "{:.2f}".format(np.nanmax(angles)), ] # ar1 - col1 = ["los index", "length", "touch"] + col1 = ["los index", "length", "touch", "angle (rad)"] ar1 = [ np.arange(0, self.nRays), np.around(kout, decimals=3).astype("U"), ["%s_%s" % (lS[ii].Id.Cls, lS[ii].Id.Name) for ii in indout[0, :]], + np.around(angles, decimals=2).astype('U') ] for k, v in self._dchans.items(): @@ -6838,6 +6893,47 @@ def save_to_imas( class CamLOS2D(Rays): + def get_summary( + self, + sep=" ", + line="-", + just="l", + table_sep=None, + verb=True, + return_=False, + ): + + # Prepare + kout = self._dgeom["kOut"] + indout = self._dgeom["indout"] + # lS = self._dconfig["Config"].lStruct + angles = np.arccos(-np.sum(self.u*self.dgeom['vperp'], axis=0)) + + # ar0 + col0 = ["nb. los", "av. length", "min length", "max length", + "nb. touch", "av. 
angle", "min angle", "max angle"] + ar0 = [ + self.nRays, + "{:.3f}".format(np.nanmean(kout)), + "{:.3f}".format(np.nanmin(kout)), + "{:.3f}".format(np.nanmax(kout)), + np.unique(indout[0, :]).size, + "{:.2f}".format(np.nanmean(angles)), + "{:.2f}".format(np.nanmin(angles)), + "{:.2f}".format(np.nanmax(angles)), + ] + + # call base method + return self._get_summary( + [ar0], + [col0], + sep=sep, + line=line, + table_sep=table_sep, + verb=verb, + return_=return_, + ) + def _isImage(self): return self._dgeom["isImage"] diff --git a/tofu/geom/_core_optics.py b/tofu/geom/_core_optics.py index e58138373..8762194d9 100644 --- a/tofu/geom/_core_optics.py +++ b/tofu/geom/_core_optics.py @@ -13,6 +13,7 @@ # Common import numpy as np +import scipy.interpolate as scpinterp import datetime as dtm import matplotlib.pyplot as plt import matplotlib as mpl @@ -292,17 +293,17 @@ def _checkformat_dmat(cls, dmat=None): assert all([ss in lkok for ss in dmat.keys()]) for kk in cls._ddef['dmat'].keys(): dmat[kk] = dmat.get(kk, cls._ddef['dmat'][kk]) - if dmat['d'] is not None: + if dmat.get('d') is not None: dmat['d'] = float(dmat['d']) - if dmat['formula'] is not None: + if dmat.get('formula') is not None: assert isinstance(dmat['formula'], str) - if dmat['density'] is not None: + if dmat.get('density') is not None: dmat['density'] = float(dmat['density']) - if dmat['lengths'] is not None: + if dmat.get('lengths') is not None: dmat['lengths'] = np.atleast_1d(dmat['lengths']).ravel() - if dmat['angles'] is not None: + if dmat.get('angles') is not None: dmat['angles'] = np.atleast_1d(dmat['angles']).ravel() - if dmat['cut'] is not None: + if dmat.get('cut') is not None: dmat['cut'] = np.atleast_1d(dmat['cut']).ravel().astype(int) assert dmat['cut'].size <= 4 return dmat @@ -312,12 +313,69 @@ def _checkformat_dbragg(cls, dbragg=None): if dbragg is None: return assert isinstance(dbragg, dict) - lkok = ['angle'] + lkok = cls._get_keys_dbragg() assert all([isinstance(ss, str) for ss in 
dbragg.keys()]) assert all([ss in lkok for ss in dbragg.keys()]) for kk in cls._ddef['dbragg'].keys(): dbragg[kk] = dbragg.get(kk, cls._ddef['dbragg'][kk]) + if dbragg.get('rockingcurve') is not None: + assert isinstance(dbragg['rockingcurve'], dict) + drock = dbragg['rockingcurve'] + lkeyok = ['sigam', 'deltad', 'Rmax', 'dangle', 'lamb', 'value', + 'type', 'source'] + lkout = [kk for kk in drock.keys() if kk not in lkeyok] + if len(lkout) > 0: + msg = ("Unauthorized keys in dbrag['rockingcurve']:\n" + + "\t-" + "\n\t-".join(lkout)) + raise Exception(msg) + try: + if drock.get('sigma') is not None: + dbragg['rockingcurve']['sigma'] = float(drock['sigma']) + dbragg['rockingcurve']['deltad'] = float( + drock.get('deltad', 0.)) + dbragg['rockingcurve']['Rmax'] = float( + drock.get('Rmax', 1.)) + dbragg['rockingcurve']['type'] = 'lorentz-log' + elif drock.get('dangle') is not None: + c2d = (drock.get('lamb') is not None + and drock.get('value').ndim == 2) + if c2d: + if drock['value'].shape != (drock['dangle'].size, + drock['lamb'].size): + msg = ("Tabulated 2d rocking curve should be:\n" + + "\tshape = (dangle.size, lamb.size)") + raise Exception(msg) + dbragg['rockingcurve']['dangle'] = np.r_[ + drock['dangle']] + dbragg['rockingcurve']['lamb'] = np.r_[drock['lamb']] + dbragg['rockingcurve']['value'] = drock['value'] + dbragg['rockingcurve']['type'] = 'tabulated-2d' + else: + if drock.get('lamb') is None: + msg = ("Please also specify the lamb for which " + + "the rocking curve was tabulated!") + raise Exception(msg) + dbragg['rockingcurve']['lamb'] = float(drock['lamb']) + dbragg['rockingcurve']['dangle'] = np.r_[ + drock['dangle']] + dbragg['rockingcurve']['value'] = np.r_[drock['value']] + dbragg['rockingcurve']['type'] = 'tabulated-1d' + if drock.get('source') is None: + msg = "Unknonw source for the tabulated rocking curve!" 
+ warnings.warn(msg) + dbragg['rockingcurve']['source'] = drock.get('source') + except Exception as err: + msg = ("Provide the rocking curve as a dict with either:\n" + + "\t- parameters of a lorentzian in log10:\n" + + "\t\t{'sigma': float,\n" + + "\t\t 'deltad': float,\n" + + "\t\t 'Rmax': float}\n" + + "\t- tabulated (dangle, value), with source (url...)" + + "\t\t{'dangle': np.ndarray,\n" + + "\t\t 'value': np.ndarray,\n" + + "\t\t 'source': str}") + raise Exception(msg) return dbragg @classmethod @@ -404,7 +462,7 @@ def set_dmat(self, dmat=None): dmat = self._checkformat_dmat(dmat) self._dmat = dmat - def _set_dbragg(self, dbragg=None): + def set_dbragg(self, dbragg=None): dbragg = self._checkformat_dbragg(dbragg) self._dbragg = dbragg @@ -570,6 +628,13 @@ def summit(self): def center(self): return self._dgeom['center'] + @property + def rockingcurve(self): + if self._dbragg.get('rockingcurve') is not None: + if self._dbragg['rockingcurve'].get('type') is not None: + return self._dbragg['rockingcurve'] + raise Exception("rockingcurve was not set!") + # ----------------- # methods for color # ----------------- @@ -591,11 +656,17 @@ def get_summary(self, sep=' ', line='-', just='l', # ----------------------- # Build material col0 = ['formula', 'symmetry', 'cut', 'density', - 'd (A)', 'bragg(%s A) (deg)'%str(self._DEFLAMB)] + 'd (A)', 'bragg({:7.4} A) (deg)'.format(self._DEFLAMB*1e10), + 'rocking curve'] ar0 = [self._dmat['formula'], self._dmat['symmetry'], str(self._dmat['cut']), str(self._dmat['density']), '{0:5.3f}'.format(self._dmat['d']*1.e10), str(self.get_bragg_from_lamb(self._DEFLAMB)[0]*180./np.pi)] + try: + ar0.append(self.rockingcurve['type']) + except Exception as err: + ar0.append('None') + # ----------------------- # Build geometry @@ -692,6 +763,54 @@ def move(self, kind=None, **kwdargs): if kind == 'rotate': self._rotate(**kwdargs) + # ----------------- + # methods for rocking curve + # ----------------- + + def get_rockingcurve_func(self, 
lamb=None, n=None): + drock = self.rockingcurve + if drock['type'] == 'tabulated-1d': + if lamb is not None and lamb != drock['lamb']: + msg = ("rocking curve was tabulated only for:\n" + + "\tlamb = {} m\n".format(lamb) + + " => Please let lamb=None") + raise Exception(msg) + bragg = self._checkformat_bragglamb(lamb=drock['lamb'], n=n) + func = scpinterp.interp1d(drock['dangle']+bragg, drock['value'], + kind='linear', bounds_error=False, + fill_value=0, assume_sorted=True) + + elif drock['type'] == 'tabulated-2d': + lmin, lmax = drock['lamb'].min(), drock['lamb'].max() + if lamb is None or lamb < lmin or lamb > lmax: + msg = ("rocking curve was tabulated only in interval:\n" + + "\tlamb in [{}; {}] m\n".format(lmin, lmax) + + " => Please set lamb accordingly") + raise Exception(msg) + bragg = self._checkformat_bragglamb(lamb=lamb, n=n) + + def func(angle, lamb=lamb, bragg=bragg, drock=drock): + return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'], + drock['value'], kind='linear', + bounds_error=False, fill_value=0, + assume_sorted=True)(angle, lamb) + + else: + def func(angle, d=d, delta_bragg=delta_bragg, + Rmax=drock['Rmax'], sigma=drock['sigma']): + core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2) + if Rmax is None: + return core/(sigma*np.pi) + else: + return Rmax*core + return func + + def plot_rockingcurve(self, lamb=None, + fs=None, ax=None): + drock = self.rockingcurve + func = self.get_rockingcurve_func(bragg=bragg, n=n) + return _plot.CrystalBragg_plot_rockingcurve(func, fs=fs, ax=ax) + # ----------------- # methods for surface and contour sampling # ----------------- @@ -729,6 +848,9 @@ def sample_outline_Rays(self, res=None): def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None): lc = [lamb is not None, bragg is not None] + if not any(lc): + lamb = self._DEFLAMB + lc[0] = True assert np.sum(lc) == 1, "Provide lamb xor bragg!" 
if lc[0]: bragg = self.get_bragg_from_lamb(np.atleast_1d(lamb), @@ -821,7 +943,8 @@ def get_Rays_envelop(self, # ----------------- def plot(self, lax=None, proj=None, res=None, element=None, - color=None, + color=None, det_cent=None, + det_nout=None, det_ei=None, det_ej=None, dP=None, dI=None, dBs=None, dBv=None, dVect=None, dIHor=None, dBsHor=None, dBvHor=None, dleg=None, @@ -836,6 +959,19 @@ def plot(self, lax=None, proj=None, res=None, element=None, # methods for generic first-approx # ----------------- + def get_phi_from_magaxis_summit(self, r, z, lamb=None, bragg=None, n=None): + # Check / format input + r = np.atleast_1d(r) + z = np.atleast_1d(z) + assert r.shape == z.shape + bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) + + # Compute phi + + return phi + + + def get_bragg_from_lamb(self, lamb, n=None): """ Braggs' law: n*lamb = 2dsin(bragg) """ if self._dmat['d'] is None: @@ -854,27 +990,66 @@ def get_lamb_from_bragg(self, bragg, n=None): return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg), self._dmat['d'], n=n) - def get_approx_detector_frame(self, bragg=None, lamb=None, - rcurve=None, n=None, plot=False): - """ See notes for details on notations """ + def get_detector_approx(self, bragg=None, lamb=None, + rcurve=None, n=None, + ddist=None, di=None, dj=None, + dtheta=None, dpsi=None, tilt=None, + tangent_to_rowland=None, plot=False): + """ Return approximate ideal detector geometry + + Assumes infinitesimal and ideal crystal + Assumes detector center tangential to Rowland circle + Assumes detector center matching lamb (m) / bragg (rad) + + Detector described by center position, and (nout, ei, ej) unit vectors + By convention, nout = np.cross(ei, ej) + Vectors (ei, ej) define an orthogonal frame in the detector's plane + + Return: + ------- + det_cent: np.ndarray + (3,) array of (x, y, z) coordinates of detector center + det_nout: np.ndarray + (3,) array of (x, y, z) coordinates of unit vector + perpendicular to detector' surface 
+ oriented towards crystal + det_ei: np.ndarray + (3,) array of (x, y, z) coordinates of unit vector + defining first coordinate in detector's plane + det_ej: np.ndarray + (3,) array of (x, y, z) coordinates of unit vector + defining second coordinate in detector's plane + """ # Check / format inputs if rcurve is None: rcurve = self._dgeom['rcurve'] bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) + if np.all(np.isnan(bragg)): + msg = ("There is no available bragg angle!\n" + + " => Check the vlue of self.dmat['d'] vs lamb") + raise Exception(msg) + + lf = ['summit', 'nout', 'e1', 'e2'] + lc = [rcurve is None] + [self._dgeom[kk] is None for kk in lf] + if any(lc): + msg = ("Some missing fields in dgeom for computation:" + + "\n\t-" + "\n\t-".join(['rcurve'] + lf)) + raise Exception(msg) # Compute crystal-centered parameters in (nout, e1, e2) func = _comp_optics.get_approx_detector_rel - det_dist, det_nout_rel, det_ei_rel = func(rcurve, bragg) + (det_dist, n_crystdet_rel, + det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel( + rcurve, bragg, tangent_to_rowland=tangent_to_rowland) # Deduce absolute position in (x, y, z) - func = _comp_optics.get_det_abs_from_rel - det_cent, det_nout, det_ei, det_ej = func(det_dist, - det_nout_rel, det_ei_rel, - self._dgeom['summit'], - self._dgeom['nout'], - self._dgeom['e1'], - self._dgeom['e2']) + det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel( + det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel, + self._dgeom['summit'], + self._dgeom['nout'], self._dgeom['e1'], self._dgeom['e2'], + ddist=ddist, di=di, dj=dj, + dtheta=dtheta, dpsi=dpsi, tilt=tilt) if plot: dax = self.plot() @@ -891,6 +1066,31 @@ def get_approx_detector_frame(self, bragg=None, lamb=None, return det_cent, det_nout, det_ei, det_ej def get_local_noute1e2(self, theta, psi): + """ Return (nout, e1, e2) associated to pts on the crystal's surface + + All points on the spherical crystal's surface are identified + by 
(theta, psi) coordinates, where: + - theta = np.pi/2 for the center + - psi = 0 for the center + They are the spherical coordinates from a sphere centered on the + crystal's center of curvature. + + Return the pts themselves and the 3 perpendicular unti vectors + (nout, e1, e2), where nout is towards the outside of the sphere and + nout = np.cross(e1, e2) + + Return: + ------- + summit: np.ndarray + (3,) array of (x, y, z) coordinates of the points on the surface + nout: np.ndarray + (3,) array of (x, y, z) coordinates of outward unit vector + e1: np.ndarray + (3,) array of (x, y, z) coordinates of first unit vector + e2: np.ndarray + (3,) array of (x, y, z) coordinates of second unit vector + + """ if np.allclose([theta, psi], [np.pi/2., 0.]): summit = self._dgeom['summit'] nout = self._dgeom['nout'] @@ -946,7 +1146,7 @@ def calc_xixj_from_phibragg(self, phi=None, det_ei is None, det_ej is None] assert all(lc) or not any(lc) if all(lc): - func = self.get_approx_detector_frame + func = self.get_detector_approx det_cent, det_nout, det_ei, det_ej = func(lamb=self._DEFLAMB) # Get local summit nout, e1, e2 if non-centered @@ -967,18 +1167,45 @@ def calc_xixj_from_phibragg(self, phi=None, ax = func(bragg, xi, xj, data, ax) return xi, xj + @staticmethod + def _checkformat_pts(pts): + pts = np.atleast_1d(pts) + if pts.ndim == 1: + pts = pts.reshape((3, 1)) + if 3 not in pts.shape or pts.ndim != 2: + msg = "pts must be a (3, npts) array of (X, Y, Z) coordinates!" 
+ raise Exception(msg) + if pts.shape[0] != 3: + pts = pts.T + return pts + @staticmethod def _checkformat_xixj(xi, xj): xi = np.atleast_1d(xi) xj = np.atleast_1d(xj) + if xi.shape == xj.shape: return xi, xj, (xi, xj) else: return xi, xj, np.meshgrid(xi, xj) + @staticmethod + def _checkformat_psidtheta(psi=None, dtheta=None, + psi_def=0., dtheta_def=0.): + if psi is None: + psi = psi_def + if dtheta is None: + dtheta = dtheta_def + psi = np.r_[psi] + dtheta = np.r_[dtheta] + if psi.size != dtheta.size: + msg = "psi and dtheta must be 1d arrays fo same size!" + raise Exception(msg) + + def calc_phibragg_from_xixj(self, xi, xj, n=None, det_cent=None, det_ei=None, det_ej=None, - theta=None, psi=None, + dtheta=None, psi=None, plot=True, ax=None, **kwdargs): # Check / format inputs @@ -987,38 +1214,309 @@ def calc_phibragg_from_xixj(self, xi, xj, n=None, lc = [det_cent is None, det_ei is None, det_ej is None] assert all(lc) or not any(lc) if all(lc): - func = self.get_approx_detector_frame - det_cent, _, det_ei, det_ej = func(lamb=self._DEFLAMB) + det_cent, _, det_ei, det_ej = self.get_detector_approx( + lamb=self._DEFLAMB) # Get local summit nout, e1, e2 if non-centered - if theta is None: - theta = np.pi/2. - if psi is None: - psi = 0. 
- summit, nout, e1, e2 = self.get_local_noute1e2(theta, psi) - - # Compute - func = _comp_optics.calc_braggphi_from_xixj - bragg, phi = func(xii, xjj, det_cent, det_ei, det_ej, - summit, -nout, e1, e2) + bragg, phi = self.calc_phibragg_from_pts(pts) if plot != False: - func = _plot_optics.CrystalBragg_plot_braggangle_from_xixj - lax = func(xi=xii, xj=xjj, - ax=ax, plot=plot, - bragg=bragg * 180./np.pi, - angle=phi * 180./np.pi, - braggunits='deg', angunits='deg', **kwdargs) + lax = _plot_optics.CrystalBragg_plot_braggangle_from_xixj( + xi=xii, xj=xjj, + ax=ax, plot=plot, + bragg=bragg * 180./np.pi, + angle=phi * 180./np.pi, + braggunits='deg', angunits='deg', **kwdargs) return bragg, phi - def plot_johannerror(self, lamb=None): - raise NotImplementedError + # DEPRECATED ??? + def calc_phibragg_from_pts_on_summit(self, pts, n=None): + """ Return the bragg angle and phi of pts from crystal summit - def plot_data_vs_lambphi(self, xi=None, xj=None, data=None, - det_cent=None, det_ei=None, det_ej=None, - theta=None, psi=None, n=None, - plot=True, fs=None, - cmap=None, vmin=None, vmax=None): + The pts are provided as a (x, y, z) coordinates array + The bragg angle and phi are computed from the crystal's summit + + """ + # Check / format inputs + pts = self._checkformat_pts(pts) + + # Compute + vect = pts - self._dgeom['summit'][:, None] + vect = vect / np.sqrt(np.sum(vect**2, axis=0)) + bragg = np.pi/2 - np.arccos(np.sum(vect*self._dgeom['nin'][:, None], + axis=0)) + v1 = np.sum(vect*self._dgeom['e1'][:, None], axis=0) + v2 = np.sum(vect*self._dgeom['e2'][:, None], axis=0) + phi = np.arctan2(v2, v1) + return bragg, phi + + def plot_line_tracing_on_det(self, lamb=None, n=None, + xi_bounds=None, xj_bounds=None, nphi=None, + det_cent=None, det_nout=None, + det_ei=None, det_ej=None, + johann=False, lpsi=None, ltheta=None, + rocking=False, fs=None, dmargin=None, + wintit=None, tit=None): + """ Visualize the de-focusing by ray-tracing of chosen lamb + """ + # Check / format 
inputs + if lamb is None: + lamb = self._DEFLAMB + lamb = np.atleast_1d(lamb).ravel() + nlamb = lamb.size + + det = np.array([[xi_bounds[0], xi_bounds[1], xi_bounds[1], + xi_bounds[0], xi_bounds[0]], + [xj_bounds[0], xj_bounds[0], xj_bounds[1], + xj_bounds[1], xj_bounds[0]]]) + + # Compute lamb / phi + _, phi = self.calc_phibragg_from_xixj( + det[0, :], det[1, :], n=n, + det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, + theta=None, psi=None, plot=False) + phimin, phimax = np.nanmin(phi), np.nanmax(phi) + phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10 + del phi + + # Get reference ray-tracing + if nphi is None: + nphi = 300 + phi = np.linspace(phimin, phimax, nphi) + bragg = self._checkformat_bragglamb(lamb=lamb, n=n) + + xi = np.full((nlamb, nphi), np.nan) + xj = np.full((nlamb, nphi), np.nan) + for ll in range(nlamb): + xi[ll, :], xj[ll, :] = self.calc_xixj_from_phibragg( + bragg=bragg[ll], phi=phi, n=n, + det_cent=det_cent, det_nout=det_nout, + det_ei=det_ei, det_ej=det_ej, plot=False) + + # Get johann-error raytracing (multiple positions on crystal) + xi_er, xj_er = None, None + if johann and not rocking: + if lpsi is None or ltheta is None: + lpsi = np.linspace(-1., 1., 15) + ltheta = np.linspace(-1., 1., 15) + lpsi, ltheta = np.meshgrid(lpsi, ltheta) + lpsi = lpsi.ravel() + ltheta = ltheta.ravel() + lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi] + ltheta = np.pi/2 + self._dgeom['extenthalf'][1]*np.r_[ltheta] + npsi = lpsi.size + assert npsi == ltheta.size + + xi_er = np.full((nlamb, npsi*nphi), np.nan) + xj_er = np.full((nlamb, npsi*nphi), np.nan) + for l in range(nlamb): + for ii in range(npsi): + i0 = np.arange(ii*nphi, (ii+1)*nphi) + xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_phibragg( + phi=phi, bragg=bragg[l], lamb=None, n=n, + theta=ltheta[ii], psi=lpsi[ii], + det_cent=det_cent, det_nout=det_nout, + det_ei=det_ei, det_ej=det_ej, plot=False) + + # Get rocking curve error + if rocking: + pass + + # Plot + ax = 
_plot_optics.CrystalBragg_plot_line_tracing_on_det( + lamb, xi, xj, xi_er, xj_er, det=det, + johann=johann, rocking=rocking, + fs=fs, dmargin=dmargin, wintit=wintit, tit=tit) + + def calc_johannerror(self, xi=None, xj=None, err=None, + det_cent=None, det_ei=None, det_ej=None, n=None, + lpsi=None, ltheta=None, + plot=True, fs=None, cmap=None, + vmin=None, vmax=None, tit=None, wintit=None): + """ Plot the johann error + + The johann error is the error (scattering) induced by defocalization + due to finite crystal dimensions + There is a johann error on wavelength (lamb => loss of spectral + resolution) and on directionality (phi) + If provided, lpsi and ltheta are taken as normalized variations with + respect to the crystal summit and to its extenthalf. + Typical values are: + - lpsi = [-1, 1, 1, -1] + - ltheta = [-1, -1, 1, 1] + They must have the same len() + + """ + + # Check / format inputs + xi, xj, (xii, xjj) = self._checkformat_xixj(xi, xj) + nxi = xi.size if xi is not None else np.unique(xii).size + nxj = xj.size if xj is not None else np.unique(xjj).size + + # Compute lamb / phi + bragg, phi = self.calc_phibragg_from_xixj( + xii, xjj, n=n, + det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, + theta=None, psi=None, plot=False) + assert bragg.shape == phi.shape + lamb = self.get_lamb_from_bragg(bragg, n=n) + + if lpsi is None: + lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1] + lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi] + if ltheta is None: + ltheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.] + ltheta = np.pi/2 + self._dgeom['extenthalf'][1]*np.r_[ltheta] + npsi = lpsi.size + assert npsi == ltheta.size + lamberr = np.full(tuple(np.r_[npsi, lamb.shape]), np.nan) + phierr = np.full(lamberr.shape, np.nan) + for ii in range(npsi): + bragg, phierr[ii, ...] = self.calc_phibragg_from_xixj( + xii, xjj, n=n, + det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, + theta=ltheta[ii], psi=lpsi[ii], plot=False) + lamberr[ii, ...] 
= self.get_lamb_from_bragg(bragg, n=n) + err_lamb = np.nanmax(np.abs(lamb[None, ...] - lamberr), axis=0) + err_phi = np.nanmax(np.abs(phi[None, ...] - phierr), axis=0) + if plot is True: + ax = _plot_optics.CrystalBragg_plot_johannerror( + xi, xj, lamb, phi, err_lamb, err_phi, err=err, + cmap=cmap, vmin=vmin, vmax=vmax, fs=fs, tit=tit, wintit=wintit) + return err_lamb, err_phi + + def calc_phibragg_from_pts(self, pts, dtheta=None, dpsi=None): + + # Check / format pts + pts = self._checkformat_pts(pts) + + # Get local summit nout, e1, e2 if non-centered + psi, dtheta = self._checkformat_psidtheta(psi=psi, dtheta=dtheta) + summit, nout, e1, e2 = self.get_local_noute1e2(dtheta, psi) + + # Compute + bragg, phi = _comp_optics.calc_braggphi_from_xixjpts( + det_cent, det_ei, det_ej, + summit, -nout, e1, e2, pts=pts) + return phi, bragg + + def get_lamb_avail_from_pts(self, pts): + pass + + def calc_thetapsi_from_lambpts(self, pts=None, lamb=None, n=None, + ntheta=None): + + # Check / Format inputs + pts = self._checkformat_pts(pts) + npts = pts.shape[1] + + if lamb is None: + lamb = self._DEFLAMB + lamb = np.r_[lamb] + nlamb = lamb.size + bragg = self.get_bragg_from_lamb(lamb, n=n) + + dtheta, psi, indnan = _comp_optics.calc_psidthetaphi_from_pts_lamb( + pts, self._dgeom['center'], self._dgeom['rcurve'], + bragg, nlamb, npts, + self._dgeom['nout'], self._dgeom['e1'], self._dgeom['e2'], + self._dgeom['extenthalf'], ntheta=ntheta) + + # import ipdb; ipdb.set_trace() # DB + bragg = np.repeat(np.repeat(bragg[:, None], npts, axis=-1)[..., None], + ntheta, axis=-1) + bragg[indnan] = np.nan + phi[ind] = self.calc_braggphi_from_pts(pts, + dtheta=dtheta[ind], + dpsi=dpsi[ind]) + + return dtheta, psi, phi, bragg + + def plot_line_from_pts_on_det(self, lamb=None, pts=None, + xi_bounds=None, xj_bounds=None, nphi=None, + det_cent=None, det_nout=None, + det_ei=None, det_ej=None, + johann=False, lpsi=None, ltheta=None, + rocking=False, fs=None, dmargin=None, + wintit=None, tit=None): + 
""" Visualize the de-focusing by ray-tracing of chosen lamb + """ + # Check / format inputs + if lamb is None: + lamb = self._DEFLAMB + lamb = np.atleast_1d(lamb).ravel() + nlamb = lamb.size + + det = np.array([[xi_bounds[0], xi_bounds[1], xi_bounds[1], + xi_bounds[0], xi_bounds[0]], + [xj_bounds[0], xj_bounds[0], xj_bounds[1], + xj_bounds[1], xj_bounds[0]]]) + + # Compute lamb / phi + _, phi = self.calc_phibragg_from_xixj( + det[0, :], det[1, :], n=n, + det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, + theta=None, psi=None, plot=False) + phimin, phimax = np.nanmin(phi), np.nanmax(phi) + phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10 + del phi + + # Get reference ray-tracing + if nphi is None: + nphi = 300 + phi = np.linspace(phimin, phimax, nphi) + bragg = self._checkformat_bragglamb(lamb=lamb, n=n) + + xi = np.full((nlamb, nphi), np.nan) + xj = np.full((nlamb, nphi), np.nan) + for ll in range(nlamb): + xi[ll, :], xj[ll, :] = self.calc_xixj_from_phibragg( + bragg=bragg[ll], phi=phi, n=n, + det_cent=det_cent, det_nout=det_nout, + det_ei=det_ei, det_ej=det_ej, plot=False) + + # Get johann-error raytracing (multiple positions on crystal) + xi_er, xj_er = None, None + if johann and not rocking: + if lpsi is None or ltheta is None: + lpsi = np.linspace(-1., 1., 15) + ltheta = np.linspace(-1., 1., 15) + lpsi, ltheta = np.meshgrid(lpsi, ltheta) + lpsi = lpsi.ravel() + ltheta = ltheta.ravel() + lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi] + ltheta = np.pi/2 + self._dgeom['extenthalf'][1]*np.r_[ltheta] + npsi = lpsi.size + assert npsi == ltheta.size + + xi_er = np.full((nlamb, npsi*nphi), np.nan) + xj_er = np.full((nlamb, npsi*nphi), np.nan) + for l in range(nlamb): + for ii in range(npsi): + i0 = np.arange(ii*nphi, (ii+1)*nphi) + xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_phibragg( + phi=phi, bragg=bragg[l], lamb=None, n=n, + theta=ltheta[ii], psi=lpsi[ii], + det_cent=det_cent, det_nout=det_nout, + det_ei=det_ei, det_ej=det_ej, plot=False) 
+ + # Get rocking curve error + if rocking: + pass + + # Plot + ax = _plot_optics.CrystalBragg_plot_line_tracing_on_det( + lamb, xi, xj, xi_er, xj_er, det=det, + johann=johann, rocking=rocking, + fs=fs, dmargin=dmargin, wintit=wintit, tit=tit) + + def plot_data_vs_lambphi(self, xi=None, xj=None, data=None, mask=None, + det_cent=None, det_ei=None, det_ej=None, + theta=None, psi=None, n=None, + nlambfit=None, nphifit=None, + magaxis=None, npaxis=None, + plot=True, fs=None, + cmap=None, vmin=None, vmax=None): # Check / format inputs assert data is not None xi, xj, (xii, xjj) = self._checkformat_xixj(xi, xj) @@ -1026,24 +1524,58 @@ def plot_data_vs_lambphi(self, xi=None, xj=None, data=None, nxj = xj.size if xj is not None else np.unique(xjj).size # Compute lamb / phi - func = self.calc_phibragg_from_xixj - bragg, phi = func(xii, xjj, n=n, - det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, - theta=theta, psi=psi, plot=False) + bragg, phi = self.calc_phibragg_from_xixj( + xii, xjj, n=n, + det_cent=det_cent, det_ei=det_ei, det_ej=det_ej, + theta=theta, psi=psi, plot=False) assert bragg.shape == phi.shape == data.shape lamb = self.get_lamb_from_bragg(bragg, n=n) # Compute lambfit / phifit and spectrum1d - lambfit, phifit = _comp_optics.get_lambphifit(lamb, phi, nxi, nxj) + if mask is not None: + data[~mask] = np.nan + if nlambfit is None: + nlambfit = nxi + if nphifit is None: + nphifit = nxj + lambfit, phifit = _comp_optics.get_lambphifit(lamb, phi, + nlambfit, nphifit) lambfitbins = 0.5*(lambfit[1:] + lambfit[:-1]) ind = np.digitize(lamb, lambfitbins) - spect1d = np.array([np.nanmean(data[ind==ii]) for ii in np.unique(ind)]) + spect1d = np.array([np.nanmean(data[ind == ii]) + for ii in np.unique(ind)]) + phifitbins = 0.5*(phifit[1:] + phifit[:-1]) + ind = np.digitize(phi, phifitbins) + vertsum1d = np.array([np.nanmean(data[ind == ii]) + for ii in np.unique(ind)]) + + # Get phiref from mag axis + lambax, phiax = None, None + if magaxis is not None: + if npaxis is None: + 
npaxis = 1000 + thetacryst = np.arctan2(self._dgeom['summit'][1], + self._dgeom['summit'][0]) + thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis) + pts = np.array([magaxis[0]*np.cos(thetaax), + magaxis[0]*np.sin(thetaax), + np.full((npaxis,), magaxis[1])]) + braggax, phiax = self.calc_phibragg_from_pts(pts) + lambax = self.get_lamb_from_bragg(braggax) + phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi)) + ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1]) + & (phiax >= phifit[0]) & (phiax <= phifit[-1])) + lambax, phiax = lambax[ind], phiax[ind] + ind = np.argsort(lambax) + lambax, phiax = lambax[ind], phiax[ind] # plot - func = _plot_optics.CrystalBragg_plot_data_vs_lambphi - ax = func(xi, xj, bragg, lamb, phi, data, - lambfit=lambfit, phifit=phifit, spect1d=spect1d, - cmap=cmap, vmin=vmin, vmax=vmax, fs=fs) + if plot: + ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi( + xi, xj, bragg, lamb, phi, data, + lambfit=lambfit, phifit=phifit, spect1d=spect1d, + vertsum1d=vertsum1d, lambax=lambax, phiax=phiax, + cmap=cmap, vmin=vmin, vmax=vmax, fs=fs) return ax def plot_data_fit2d(self, xi=None, xj=None, data=None, mask=None, diff --git a/tofu/geom/_plot_optics.py b/tofu/geom/_plot_optics.py index 07e669218..9c35ad098 100644 --- a/tofu/geom/_plot_optics.py +++ b/tofu/geom/_plot_optics.py @@ -93,6 +93,7 @@ def _check_projdax_mpl(dax=None, proj=None, fs=None, wintit=None): def CrystalBragg_plot(cryst, lax=None, proj=None, res=None, element=None, color=None, dP=None, + det_cent=None, det_nout=None, det_ei=None, det_ej=None, dI=None, dBs=None, dBv=None, dVect=None, dIHor=None, dBsHor=None, dBvHor=None, dleg=None, indices=False, @@ -118,8 +119,10 @@ def CrystalBragg_plot(cryst, lax=None, proj=None, res=None, element=None, # Temporary matplotlib issue dleg = None else: - dax = _CrystalBragg_plot_crosshor(cryst, proj=proj, res=res, dax=lax, element=element, - color=color) + dax = _CrystalBragg_plot_crosshor(cryst, proj=proj, res=res, dax=lax, + 
element=element, color=color, + det_cent=det_cent, det_nout=det_nout, + det_ei=det_ei, det_ej=det_ej) # recompute the ax.dataLim ax0 = None @@ -139,7 +142,11 @@ def CrystalBragg_plot(cryst, lax=None, proj=None, res=None, element=None, ax0.figure.canvas.draw() return dax -def _CrystalBragg_plot_crosshor(cryst, proj=None, dax=None, element=None, res=None, + +def _CrystalBragg_plot_crosshor(cryst, proj=None, dax=None, + element=None, res=None, + det_cent=None, det_nout=None, + det_ei=None, det_ej=None, Pdict=_def.TorPd, Idict=_def.TorId, Bsdict=_def.TorBsd, Bvdict=_def.TorBvd, Vdict=_def.TorVind, color=None, ms=None, quiver_cmap=None, @@ -232,21 +239,77 @@ def _CrystalBragg_plot_crosshor(cryst, proj=None, dax=None, element=None, res=No p0 = np.repeat(summ[:,None], 3, axis=1) v = np.concatenate((nin[:, None], e1[:, None], e2[:, None]), axis=1) if dax['cross'] is not None: - dax['cross'].quiver(np.hypot(p0[0,:], p0[1,:]), p0[2,:], - np.hypot(v[0,:], v[1,:]), v[2,:], + pr = np.hypot(p0[0, :], p0[1, :]) + vr = np.hypot(p0[0, :]+v[0, :], p0[1, :]+v[1, :]) - pr + dax['cross'].quiver(pr, p0[2, :], + vr, v[2, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label=cryst.Id.NameLTX+" unit vect", **Vdict) if dax['hor'] is not None: - dax['hor'].quiver(p0[0,:], p0[1,:], - v[0,:], v[1,:], + dax['hor'].quiver(p0[0, :], p0[1, :], + v[0, :], v[1, :], np.r_[0., 0.5, 1.], cmap=quiver_cmap, angles='xy', scale_units='xy', label=cryst.Id.NameLTX+" unit vect", **Vdict) + # Detector + sc = None + if det_cent is not None: + if dax['cross'] is not None: + dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]), det_cent[2], + marker='x', ms=ms, c=color, label="det_cent") + if dax['hor'] is not None: + dax['hor'].plot(det_cent[0], det_cent[1], + marker='x', ms=ms, c=color, label="det_cent") + + if det_nout is not None: + assert det_ei is not None and det_ej is not None + p0 = np.repeat(det_cent[:, None], 3, axis=1) + v = np.concatenate((det_nout[:, None], det_ei[:, None], 
+ det_ej[:, None]), axis=1) + if dax['cross'] is not None: + pr = np.hypot(p0[0, :], p0[1, :]) + vr = np.hypot(p0[0, :]+v[0, :], p0[1, :]+v[1, :]) - pr + dax['cross'].quiver(pr, p0[2, :], + vr, v[2, :], + np.r_[0., 0.5, 1.], cmap=quiver_cmap, + angles='xy', scale_units='xy', + label="det unit vect", **Vdict) + if dax['hor'] is not None: + dax['hor'].quiver(p0[0, :], p0[1, :], + v[0, :], v[1, :], + np.r_[0., 0.5, 1.], cmap=quiver_cmap, + angles='xy', scale_units='xy', + label="det unit vect", **Vdict) + return dax +# ################################################################# +# ################################################################# +# Rocking curve plot +# ################################################################# +# ################################################################# + +def CrystalBragg_plot_rockingcurve(Rmax=None, sigma=None, + bragg=None, delta_bragg=None, npts=None): + + # Prepare + if npts is None: + npts = 1000 + angle = bragg + delta_bragg + 3.*sigma*np.linspace(-1, 1, npts) + curve = func(angle) + + # Plot + if ax is None: + fig = plt.figure() + ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) + ax.plot(angle, curve, ls='-', lw=1., c='k') + ax.axvline(bragg, ls='--', lw=1, c='k') + return ax + + # ################################################################# # ################################################################# # Bragg diffraction plot @@ -342,8 +405,140 @@ def CrystalBragg_plot_braggangle_from_xixj(xi=None, xj=None, return ax +def CrystalBragg_plot_line_tracing_on_det(lamb, xi, xj, xi_err, xj_err, + det=None, + johann=None, rocking=None, + fs=None, dmargin=None, + wintit=None, tit=None): + + # Check inputs + # ------------ + + if fs is None: + fs = (6, 8) + if dmargin is None: + dmargin = {'left': 0.05, 'right': 0.99, + 'bottom': 0.06, 'top': 0.92, + 'wspace': None, 'hspace': 0.4} + + if wintit is None: + wintit = _WINTIT + if tit is None: + tit = "line tracing" + if johann is True: + tit += " - johann error" 
+ if rocking is True: + tit += " - rocking curve" + + plot_err = johann is True or rocking is True + + # Plot + # ------------ + + fig = fig = plt.figure(figsize=fs) + gs = gridspec.GridSpec(1, 1, **dmargin) + ax0 = fig.add_subplot(gs[0, 0], aspect='equal', adjustable='datalim') + + ax0.plot(det[0, :], det[1, :], ls='-', lw=1., c='k') + for l in range(lamb.size): + lab = r'$\lambda$'+' = {:6.3f} A'.format(lamb[l]*1.e10) + l0, = ax0.plot(xi[l, :], xj[l, :], ls='-', lw=1., label=lab) + if plot_err: + ax0.plot(xi_err[l, ...], xj_err[l, ...], + ls='None', lw=1., c=l0.get_color(), + marker='.', ms=1) + + ax0.legend() + + if wintit is not False: + fig.canvas.set_window_title(wintit) + if tit is not False: + fig.suptitle(tit, size=14, weight='bold') + return [ax0] + + +def CrystalBragg_plot_johannerror(xi, xj, lamb, phi, err_lamb, err_phi, + cmap=None, vmin=None, vmax=None, + fs=None, dmargin=None, wintit=None, tit=None, + angunits='deg', err=None): + + # Check inputs + # ------------ + + if fs is None: + fs = (14, 8) + if cmap is None: + cmap = plt.cm.viridis + if dmargin is None: + dmargin = {'left': 0.05, 'right': 0.99, + 'bottom': 0.06, 'top': 0.92, + 'wspace': None, 'hspace': 0.4} + assert angunits in ['deg', 'rad'] + if angunits == 'deg': + # bragg = bragg*180./np.pi + phi = phi*180./np.pi + err_phi = err_phi*180./np.pi + + if err is None: + err = 'abs' + if err == 'rel': + err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb)) + err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi)) + err_lamb_units = '%' + err_phi_units = '%' + else: + err_lamb_units = 'm' + err_phi_units = angunits + + if wintit is None: + wintit = _WINTIT + if tit is None: + tit = False + + # pre-compute + # ------------ + + # extent + extent = (xi.min(), xi.max(), xj.min(), xj.max()) + + # Plot + # ------------ + + fig = fig = plt.figure(figsize=fs) + gs = gridspec.GridSpec(1, 3, **dmargin) + ax0 = fig.add_subplot(gs[0, 0], aspect='equal', adjustable='datalim') + ax1 = 
fig.add_subplot(gs[0, 1], aspect='equal', adjustable='datalim', + sharex=ax0, sharey=ax0) + ax2 = fig.add_subplot(gs[0, 2], aspect='equal', adjustable='datalim', + sharex=ax0, sharey=ax0) + + ax0.set_title('Iso-lamb and iso-phi at crystal summit') + ax1.set_title('Focalization error on lamb ({})'.format(err_lamb_units)) + ax2.set_title('Focalization error on phi ({})'.format(err_phi_units)) + + ax0.contour(xi, xj, lamb, 10, cmap=cmap) + ax0.contour(xi, xj, phi, 10, cmap=cmap, ls='--') + imlamb = ax1.imshow(err_lamb, extent=extent, aspect='equal', + origin='lower', interpolation='nearest', + vmin=vmin, vmax=vmax) + imphi = ax2.imshow(err_phi, extent=extent, aspect='equal', + origin='lower', interpolation='nearest', + vmin=vmin, vmax=vmax) + + plt.colorbar(imlamb, ax=ax1) + plt.colorbar(imphi, ax=ax2) + if wintit is not False: + fig.canvas.set_window_title(wintit) + if tit is not False: + fig.suptitle(tit, size=14, weight='bold') + + return [ax0, ax1, ax2] + + def CrystalBragg_plot_data_vs_lambphi(xi, xj, bragg, lamb, phi, data, - lambfit=None, phifit=None, spect1d=None, + lambfit=None, phifit=None, + spect1d=None, vertsum1d=None, + lambax=None, phiax=None, cmap=None, vmin=None, vmax=None, fs=None, dmargin=None, angunits='deg'): @@ -364,6 +559,9 @@ def CrystalBragg_plot_data_vs_lambphi(xi, xj, bragg, lamb, phi, data, bragg = bragg*180./np.pi phi = phi*180./np.pi phifit = phifit*180./np.pi + if phiax is not None: + phiax = 180*phiax/np.pi + # pre-compute @@ -377,29 +575,36 @@ def CrystalBragg_plot_data_vs_lambphi(xi, xj, bragg, lamb, phi, data, # ------------ fig = fig = plt.figure(figsize=fs) - gs = gridspec.GridSpec(4, 3, **dmargin) + gs = gridspec.GridSpec(4, 4, **dmargin) ax0 = fig.add_subplot(gs[:3, 0], aspect='equal', adjustable='datalim') ax1 = fig.add_subplot(gs[:3, 1], aspect='equal', adjustable='datalim', sharex=ax0, sharey=ax0) axs1 = fig.add_subplot(gs[3, 1], sharex=ax0) ax2 = fig.add_subplot(gs[:3, 2]) axs2 = fig.add_subplot(gs[3, 2], sharex=ax2, 
sharey=axs1) + ax3 = fig.add_subplot(gs[:3, 3], sharey=ax2) ax0.set_title('Coordinates transform') ax1.set_title('Camera image') ax2.set_title('Camera image transformed') - ax0.set_ylabel(r'incidence angle ($deg$)') + ax2.set_ylabel(r'incidence angle ($deg$)') + ax2.set_xlabel(r'$\lambda$ ($m$)') + axs2.set_xlabel(r'$\lambda$ ($m$)') + ax3.set_ylabel(r'incidence angle ($deg$)') ax0.contour(xi, xj, bragg, 10, cmap=cmap) ax0.contour(xi, xj, phi, 10, cmap=cmap, ls='--') ax1.imshow(data, extent=extent, aspect='equal', origin='lower', vmin=vmin, vmax=vmax) - axs1.plot(xi, np.sum(data, axis=0), c='k', ls='-') + axs1.plot(xi, np.nanmean(data, axis=0), c='k', ls='-') ax2.scatter(lamb.ravel(), phi.ravel(), c=data.ravel(), s=2, marker='s', edgecolors='None', cmap=cmap, vmin=vmin, vmax=vmax) axs2.plot(lambfit, spect1d, c='k', ls='-') + ax3.plot(vertsum1d, phifit, c='k', ls='-') + if phiax is not None: + ax2.plot(lambax, phiax, c='r', ls='-', lw=1.) ax2.set_xlim(extent2[0], extent2[1]) ax2.set_ylim(extent2[2], extent2[3]) diff --git a/tofu/geom/inputs/AUG_vessel_20200116.coords b/tofu/geom/inputs/AUG_vessel_20200116.coords new file mode 100644 index 000000000..05eb85fdb --- /dev/null +++ b/tofu/geom/inputs/AUG_vessel_20200116.coords @@ -0,0 +1,741 @@ +# Vessel coordinates 37095 + +# PCup + 2.151 0.562 + 1.966 0.865 + 1.966 0.874 + 2.054 0.874 + 2.233 0.582 + 2.233 0.562 + 2.151 0.562 + +# PClow + 1.877 -0.874 + 1.877 -0.864 + 2.075 -0.562 + 2.158 -0.562 + 2.158 -0.582 + 1.966 -0.874 + 1.877 -0.874 + +# LIM09 + 1.095 -0.760 + 1.095 -0.680 + 1.073 -0.680 + 1.073 -0.705 + 1.000 -0.705 + 1.000 -0.725 + 1.073 -0.725 + 1.073 -0.760 + 1.095 -0.760 + +# SBi + 1.100 -0.584 + 1.085 -0.517 + 1.072 -0.450 + 1.060 -0.382 + 1.051 -0.314 + 1.043 -0.246 + 1.037 -0.177 + 1.033 -0.108 + 1.031 -0.040 + 1.031 0.029 + 1.033 0.098 + 1.037 0.166 + 1.042 0.235 + 1.049 0.303 + 1.059 0.371 + 1.070 0.439 + 1.083 0.507 + 1.098 0.574 + 1.114 0.641 + 1.133 0.707 + 1.153 0.773 + 1.164 0.769 + 1.144 0.704 + 
1.126 0.638 + 1.109 0.571 + 1.095 0.504 + 1.082 0.437 + 1.071 0.370 + 1.061 0.302 + 1.054 0.234 + 1.049 0.166 + 1.045 0.097 + 1.043 0.029 + 1.043 -0.040 + 1.045 -0.108 + 1.049 -0.176 + 1.055 -0.244 + 1.063 -0.312 + 1.072 -0.380 + 1.083 -0.448 + 1.097 -0.515 + 1.112 -0.582 + 1.100 -0.584 + +# ICRHa + 2.002 -0.524 + 1.997 -0.523 + 1.994 -0.520 + 1.992 -0.516 + 1.992 -0.511 + 1.994 -0.507 + 2.004 -0.495 + 2.017 -0.476 + 2.030 -0.457 + 2.043 -0.438 + 2.055 -0.418 + 2.066 -0.399 + 2.078 -0.379 + 2.089 -0.358 + 2.099 -0.338 + 2.109 -0.317 + 2.118 -0.296 + 2.126 -0.276 + 2.127 -0.275 + 2.136 -0.253 + 2.144 -0.231 + 2.151 -0.210 + 2.159 -0.188 + 2.165 -0.166 + 2.170 -0.149 + 2.171 -0.143 + 2.175 -0.129 + 2.177 -0.121 + 2.182 -0.098 + 2.187 -0.075 + 2.191 -0.053 + 2.195 -0.030 + 2.198 -0.007 + 2.198 -0.001 + 2.200 0.016 + 2.203 0.039 + 2.204 0.062 + 2.205 0.098 + 2.205 0.133 + 2.204 0.157 + 2.204 0.169 + 2.200 0.204 + 2.200 0.209 + 2.196 0.239 + 2.190 0.274 + 2.182 0.308 + 2.173 0.343 + 2.162 0.377 + 2.159 0.386 + 2.150 0.410 + 2.137 0.442 + 2.137 0.443 + 2.122 0.475 + 2.109 0.499 + 2.106 0.509 + 2.107 0.517 + 2.111 0.524 + 2.118 0.529 + 2.126 0.531 + 2.900 0.531 + 2.900 -0.524 + 2.002 -0.524 + +# VESiR + 2.521 0.000 + 2.506 0.210 + 2.461 0.416 + 2.387 0.613 + 2.286 0.798 + 2.159 0.966 + 2.010 1.114 + 1.841 1.240 + 1.841 1.240 + 1.705 1.303 + 1.558 1.329 + 1.409 1.315 + 1.269 1.264 + 1.147 1.177 + 1.052 1.062 + 0.990 0.927 + 0.965 0.779 + 0.965 0.779 + 0.957 0.519 + 0.953 0.260 + 0.951 0.000 + 0.953 -0.260 + 0.957 -0.519 + 0.965 -0.779 + 0.965 -0.779 + 0.990 -0.927 + 1.052 -1.062 + 1.147 -1.177 + 1.269 -1.264 + 1.409 -1.315 + 1.558 -1.329 + 1.705 -1.303 + 1.841 -1.240 + 1.841 -1.240 + 2.010 -1.114 + 2.159 -0.966 + 2.286 -0.798 + 2.387 -0.613 + 2.461 -0.416 + 2.506 -0.210 + 2.521 0.000 + +# D2c.i1 + 1.138 -0.658 + 1.141 -0.659 + 1.143 -0.660 + 1.145 -0.661 + 1.187 -0.726 + 1.187 -0.727 + 1.187 -0.728 + 1.187 -0.729 + 1.186 -0.730 + 1.165 -0.744 + 1.117 -0.669 + 1.136 -0.658 
+ 1.138 -0.658 + +# D2c.i2 + 1.187 -0.730 + 1.188 -0.730 + 1.189 -0.730 + 1.190 -0.731 + 1.247 -0.820 + 1.247 -0.822 + 1.244 -0.824 + 1.216 -0.824 + 1.165 -0.744 + 1.186 -0.730 + 1.187 -0.730 + +# D2c.TPib + 1.249 -0.826 + 1.250 -0.827 + 1.250 -0.827 + 1.250 -0.827 + 1.250 -0.827 + 1.259 -0.842 + 1.266 -0.858 + 1.273 -0.873 + 1.278 -0.890 + 1.282 -0.906 + 1.285 -0.923 + 1.287 -0.940 + 1.288 -0.957 + 1.287 -0.974 + 1.287 -0.975 + 1.287 -0.976 + 1.286 -0.977 + 1.286 -0.977 + 1.285 -0.978 + 1.284 -0.978 + 1.283 -0.979 + 1.282 -0.979 + 1.226 -0.979 + 1.226 -0.825 + 1.246 -0.825 + 1.246 -0.825 + 1.247 -0.825 + 1.247 -0.825 + 1.248 -0.826 + 1.248 -0.826 + 1.248 -0.826 + 1.249 -0.826 + 1.249 -0.826 + 1.249 -0.826 + +# D2c.TPi + 1.281 -0.981 + 1.282 -0.983 + 1.283 -0.985 + 1.283 -0.987 + 1.236 -1.123 + 1.235 -1.124 + 1.235 -1.125 + 1.234 -1.125 + 1.233 -1.125 + 1.232 -1.125 + 1.232 -1.125 + 1.231 -1.125 + 1.230 -1.125 + 1.217 -1.138 + 1.201 -1.122 + 1.249 -0.980 + 1.265 -0.986 + 1.267 -0.980 + 1.278 -0.980 + 1.281 -0.981 + +# D2c.TPic + 1.232 -1.125 + 1.233 -1.126 + 1.234 -1.128 + 1.244 -1.142 + 1.244 -1.142 + 1.244 -1.143 + 1.244 -1.144 + 1.243 -1.144 + 1.233 -1.154 + 1.217 -1.138 + 1.230 -1.126 + 1.230 -1.125 + 1.231 -1.125 + 1.232 -1.125 + +# D2c.domL + 1.280 -1.123 + 1.280 -1.102 + 1.280 -1.100 + 1.280 -1.098 + 1.282 -1.096 + 1.314 -1.071 + 1.315 -1.070 + 1.317 -1.069 + 1.319 -1.070 + 1.321 -1.071 + 1.337 -1.092 + 1.286 -1.133 + 1.281 -1.126 + 1.281 -1.125 + 1.280 -1.123 + +# D2c.dome + 1.320 -1.066 + 1.322 -1.064 + 1.326 -1.062 + 1.459 -1.062 + 1.461 -1.063 + 1.463 -1.064 + 1.482 -1.087 + 1.482 -1.088 + 1.482 -1.089 + 1.482 -1.090 + 1.480 -1.090 + 1.337 -1.090 + 1.320 -1.068 + 1.319 -1.067 + 1.320 -1.066 + +# D2c.domR + 1.487 -1.093 + 1.554 -1.173 + 1.555 -1.175 + 1.555 -1.175 + 1.555 -1.176 + 1.555 -1.184 + 1.555 -1.186 + 1.554 -1.188 + 1.543 -1.196 + 1.462 -1.101 + 1.472 -1.093 + 1.474 -1.092 + 1.475 -1.091 + 1.483 -1.091 + 1.485 -1.092 + 1.487 -1.093 + +# D3.BG10 + 
1.554 -1.218 + 1.555 -1.216 + 1.556 -1.214 + 1.558 -1.213 + 1.562 -1.211 + 1.563 -1.210 + 1.566 -1.209 + 1.567 -1.209 + 1.579 -1.209 + 1.579 -1.215 + 1.577 -1.222 + 1.562 -1.222 + 1.562 -1.220 + 1.561 -1.219 + 1.556 -1.219 + 1.555 -1.220 + 1.555 -1.222 + 1.554 -1.222 + 1.554 -1.220 + 1.554 -1.218 + +# D3.BG1 + 1.579 -1.208 + 1.635 -0.999 + 1.639 -0.999 + 1.634 -1.021 + 1.637 -1.021 + 1.638 -1.022 + 1.639 -1.021 + 1.640 -1.020 + 1.640 -1.018 + 1.641 -1.014 + 1.642 -1.013 + 1.647 -1.015 + 1.592 -1.220 + 1.584 -1.220 + 1.587 -1.212 + 1.587 -1.211 + 1.587 -1.210 + 1.586 -1.209 + 1.585 -1.208 + 1.584 -1.208 + 1.579 -1.208 + +# D2d.BG2 + 1.636 -0.997 + 1.636 -0.995 + 1.638 -0.986 + 1.643 -0.972 + 1.649 -0.956 + 1.658 -0.937 + 1.665 -0.924 + 1.674 -0.909 + 1.687 -0.888 + 1.702 -0.863 + 1.704 -0.860 + 1.707 -0.857 + 1.711 -0.854 + 1.715 -0.850 + 1.722 -0.846 + 1.727 -0.843 + 1.732 -0.842 + 1.739 -0.840 + 1.745 -0.839 + 1.758 -0.839 + 1.763 -0.845 + 1.745 -0.868 + 1.737 -0.868 + 1.732 -0.870 + 1.728 -0.872 + 1.725 -0.875 + 1.671 -0.998 + 1.638 -0.998 + 1.636 -0.997 + +# D2d.Bl3 + 1.752 -0.826 + 1.753 -0.826 + 1.875 -0.686 + 1.876 -0.686 + 1.878 -0.685 + 1.879 -0.686 + 1.879 -0.686 + 1.939 -0.725 + 1.901 -0.783 + 1.870 -0.762 + 1.775 -0.845 + 1.772 -0.842 + 1.771 -0.841 + 1.770 -0.839 + 1.752 -0.826 + +# D2d.Bl2 + 1.879 -0.683 + 1.994 -0.550 + 1.996 -0.549 + 1.998 -0.548 + 2.000 -0.547 + 2.002 -0.547 + 2.006 -0.547 + 2.007 -0.547 + 2.008 -0.548 + 2.008 -0.549 + 2.008 -0.559 + 1.950 -0.647 + 1.978 -0.666 + 1.940 -0.723 + +# D2d.Bl1 + 2.011 -0.560 + 2.011 -0.559 + 2.018 -0.548 + 2.019 -0.547 + 2.020 -0.547 + 2.149 -0.547 + 2.150 -0.547 + 2.151 -0.548 + 2.151 -0.549 + 2.151 -0.562 + 2.013 -0.562 + 2.012 -0.562 + 2.011 -0.561 + 2.011 -0.560 + +# TPLT_1 + 1.500 1.185 + 1.336 1.093 + 1.335 1.093 + 1.334 1.093 + 1.320 1.103 + 1.328 1.114 + 1.322 1.119 + 1.367 1.178 + 1.407 1.178 + 1.497 1.196 + 1.499 1.196 + 1.501 1.195 + 1.502 1.194 + 1.503 1.192 + 1.503 1.190 + 1.503 1.188 + 
1.502 1.186 + 1.500 1.185 + +# TPLT_2 + 1.331 1.087 + 1.288 1.031 + 1.287 1.030 + 1.286 1.029 + 1.284 1.029 + 1.258 1.037 + 1.312 1.108 + 1.330 1.094 + 1.331 1.093 + 1.332 1.092 + 1.332 1.090 + 1.332 1.089 + 1.331 1.087 + +# TPLT_3 + 1.282 1.023 + 1.242 0.969 + 1.240 0.967 + 1.239 0.966 + 1.238 0.965 + 1.236 0.965 + 1.235 0.965 + 1.234 0.965 + 1.209 0.973 + 1.257 1.036 + 1.281 1.030 + 1.282 1.029 + 1.283 1.028 + 1.283 1.026 + 1.283 1.024 + 1.282 1.023 + +# TPLT_4 + 1.237 0.958 + 1.208 0.891 + 1.207 0.891 + 1.207 0.890 + 1.206 0.890 + 1.206 0.889 + 1.205 0.889 + 1.204 0.888 + 1.204 0.888 + 1.203 0.888 + 1.202 0.888 + 1.201 0.889 + 1.200 0.889 + 1.200 0.889 + 1.166 0.915 + 1.209 0.973 + 1.234 0.965 + 1.234 0.965 + 1.235 0.965 + 1.235 0.964 + 1.236 0.964 + 1.236 0.963 + 1.237 0.962 + 1.237 0.961 + 1.237 0.961 + 1.237 0.960 + 1.237 0.959 + 1.237 0.958 + +# TPLT_5 + 1.203 0.881 + 1.180 0.820 + 1.179 0.818 + 1.179 0.818 + 1.178 0.816 + 1.178 0.816 + 1.167 0.802 + 1.167 0.802 + 1.166 0.801 + 1.166 0.801 + 1.165 0.800 + 1.164 0.800 + 1.163 0.800 + 1.162 0.800 + 1.161 0.800 + 1.161 0.800 + 1.160 0.801 + 1.160 0.801 + 1.119 0.832 + 1.142 0.861 + 1.131 0.869 + 1.165 0.914 + 1.201 0.886 + 1.202 0.886 + 1.202 0.886 + 1.203 0.885 + 1.203 0.884 + 1.203 0.883 + 1.203 0.882 + 1.203 0.882 + 1.203 0.881 + 1.203 0.881 + +# TPRT_2 + 1.863 0.941 + 1.775 1.071 + 1.774 1.073 + 1.774 1.075 + 1.775 1.076 + 1.776 1.078 + 1.784 1.083 + 1.801 1.076 + 1.802 1.076 + 1.802 1.075 + 1.803 1.075 + 1.804 1.075 + 1.805 1.076 + 1.806 1.076 + 1.806 1.077 + 1.807 1.077 + 1.807 1.078 + 1.808 1.078 + 1.814 1.093 + 1.839 1.083 + 1.913 0.973 + 1.914 0.972 + 1.914 0.970 + 1.913 0.968 + 1.912 0.966 + 1.870 0.939 + 1.868 0.938 + 1.866 0.938 + 1.865 0.939 + 1.863 0.941 + 1.863 0.941 + +# TPRT_3 + 1.796 1.082 + 1.729 1.106 + 1.727 1.107 + 1.726 1.108 + 1.725 1.109 + 1.725 1.111 + 1.726 1.112 + 1.733 1.132 + 1.809 1.105 + 1.802 1.084 + 1.801 1.083 + 1.800 1.082 + 1.799 1.081 + 1.797 1.081 + 1.796 1.082 + +# TPRT_4 
+ 1.718 1.109 + 1.651 1.134 + 1.650 1.135 + 1.649 1.136 + 1.648 1.138 + 1.648 1.140 + 1.656 1.160 + 1.732 1.133 + 1.725 1.112 + 1.724 1.111 + 1.722 1.110 + 1.720 1.109 + 1.718 1.109 + +# TPRT_5 + 1.644 1.137 + 1.509 1.190 + 1.508 1.191 + 1.507 1.195 + 1.506 1.195 + 1.504 1.198 + 1.501 1.200 + 1.494 1.203 + 1.493 1.204 + 1.493 1.205 + 1.493 1.206 + 1.493 1.206 + 1.501 1.217 + 1.655 1.161 + 1.646 1.138 + 1.646 1.137 + 1.645 1.137 + 1.644 1.137 + 1.644 1.137 + +# D2d.Bu4 + 2.121 0.547 + 2.113 0.559 + 2.113 0.560 + 2.113 0.561 + 2.114 0.562 + 2.115 0.562 + 2.228 0.562 + 2.228 0.547 + 2.121 0.547 + +# D2d.Bu3 + 2.101 0.547 + 2.003 0.707 + 2.046 0.734 + 2.081 0.676 + 2.067 0.667 + 2.063 0.664 + 2.060 0.659 + 2.058 0.653 + 2.059 0.646 + 2.061 0.640 + 2.118 0.547 + 2.101 0.547 + +# D2d.Bu2 + 2.002 0.708 + 1.905 0.866 + 1.918 0.874 + 1.969 0.791 + 1.974 0.786 + 1.980 0.782 + 1.985 0.781 + 1.991 0.781 + 1.996 0.783 + 2.010 0.792 + 2.045 0.734 + 2.002 0.708 + 2.002 0.708 + +# D2d.Bu1 + 1.905 0.868 + 1.904 0.868 + 1.904 0.869 + 1.903 0.869 + 1.903 0.869 + 1.897 0.880 + 1.896 0.881 + 1.896 0.881 + 1.896 0.881 + 1.897 0.882 + 1.897 0.882 + 1.897 0.882 + 1.897 0.883 + 1.897 0.883 + 1.897 0.883 + 1.897 0.884 + 1.875 0.921 + 1.875 0.922 + 1.875 0.923 + 1.875 0.923 + 1.875 0.924 + 1.875 0.924 + 1.924 0.952 + 1.925 0.953 + 1.927 0.953 + 1.928 0.954 + 1.930 0.954 + 1.932 0.953 + 2.022 0.925 + 2.022 0.894 + 1.949 0.894 + 1.906 0.869 + 1.905 0.868 + 1.905 0.868 + 1.905 0.868 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPi.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPi.txt new file mode 100644 index 000000000..d7bcf6375 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPi.txt @@ -0,0 +1,22 @@ +#Name = D2cTPi +#Exp = AUG +#CLs = PFC +18 0 +1.281 -0.981 +1.282 -0.983 +1.283 -0.985 +1.283 -0.987 +1.236 -1.123 +1.235 -1.124 +1.235 -1.125 +1.234 -1.125 +1.233 -1.125 +1.232 -1.125 +1.231 -1.125 +1.230 -1.125 +1.217 -1.138 +1.201 -1.122 +1.249 -0.980 +1.265 -0.986 +1.267 -0.980 +1.278 
-0.980 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPib.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPib.txt new file mode 100644 index 000000000..2c45ce9a0 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPib.txt @@ -0,0 +1,27 @@ +#Name = D2cTPib +#Exp = AUG +#CLs = PFC +23 0 +1.249 -0.826 +1.250 -0.827 +1.259 -0.842 +1.266 -0.858 +1.273 -0.873 +1.278 -0.890 +1.282 -0.906 +1.285 -0.923 +1.287 -0.940 +1.288 -0.957 +1.287 -0.974 +1.287 -0.975 +1.287 -0.976 +1.286 -0.977 +1.285 -0.978 +1.284 -0.978 +1.283 -0.979 +1.282 -0.979 +1.226 -0.979 +1.226 -0.825 +1.246 -0.825 +1.247 -0.825 +1.248 -0.826 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPic.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPic.txt new file mode 100644 index 000000000..c1e21e7e8 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cTPic.txt @@ -0,0 +1,16 @@ +#Name = D2cTPic +#Exp = AUG +#CLs = PFC +12 0 +1.232 -1.125 +1.233 -1.126 +1.234 -1.128 +1.244 -1.142 +1.244 -1.143 +1.244 -1.144 +1.243 -1.144 +1.233 -1.154 +1.217 -1.138 +1.230 -1.126 +1.230 -1.125 +1.231 -1.125 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomL.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomL.txt new file mode 100644 index 000000000..c01783987 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomL.txt @@ -0,0 +1,18 @@ +#Name = D2cdomL +#Exp = AUG +#CLs = PFC +14 0 +1.280 -1.123 +1.280 -1.102 +1.280 -1.100 +1.280 -1.098 +1.282 -1.096 +1.314 -1.071 +1.315 -1.070 +1.317 -1.069 +1.319 -1.070 +1.321 -1.071 +1.337 -1.092 +1.286 -1.133 +1.281 -1.126 +1.281 -1.125 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomR.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomR.txt new file mode 100644 index 000000000..cce4a8d58 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdomR.txt @@ -0,0 +1,18 @@ +#Name = D2CdomR +#Exp = AUG +#CLs = PFC +14 0 +1.487 -1.093 +1.554 -1.173 +1.555 -1.175 +1.555 -1.176 +1.555 -1.184 +1.555 -1.186 +1.554 -1.188 +1.543 -1.196 +1.462 -1.101 +1.472 -1.093 +1.474 -1.092 +1.475 -1.091 +1.483 -1.091 +1.485 
-1.092 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdome.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdome.txt new file mode 100644 index 000000000..e0637a611 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2cdome.txt @@ -0,0 +1,18 @@ +#Name = D2cdome +#Exp = AUG +#CLs = PFC +14 0 +1.320 -1.066 +1.322 -1.064 +1.326 -1.062 +1.459 -1.062 +1.461 -1.063 +1.463 -1.064 +1.482 -1.087 +1.482 -1.088 +1.482 -1.089 +1.482 -1.090 +1.480 -1.090 +1.337 -1.090 +1.320 -1.068 +1.319 -1.067 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci1.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci1.txt new file mode 100644 index 000000000..e1f486f9d --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci1.txt @@ -0,0 +1,16 @@ +#Name = D2ci1 +#Exp = AUG +#CLs = PFC +12 0 +1.138 -0.658 +1.141 -0.659 +1.143 -0.660 +1.145 -0.661 +1.187 -0.726 +1.187 -0.727 +1.187 -0.728 +1.187 -0.729 +1.186 -0.730 +1.165 -0.744 +1.117 -0.669 +1.136 -0.658 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci2.txt new file mode 100644 index 000000000..127e85d3e --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2ci2.txt @@ -0,0 +1,14 @@ +#Name = D2ci2 +#Exp = AUG +#CLs = PFC +10 0 +1.187 -0.730 +1.188 -0.730 +1.189 -0.730 +1.190 -0.731 +1.247 -0.820 +1.247 -0.822 +1.244 -0.824 +1.216 -0.824 +1.165 -0.744 +1.186 -0.730 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBG2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBG2.txt new file mode 100644 index 000000000..aa384395e --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBG2.txt @@ -0,0 +1,32 @@ +#Name = D2dBG2 +#Exp = AUG +#CLs = PFC +28 0 +1.636 -0.997 +1.636 -0.995 +1.638 -0.986 +1.643 -0.972 +1.649 -0.956 +1.658 -0.937 +1.665 -0.924 +1.674 -0.909 +1.687 -0.888 +1.702 -0.863 +1.704 -0.860 +1.707 -0.857 +1.711 -0.854 +1.715 -0.850 +1.722 -0.846 +1.727 -0.843 +1.732 -0.842 +1.739 -0.840 +1.745 -0.839 +1.758 -0.839 +1.763 -0.845 +1.745 -0.868 +1.737 -0.868 +1.732 -0.870 +1.728 -0.872 +1.725 -0.875 +1.671 -0.998 +1.638 -0.998 
diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl1.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl1.txt new file mode 100644 index 000000000..2c35f270d --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl1.txt @@ -0,0 +1,17 @@ +#Name = D2dBl1 +#Exp = AUG +#CLs = PFC +13 0 +2.011 -0.560 +2.011 -0.559 +2.018 -0.548 +2.019 -0.547 +2.020 -0.547 +2.149 -0.547 +2.150 -0.547 +2.151 -0.548 +2.151 -0.549 +2.151 -0.562 +2.013 -0.562 +2.012 -0.562 +2.011 -0.561 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl2.txt new file mode 100644 index 000000000..66e8536d3 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl2.txt @@ -0,0 +1,18 @@ +#Name = D2dBl2 +#Exp = AUG +#CLs = PFC +14 0 +1.879 -0.683 +1.994 -0.550 +1.996 -0.549 +1.998 -0.548 +2.000 -0.547 +2.002 -0.547 +2.006 -0.547 +2.007 -0.547 +2.008 -0.548 +2.008 -0.549 +2.008 -0.559 +1.950 -0.647 +1.978 -0.666 +1.940 -0.723 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl3.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl3.txt new file mode 100644 index 000000000..1fe40f1b5 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBl3.txt @@ -0,0 +1,17 @@ +#Name = D2dBl3 +#Exp = AUG +#CLs = PFC +13 0 +1.752 -0.826 +1.753 -0.826 +1.875 -0.686 +1.876 -0.686 +1.878 -0.685 +1.879 -0.686 +1.939 -0.725 +1.901 -0.783 +1.870 -0.762 +1.775 -0.845 +1.772 -0.842 +1.771 -0.841 +1.770 -0.839 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu1.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu1.txt new file mode 100644 index 000000000..bc98c85f1 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu1.txt @@ -0,0 +1,26 @@ +#Name = D2dBu1 +#Exp = AUG +#CLs = PFC +22 0 +1.905 0.868 +1.904 0.868 +1.903 0.869 +1.897 0.880 +1.896 0.881 +1.897 0.882 +1.897 0.883 +1.897 0.884 +1.875 0.921 +1.875 0.922 +1.875 0.923 +1.875 0.924 +1.924 0.952 +1.925 0.953 +1.927 0.953 +1.928 0.954 +1.930 0.954 +1.932 0.953 +2.022 0.925 +2.022 0.894 +1.949 0.894 +1.906 0.869 diff --git 
a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu2.txt new file mode 100644 index 000000000..45fe01d38 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu2.txt @@ -0,0 +1,15 @@ +#Name = D2dBu2 +#Exp = AUG +#CLs = PFC +11 0 +2.002 0.708 +1.905 0.866 +1.918 0.874 +1.969 0.791 +1.974 0.786 +1.980 0.782 +1.985 0.781 +1.991 0.781 +1.996 0.783 +2.010 0.792 +2.045 0.734 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu3.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu3.txt new file mode 100644 index 000000000..1f97f6fcc --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu3.txt @@ -0,0 +1,15 @@ +#Name = D2dBu3 +#Exp = AUG +#CLs = PFC +11 0 +2.101 0.547 +2.003 0.707 +2.046 0.734 +2.081 0.676 +2.067 0.667 +2.063 0.664 +2.060 0.659 +2.058 0.653 +2.059 0.646 +2.061 0.640 +2.118 0.547 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu4.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu4.txt new file mode 100644 index 000000000..0dc2a3905 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D2dBu4.txt @@ -0,0 +1,12 @@ +#Name = D2dBu4 +#Exp = AUG +#CLs = PFC +8 0 +2.121 0.547 +2.113 0.559 +2.113 0.560 +2.113 0.561 +2.114 0.562 +2.115 0.562 +2.228 0.562 +2.228 0.547 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG1.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG1.txt new file mode 100644 index 000000000..940a5f5f0 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG1.txt @@ -0,0 +1,24 @@ +#Name = D3BG1 +#Exp = AUG +#CLs = PFC +20 0 +1.579 -1.208 +1.635 -0.999 +1.639 -0.999 +1.634 -1.021 +1.637 -1.021 +1.638 -1.022 +1.639 -1.021 +1.640 -1.020 +1.640 -1.018 +1.641 -1.014 +1.642 -1.013 +1.647 -1.015 +1.592 -1.220 +1.584 -1.220 +1.587 -1.212 +1.587 -1.211 +1.587 -1.210 +1.586 -1.209 +1.585 -1.208 +1.584 -1.208 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG10.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG10.txt new file mode 100644 index 000000000..5c1ef9ea5 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_D3BG10.txt @@ -0,0 +1,23 @@ +#Name = D3BG10 
+#Exp = AUG +#CLs = PFC +19 0 +1.554 -1.218 +1.555 -1.216 +1.556 -1.214 +1.558 -1.213 +1.562 -1.211 +1.563 -1.210 +1.566 -1.209 +1.567 -1.209 +1.579 -1.209 +1.579 -1.215 +1.577 -1.222 +1.562 -1.222 +1.562 -1.220 +1.561 -1.219 +1.556 -1.219 +1.555 -1.220 +1.555 -1.222 +1.554 -1.222 +1.554 -1.220 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_ICRHa.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_ICRHa.txt new file mode 100644 index 000000000..27b84c96d --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_ICRHa.txt @@ -0,0 +1,65 @@ +#Name = ICRHa +#Exp = AUG +#CLs = PFC +61 0 +2.002 -0.524 +1.997 -0.523 +1.994 -0.520 +1.992 -0.516 +1.992 -0.511 +1.994 -0.507 +2.004 -0.495 +2.017 -0.476 +2.030 -0.457 +2.043 -0.438 +2.055 -0.418 +2.066 -0.399 +2.078 -0.379 +2.089 -0.358 +2.099 -0.338 +2.109 -0.317 +2.118 -0.296 +2.126 -0.276 +2.127 -0.275 +2.136 -0.253 +2.144 -0.231 +2.151 -0.210 +2.159 -0.188 +2.165 -0.166 +2.170 -0.149 +2.171 -0.143 +2.175 -0.129 +2.177 -0.121 +2.182 -0.098 +2.187 -0.075 +2.191 -0.053 +2.195 -0.030 +2.198 -0.007 +2.198 -0.001 +2.200 0.016 +2.203 0.039 +2.204 0.062 +2.205 0.098 +2.205 0.133 +2.204 0.157 +2.204 0.169 +2.200 0.204 +2.200 0.209 +2.196 0.239 +2.190 0.274 +2.182 0.308 +2.173 0.343 +2.162 0.377 +2.159 0.386 +2.150 0.410 +2.137 0.442 +2.137 0.443 +2.122 0.475 +2.109 0.499 +2.106 0.509 +2.107 0.517 +2.111 0.524 +2.118 0.529 +2.126 0.531 +2.900 0.531 +2.900 -0.524 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_LIM09.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_LIM09.txt new file mode 100644 index 000000000..9d721bd98 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_LIM09.txt @@ -0,0 +1,12 @@ +#Name = LIM09 +#Exp = AUG +#CLs = PFC +8 0 +1.095 -0.760 +1.095 -0.680 +1.073 -0.680 +1.073 -0.705 +1.000 -0.705 +1.000 -0.725 +1.073 -0.725 +1.073 -0.760 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_PClow.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_PClow.txt new file mode 100644 index 000000000..b68e9f31f --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_PClow.txt @@ -0,0 +1,10 
@@ +#Name = PClow +#Exp = AUG +#CLs = PFC +6 0 +1.877 -0.874 +1.877 -0.864 +2.075 -0.562 +2.158 -0.562 +2.158 -0.582 +1.966 -0.874 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_PCup.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_PCup.txt new file mode 100644 index 000000000..5d1739a13 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_PCup.txt @@ -0,0 +1,10 @@ +#Name = PCup +#Exp = AUG +#CLs = PFC +6 0 +2.151 0.562 +1.966 0.865 +1.966 0.874 +2.054 0.874 +2.233 0.582 +2.233 0.562 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_SBi.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_SBi.txt new file mode 100644 index 000000000..e589b22a5 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_SBi.txt @@ -0,0 +1,46 @@ +#Name = SBi +#Exp = AUG +#CLs = PFC +42 0 +1.100 -0.584 +1.085 -0.517 +1.072 -0.450 +1.060 -0.382 +1.051 -0.314 +1.043 -0.246 +1.037 -0.177 +1.033 -0.108 +1.031 -0.040 +1.031 0.029 +1.033 0.098 +1.037 0.166 +1.042 0.235 +1.049 0.303 +1.059 0.371 +1.070 0.439 +1.083 0.507 +1.098 0.574 +1.114 0.641 +1.133 0.707 +1.153 0.773 +1.164 0.769 +1.144 0.704 +1.126 0.638 +1.109 0.571 +1.095 0.504 +1.082 0.437 +1.071 0.370 +1.061 0.302 +1.054 0.234 +1.049 0.166 +1.045 0.097 +1.043 0.029 +1.043 -0.040 +1.045 -0.108 +1.049 -0.176 +1.055 -0.244 +1.063 -0.312 +1.072 -0.380 +1.083 -0.448 +1.097 -0.515 +1.112 -0.582 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT1.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT1.txt new file mode 100644 index 000000000..a4043bd4f --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT1.txt @@ -0,0 +1,21 @@ +#Name = TPLT1 +#Exp = AUG +#CLs = PFC +17 0 +1.500 1.185 +1.336 1.093 +1.335 1.093 +1.334 1.093 +1.320 1.103 +1.328 1.114 +1.322 1.119 +1.367 1.178 +1.407 1.178 +1.497 1.196 +1.499 1.196 +1.501 1.195 +1.502 1.194 +1.503 1.192 +1.503 1.190 +1.503 1.188 +1.502 1.186 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT2.txt new file mode 100644 index 000000000..984b2ab57 --- /dev/null +++ 
b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT2.txt @@ -0,0 +1,16 @@ +#Name = TPLT2 +#Exp = AUG +#CLs = PFC +12 0 +1.331 1.087 +1.288 1.031 +1.287 1.030 +1.286 1.029 +1.284 1.029 +1.258 1.037 +1.312 1.108 +1.330 1.094 +1.331 1.093 +1.332 1.092 +1.332 1.090 +1.332 1.089 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT3.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT3.txt new file mode 100644 index 000000000..bf32069a5 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT3.txt @@ -0,0 +1,19 @@ +#Name = TPLT3 +#Exp = AUG +#CLs = PFC +15 0 +1.282 1.023 +1.242 0.969 +1.240 0.967 +1.239 0.966 +1.238 0.965 +1.236 0.965 +1.235 0.965 +1.234 0.965 +1.209 0.973 +1.257 1.036 +1.281 1.030 +1.282 1.029 +1.283 1.028 +1.283 1.026 +1.283 1.024 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT4.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT4.txt new file mode 100644 index 000000000..311c8f465 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT4.txt @@ -0,0 +1,27 @@ +#Name = TPLT4 +#Exp = AUG +#CLs = PFC +23 0 +1.237 0.958 +1.208 0.891 +1.207 0.891 +1.207 0.890 +1.206 0.890 +1.206 0.889 +1.205 0.889 +1.204 0.888 +1.203 0.888 +1.202 0.888 +1.201 0.889 +1.200 0.889 +1.166 0.915 +1.209 0.973 +1.234 0.965 +1.235 0.965 +1.235 0.964 +1.236 0.964 +1.236 0.963 +1.237 0.962 +1.237 0.961 +1.237 0.960 +1.237 0.959 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT5.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT5.txt new file mode 100644 index 000000000..d6429e985 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPLT5.txt @@ -0,0 +1,26 @@ +#Name = TPLT5 +#Exp = AUG +#CLs = PFC +22 0 +1.203 0.881 +1.180 0.820 +1.179 0.818 +1.178 0.816 +1.167 0.802 +1.166 0.801 +1.165 0.800 +1.164 0.800 +1.163 0.800 +1.162 0.800 +1.161 0.800 +1.160 0.801 +1.119 0.832 +1.142 0.861 +1.131 0.869 +1.165 0.914 +1.201 0.886 +1.202 0.886 +1.203 0.885 +1.203 0.884 +1.203 0.883 +1.203 0.882 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT2.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT2.txt new file mode 100644 index 
000000000..5b5f14f58 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT2.txt @@ -0,0 +1,33 @@ +#Name = TPRT2 +#Exp = AUG +#CLs = PFC +29 0 +1.863 0.941 +1.775 1.071 +1.774 1.073 +1.774 1.075 +1.775 1.076 +1.776 1.078 +1.784 1.083 +1.801 1.076 +1.802 1.076 +1.802 1.075 +1.803 1.075 +1.804 1.075 +1.805 1.076 +1.806 1.076 +1.806 1.077 +1.807 1.077 +1.807 1.078 +1.808 1.078 +1.814 1.093 +1.839 1.083 +1.913 0.973 +1.914 0.972 +1.914 0.970 +1.913 0.968 +1.912 0.966 +1.870 0.939 +1.868 0.938 +1.866 0.938 +1.865 0.939 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT3.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT3.txt new file mode 100644 index 000000000..c7dfe9d19 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT3.txt @@ -0,0 +1,18 @@ +#Name = TPRT3 +#Exp = AUG +#CLs = PFC +14 0 +1.796 1.082 +1.729 1.106 +1.727 1.107 +1.726 1.108 +1.725 1.109 +1.725 1.111 +1.726 1.112 +1.733 1.132 +1.809 1.105 +1.802 1.084 +1.801 1.083 +1.800 1.082 +1.799 1.081 +1.797 1.081 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT4.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT4.txt new file mode 100644 index 000000000..c077d5ef8 --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT4.txt @@ -0,0 +1,16 @@ +#Name = TPRT4 +#Exp = AUG +#CLs = PFC +12 0 +1.718 1.109 +1.651 1.134 +1.650 1.135 +1.649 1.136 +1.648 1.138 +1.648 1.140 +1.656 1.160 +1.732 1.133 +1.725 1.112 +1.724 1.111 +1.722 1.110 +1.720 1.109 diff --git a/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT5.txt b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT5.txt new file mode 100644 index 000000000..20df8a9ed --- /dev/null +++ b/tofu/geom/inputs/TFG_PFC_ExpAUG_TPRT5.txt @@ -0,0 +1,20 @@ +#Name = TPRT5 +#Exp = AUG +#CLs = PFC +16 0 +1.644 1.137 +1.509 1.190 +1.508 1.191 +1.507 1.195 +1.506 1.195 +1.504 1.198 +1.501 1.200 +1.494 1.203 +1.493 1.204 +1.493 1.205 +1.493 1.206 +1.501 1.217 +1.655 1.161 +1.646 1.138 +1.646 1.137 +1.645 1.137 diff --git a/tofu/geom/inputs/TFG_Ves_ExpAUG_VESiR.txt b/tofu/geom/inputs/TFG_Ves_ExpAUG_VESiR.txt new file mode 
100644 index 000000000..cb8ca007d --- /dev/null +++ b/tofu/geom/inputs/TFG_Ves_ExpAUG_VESiR.txt @@ -0,0 +1,40 @@ +#Name = VESiR +#Exp = AUG +#CLs = Ves +36 0 +2.521 0.000 +2.506 0.210 +2.461 0.416 +2.387 0.613 +2.286 0.798 +2.159 0.966 +2.010 1.114 +1.841 1.240 +1.705 1.303 +1.558 1.329 +1.409 1.315 +1.269 1.264 +1.147 1.177 +1.052 1.062 +0.990 0.927 +0.965 0.779 +0.957 0.519 +0.953 0.260 +0.951 0.000 +0.953 -0.260 +0.957 -0.519 +0.965 -0.779 +0.990 -0.927 +1.052 -1.062 +1.147 -1.177 +1.269 -1.264 +1.409 -1.315 +1.558 -1.329 +1.705 -1.303 +1.841 -1.240 +2.010 -1.114 +2.159 -0.966 +2.286 -0.798 +2.387 -0.613 +2.461 -0.416 +2.506 -0.210 diff --git a/tofu/geom/inputs/TFG_Ves_ExpNSTX_V0.txt b/tofu/geom/inputs/TFG_Ves_ExpNSTX_V0.txt new file mode 100644 index 000000000..6d0105048 --- /dev/null +++ b/tofu/geom/inputs/TFG_Ves_ExpNSTX_V0.txt @@ -0,0 +1,139 @@ +# Cls = Ves +# Exp = NSTX +# Name = V0 +1.350000000000000000e+02 0.000000000000000000e+00 +1.822552000000000061e-01 -6.049220000000000291e-02 +1.822552000000000061e-01 -1.091094999999999982e-01 +1.822552000000000061e-01 -1.577268000000000003e-01 +1.904033000000000253e-01 -2.063441999999999776e-01 +1.904033000000000253e-01 -2.549615000000000076e-01 +1.904033000000000253e-01 -3.035788000000000375e-01 +1.904033000000000253e-01 -3.521961000000000119e-01 +1.904033000000000253e-01 -4.008133999999999864e-01 +1.904033000000000253e-01 -4.494307000000000163e-01 +1.904033000000000253e-01 -4.980479999999999907e-01 +1.904033000000000253e-01 -5.466653000000000207e-01 +1.904033000000000253e-01 -5.952825999999999951e-01 +1.904033000000000253e-01 -6.438998999999999695e-01 +1.904033000000000253e-01 -6.925172000000000549e-01 +1.904033000000000253e-01 -7.411346000000000878e-01 +1.904033000000000253e-01 -7.897518999999999512e-01 +1.904033000000000253e-01 -8.383692000000000366e-01 +1.904033000000000253e-01 -8.869865000000000110e-01 +1.904033000000000253e-01 -9.356037999999999855e-01 +1.904033000000000253e-01 -9.842211000000000709e-01 
+1.904033000000000253e-01 -1.028786999999999896e+00 +2.148477000000000026e-01 -1.076053799999999949e+00 +2.528721000000000441e-01 -1.135475000000000012e+00 +2.746004000000000222e-01 -1.200297999999999865e+00 +2.718844000000000261e-01 -1.259719199999999928e+00 +2.718844000000000261e-01 -1.308336500000000013e+00 +2.718844000000000261e-01 -1.356953800000000099e+00 +2.637362999999999791e-01 -1.381262500000000060e+00 +2.678103000000000011e-01 -1.444059800000000005e+00 +2.637362999999999791e-01 -1.521712499999999801e+00 +2.637362999999999791e-01 -1.575731700000000179e+00 +3.017606999999999928e-01 -1.616246099999999908e+00 +3.708256000000000330e-01 -1.625506600000000024e+00 +4.592907000000000517e-01 -1.616246099999999908e+00 +5.326235999999999748e-01 -1.616246099999999908e+00 +6.874375999999999820e-01 -1.606117500000000087e+00 +7.689186000000000076e-01 -1.574111100000000096e+00 +8.503995999999999222e-01 -1.541699600000000059e+00 +9.318805999999999479e-01 -1.507667500000000160e+00 +1.013361600000000085e+00 -1.478497100000000009e+00 +1.062250200000000033e+00 -1.462291299999999961e+00 +1.066324300000000003e+00 -1.415699700000000227e+00 +1.102990800000000160e+00 -1.348850899999999964e+00 +1.135583199999999904e+00 -1.300233600000000100e+00 +1.176323699999999972e+00 -1.243513399999999880e+00 +1.217064200000000040e+00 -1.186793200000000104e+00 +1.257804699999999887e+00 -1.135475000000000012e+00 +1.290397100000000075e+00 -1.081455700000000020e+00 +1.322989500000000040e+00 -1.049044199999999982e+00 +1.339285700000000023e+00 -9.923239999999999839e-01 +1.355581900000000006e+00 -9.464076000000000155e-01 +1.363729999999999887e+00 -9.031923000000000590e-01 +1.380026200000000092e+00 -8.545749000000001372e-01 +1.396322400000000075e+00 -8.059576000000000517e-01 +1.412618600000000058e+00 -7.573403000000000773e-01 +1.428914800000000040e+00 -7.087229999999999919e-01 +1.445211000000000023e+00 -6.601057000000000174e-01 +1.461507200000000006e+00 -6.114884000000000430e-01 
+1.477803399999999989e+00 -5.628710999999999576e-01 +1.482692299999999852e+00 -4.607746999999999815e-01 +1.502247800000000133e+00 -3.738038000000000194e-01 +1.502247800000000133e+00 -3.359903000000000195e-01 +1.518544000000000116e+00 -2.792701000000000211e-01 +1.534840199999999877e+00 -1.941898000000000235e-01 +1.551136400000000082e+00 -1.145115000000000022e-01 +1.551136400000000082e+00 -7.669800000000000229e-02 +1.559284499999999962e+00 -4.428649999999999948e-02 +1.559284499999999962e+00 4.330899999999999507e-03 +1.562000500000000125e+00 6.375200000000000311e-02 +1.542988300000000201e+00 1.218227000000000060e-01 +1.542988300000000201e+00 1.663886000000000254e-01 +1.518544000000000116e+00 2.069030000000000036e-01 +1.526692099999999996e+00 2.312115999999999894e-01 +1.518544000000000116e+00 2.757775000000000087e-01 +1.510395900000000013e+00 3.122404999999999764e-01 +1.502247800000000133e+00 3.662597000000000214e-01 +1.494099699999999808e+00 4.256808999999999732e-01 +1.475087399999999827e+00 4.634942999999999702e-01 +1.477803399999999989e+00 5.593785000000000007e-01 +1.461507200000000006e+00 6.039442999999999895e-01 +1.445211000000000023e+00 6.525615999999999639e-01 +1.428914800000000040e+00 7.011790000000001077e-01 +1.409902600000000117e+00 7.497962999999999711e-01 +1.388174299999999972e+00 8.200212999999999530e-01 +1.371878100000000211e+00 8.686386000000000385e-01 +1.355581900000000006e+00 9.199567999999999079e-01 +1.339285700000000023e+00 9.766770000000000174e-01 +1.298545200000000177e+00 1.063783099999999981e+00 +1.265952799999999989e+00 1.114426099999999975e+00 +1.225212300000000143e+00 1.160342399999999996e+00 +1.192619899999999955e+00 1.219763600000000059e+00 +1.143731300000000006e+00 1.279184700000000063e+00 +1.111138900000000040e+00 1.325101099999999921e+00 +1.054102100000000153e+00 1.402078500000000005e+00 +1.054102100000000153e+00 1.450695800000000091e+00 +9.970654000000001016e-01 1.477435300000000007e+00 +9.155844000000000760e-01 1.509846900000000103e+00 
+8.341034000000000503e-01 1.540637800000000057e+00 +7.526224000000000247e-01 1.574669900000000178e+00 +6.670672999999999186e-01 1.608702099999999913e+00 +5.489197999999999578e-01 1.608702099999999913e+00 +4.755868999999999791e-01 1.608702099999999913e+00 +3.941059000000000090e-01 1.608702099999999913e+00 +2.800325000000000175e-01 1.592496300000000087e+00 +2.637362999999999791e-01 1.507416000000000089e+00 +2.474400999999999962e-01 1.462850100000000042e+00 +2.637362999999999791e-01 1.381821300000000141e+00 +2.637362999999999791e-01 1.333204000000000056e+00 +2.637362999999999791e-01 1.284586700000000192e+00 +2.637362999999999791e-01 1.235969400000000107e+00 +2.637362999999999791e-01 1.187352000000000185e+00 +2.555881999999999876e-01 1.130631900000000023e+00 +2.311439000000000132e-01 1.084715499999999944e+00 +1.985513999999999890e-01 1.021242900000000065e+00 +1.904033000000000253e-01 9.523684000000000038e-01 +1.904033000000000253e-01 9.037511000000000294e-01 +1.904033000000000253e-01 8.551337999999999440e-01 +1.904033000000000253e-01 8.065165000000000806e-01 +1.904033000000000253e-01 7.578991000000000477e-01 +1.904033000000000253e-01 7.092817999999999623e-01 +1.904033000000000253e-01 6.606645000000000989e-01 +1.904033000000000253e-01 6.120472000000000135e-01 +1.904033000000000253e-01 5.634299000000000390e-01 +1.904033000000000253e-01 5.148125999999999536e-01 +1.904033000000000253e-01 4.661952999999999792e-01 +1.904033000000000253e-01 4.175780000000000602e-01 +1.904033000000000253e-01 3.689607000000000303e-01 +1.904033000000000253e-01 3.203434000000000004e-01 +1.904033000000000253e-01 2.717260999999999704e-01 +1.904033000000000253e-01 2.231087000000000209e-01 +1.904033000000000253e-01 1.744913999999999910e-01 +1.822552000000000061e-01 1.339770000000000127e-01 +1.822552000000000061e-01 8.535970000000001057e-02 +1.822552000000000061e-01 3.674240000000000145e-02 +1.822552000000000061e-01 -1.187489999999999900e-02 diff --git a/tofu/geom/utils.py b/tofu/geom/utils.py index 
e1bea4af6..f880fd889 100644 --- a/tofu/geom/utils.py +++ b/tofu/geom/utils.py @@ -20,7 +20,7 @@ __all__ = ['coords_transform', 'get_nIne1e2', 'get_X12fromflat', 'compute_RaysCones', - 'create_config', + 'get_available_config', 'create_config', 'create_CamLOS1D', 'create_CamLOS2D'] @@ -642,8 +642,18 @@ def _compute_CamLOS2D_pinhole(P=None, F=0.1, D12=0.1, N12=100, _ExpWest = 'WEST' _ExpJET = 'JET' _ExpITER = 'ITER' +_ExpAUG = 'AUG' +_ExpNSTX = 'NSTX' +# Default config +_DEFCONFIG = 'ITER' + +_URL_TUTO = ('https://tofuproject.github.io/tofu/auto_examples/tutorials/' + + 'tuto_plot_create_geometry.html') # Dictionnary of unique config names +# For each config, indicates which structural elements it comprises +# Elements are sorted by class (Ves, PFC...) +# For each element, a unique txt file containing the geometry will be loaded _DCONFIG = {'WEST-V1': {'Exp': _ExpWest, 'Ves': ['V1']}, 'ITER-V1': {'Exp': _ExpITER, @@ -674,30 +684,94 @@ def _compute_CamLOS2D_pinhole(P=None, F=0.1, D12=0.1, N12=100, 'BLK11', 'BLK12', 'BLK13', 'BLK14', 'BLK15', 'BLK16', 'BLK17', 'BLK18', 'Div1', 'Div2', 'Div3', - 'Div4', 'Div5', 'Div6']} + 'Div4', 'Div5', 'Div6']}, + 'AUG-V1': {'Exp': _ExpAUG, + 'Ves': ['VESiR'], + 'PFC': ['D2cdome', 'D2cdomL', 'D2cdomR', 'D2ci1', + 'D2ci2', 'D2cTPib', 'D2cTPic', 'D2cTPi', + 'D2dBG2', 'D2dBl1', 'D2dBl2', 'D2dBl3', + 'D2dBu1', 'D2dBu2', 'D2dBu3', 'D2dBu4', + 'D3BG10', 'D3BG1', 'ICRHa', 'LIM09', 'PClow', + 'PCup', 'SBi', 'TPLT1', 'TPLT2', 'TPLT3', + 'TPLT4', 'TPLT5', 'TPRT2', 'TPRT3', 'TPRT4', + 'TPRT5']}, + 'NSTX-V0': {'Exp': _ExpNSTX, + 'Ves': ['V0']} } -# Each config can be called by various names (for benchmark and -# retro-compatibility), this table stores the available names for each unique -# config in _DCONFIG -_DCONFIG_TABLE = {'ITER': 'ITER-V2', - 'JET': 'JET-V0', - 'WEST': 'WEST-V4', - 'A1': 'WEST-V1', - 'A2': 'ITER-V1', - 'A3': 'WEST-Sep', - 'B1': 'WEST-V2', - 'B2': 'WEST-V3', - 'B3': 'WEST-V4', - 'B4': 'ITER-V2'} +# Each config can be called 
by various names / shortcuts (for benchmark and +# retro-compatibility), this table stores, for each shortcut, +# the associated unique name it refers to +_DCONFIG_SHORTCUTS = {'ITER': 'ITER-V2', + 'JET': 'JET-V0', + 'WEST': 'WEST-V4', + 'A1': 'WEST-V1', + 'A2': 'ITER-V1', + 'A3': 'WEST-Sep', + 'B1': 'WEST-V2', + 'B2': 'WEST-V3', + 'B3': 'WEST-V4', + 'B4': 'ITER-V2', + 'AUG': 'AUG-V1', + 'NSTX': 'NSTX-V0'} + + +def _get_listconfig(dconfig=_DCONFIG, dconfig_shortcuts=_DCONFIG_SHORTCUTS, + returnas=str): + """ Hidden function generating the config names table as a str or dict """ + assert returnas in [dict, str] + dc = {k0: [k0] + sorted([k1 for k1, v1 in dconfig_shortcuts.items() + if v1 == k0]) + for k0 in sorted(dconfig.keys())} + if returnas is dict: + return dc + else: + l0 = np.max([len(k0) for k0 in dc.keys()] + [len('unique names')]) + l1 = np.max([len(str(v0)) for v0 in dc.values()] + [len('shortcuts')]) + msg = ("\n\t" + "unique names".ljust(l0) + "\tshortcuts" + + "\n\t" + "-"*l0 + " \t" + "-"*l1 + + "\n\t- " + + "\n\t- ".join(["{}\t{}".format(k0.ljust(l0), v0) + for k0, v0 in dc.items()])) + return msg -# Default config -_DEFCONFIG = 'ITER' + +def get_available_config(dconfig=_DCONFIG, + dconfig_shortcuts=_DCONFIG_SHORTCUTS, + verb=True, returnas=False): + """ Print a table showing all pre-defined config + + Each pre-defined config in tofu can be called by its unique name or + by a series of shortcuts / alterantive names refereing to the same unique + name. this feature is useful for retro-compatibility and for making sure a + standard name always refers to the latest (most detailed) available version + of the geometry. 
+ + Can also return the table as str + + No input arg needed: + >>> import tofu as tf + >>> tf.geom.utils.get_available_config() + + """ + msg = ("A config is the geometry of a tokamak\n" + + "You can define your own" + + ", see online tutorial at:\n\t{}\n".format(_URL_TUTO) + + "tofu also also provides some pre-defined config ready to load\n" + + "They are available via their name or via shortcuts\n" + + _get_listconfig(dconfig=dconfig, + dconfig_shortcuts=dconfig_shortcuts) + + "\n\n => to get a pre-defined config, call for example:\n" + + "\tconfig = tf.geom.utils.create_config('ITER')") + if verb is True: + print(msg) + if returnas in [None, True, str]: + return msg def _create_config_testcase(config=None, returnas='object', path=_path_testcases, dconfig=_DCONFIG, - dconfig_table=_DCONFIG_TABLE): + dconfig_shortcuts=_DCONFIG_SHORTCUTS): """ Load the desired test case configuration Choose from one of the reference preset configurations: @@ -715,16 +789,14 @@ def _create_config_testcase(config=None, returnas='object', if config in dconfig.keys(): pass - elif config in dconfig_table.keys(): + elif config in dconfig_shortcuts.keys(): # Get corresponding config - config = dconfig_table[config] + config = dconfig_shortcuts[config] else: - msg = ("The provided config name is not valid:\n" - + "Please choose among either:\n" - + "\t - unique keys: {}\n".format(list(dconfig.keys())) - + "\t - shortcuts : {}\n\n".format(list(dconfig_table.keys())) - + " => you provided: case = {}\n".format(config)) + msg = ("\nThe provided config name is not valid.\n" + + get_available_config(verb=False, returnas=str) + + "\n\n => you provided: {}\n".format(config)) raise Exception(msg) # Get file names for config @@ -735,20 +807,36 @@ def _create_config_testcase(config=None, returnas='object', for cc in lcls: for ss in dconfig[config][cc]: ff = [f for f in lf - if all([s in f for s in [cc,Exp,ss]])] - if not len(ff) == 1: - msg = "No / several matching files\n" + if all([s in f for s in 
[cc, Exp, ss]])] + if len(ff) == 0: + msg = "No matching files\n" msg += " Folder: %s\n"%path msg += " Criteria: [%s, %s]\n"%(cc,ss) msg += " Matching: "+"\n ".join(ff) raise Exception(msg) - pfe = os.path.join(path,ff[0]) - obj = eval('_core.'+cc).from_txt(pfe, Name=ss, Type='Tor', - Exp=dconfig[config]['Exp'], - out=returnas) - if returnas not in ['object', object]: - obj = ((ss,{'Poly':obj[0], 'pos':obj[1], 'extent':obj[2]}),) - lS.append(obj) + elif len(ff) > 1: + # More demanding criterion + ssbis, Expbis = '_'+ss+'.txt', '_Exp'+Exp+'_' + ff = [fff for fff in ff if ssbis in fff and Expbis in fff] + if len(ff) != 1: + msg = ("No / several matching files\n" + + " Folder: {}\n".format(path) + + " Criteria: [{}, {}]\n".format(cc, ss) + + " Matching: "+"\n ".join(ff)) + raise Exception(msg) + + pfe = os.path.join(path, ff[0]) + try: + obj = eval('_core.'+cc).from_txt(pfe, Name=ss, Type='Tor', + Exp=dconfig[config]['Exp'], + out=returnas) + if returnas not in ['object', object]: + obj = ((ss, {'Poly': obj[0], + 'pos': obj[1], 'extent': obj[2]}),) + lS.append(obj) + except Exception as err: + msg = "Could not be loaded: {}".format(ff[0]) + warnings.warn(msg) if returnas == 'dict': conf = dict([tt for tt in lS]) else: @@ -819,11 +907,13 @@ def create_config(case=None, Exp='Dummy', Type='Tor', any([pp is not None for pp in lp])] if np.sum(lc) > 1: msg = ("Please provide either:\n" - + "\t- case: the name of a stored case\n" - + "\t- geometrical parameters {}".format(lpstr)) + + "\t- case: the name of a pre-defined config\n" + + "\t- geometrical parameters {}\n\n".format(lpstr)) raise Exception(msg) elif not any(lc): - case = _DEFCONFIG + msg = get_available_config(verb=False, returnas=str) + raise Exception(msg) + # case = _DEFCONFIG # Get config, either from known case or geometrical parameterization if case is not None: diff --git a/tofu/imas2tofu/_core.py b/tofu/imas2tofu/_core.py index 39ee55ce1..d029ed350 100644 --- a/tofu/imas2tofu/_core.py +++ 
b/tofu/imas2tofu/_core.py @@ -154,8 +154,14 @@ class MultiIDSLoader(object): 'dim':'B', 'quant':'BT', 'units':'T'}, '2dBZ':{'str':'time_slice[time].ggd[0].b_field_z[0].values', 'dim':'B', 'quant':'BZ', 'units':'T'}, - '2dmeshNodes':{'str':'grids_ggd[0].grid[0].space[0].objects_per_dimension[0].object[].geometry'}, - '2dmeshFaces':{'str':'grids_ggd[0].grid[0].space[0].objects_per_dimension[2].object[].nodes'}}, + '2dmeshNodes': {'str': ('grids_ggd[0].grid[0].space[0]' + + '.objects_per_dimension[0]' + + '.object[].geometry')}, + '2dmeshFaces': {'str': ('grids_ggd[0].grid[0].space[0]' + + '.objects_per_dimension[2]' + + '.object[].nodes')}, + '2dmeshR': {'str': 'time_slice[0].profiles_2d[0].r'}, + '2dmeshZ': {'str': 'time_slice[0].profiles_2d[0].z'}}, 'core_profiles': {'t':{'str':'time'}, @@ -251,15 +257,16 @@ class MultiIDSLoader(object): 'floop_flux':{'str':'flux_loop[chan].flux.data', 'dim':'B flux', 'quant':'B flux', 'units':'Wb'}, 'floop_name':{'str':'flux_loop[chan].name'}, - 'floop_R':{'str':'flux_loop[chan].position.r', - 'dim':'distance', 'quant':'R', 'units':'m'}, - 'floop_Z':{'str':'flux_loop[chan].position.z', - 'dim':'distance', 'quant':'Z', 'units':'m'}}, + 'floop_R': {'str': 'flux_loop[chan].position.r', + 'dim': 'distance', 'quant': 'R', 'units': 'm'}, + 'floop_Z': {'str': 'flux_loop[chan].position.z', + 'dim': 'distance', 'quant': 'Z', 'units': 'm'}}, 'barometry': - {'t':{'str':'gauge[chan].pressure.time'}, - 'p':{'str':'gauge[chan].pressure.data', - 'dim':'pressure', 'quant':'p', 'units':'Pa?'}}, + {'t': {'str': 'gauge[chan].pressure.time'}, + 'names': {'str': 'gauge[chan].name'}, + 'p': {'str': 'gauge[chan].pressure.data', + 'dim': 'pressure', 'quant': 'p', 'units': 'Pa?'}}, 'neutron_diagnostic': {'t':{'str':'time', 'units':'s'}, @@ -282,6 +289,7 @@ class MultiIDSLoader(object): 'tau1keV':{'str':'channel[chan].optical_depth.data', 'dim':'optical_depth', 'quant':'tau', 'units':'adim.'}, 'validity_timed': {'str':'channel[chan].t_e.validity_timed'}, + 
'names': {'str': 'channel[chan].name'}, 'Te0': {'str':'t_e_central.data', 'dim':'temperature', 'quant':'Te', 'units':'eV'}}, @@ -295,62 +303,82 @@ class MultiIDSLoader(object): 'dim':'distance', 'quant':'Z', 'units':'m'}, 'phi':{'str':'channel[chan].position.phi.data', 'dim':'angle', 'quant':'phi', 'units':'rad'}, - 'mode':{'str':'mode'}, - 'sweep':{'str':'sweep_time'}}, + 'names': {'str': 'channel[chan].name'}, + 'mode': {'str': 'mode'}, + 'sweep': {'str': 'sweep_time'}}, 'interferometer': - {'t':{'str':'time', - 'quant':'t', 'units':'s'}, - 'ne_integ':{'str':'channel[chan].n_e_line.data', - 'dim':'ne_integ', 'quant':'ne_integ', 'units':'/m2'}}, + {'t': {'str': 'time', + 'quant': 't', 'units': 's'}, + 'names': {'str': 'channel[chan].name'}, + 'ne_integ': {'str': 'channel[chan].n_e_line.data', + 'dim': 'ne_integ', 'quant': 'ne_integ', + 'units': '/m2', 'Brightness': True}}, 'polarimeter': - {'t':{'str':'time', - 'quant':'t', 'units':'s'}, - 'lamb':{'str':'channel[chan].wavelength', - 'dim':'distance', 'quant':'wavelength', 'units':'m'}, - 'fangle':{'str':'channel[chan].faraday_angle.data', - 'dim':'angle', 'quant':'faraday angle', 'units':'rad'}}, + {'t': {'str': 'time', + 'quant': 't', 'units': 's'}, + 'lamb': {'str': 'channel[chan].wavelength', + 'dim': 'distance', 'quant': 'wavelength', + 'units': 'm'}, + 'fangle': {'str': 'channel[chan].faraday_angle.data', + 'dim': 'angle', 'quant': 'faraday angle', + 'units': 'rad', 'Brightness': True}, + 'names': {'str': 'channel[chan].name'}}, 'bolometer': - {'t':{'str':'channel[chan].power.time', - 'quant':'t', 'units':'s'}, - 'power':{'str':'channel[chan].power.data', - 'dim':'power', 'quant':'power radiative', 'units':'W'}, - 'etendue':{'str':'channel[chan].etendue', - 'dim':'etendue', 'quant':'etendue', - 'units':'m2.sr'}, - 'tpower':{'str':'time','quant':'t', 'units':'s'}, - 'prad':{'str':'power_radiated_total', - 'dim':'power', 'quant':'power radiative', 'units':'W'}, - 'pradbulk':{'str':'power_radiated_inside_lcfs', 
- 'dim':'power', 'quant':'power radiative', - 'units':'W'}}, + {'t': {'str': 'channel[chan].power.time', + 'quant': 't', 'units': 's'}, + 'power': {'str': 'channel[chan].power.data', + 'dim': 'power', 'quant': 'power radiative', + 'units': 'W', 'Brightness': False}, + 'etendue': {'str': 'channel[chan].etendue', + 'dim': 'etendue', 'quant': 'etendue', + 'units': 'm2.sr'}, + 'names': {'str': 'channel[chan].name'}, + 'tpower': {'str': 'time', 'quant': 't', 'units': 's'}, + 'prad': {'str': 'power_radiated_total', + 'dim': 'power', 'quant': 'power radiative', + 'units': 'W'}, + 'pradbulk': {'str': 'power_radiated_inside_lcfs', + 'dim': 'power', 'quant': 'power radiative', + 'units': 'W'}}, 'soft_x_rays': - {'t':{'str':'time', - 'quant':'t', 'units':'s'}, - 'power':{'str':'channel[chan].power.data', - 'dim':'power', 'quant':'power radiative', 'units':'W'}, - 'brightness':{'str':'channel[chan].brightness.data', - 'dim':'brightness', 'quant':'brightness', 'units':'W/(m2.sr)'}, - 'etendue':{'str':'channel[chan].etendue', - 'dim':'etendue', 'quant':'etendue', 'units':'m2.sr'}}, + {'t': {'str': 'time', + 'quant': 't', 'units': 's'}, + 'power': {'str': 'channel[chan].power.data', + 'dim': 'power', 'quant': 'power radiative', + 'units': 'W', 'Brightness': False}, + 'brightness': {'str': 'channel[chan].brightness.data', + 'dim': 'brightness', 'quant': 'brightness', + 'units': 'W/(m2.sr)', 'Brightness': True}, + 'names': {'str': 'channel[chan].name'}, + 'etendue': {'str': 'channel[chan].etendue', + 'dim': 'etendue', 'quant': 'etendue', + 'units': 'm2.sr'}}, 'spectrometer_visible': {'t':{'str':'channel[chan].grating_spectrometer.radiance_spectral.time', 'quant':'t', 'units':'s'}, - 'spectra':{'str':'channel[chan].grating_spectrometer.radiance_spectral.data', - 'dim':'radiance_spectral', 'quant':'radiance_spectral', 'units':'ph/s/(m2.sr)/m'}, + 'spectra': {'str': ('channel[chan].grating_spectrometer' + + '.radiance_spectral.data'), + 'dim': 'radiance_spectral', + 'quant': 
'radiance_spectral', + 'units': 'ph/s/(m2.sr)/m', 'Brightness': True}, + 'names': {'str': 'channel[chan].name'}, 'lamb':{'str':'channel[chan].grating_spectrometer.wavelengths', 'dim':'wavelength', 'quant':'wavelength', 'units':'m'}}, 'bremsstrahlung_visible': - {'t':{'str':'time', - 'quant':'t', 'units':'s'}, - 'radiance':{'str':'channel[chan].radiance_spectral.data', - 'dim':'radiance_spectral', 'quant':'radiance_spectral', - 'units':'ph/s/(m2.sr)/m'}, + {'t': {'str': 'time', + 'quant': 't', 'units': 's'}, + 'radiance': {'str': 'channel[chan].radiance_spectral.data', + 'dim': 'radiance_spectral', + 'quant': 'radiance_spectral', + 'units': 'ph/s/(m2.sr)/m', + 'Brightness': True}, + 'names': {'str': 'channel[chan].name'}, 'lamb_up': {'str':'channel[chan].filter.wavelength_upper'}, 'lamb_lo': {'str':'channel[chan].filter.wavelength_lower'}}, } @@ -479,11 +507,12 @@ class MultiIDSLoader(object): _icmod = lambda al, ar, axis=0: np.sum(al - ar, axis=axis) _eqB = lambda BT, BR, BZ: np.sqrt(BT**2 + BR**2 + BZ**2) def _rhopn1d(psi): - return np.sqrt( (psi - psi[:,0:1]) / (psi[:,-1] - psi[:,0])[:,None] ) + return np.sqrt((psi - psi[:, 0:1]) / (psi[:, -1] - psi[:, 0])[:, None]) def _rhopn2d(psi, psi0, psisep): - return np.sqrt( (psi - psi0[:,None]) / (psisep[:,None] - psi0[:,None]) ) + return np.sqrt( + (psi - psi0[:, None]) / (psisep[:, None] - psi0[:, None])) def _rhotn2d(phi): - return np.sqrt(phi / np.nanmax(phi, axis=1)[:,None]) + return np.sqrt(np.abs(phi) / np.nanmax(np.abs(phi), axis=1)[:, None]) def _eqSep(sepR, sepZ, npts=100): nt = len(sepR) @@ -614,7 +643,7 @@ def _rhosign(rho, theta): 'core_profiles':['t','Te','ne']} } - + _IDS_BASE = ['wall', 'pulse_schedule'] ################################### @@ -625,7 +654,8 @@ def _rhosign(rho, theta): def __init__(self, preset=None, dids=None, ids=None, occ=None, idd=None, shot=None, run=None, refshot=None, refrun=None, - user=None, tokamak=None, version=None, get=None, ref=True): + user=None, tokamak=None, version=None, 
+ ids_base=None, synthdiag=None, get=None, ref=True): super(MultiIDSLoader, self).__init__() # Initialize dicts @@ -640,14 +670,24 @@ def __init__(self, preset=None, dids=None, ids=None, occ=None, idd=None, assert len(lidd) <= 1 idd = lidd[0] if len(lidd) > 0 else None self.add_ids(preset=preset, ids=ids, occ=occ, idd=idd, get=False) - if get is None and (ids is not None or preset is not None): + if ids_base is None: + if not all([iids in self._IDS_BASE + for iids in self._dids.keys()]): + ids_base = True + if ids_base is True: + self.add_ids_base(get=False) + if synthdiag is None: + synthdiag = False + if synthdiag is True: + self.add_ids_synthdiag(get=False) + if get is None and (len(self._dids) > 0 or preset is not None): get = True else: self.set_dids(dids) if get is None: get = True self._set_fsig() - if get: + if get is True: self.open_get_close() def _init_dict(self): @@ -1268,13 +1308,93 @@ def get_idd(self, idd=None): assert idd in self._didd.keys() return self._didd[idd]['idd'] + def _checkformat_ids_synthdiag(self, ids=None): + lc = [ids is None, isinstance(ids, str), isinstance(ids, list), + hasattr(ids, 'ids_properties')] + if not any(lc): + msg = ("Provided ids not understood!\n" + + "\t- provided: {}".format(str(ids))) + raise Exception(msg) + + lidssynth = [kk for kk, vv in self._didsdiag.items() + if 'synth' in vv.keys()] + if lc[0]: + ids = sorted(set(self._dids.keys()).intersection(lidssynth)) + elif lc[1]: + ids = [ids] + elif lc[3]: + ids = [ids.__class__.__name__] + + ids = sorted( + set(ids).intersection(lidssynth).intersection(self._dids.keys())) + if len(ids) == 0: + msg = ("The provided ids must be:\n" + + "\t- an is name (str)\n" + + "\t- a list of ids names\n" + + "\t- an ids instance\n" + + "\t- None\n" + + "And it must:\n" + + "\t- Already be added (cf. self.dids.keys())\n" + + "\t- Be a diagnostic ids with tabulated 'synth'") + # Turn to warning? 
=> see user feedback + raise Exception(msg) + return ids + + def get_inputs_for_synthsignal(self, ids=None, verb=True, returnas=False): + """ Return and / or print a dict of the default inputs for desired ids + + Synthetic signal for a given diagnostic ids is computed from + signal that comes from other ids (e.g. core_profiles, equilibrium...) + For some diagnostics, the inputs required are already tabulated in + self._didsdiag[]['synth'] + + This method simply shows this already tabulated information + Advanced users may edit this hidden dictionnary to their needs - def _checkformat_ids(self, ids, occ=None, idd=None, isget=None): + """ + assert returnas in [False, True, dict, list] + ids = self._checkformat_ids_synthdiag(ids) + + # Deal with real case + if len(ids) == 1: + out = self._didsdiag[ids[0]]['synth'] + lids = sorted(out.get('dsig', {}).keys()) + if verb: + dmsg = ("\n\t-" + + "\n\t-".join([ + kk+':\n\t\t'+'\n\t\t'.join(vv) + for kk, vv in out.get('dsig', {}).items()])) + extra = {kk: vv for kk, vv in out.items() + if kk not in ['dsynth', 'dsig']} + msg = ("For computing synthetic signal for ids {}".format(ids) + + dmsg + '\n' + + "\t- Extra parameters (if any):\n" + + "\t\t{}\n".format(extra)) + print(msg) + if returnas is True: + returnas = dict + else: + out = None + lids = sorted(set(itt.chain.from_iterable([ + self._didsdiag[idsi]['synth'].get('dsig', {}).keys() + for idsi in ids]))) + if verb: + print(lids) + if returnas is True: + returnas = list + if returnas is dict: + return out + elif returnas is list: + return lids + + def _checkformat_ids(self, ids, occ=None, idd=None, isget=None, + synthdiag=False): # Check value and make dict if necessary lc = [type(ids) is str, type(ids) is list, - hasattr(ids, 'ids_properties')] + hasattr(ids, 'ids_properties'), + ids is None and synthdiag is True] if not any(lc): msg = "Arg ids must be either:\n" msg += " - str : valid ids name\n" @@ -1284,9 +1404,15 @@ def _checkformat_ids(self, ids, occ=None, idd=None, 
isget=None): msg += " Conditions: %s"%str(lc) raise Exception(msg) + # Synthdiag-specific + if synthdiag is True: + ids = self.get_inputs_for_synthsignal(ids=ids, verb=False, + returnas=list) + lc[1] = True + # Prepare dids[name] = {'ids':None/ids, 'needidd':bool} dids = {} - if lc[0]or lc[1]: + if lc[0] or lc[1]: if lc[0]: ids = [ids] for ids_ in ids: @@ -1323,7 +1449,6 @@ def _checkformat_ids(self, ids, occ=None, idd=None, isget=None): if dids[lids[ii]]['ids'] is not None: dids[lids[ii]]['ids'] = [dids[lids[ii]]['ids']]*nocc - # Format isget / get for ii in range(0,nids): nocc = dids[lids[ii]]['nocc'] @@ -1331,9 +1456,9 @@ def _checkformat_ids(self, ids, occ=None, idd=None, isget=None): isgeti = np.zeros((nocc,), dtype=bool) if dids[lids[ii]]['ids'] is not None: if isget is None: - isgeti = False + isgeti = np.r_[False] elif type(isget) is bool: - isgeti = bool(isget) + isgeti = np.r_[bool(isget)] elif hasattr(isget,'__iter__'): if len(isget) == nids: isgeti = np.r_[isget[ii]] @@ -1347,8 +1472,6 @@ def _checkformat_ids(self, ids, occ=None, idd=None, isget=None): return dids - - def add_ids(self, ids=None, occ=None, idd=None, preset=None, shot=None, run=None, refshot=None, refrun=None, user=None, tokamak=None, version=None, @@ -1395,14 +1518,44 @@ def add_ids(self, ids=None, occ=None, idd=None, preset=None, assert idd in self._didd.keys() # Add ids - if ids is not None: dids = self._checkformat_ids(ids, occ=occ, idd=idd, isget=isget) self._dids.update(dids) if get: - self.open_get_close(ids=ids) + self.open_get_close() + def add_ids_base(self, occ=None, idd=None, + shot=None, run=None, refshot=None, refrun=None, + user=None, tokamak=None, version=None, + ref=None, isget=None, get=None): + """ Add th list of ids stored in self._IDS_BASE + + Typically used to add a list of common ids without having to re-type + them every time + """ + self.add_ids(ids=self._IDS_BASE, occ=occ, idd=idd, + shot=shot, run=run, refshot=refshot, refrun=refrun, + user=user, 
tokamak=tokamak, version=version, + ref=ref, isget=isget, get=get) + + def add_ids_synthdiag(self, ids=None, occ=None, idd=None, + shot=None, run=None, refshot=None, refrun=None, + user=None, tokamak=None, version=None, + ref=None, isget=None, get=None): + """ Add pre-tabulated input ids necessary for calculating synth. signal + + The necessary input ids are given by self.get_inputs_for_synthsignal() + + """ + if get is None: + get = True + ids = self.get_inputs_for_synthsignal(ids=ids, verb=False, + returnas=list) + self.add_ids(ids=ids, occ=occ, idd=idd, preset=None, + shot=shot, run=run, refshot=refshot, refrun=refrun, + user=user, tokamak=tokamak, version=version, + ref=ref, isget=isget, get=get) def remove_ids(self, ids=None, occ=None): """ Remove an ids (optionally remove only an occurence) @@ -1529,11 +1682,12 @@ def __repr__(self): #--------------------- def _checkformat_getdata_ids(self, ids): - msg = "Arg ids must be either:\n" - msg += " - None: if self.dids only has one key\n" - msg += " - str: a valid key of self.dids\n\n" - msg += " Provided : %s\n"%ids - msg += " Available: %s"%str(list(self._dids.keys())) + msg = ("Arg ids must be either:\n" + + "\t- None: if self.dids only has one key\n" + + "\t- str: a valid key of self.dids\n\n" + + " Provided : {}\n".format(ids) + + " Available: {}\n".format(str(list(self._dids.keys()))) + + " => Consider using self.add_ids({})".format(str(ids))) lc = [ids is None, type(ids) is str] if not any(lc): @@ -1919,6 +2073,54 @@ def get_data(self, ids=None, sig=None, occ=None, If the ids has a field 'channel', indch is used to specify from which channel data shall be loaded (all by default) + Parameters + ---------- + ids: None / str + ids from which the data should be loaded + ids should be available (check self.get_summary()) + ids should be loaded if not available, using: + - self.add_ids() to add the ids + - self.open_get_close() to force loading if necessary + sig: None / str / list + shortcuts of signals to be 
loaded from the ids + Check available shortcuts using self.get_shortcuts(ids) + You can add custom shortcuts if needed (cf. self.add_shortcuts()) + sig can be a single str (shortcut) or a list of such + occ: None / int + occurence from which to load the data + indch: None / list / np.ndarray + If the data has channels, this lists / array of int indices can be + used to specify which channels to load from (all if None) + indt: None / list / np.ndarray + If data is time-dependent, the list / array of int indices can be + used to specify which time steps to load + stack: bool + Flag indicating whether common data (e.g.: data from different + channels) should be agregated / stacked into a single array + isclose: None / bool + Flag indicating whether the agregated data is a collection of + identical vectors, if which case it will be checked (np.isclose()) + and only a single vector will be kept + flatocc: bool + By default, the data is returned as a list for each occurence + If there is only one occ and flatocc = True, only the first element + of the list is returned + nan: bool + Flag indicating whether to check for abs(data) > 1.30 + All data is this case will be set to nan + Due to the fact IMAS default value is 1.e49 + pos: None / bool + Flag indicating whether the data should be positive (negative + values will be set to nan) + warn: bool + Flag indicating whether to print warning messages for data could + not be retrieved + + Return + ------ + dout: dict + Dictionnary containing the loaded data + """ return self._get_data(ids=ids, sig=sig, occ=occ, indch=indch, indt=indt, stack=stack, isclose=isclose, @@ -1972,6 +2174,39 @@ def get_data_all(self, dsig=None, stack=True, warnings.warn(msg) return dout + def get_events(self, occ=None, verb=True, returnas=False): + """ Return chronoligical events stored in pulse_schedule + + If verb = True => print (default) + False => don't print + If returnas = list => return as list of tuples (name, time) + np.ndarray => return as 
np.ndarray + False => don't return (default) + """ + + # Check / format inputs + if verb is None: + verb = True + if returnas is None: + returnas = False + assert isinstance(verb, bool) + assert returnas in [False, list, tuple] + + events = self.get_data('pulse_schedule', + sig='events', occ=occ)['events'] + name, time = zip(*events) + ind = np.argsort(time) + if verb: + name, time = zip(*events[ind]) + msg = np.array([name, time], dtype='U').T + msg = np.char.ljust(msg, np.nanmax(np.char.str_len(msg))) + print(msg) + if returnas is list: + return events[ind].tolist() + elif returnas is tuple: + name, time = zip(*events[ind]) + return name, time + #--------------------- @@ -2062,77 +2297,189 @@ def _get_t0(self, t0=None): warnings.warn(msg) return t0 + def to_Config(self, Name=None, occ=None, + description_2d=None, mobile=None, plot=True): + """ Export the content of wall ids as a tofu Config object + + Choose the occurence (occ), and index (description_2d, cf. dd_doc) to + be exported. + Specify whether to pick from limiter or mobile + If not specified, will be decided automatically from the content + Optionally plot the result - def to_Config(self, Name=None, occ=None, indDescription=None, plot=True): + This requires that the wall ids was previously loaded. + If not run: + self.add_ids('wall') + """ lidsok = ['wall'] - if indDescription is None: - indDescription = 0 # --------------------------- # Preliminary checks on data source consistency lids, lidd, shot, Exp = self._get_lidsidd_shotExp(lidsok, errshot=True, errExp=True, upper=True) - # ------------- - # Input dicts - - # config - config = None - if 'wall' in lids: - ids = 'wall' - - # occ = np.ndarray of valid int - occ = self._checkformat_getdata_occ(occ, ids) - assert occ.size == 1, "Please choose one occ only !" 
- occ = occ[0] - indoc = np.nonzero(self._dids[ids]['occ'] == occ)[0][0] - - wall = self._dids[ids]['ids'][indoc] - units = wall.description_2d[indDescription].limiter.unit - nunits = len(units) - - if nunits == 0: - msg = "There is no limiter unit stored !\n" - msg += "The required 2d description is empty:\n" - ms = "len(idd.%s[occ=%s].description_2d"%(ids,str(occ)) - msg += "%s[%s].limiter.unit) = 0"%(ms,str(indDescription)) + # ---------------- + # Trivial case + if 'wall' not in lids: + if plot: + msg = "ids 'wall' has not been loaded => Config not available!" raise Exception(msg) + return None - if Name is None: - Name = wall.description_2d[indDescription].type.name - if Name == '': - Name = 'imas wall' + # ---------------- + # Relevant case - import tofu.geom as mod + # Get relevant occ and description_2d + ids = 'wall' + # occ = np.ndarray of valid int + occ = self._checkformat_getdata_occ(occ, ids) + assert occ.size == 1, "Please choose one occ only !" + occ = occ[0] + indoc = np.nonzero(self._dids[ids]['occ'] == occ)[0][0] - lS = [None for _ in units] - kwargs = dict(Exp=Exp, Type='Tor') - for ii in range(0,nunits): - poly = np.array([units[ii].outline.r, units[ii].outline.z]) + if description_2d is None: + if len(self._dids[ids]['ids'][indoc].description_2d) >= 1: + description_2d = 1 + else: + description_2d = 0 + + wall = self._dids[ids]['ids'][indoc].description_2d[description_2d] + kwargs = dict(Exp=Exp, Type='Tor') + + import tofu.geom as mod + + # Get vessel + nlim = len(wall.limiter.unit) + nmob = len(wall.mobile.unit) + # onelimonly = False + + # ---------------------------------- + # Relevant only if vessel is filled + # try: + # if len(wall.vessel.unit) != 1: + # msg = "There is no / several vessel.unit!" + # raise Exception(msg) + # if len(wall.vessel.unit[0].element) != 1: + # msg = "There is no / several vessel.unit[0].element!" 
+ # raise Exception(msg) + # if len(wall.vessel.unit[0].element[0].outline.r) < 3: + # msg = "wall.vessel polygon has less than 3 points!" + # raise Exception(msg) + # name = wall.vessel.unit[0].element[0].name + # poly = np.array([wall.vessel.unit[0].element[0].outline.r, + # wall.vessel.unit[0].element[0].outline.z]) + # except Exception as err: + # # If vessel not in vessel, sometimes stored a a single limiter + # if nlim == 1: + # name = wall.limiter.unit[0].name + # poly = np.array([wall.limiter.unit[0].outline.r, + # wall.limiter.unit[0].outline.z]) + # onelimonly = True + # else: + # msg = ("There does not seem to be any vessel, " + # + "not in wall.vessel nor in wall.limiter!") + # raise Exception(msg) + # cls = None + # if name == '': + # name = 'ImasVessel' + # if '_' in name: + # ln = name.split('_') + # if len(ln) == 2: + # cls, name = ln + # else: + # name = name.replace('_', '') + # if cls is None: + # cls = 'Ves' + # assert cls in ['Ves', 'PlasmaDomain'] + # ves = getattr(mod, cls)(Poly=poly, Name=name, **kwargs) + + # Determine if mobile or not + lS = [] + # if onelimonly is False: + if mobile is None: + if nlim == 0 and nmob > 0: + mobile = True + elif nmob == 0 and nlim > 0: + mobile = False + elif nmob == nlim: + msgw = 'wall.description_2[{}]'.format(description_2d) + msg = ("\nids wall has same number of limiter / mobile units\n" + + "\t- len({}.limiter.unit) = {}\n".format(msgw, nlim) + + "\t- len({}.mobile.unit) = {}\n".format(msgw, nmob) + + " => Choosing limiter by default") + warnings.warn(msg) + mobile = False + else: + msgw = 'wall.description_2[{}]'.format(description_2d) + msg = ("Can't decide automatically whether to choose" + + " limiter or mobile!\n" + + "\t- len({}.limiter.unit) = {}\n".format(msgw, nlim) + + "\t- len({}.mobile.unit) = {}".format(msgw, nmob)) + raise Exception(msg) + assert isinstance(mobile, bool) + + # Get PFC + if mobile is True: + units = wall.mobile.unit + else: + units = wall.limiter.unit + nunits = 
len(units) + + if nunits == 0: + msg = ("There is no unit stored !\n" + + "The required 2d description is empty:\n") + ms = "len(idd.{}[occ={}].description_2d".format(ids, occ) + msg += "{}[{}].limiter.unit) = 0".format(ms, + description_2d) + raise Exception(msg) + + lS = [None for _ in units] + for ii in range(0, nunits): + try: + if mobile is True: + outline = units[ii].outline[0] + else: + outline = units[ii].outline + poly = np.array([outline.r, outline.z]) if units[ii].phi_extensions.size > 0: - pos, extent = units[ii].phi_extensions.T + pos, extent = units[ii].phi_extensions.T else: pos, extent = None, None name = units[ii].name - cls = None + cls, mobi = None, None if name == '': name = 'unit{:02.0f}'.format(ii) if '_' in name: ln = name.split('_') if len(ln) == 2: cls, name = ln + elif len(ln) == 3: + cls, name, mobi = ln else: - name = name.replace('_','') + name = name.replace('_', '') if cls is None: - if ii == nunits-1: + if ii == nunits - 1: cls = 'Ves' else: cls = 'PFC' - lS[ii] = getattr(mod,cls)(Poly=poly, pos=pos, extent=extent, - Name=name, **kwargs) + mobi = mobi == 'mobile' + lS[ii] = getattr(mod, cls)(Poly=poly, pos=pos, + extent=extent, + Name=name, mobile=mobi, + **kwargs) + except Exception as err: + msg = ("PFC unit[{}] named {} ".format(ii, name) + + "could not be loaded!\n" + + str(err)) + raise Exception(msg) + + if Name is None: + Name = wall.type.name + if Name == '': + Name = 'imas wall' - config = mod.Config(lStruct=lS, Name=Name, **kwargs) + config = mod.Config(lStruct=lS, Name=Name, **kwargs) # Output if plot: @@ -2144,7 +2491,8 @@ def _checkformat_Plasma2D_dsig(self, dsig=None): lidsok = set(self._lidsplasma).intersection(self._dids.keys()) lscom = ['t'] - lsmesh = ['2dmeshNodes','2dmeshFaces'] + lsmesh = ['2dmeshNodes', '2dmeshFaces', + '2dmeshR', '2dmeshZ'] lc = [dsig is None, type(dsig) is str, @@ -2333,7 +2681,8 @@ def _checkformat_mesh_Rect(R, Z, datashape=None, assert R.ndim in [1, 2] assert Z.ndim in [1, 2] shapeu = 
np.unique(np.r_[R.shape, Z.shape]) - shapeRZ = [None, None] + if shapeRZ is None: + shapeRZ = [None, None] if R.ndim == 1: assert np.all(np.diff(R) > 0.) else: @@ -2370,37 +2719,25 @@ def _checkformat_mesh_Rect(R, Z, datashape=None, if datashape is not None: if None in shapeRZ: pass + shapeRZ = tuple(shapeRZ) - if shapeRZ == ['R', 'Z']: + if shapeRZ == ('R', 'Z'): assert datashape == (R.size, Z.size) - elif shapeRZ == ['Z', 'R']: + elif shapeRZ == ('Z', 'R'): assert datashape == (Z.size, R.size) else: msg = "Inconsistent data shape !" raise Exception(msg) - shapeRZ = tuple(shapeRZ) - assert shapeRZ in [('R', 'Z'), ('Z', 'R')] + if None not in shapeRZ: + shapeRZ = tuple(shapeRZ) + assert shapeRZ in [('R', 'Z'), ('Z', 'R')] return R, Z, shapeRZ, 0 - - - - - - - - - # TBF def get_mesh_from_ggd(path_to_ggd, ggdindex=0): pass - - - - - def _get_dextra(self, dextra=None, fordata=False, nan=True, pos=None): lc = [dextra == False, dextra is None, type(dextra) is str, type(dextra) is list, type(dextra) is dict] @@ -2522,8 +2859,80 @@ def _get_dextra(self, dextra=None, fordata=False, nan=True, pos=None): def to_Plasma2D(self, tlim=None, dsig=None, t0=None, Name=None, occ=None, config=None, out=object, + description_2d=None, plot=None, plot_sig=None, plot_X=None, bck=True, dextra=None, nan=True, pos=None, shapeRZ=None): + """ Export the content of some ids as a tofu Plasma2D object + + Some ids typically contain plasma 1d (radial) or 2d (mesh) profiles + They include for example ids: + - core_profiles + - core_sources + - edge_profiles + - edge_sources + - equilibrium + + tofu offers a class for handling multiple profiles characterizing a + plasma, it's called Plasma2D + This method automatically identifies the ids that may contain profiles, + extract all profiles (i.e.: all profiles identified by a shortcut, see + self.get_shortcuts()) and export everything to a fresh Plasma2D + instance. 
+ + Parameters + ---------- + tlim: None / list + Restrict the loaded data to a time interval with tlim + if None, loads all time steps + dsig: None / dict + Specify exactly which data (shortcut) should be loaded by ids + If None, loads all available data + t0: None / float / str + Specify a time to be used as origin: + - None: absolute time vectors are untouched + - float : the origin of all time vectors is set to t0 + - str : the origin is taken from an event in ids pulse_schedule + Name: None / str + Name to be given to the instance + If None, a default Name is built + occ: None / int + occurrence to be used for loading the data + config: None / Config + Configuration (i.e.: tokamak geometry) to be used for the instance + If None, created from the wall ids with self.to_Config(). + out: type + class with which the output shall be returned + - object : as a Plasma2D instance + - dict: as a dict + description_2d: None / int + description_2d index to be used if the Config is to be built from + wall ids. 
See self.to_Config() + plot: None / bool + Flag whether to plot the result + plot_sig: None / str + shortcut of the signal to be plotted, if any + plot_X: None / str + shortcut of the abscissa against which to plot the signal, if any + bck: bool + Flag indicating whether to plot the grey envelop of the signal as a + background, if plot is True + dextra: None / dict + dict of extra signals (time traces) to be plotted, for context + shapeRZ: None / tuple + If provided, tuple indicating the order of 2d data arrays + associated to rectangular meshes + Only necessary when shape cannot be infered from data shape + - ('R', 'Z'): first dimension is R, second Z + - ('Z', 'R'): the other way around + + Args nan and pos are fed to self.get_data() + + Return + ------ + plasma: dict / Plasma2D + dict or Plasma2D instance depending on out + + """ # dsig dsig = self._checkformat_Plasma2D_dsig(dsig) @@ -2579,7 +2988,8 @@ def to_Plasma2D(self, tlim=None, dsig=None, t0=None, # config if config is None: - config = self.to_Config(Name=Name, occ=occ, plot=False) + config = self.to_Config(Name=Name, occ=occ, + description_2d=description_2d, plot=False) # dextra d0d, dtime0 = self._get_dextra(dextra) @@ -2685,7 +3095,7 @@ def to_Plasma2D(self, tlim=None, dsig=None, t0=None, npts, datashape = None, None keym = '{}.mesh'.format(ids) if cmesh else None for ss in set(out_.keys()).difference(lsigmesh): - assert out_[ss].ndim in [1,2] + assert out_[ss].ndim in [1, 2, 3] if out_[ss].ndim == 1: out_[ss] = np.atleast_2d(out_[ss]) shape = out_[ss].shape @@ -2708,10 +3118,10 @@ def to_Plasma2D(self, tlim=None, dsig=None, t0=None, shape = out_[ss].shape if len(shape) == 3: assert nt == shape[0] - datashape = tuple(shape[1], shape[2]) + datashape = (shape[1], shape[2]) if shapeRZ is None: - msg = ("Please provide shapeRZ" - + "indexing is ambiguous") + msg = ("Please provide shapeRZ," + + " indexing is ambiguous") raise Exception(msg) size = shape[1]*shape[2] if shapeRZ == ('R', 'Z'): @@ -2832,13 
+3242,129 @@ def _checkformat_Cam_geom(self, ids=None, geomcls=None, indch=None): lgeom = [kk for kk in dir(tfg) if 'Cam' in kk] if geomcls not in [False] + lgeom: - msg = "Arg geomcls must be in %s"%str([False]+lgeom) + msg = "Arg geomcls must be in {}".format([False]+lgeom) raise Exception(msg) return geomcls + def _get_indch_geomtdata(self, indch=None, indch_auto=None, + dgeom=None, t=None, + ids=None, out=None, dsig=None, kk=None): + nch = 0 if indch is None else len(indch) + # Get from geometry of LOS consistency + if dgeom is not None: + indnan = np.logical_or(np.any(np.isnan(dgeom[0]), axis=0), + np.any(np.isnan(dgeom[1]), axis=0)) + if np.any(indnan) and not np.all(indnan): + if indch_auto is not True: + dmsg = {True: 'not available', False: 'ok'} + ls = ['index {} los {}'.format(ii, dmsg[indnan[ii]]) + for ii in range(0, dgeom[0].shape[1])] + msg = ("The geometry of all channels is not available !\n" + + "Please choose indch to get all LOS!\n" + + "Currently:\n" + + "\n ".join(ls) + + "\n\n => Solution: choose indch accordingly !") + raise Exception(msg) + else: + msg = ("Geometry missing for some los !\n" + + " => indch automatically set to:\n" + + " {}".format(indch)) + warnings.warn(msg) + if indch is None: + indch = (~indnan).nonzero()[0] + else: + indch = set(indch).intersection((~indnan).nonzero()[0]) + indch = np.array(indch, dtype=int) + + # Get from time vectors consistency + if t is not None: + if indch_auto is not True: + if indch is None: + ls = [ + 'index {} {}.shape {}'.format(ii, kk, + out[dsig[kk]][ii].shape) + for ii in range(0, len(out[dsig[kk]])) + ] + else: + ls = [ + 'index {} {}.shape {}'.format(indch[ii], kk, + out[dsig[kk]][ii].shape) + for ii in range(0, len(out[dsig[kk]])) + ] + msg = ("The following is supposed to be a np.ndarray:\n" + + " - diag: {}\n".format(ids) + + " - shortcut: {}\n".format(dsig[kk]) + + " - used as: {} input\n".format(kk) + + " Observed type: {}\n".format(type(out[dsig[kk]])) + + " Probable cause: 
non-uniform shape (vs channels)\n" + + " => shapes :\n " + + "\n ".join(ls) + + "\n => Solution: choose indch accordingly !") + raise Exception(msg) + ls = [t[ii].shape for ii in range(0, len(t))] + lsu = list(set([ssu for ssu in ls if 0 not in ssu])) + su = lsu[np.argmax([ls.count(ssu) for ssu in lsu])] + msg = ("indch set automatically for {}\n".format(ids) + + " (due to inhomogenous time shapes)\n" + + " - main shape: {}\n".format(su) + + " - nb. chan. selected: {}\n".format(len(indch)) + + " - indch: {}".format(indch)) + warnings.warn(msg) + + if indch is None: + indch = [ii for ii in range(0, len(t)) if ls[ii] == su] + else: + indch = [indch[ii] for ii in range(0, len(indch)) + if ls[indch[ii]] == su] + + # Get from data consistency + if all([ss is not None for ss in [out, kk, dsig]]): + if indch_auto: + ls = [out[dsig[kk]][ii].shape + for ii in range(0, len(out[dsig[kk]]))] + lsu = list(set([ssu for ssu in ls if 0 not in ssu])) + su = lsu[np.argmax([ls.count(ssu) for ssu in lsu])] + if indch is None: + indch = [ii for ii in range(0, len(out[dsig[kk]])) + if ls[ii] == su] + else: + indch = [indch[ii] for ii in range(0, len(out[dsig[kk]])) + if ls[ii] == su] + msg = ("indch set automatically for {}\n".format(ids) + + " (due to inhomogeneous data shapes)\n" + + " - main shape: {}\n".format(su) + + " - nb. chan. 
selected: {}\n".format(len(indch)) + + " - indch: {}".format(indch)) + warnings.warn(msg) + else: + if indch is None: + ls = [ + 'index {} {}.shape {}'.format(ii, kk, + out[dsig[kk]][ii].shape) + for ii in range(0, len(out[dsig[kk]])) + ] + else: + ls = [ + 'index {} {}.shape {}'.format(indch[ii], kk, + out[dsig[kk]][ii].shape) + for ii in range(0, len(out[dsig[kk]])) + ] + msg = ("The following is supposed to be a np.ndarray:\n" + + " - diag: {}\n".format(ids) + + " - shortcut: {}\n".format(dsig[kk]) + + " - used as: {} input\n".format(kk) + + " Observed type: {}\n".format(type(out[dsig[kk]])) + + " Probable cause: non-uniform shape (vs channels)\n" + + " => shapes :\n " + + "\n ".join(ls) + + "\n => Solution: choose indch accordingly !") + raise Exception(msg) + nchout = 0 if indch is None else len(indch) + return indch, nchout != nch + def _to_Cam_Du(self, ids, lk, indch, nan=None, pos=None): - Etendues, Surfaces = None, None + Etendues, Surfaces, names = None, None, None out = self.get_data(ids, sig=list(lk), indch=indch, nan=nan, pos=pos) if 'los_ptsRZPhi' in out.keys() and out['los_ptsRZPhi'].size > 0: @@ -2863,12 +3389,70 @@ def _to_Cam_Du(self, ids, lk, indch, nan=None, pos=None): Etendues = out['etendue'] if 'surface' in out.keys() and len(out['surface']) > 0: Surfaces = out['surface'] - return dgeom, Etendues, Surfaces + if 'names' in out.keys() and len(out['names']) > 0: + names = out['names'] + return dgeom, Etendues, Surfaces, names def to_Cam(self, ids=None, indch=None, indch_auto=False, - Name=None, occ=None, config=None, plot=True, nan=True, pos=None): + description_2d=None, + Name=None, occ=None, config=None, + plot=True, nan=True, pos=None): + """ Export the content of a diagnostic ids as a tofu CamLos1D instance + + Some ids contain the geometry of a diagnostics + They typically have a 'channels' field + Generally in the form of a set of Lines of Sights (LOS) + They include for example ids: + - interferometer + - polarimeter + - bolometer + - 
soft_x_rays + - bremsstrahlung_visible + - spectrometer_visible + + tofu offers a class for handling sets of LOS as a camera: CamLOS1D + This method extracts the geometry of the desired diagnostic (ids) and + exports it as a CamLOS1D instance. + + Parameters + ---------- + ids: None / str + Specify the ids (will be checked against known diagnostics ids) + Should have a 'channels' field + If None and a unique diagnostic ids has been added, set to this one + Name: None / str + Name to be given to the instance + If None, a default Name is built + occ: None / int + occurrence to be used for loading the data + indch: None / list / array + If provided, array of int indices specifying which channels shall + be loaded (fed to self.get_data()) + indch_auto: bool + If True and indch is not provided, will try to guess which channels + can be loaded. If possible all channels are loaded by default, but + only if they have uniform data (same shape, i.e.: same time + vectors). In case of channels with non-uniform data, will try to + identify a sub-group of channels with uniform data + config: None / Config + Configuration (i.e.: tokamak geometry) to be used for the instance + If None, created from the wall ids with self.to_Config(). + description_2d: None / int + description_2d index to be used if the Config is to be built from + wall ids. 
See self.to_Config() + plot: None / bool + Flag whether to plot the result + + Args nan and pos are fed to self.get_data() + + Return + ------ + cam: CamLOS1D + CamLOS1D instance + + """ # dsig geom = self._checkformat_Cam_geom(ids) @@ -2884,13 +3468,13 @@ def to_Cam(self, ids=None, indch=None, indch_auto=False, # config if config is None: - config = self.to_Config(Name=Name, occ=occ, plot=False) + config = self.to_Config(Name=Name, occ=occ, + description_2d=description_2d, plot=False) # dchans + dchans = {} if indch is not None: - dchans = {'ind':indch} - else: - dchans = None + dchans['ind'] = indch # cam cam = None @@ -2901,35 +3485,26 @@ def to_Cam(self, ids=None, indch=None, indch_auto=False, raise Exception(msg) if 'LOS' in geom: - lk = ['los_ptsRZPhi','etendue','surface'] + lk = ['los_ptsRZPhi', 'etendue', 'surface', 'names'] lkok = set(self._dshort[ids].keys()) lkok = lkok.union(self._dcomp[ids].keys()) lk = list(set(lk).intersection(lkok)) - dgeom, Etendues, Surfaces = self._to_Cam_Du(ids, lk, indch, - nan=nan, pos=pos) - - indnan = np.logical_or(np.any(np.isnan(dgeom[0]),axis=0), - np.any(np.isnan(dgeom[1]),axis=0)) - if np.any(indnan) and not np.all(indnan): - indch_sug = (~indnan).nonzero()[0] - if indch_auto != True: - dmsg = {True: 'not available', False:'ok'} - msg = "The geometry of all channels is not available !\n" - msg += "Please choose indch to get all channels geomery !\n" - msg += "Currently:\n" - ls = ['index %s los %s'%(ii,dmsg[indnan[ii]]) - for ii in range(0,dgeom[0].shape[1])] - msg += "\n ".join(ls) - msg += "\n\n => Solution: choose indch accordingly !" 
- raise Exception(msg) - else: - indch = indch_sug - dgeom, Etendues, Surfaces = self._to_Cam_Du(ids, lk, indch, - nan=nan, pos=pos) - msg = "Geometry missing for some los !\n" - msg += " => indch automatically set to:\n" - msg += " %s"%str(indch) - warnings.warn(msg) + dgeom, Etendues, Surfaces, names = self._to_Cam_Du(ids, lk, indch, + nan=nan, + pos=pos) + + # Check all channels can be used, reset indch if necessary + indch, modif = self._get_indch_geomtdata(indch=indch, + indch_auto=indch_auto, + dgeom=dgeom) + if modif is True: + dgeom, Etendues, Surfaces, names = self._to_Cam_Du(ids, lk, + indch, + nan=nan, + pos=pos) + + if names is not None: + dchans['names'] = names import tofu.geom as tfg cam = getattr(tfg, geom)(dgeom=dgeom, config=config, @@ -3008,9 +3583,113 @@ def _checkformat_Data_dsig(self, ids=None, dsig=None, data=None, X=None, def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, - indch=None, indch_auto=False, Name=None, occ=None, config=None, + indch=None, indch_auto=False, Name=None, occ=None, + config=None, description_2d=None, dextra=None, t0=None, datacls=None, geomcls=None, - plot=True, bck=True, fallback_X=None, nan=True, pos=None): + plot=True, bck=True, fallback_X=None, nan=True, pos=None, + return_indch=False): + """ Export the content of a diagnostic ids as a tofu DataCam1D instance + + Some ids contain the geometry and data of a diagnostics + They typically have a 'channels' field + They include for example ids: + - interferometer + - polarimeter + - bolometer + - soft_x_rays + - bremsstrahlung_visible + - spectrometer_visible + - reflectometer_profile + - ece + - magnetics + - barometry + - neutron_diagnostics + + tofu offers a class for handling data: DataCam1D + If available, this method also loads the geometry using self.to_Cam() + on the same ids. + But it will load the data even if no geometry (LOS) is available. 
+ This method extracts the data of the desired diagnostic (ids) and + exports it as a DataCam1D instance. + + Parameters + ---------- + ids: None / str + Specify the ids (will be checked against known diagnostics ids) + Should have a 'channels' field + If None and a unique diagnostic ids has been added, set to this one + Name: None / str + Name to be given to the instance + If None, a default Name is built + occ: None / int + occurence to be used for loading the data + indch: None / list / array + If provided, array of int indices specifying which channels shall + be loaded (fed to self.get_data()) + indch_auto: bool + If True and indch is not provided, will try to guess which channels + can be loaded. If possible all channels are loaded by default, but + only if they have uniform data (same shape, i.e.: same time + vectors). In case of channels with non-uniform data, will try to + identify a sub-group of channels with uniform data + dsig: None / dict + Specify exactly which data (shortcut) should be loaded by ids + If None, loads all available data + data: None / str + If dsig is not provided, specify the shortcut of the data to be + loaded (from channels) + X: None / str + If dsig is not provided, specify the shortcut of the data to be + used as abscissa + tlim: None / list + Restrict the loaded data to a time interval with tlim + if None, loads all time steps + config: None / Config + Configuration (i.e.: tokamak geometry) to be used for the instance + If None, created from the wall ids with self.to_Config(). + description_2d: None / int + description_2d index to be used if the Config is to be built from + wall ids. 
See self.to_Config() + dextra: None / dict + dict of extra signals (time traces) to be plotted, for context + t0: None / float / str + Specify a time to be used as origin: + - None: absolute time vectors are untouched + - float : the origin of all time vectors is set to t0 + - str : the origin is taken from an event in ids pulse_schedule + datacls: None / str + tofu class to be used for the data + - None : determined from tabulated info (self._didsdiag[ids]) + - str : should be a valid data class name from tofu.data + geomcls: None / False / str + tofu class to be used for the geometry + - False: geometry not loaded + - None : determined from tabulated info (self._didsdiag[ids]) + - str : should be a valid camera class name from tofu.geom + fallback_X: None / float + fallback value for X when X is nan + X[np.isnan(X)] = fallback_X + If None, set to 1.1*np.nanmax(X) + + return_indch: bool + Flag indicating whether to return also the indch + Useful if indch was determined automatically by indch_auto + plot: None / bool + Flag whether to plot the result + bck: bool + Flag indicating whether to plot the grey envelope of the signal as a + background, if plot is True + + Args nan and pos are fed to self.get_data() + + Return + ------ + data: DataCam1D + DataCam1D instance + indch: np.ndarray + int array of indices of the loaded channels, returned only if + return_indch = True + """ # dsig datacls, geomcls, dsig = self._checkformat_Data_dsig(ids, dsig, @@ -3029,7 +3708,8 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, # config if config is None: - config = self.to_Config(Name=Name, occ=occ, plot=False) + config = self.to_Config(Name=Name, occ=occ, + description_2d=description_2d, plot=False) # dchans if indch is not None: @@ -3037,63 +3717,29 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, dchans = {'ind':indch} else: dchans = None - # cam + # ----------- + # Get geom cam = None indchanstr = self._dshort[ids][dsig['data']]['str'].index('[chan]') 
chanstr = self._dshort[ids][dsig['data']]['str'][:indchanstr] nchMax = len(getattr(self._dids[ids]['ids'][0], chanstr)) + dgeom = None if geomcls != False: Etendues, Surfaces = None, None if config is None: msg = "A config must be provided to compute the geometry !" raise Exception(msg) - dgeom = None if 'LOS' in geomcls: - lk = ['los_ptsRZPhi','etendue','surface'] + lk_geom = ['los_ptsRZPhi', 'etendue', 'surface'] lkok = set(self._dshort[ids].keys()) lkok = lkok.union(self._dcomp[ids].keys()) - lk = list(set(lk).intersection(lkok)) - dgeom, Etendues, Surfaces = self._to_Cam_Du(ids, lk, indch, - nan=nan, pos=pos) - - indnan = np.logical_or(np.any(np.isnan(dgeom[0]),axis=0), - np.any(np.isnan(dgeom[1]),axis=0)) - if np.any(indnan) and not np.all(indnan): - indch_sug = (~indnan).nonzero()[0] - if indch_auto != True: - dmsg = {True: 'not available', False:'ok'} - msg = "The geometry of all channels is not available !\n" - msg += "Please de-activate geometry loading (geomcls=False)\n" - msg += " or choose indch to get all channels geometry !\n" - msg += "Currently:\n" - ls = ['index %s los %s'%(ii,dmsg[indnan[ii]]) - for ii in range(0,dgeom[0].shape[1])] - msg += "\n ".join(ls) - msg += "\n\n => Solution: choose indch accordingly !" 
- msg += " Suggested indch (los %s):\n"%dmsg[True] - msg += " %s"%str(indch_sug) - raise Exception(msg) - else: - indch = indch_sug - dgeom, Etendues, Surfaces = self._to_Cam_Du(ids, lk, indch, - nan=nan, pos=pos) - msg = "Geometry missing for some los !\n" - msg += " => indch automatically set to:\n" - msg += " %s"%str(indch) - warnings.warn(msg) - - if dgeom is not None: - import tofu.geom as tfg - cam = getattr(tfg, geomcls)(dgeom=dgeom, config=config, - Etendues=Etendues, Surfaces=Surfaces, - Name=Name, Diag=ids, Exp=Exp, - dchans=dchans) - cam.Id.set_dUSR( {'imas-nchMax': nchMax} ) - + lk_geom = list(set(lk_geom).intersection(lkok)) + dgeom, Etendues, Surfaces, names = self._to_Cam_Du( + ids, lk_geom, indch, nan=nan, pos=pos) - # ----------------------- - # data + # ---------- + # Get time lk = sorted(dsig.keys()) dins = dict.fromkeys(lk) t = self.get_data(ids, sig=dsig.get('t', 't'), indch=indch)['t'] @@ -3103,89 +3749,47 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, msg += " - 't' = %s"%str(t) raise Exception(msg) + # ----------- + # Check indch if type(t) is list: - if indch_auto == True: - ls = [t[ii].shape for ii in range(0,len(t))] - lsu = list(set([ssu for ssu in ls if 0 not in ssu])) - su = lsu[np.argmax([ls.count(ssu) for ssu in lsu])] - if indch is None: - indch = [ii for ii in range(0,len(t)) if ls[ii] == su] - else: - indchcam = [ii for ii in range(0,len(t)) if ls[ii] == su] - indch = [indch[ii] for ii in range(0,len(t)) if ls[ii] == su] - t = self.get_data(ids, sig='t', indch=indch)['t'] - if cam is not None: - cam = cam.get_subset(indch=indchcam) - msg = "indch set automatically for %s\n"%ids - msg += " (due to inhomogenous time shapes)\n" - msg += " - main shape: %s\n"%str(su) - msg += " - nb. chan. 
selected: %s\n"%len(indch) - msg += " - indch: %s"%str(indch) - warnings.warn(msg) - - else: - msg = "The time vector does not seem to be homogeneous !\n" - msg += "Please choose indch such that all channels have same t !\n" - msg += "Currently:\n" - if indch is None: - ls = ['index %s t.shape %s'%(ii,str(t[ii].shape)) - for ii in range(0,len(t))] - else: - ls = ['index %s t.shape %s'%(indch[ii],str(t[ii].shape)) - for ii in range(0,len(t))] - msg += "\n ".join(ls) - msg += "\n => Solution: choose indch accordingly !" - raise Exception(msg) + indch, modif = self._get_indch_geomtdata(indch=indch, + indch_auto=indch_auto, + dgeom=dgeom, t=t) + assert modif is True + else: + indch, modif = self._get_indch_geomtdata(indch=indch, + indch_auto=indch_auto, + dgeom=dgeom) + if modif is True: + if geomcls is not False: + dgeom, Etendues, Surfaces, names = self._to_Cam_Du( + ids, lk_geom, indch, nan=nan, pos=pos) + t = self.get_data(ids, sig='t', indch=indch)['t'] + modif = False + + if names is not None: + dchans['names'] = names if t.ndim == 2: - assert np.all(np.isclose(t, t[0:1,:])) - t = t[0,:] + assert np.all(np.isclose(t, t[0:1, :])) + t = t[0, :] dins['t'] = t indt = self._checkformat_tlim(t, tlim=tlim)['indt'] + # ----------- + # Get data out = self.get_data(ids, sig=[dsig[k] for k in lk], indt=indt, indch=indch, nan=nan, pos=pos) for kk in set(lk).difference('t'): if not isinstance(out[dsig[kk]], np.ndarray): - if indch_auto: - ls = [out[dsig[kk]][ii].shape - for ii in range(0,len(out[dsig[kk]]))] - lsu = list(set([ssu for ssu in ls if 0 not in ssu])) - su = lsu[np.argmax([ls.count(ssu) for ssu in lsu])] - if indch is None: - indch = [ii for ii in range(0,len(out[dsig[kk]])) - if ls[ii] == su] - else: - indch = [indch[ii] for ii in range(0,len(out[dsig[kk]])) - if ls[ii] == su] + indch, modifk = self._get_indch_geomtdata( + indch=indch, indch_auto=indch_auto, + out=out, dsig=dsig, kk=kk) + if modifk is True: out = self.get_data(ids, sig=[dsig[k] for k in lk], - 
indt=indt, indch=indch, nan=nan, - pos=pos) - if cam is not None: - cam = cam.get_subset(indch=indch) - msg = "indch set automatically for %s\n"%ids - msg += " (due to inhomogeneous data shapes)\n" - msg += " - main shape: %s\n"%str(su) - msg += " - nb. chan. selected: %s\n"%len(indch) - msg += " - indch: %s"%str(indch) - warnings.warn(msg) - else: - msg = "The following is supposed to be a np.ndarray:\n" - msg += " - diag: %s\n"%ids - msg += " - shortcut: %s\n"%dsig[kk] - msg += " - used as: %s input\n"%kk - msg += " Observed type: %s\n"%str(type(out[dsig[kk]])) - msg += " Probable cause: non-uniform shape (vs channels)\n" - msg += " => shapes :\n " - if indch is None: - ls = ['index %s %s.shape %s'%(ii,kk,str(out[dsig[kk]][ii].shape)) - for ii in range(0,len(out[dsig[kk]]))] - else: - ls = ['index %s %s.shape %s'%(indch[ii],kk,str(out[dsig[kk]][ii].shape)) - for ii in range(0,len(out[dsig[kk]]))] - msg += "\n ".join(ls) - msg += "\n => Solution: choose indch accordingly !" - raise Exception(msg) + indt=indt, indch=indch, + nan=nan, pos=pos) + modif = True # Arrange depending on shape and field if type(out[dsig[kk]]) is not np.ndarray: @@ -3194,7 +3798,7 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, ipdb.set_trace() raise Exception(msg) - assert out[dsig[kk]].ndim in [1,2,3] + assert out[dsig[kk]].ndim in [1, 2, 3] if out[dsig[kk]].ndim == 1: out[dsig[kk]] = np.atleast_2d(out[dsig[kk]]) @@ -3212,6 +3816,15 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, assert kk == 'data' dins[kk] = np.swapaxes(out[dsig[kk]].T, 1,2) + # Update dgeom if necessary + if modif is True and geomcls is not False: + dgeom, Etendues, Surfaces, names = self._to_Cam_Du( + ids, lk_geom, indch, + nan=nan, pos=pos) + modif = False + + # -------------------------- + # Format special ids cases if ids == 'reflectometer_profile': dins['X'] = np.fliplr(dins['X']) dins['data'] = np.fliplr(dins['data']) @@ -3226,7 +3839,6 @@ def to_Data(self, 
ids=None, dsig=None, data=None, X=None, tlim=None, fallback_X = 1.1*np.nanmax(dins['X']) dins['X'][np.isnan(dins['X'])] = fallback_X - # Apply indt if was not done in get_data for kk,vv in dins.items(): if (vv.ndim == 2 or kk == 't') and vv.shape[0] > indt.size: @@ -3252,17 +3864,29 @@ def to_Data(self, ids=None, dsig=None, data=None, X=None, tlim=None, for tt in dextra.keys(): dextra[tt]['t'] = dextra[tt]['t'] - t0 + # -------------- + # Create objects + if geomcls is not False and dgeom is not None: + import tofu.geom as tfg + cam = getattr(tfg, geomcls)(dgeom=dgeom, config=config, + Etendues=Etendues, Surfaces=Surfaces, + Name=Name, Diag=ids, Exp=Exp, + dchans=dchans) + cam.Id.set_dUSR({'imas-nchMax': nchMax}) + import tofu.data as tfd conf = None if cam is not None else config Data = getattr(tfd, datacls)(Name=Name, Diag=ids, Exp=Exp, shot=shot, lCam=cam, config=conf, dextra=dextra, dchans=dchans, **dins) - Data.Id.set_dUSR( {'imas-nchMax': nchMax} ) if plot: Data.plot(draw=True, bck=bck) - return Data + if return_indch is True: + return Data, indch + else: + return Data def _get_synth(self, ids, dsig=None, @@ -3332,16 +3956,115 @@ def _get_synth(self, ids, dsig=None, def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None, quant=None, ref1d=None, ref2d=None, q2dR=None, q2dPhi=None, q2dZ=None, - Brightness=None, interp_t=None, + Brightness=None, interp_t=None, newcalc=True, indch=None, indch_auto=False, Name=None, - occ_cam=None, occ_plasma=None, config=None, + occ_cam=None, occ_plasma=None, + config=None, description_2d=None, dextra=None, t0=None, datacls=None, geomcls=None, bck=True, fallback_X=None, nan=True, pos=None, plot=True, plot_compare=None, plot_plasma=None): + """ Compute synthetic data for a diagnostics and export as DataCam1D + + Some ids typically contain plasma 1d (radial) or 2d (mesh) profiles + They include for example ids: + - core_profiles + - core_sources + - edge_profiles + - edge_sources + - equilibrium + + From these 
profiles, tofu can compute synthetic data for a diagnostic + ids which provides a geometry (channels.line_of_sight). + tofu extracts the geometry, and integrates the desired profile along + the lines of sight (LOS), using 2D interpolation when necessary + + It requires: + - a diagnostic ids with geometry (LOS) + - an ids containing the 1d or 2d profile to be integrated + - if necessary, an intermediate ids to interpolate the 1d profile + to 2d (e.g.: equilibrium) + + For each ids, you need to specify: + - profile ids: + profile (signal) to be integrated + quantity to be used for 1d interpolation + - equilibrium / intermediate ids: + quantity to be used for 2d interpolation + (shall be the same dimension as quantity for 1d interp.) + + This method is a combination of self.to_Plasma2D() (used for extracting + profiles and equilibrium and for interpolation) and self.to_Cam() (used + for extracting diagnostic geometry) and to_Data() (used for exporting + the computed result as a tofu DataCam1D instance). + + Args ids, dsig, tlim, occ_plasma (occ), nan, pos, plot_plasma (plot) + are fed to to_Plasma2D() + Args indch, indch_auto, occ_cam (occ), config, description_2d, are fed + to to_Cam() + Args Name, bck, fallback_X, plot, t0, dextra are fed to to_Data() + + Parameters + ---------- + t: None / float / np.ndarray + time at which the synthetic signal shall be computed + If None, computed for all available time steps + res: None / float + absolute spatial resolution (sampling steps) used for Line-of-Sight + integration (in meters) + quant: None / str + Shortcut of the quantity to be integrated + ref1d: None / str + Shortcut of the quantity to be used as reference for 1d + interpolation + ref2d: None / str + Shortcut of the quantity to be used as reference for 2d + interpolation + q2dR: None / str + If integrating an anisotropic vector field (e.g. 
magnetic field) + q2dR is the shortcut of the R-component of the quantity + q2dPhi: None / str + If integrating an anisotropic vector field (e.g. magnetic field) + q2dPhi is the shortcut of the Phi-component of the quantity + q2dZ: None / str + If integrating an anisotropic vector field (e.g. magnetic field) + q2dZ is the shortcut of the Z-component of the quantity + Brightness: bool + Flag indicating whether the result shall be returned as a + Brightness (i.e.: line integral) or an incident flux (Brightness x + Etendue), which requires the Etendue + plot_compare: bool + Flag indicating whether to plot the experimental data against the + computed synthetic data + Return + ------ + sig: DataCam1D + DataCam1D instance + + """ + + # Check / format inputs + if plot is None: + plot = True + + if plot: + if plot_compare is None: + plot_compare = True + if plot_plasma is None: + plot_plasma = True + + # Get experimental data first if relevant + # to get correct indch for comparison + if plot and plot_compare: + data, indch = self.to_Data(ids, indch=indch, + indch_auto=indch_auto, t0=t0, + config=config, + description_2d=description_2d, + return_indch=True, plot=False) # Get camera cam = self.to_Cam(ids=ids, indch=indch, - Name=None, occ=occ_cam, config=config, + Name=None, occ=occ_cam, + config=config, description_2d=description_2d, plot=False, nan=True, pos=None) # Get relevant parameters @@ -3350,7 +4073,8 @@ def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None, # Get relevant plasma plasma = self.to_Plasma2D(tlim=tlim, dsig=dsig, t0=t0, - Name=None, occ=occ_plasma, config=cam.config, out=object, + Name=None, occ=occ_plasma, + config=cam.config, out=object, plot=False, dextra=dextra, nan=True, pos=None) # Intermediate computation if necessary @@ -3429,8 +4153,10 @@ def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None, # Calculate synthetic signal if Brightness is None: Brightness = self._didsdiag[ids]['synth'].get('Brightness', None) 
+ dq['fill_value'] = 0. sig, units = cam.calc_signal_from_Plasma2D(plasma, res=res, t=t, Brightness=Brightness, + newcalc=newcalc, plot=False, **dq) sig._dextra = plasma.get_dextra(dextra) @@ -3438,16 +4164,35 @@ def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None, if ids == 'interferometer': sig = 2.*sig elif ids == 'polarimeter': - sig = 2.*sig + # For polarimeter, the vect is along the LOS + # it is not the direction of + sig = -2.*sig + + # Safety check regarding Brightness + _, _, dsig_exp = self._checkformat_Data_dsig(ids) + kdata = dsig_exp['data'] + B_exp = self._dshort[ids][kdata].get('Brightness', None) + err_comp = False + if Brightness != B_exp: + u_exp = self._dshort[ids][kdata].get('units') + msg = ("\nCalculated synthetic and chosen experimental data " + + "do not seem directly comparable !\n" + + "\t- chosen experimental data: " + + "{}, ({}), Brightness = {}\n".format(kdata, + u_exp, B_exp) + + "\t- calculated synthetic data: " + + "int({}), ({}), Brightness = {}\n".format(dq['quant'], + units, + Brightness) + + "\n => Consider changing data or Brigthness value") + err_comp = True + warnings.warn(msg) # plot if plot: - if plot_compare is None: - plot_compare = True - if plot_plasma is None: - plot_plasma = True if plot_compare: - data = self.to_Data(ids, indch=indch, t0=t0, plot=False) + if err_comp: + raise Exception(msg) sig._dlabels = data.dlabels data.plot_compare(sig) else: @@ -3468,7 +4213,7 @@ def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None, def load_Config(shot=None, run=None, user=None, tokamak=None, version=None, - Name=None, occ=0, indDescription=0, plot=True): + Name=None, occ=0, description_2d=None, plot=True): didd = MultiIDSLoader() didd.add_idd(shot=shot, run=run, @@ -3476,12 +4221,13 @@ def load_Config(shot=None, run=None, user=None, tokamak=None, version=None, didd.add_ids('wall', get=True) return didd.to_Config(Name=Name, occ=occ, - indDescription=indDescription, plot=plot) + 
description_2d=description_2d, plot=plot) # occ ? def load_Plasma2D(shot=None, run=None, user=None, tokamak=None, version=None, - tlim=None, occ=None, dsig=None, ids=None, config=None, + tlim=None, occ=None, dsig=None, ids=None, + config=None, description_2d=None, Name=None, t0=None, out=object, dextra=None, plot=None, plot_sig=None, plot_X=None, bck=True): @@ -3507,13 +4253,15 @@ def load_Plasma2D(shot=None, run=None, user=None, tokamak=None, version=None, didd.add_ids(ids=lids, get=True) return didd.to_Plasma2D(Name=Name, tlim=tlim, dsig=dsig, t0=t0, - occ=occ, config=config, out=out, + occ=occ, config=config, + description_2d=description_2d, out=out, plot=plot, plot_sig=plot_sig, plot_X=plot_X, bck=bcki, dextra=dextra) def load_Cam(shot=None, run=None, user=None, tokamak=None, version=None, - ids=None, indch=None, config=None, occ=None, Name=None, plot=True): + ids=None, indch=None, config=None, description_2d=None, + occ=None, Name=None, plot=True): didd = MultiIDSLoader() didd.add_idd(shot=shot, run=run, @@ -3528,13 +4276,15 @@ def load_Cam(shot=None, run=None, user=None, tokamak=None, version=None, didd.add_ids(ids=lids, get=True) return didd.to_Cam(ids=ids, Name=Name, indch=indch, - config=config, occ=occ, plot=plot) + config=config, description_2d=description_2d, + occ=occ, plot=plot) def load_Data(shot=None, run=None, user=None, tokamak=None, version=None, ids=None, datacls=None, geomcls=None, indch_auto=True, tlim=None, dsig=None, data=None, X=None, indch=None, - config=None, occ=None, Name=None, dextra=None, + config=None, description_2d=None, + occ=None, Name=None, dextra=None, t0=None, plot=True, bck=True): didd = MultiIDSLoader() @@ -3557,7 +4307,8 @@ def load_Data(shot=None, run=None, user=None, tokamak=None, version=None, return didd.to_Data(ids=ids, Name=Name, tlim=tlim, t0=t0, datacls=datacls, geomcls=geomcls, dsig=dsig, data=data, X=X, indch=indch, - config=config, occ=occ, dextra=dextra, + config=config, description_2d=description_2d, + occ=occ, 
dextra=dextra, plot=plot, bck=bck, indch_auto=indch_auto) @@ -3686,10 +4437,10 @@ def _save_to_imas(obj, shot=None, run=None, refshot=None, refrun=None, occ=None, user=None, tokamak=None, version=None, dryrun=False, tfversion=None, verb=True, **kwdargs): - dfunc = {'Struct':_save_to_imas_Struct, - 'Config':_save_to_imas_Config, - 'CamLOS1D':_save_to_imas_CamLOS1D, - 'DataCam1D':_save_to_imas_DataCam1D} + dfunc = {'Struct': _save_to_imas_Struct, + 'Config': _save_to_imas_Config, + 'CamLOS1D': _save_to_imas_CamLOS1D, + 'DataCam1D': _save_to_imas_DataCam1D} # Preliminary check on object class @@ -3741,14 +4492,23 @@ def _save_to_imas(obj, shot=None, run=None, refshot=None, refrun=None, # Class-specific functions #-------------------------------- -def _save_to_imas_Struct( obj, +def _save_to_imas_Struct(obj, shot=None, run=None, refshot=None, refrun=None, occ=None, user=None, tokamak=None, version=None, dryrun=False, tfversion=None, verb=True, - description_2d=0, unit=0): + description_2d=None, description_typeindex=None, + unit=None): if occ is None: occ = 0 + if description_2d is None: + description_2d = 0 + if description_typeindex is None: + description_typeindex = 2 + description_typeindex = int(description_typeindex) + if unit is None: + unit = 0 + # Create or open IDS # ------------------ idd, shotfile = _open_create_idd(shot=shot, run=run, @@ -3762,8 +4522,19 @@ def _save_to_imas_Struct( obj, # data # -------- idd.wall.description_2d.resize( description_2d + 1 ) - idd.wall.description_2d[description_2d].limiter.unit.resize(1) - node = idd.wall.description_2d[description_2d].limiter.unit[0] + idd.wall.description_2d[description_2d].type.index = ( + description_typeindex) + idd.wall.description_2d[description_2d].type.name = ( + '{}_{}'.format(obj.__class__.__name__, obj.Id.Name)) + idd.wall.description_2d[description_2d].type.description = ( + "tofu-generated wall. 
Each PFC is represented independently as a" + + " closed polygon in tofu, which saves them as disjoint PFCs") + if obj._dgeom['mobile'] is True: + idd.wall.description_2d[description_2d].mobile.unit.resize(unit+1) + node = idd.wall.description_2d[description_2d].mobile.unit[unit] + else: + idd.wall.description_2d[description_2d].limiter.unit.resize(unit+1) + node = idd.wall.description_2d[description_2d].limiter.unit[unit] node.outline.r = obj._dgeom['Poly'][0,:] node.outline.z = obj._dgeom['Poly'][1,:] if obj.noccur > 0: @@ -3793,14 +4564,17 @@ def _save_to_imas_Struct( obj, err=err0, dryrun=dryrun, verb=verb) -def _save_to_imas_Config( obj, idd=None, shotfile=None, +def _save_to_imas_Config(obj, idd=None, shotfile=None, shot=None, run=None, refshot=None, refrun=None, occ=None, user=None, tokamak=None, version=None, dryrun=False, tfversion=None, close=True, verb=True, - description_2d=None): + description_2d=None, description_typeindex=None): if occ is None: occ = 0 + if description_2d is None: + description_2d = 0 + # Create or open IDS # ------------------ if idd is None: @@ -3818,20 +4592,22 @@ def _save_to_imas_Config( obj, idd=None, shotfile=None, nS = len(lS) if len(lclsIn) != 1: - msg = "One StructIn subclass is allowed / necessary !" + msg = "One (and only one) StructIn subclass is allowed / necessary !" 
raise Exception(msg) - if description_2d is None: - if nS == 1 and lcls[0] in ['Ves','PlasmaDomain']: - description_2d = 0 + if description_typeindex is None: + if nS == 1 and lcls[0] in ['Ves', 'PlasmaDomain']: + description_typeindex = 0 else: - descrption_2d = 2 - assert description_2d in [0,2] + description_typeindex = 1 + assert description_typeindex in [0, 1] - # Make sure StructIn is last (IMAS requirement) - ind = lcls.index(lclsIn[0]) - lS[-1], lS[ind] = lS[ind], lS[-1] + # Check whether there is any mobile element + ismobile = any([ss._dgeom['mobile'] for ss in lS]) + # Isolate StructIn and take out from lS + ves = lS.pop(lcls.index(lclsIn[0])) + nS = len(lS) # Fill in data # ------------------ @@ -3839,23 +4615,83 @@ def _save_to_imas_Config( obj, idd=None, shotfile=None, # data # -------- idd.wall.description_2d.resize( description_2d + 1 ) - idd.wall.description_2d[description_2d].type.name = obj.Id.Name - idd.wall.description_2d[description_2d].limiter.unit.resize(nS) - for ii in range(0,nS): - node = idd.wall.description_2d[description_2d].limiter.unit[ii] - node.outline.r = lS[ii].Poly_closed[0,:] - node.outline.z = lS[ii].Poly_closed[1,:] - if lS[ii].noccur > 0: - node.phi_extensions = np.array([lS[ii].pos, lS[ii].extent]).T - node.closed = True - node.name = '%s_%s'%(lS[ii].__class__.__name__, lS[ii].Id.Name) + wall = idd.wall.description_2d[description_2d] + wall.type.name = obj.Id.Name + wall.type.index = description_typeindex + wall.type.description = ( + "tofu-generated wall. 
Each PFC is represented independently as a" + + " closed polygon in tofu, which saves them as disjoint PFCs") + + # Fill limiter / mobile + if ismobile: + # resize nS + 1 for vessel + wall.mobile.unit.resize(nS + 1) + units = wall.mobile.unit + for ii in range(0, nS): + units[ii].outline.resize(1) + units[ii].outline[0].r = lS[ii].Poly[0, :] + units[ii].outline[0].z = lS[ii].Poly[1, :] + if lS[ii].noccur > 0: + units[ii].phi_extensions = np.array([lS[ii].pos, + lS[ii].extent]).T + units[ii].closed = True + name = '{}_{}'.format(lS[ii].__class__.__name__, + lS[ii].Id.Name) + if lS[ii]._dgeom['mobile'] is True: + name = name + '_mobile' + units[ii].name = name + else: + # resize nS + 1 for vessel + wall.limiter.unit.resize(nS + 1) + units = wall.limiter.unit + for ii in range(0, nS): + units[ii].outline.r = lS[ii].Poly[0, :] + units[ii].outline.z = lS[ii].Poly[1, :] + if lS[ii].noccur > 0: + units[ii].phi_extensions = np.array([lS[ii].pos, + lS[ii].extent]).T + units[ii].closed = True + name = '{}_{}'.format(lS[ii].__class__.__name__, + lS[ii].Id.Name) + if lS[ii]._dgeom['mobile'] is True: + name = name + '_mobile' + units[ii].name = name + + # Add Vessel at the end + ii = nS + if ismobile: + units[ii].outline.resize(1) + units[ii].outline[0].r = ves.Poly[0, :] + units[ii].outline[0].z = ves.Poly[1, :] + else: + units[ii].outline.r = ves.Poly[0, :] + units[ii].outline.z = ves.Poly[1, :] + units[ii].closed = True + units[ii].name = '{}_{}'.format(ves.__class__.__name__, + ves.Id.Name) + + # ---------------------------------- + # Fill vessel if needed + # vesname = '{}_{}'.format(ves.__class__.__name__, ves.Id.Name) + # wall.vessel.name = vesname + # wall.vessel.index = 1 + # wall.vessel.description = ( + # "tofu-generated vessel outline, with a unique unit / element") + + # wall.vessel.unit.resize(1) + # wall.vessel.unit[0].element.resize(1) + # element = wall.vessel.unit[0].element[0] + # element.name = vesname + # element.outline.r = ves.Poly[0, :] + # 
element.outline.z = ves.Poly[1, :] + # ---------------------------------- # IDS properties # -------------- com = "PFC contour generated:\n" - com += " - from %s"%obj.Id.SaveName - com += " - by tofu %s"%tfversion + com += " - from {}".format(obj.Id.SaveName) + com += " - by tofu {}".format(tfversion) _fill_idsproperties(idd.wall, com, tfversion) err0 = None diff --git a/tofu/tests/tests01_geom/tests01_GG.py b/tofu/tests/tests01_geom/tests01_GG.py index b0437ff4e..01eb2ab67 100644 --- a/tofu/tests/tests01_geom/tests01_GG.py +++ b/tofu/tests/tests01_geom/tests01_GG.py @@ -113,33 +113,35 @@ def test02_Poly_CLockOrder(): # Test arbitrary 2D polygon Poly = np.array([[0.,1.,1.,0.],[0.,0.,1.,1.]]) - P = GG.Poly_Order(Poly, order='C', Clock=False, close=True, - layout='(N,cc)', layout_in=None, Test=True) - assert all([np.allclose(P[0,:],P[-1,:]), P.shape==(5,2), + P = GG.format_poly(Poly, order='C', Clock=False, close=True, + Test=True) + assert all([np.allclose(P[:, 0], P[:, -1]), P.shape == (2, 5), not GG.Poly_isClockwise(P), P.flags['C_CONTIGUOUS'], not P.flags['F_CONTIGUOUS']]) - P = GG.Poly_Order(Poly, order='F', Clock=True, close=False, - layout='(cc,N)', layout_in=None, Test=True) - assert all([not np.allclose(P[:,0],P[:,-1]), P.shape==(2,4), - GG.Poly_isClockwise(np.concatenate((P,P[:,0:1]),axis=1)), + P = GG.format_poly(Poly, order='F', Clock=True, close=False, + Test=True) + assert all([not np.allclose(P[:, 0], P[:, -1]), P.shape == (2, 4), + GG.Poly_isClockwise(np.concatenate((P, P[:, 0:1]), axis=1)), not P.flags['C_CONTIGUOUS'], P.flags['F_CONTIGUOUS']]) # Test arbitrary 3D polygon - Poly = np.array([[0.,1.,1.,0.],[0.,0.,1.,1.],[0.,0.,0.,0.]]) - P = GG.Poly_Order(Poly, order='C', Clock=False, close=False, - layout='(N,cc)', layout_in=None, Test=True) - assert all([not np.allclose(P[0,:],P[-1,:]), P.shape==(4,3), + Poly = np.array([[0., 1., 1., 0.], + [0., 0., 1., 1.], + [0., 0., 0., 0.]]) + P = GG.format_poly(Poly, order='C', Clock=False, close=False, + 
Test=True) + assert all([not np.allclose(P[:, 0], P[:, -1]), P.shape == (3, 4), P.flags['C_CONTIGUOUS'], not P.flags['F_CONTIGUOUS']]) - P = GG.Poly_Order(Poly, order='F', Clock=True, close=True, - layout='(cc,N)', layout_in=None, Test=True) - assert all([np.allclose(P[:,0],P[:,-1]), P.shape==(3,5), + P = GG.format_poly(Poly, order='F', Clock=True, close=True, + Test=True) + assert all([np.allclose(P[:, 0], P[:, -1]), P.shape == (3, 5), not P.flags['C_CONTIGUOUS'], P.flags['F_CONTIGUOUS']]) def test03_Poly_VolAngTor(): Poly = np.array([[1.,1.5,2.,2.,2.,1.5,1.],[0.,0.,0.,0.5,1.,1.,1.]]) - Poly = GG.Poly_Order(Poly, order='C', Clock=False, close=True, - layout='(cc,N)', Test=True) + Poly = GG.format_poly(Poly, order='C', Clock=False, close=True, + Test=True) V, B = GG.Poly_VolAngTor(Poly) assert V==1.5 assert np.allclose(B,[7./(3.*1.5),0.5]) diff --git a/tofu/tests/tests01_geom/tests03_core.py b/tofu/tests/tests01_geom/tests03_core.py index 8fdd22356..70c1930bc 100644 --- a/tofu/tests/tests01_geom/tests03_core.py +++ b/tofu/tests/tests01_geom/tests03_core.py @@ -874,7 +874,7 @@ def ffT(Pts, t=None, vect=None): resMode=rm, method=dm, minimize=mmz, ind=ind, - plot=False, out=np.ndarray, + plot=False, returnas=np.ndarray, fs=(12, 6), connect=connect) sig, units = out assert not np.all(np.isnan(sig)), str(ii) diff --git a/tofu/tests/tests02_data/tests03_core.py b/tofu/tests/tests02_data/tests03_core.py index 1d526102f..a10d8ed89 100644 --- a/tofu/tests/tests02_data/tests03_core.py +++ b/tofu/tests/tests02_data/tests03_core.py @@ -475,7 +475,7 @@ def setup_class(cls, nch=30, nt=50, SavePath='./', verb=False): lData = [None for ii in range(0,len(lc))] for ii in range(0,len(lc)): sig = lc[ii].calc_signal(emiss, t=t, res=0.01, method=lm[ii], - plot=False, out=np.ndarray)[0] + plot=False, returnas=np.ndarray)[0] sig = sig[:,:,None]*flamb[None,None,:] cla = eval('tfd.DataCam%sDSpectral'%('2' if lc[ii]._is2D() else '1')) data = cla(data=sig, Name='All', Diag='Test', diff --git 
a/tofu/utils.py b/tofu/utils.py index 06784657a..7868d9b39 100644 --- a/tofu/utils.py +++ b/tofu/utils.py @@ -142,7 +142,7 @@ def flatten_dict(d, parent_key='', sep=None, deep='ref', if k not in lexcept_key: if issubclass(v.__class__, ToFuObjectBase): if deep=='dict': - v = v.to_dict(deep='dict') + v = v.to_dict(sep=sep, deep='dict') elif deep=='copy': v = v.copy(deep='copy') new_key = parent_key + sep + k if parent_key else k @@ -320,7 +320,15 @@ def save(obj, path=None, name=None, sep=None, deep=False, mode='npz', # Get stripped dictionnary deep = 'dict' if deep else 'ref' if sep is None: - sep = _SEP + if mode == 'mat': + sep = '_' + else: + sep = _SEP + if mode == 'mat' and sep == '.': + msg = ("sep='.' cannot be used when mode='mat' (incompatible)\n" + + "Matlab would interpret variables as structures") + raise Exception(msg) + dd = obj.to_dict(strip=strip, sep=sep, deep=deep) pathfileext = os.path.join(path,name+'.'+mode) @@ -639,8 +647,8 @@ def _get_exception(q, ids, qtype='quantity'): def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, - ids=None, Name=None, out=None, tlim=None, config=None, - occ=None, indch=None, indDescription=None, equilibrium=None, + ids=None, Name=None, returnas=None, tlim=None, config=None, + occ=None, indch=None, description_2d=None, equilibrium=None, dsig=None, data=None, X=None, t0=None, dextra=None, plot=True, plot_sig=None, plot_X=None, sharex=False, invertx=None, @@ -657,14 +665,16 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, raise Exception(msg) lok = ['Config', 'Plasma2D', 'Cam', 'Data'] - c0 = out is None or out in lok + c0 = returnas is None or returnas in lok if not c0: - msg = "Arg out must be in %s"%str(lok) + msg = "Arg returnas must be in {}".format(str(lok)) raise Exception(msg) # ------------------- # Prepare ids - assert ids is None or type(ids) in [list,str] + if type(ids) not in [list, str]: + msg = "Please specify an ids to load data from!" 
+ raise Exception(msg) if type(ids) is str: ids = [ids] if type(ids) is list: @@ -826,17 +836,17 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, # ------------------- - # Prepare out - loutok = ['Config','Plasma2D','Cam','Data'] - c0 = out is None - c1 = out in loutok - c2 = type(out) is list and all([oo is None or oo in loutok - for oo in out]) + # Prepare returnas + loutok = ['Config', 'Plasma2D', 'Cam', 'Data'] + c0 = returnas is None + c1 = returnas in loutok + c2 = type(returnas) is list and all([oo is None or oo in loutok + for oo in returnas]) assert c0 or c1 or c2 if c0: - out = [None for _ in ids] + returnas = [None for _ in ids] elif c1: - out = [str(out) for _ in ids] + returnas = [str(returnas) for _ in ids] # Temporary caveat if nids > 1: @@ -852,35 +862,36 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, # Config if ids[ii] == 'wall': - assert out[ii] in [None,'Config'] - out[ii] = 'Config' - if out[ii] == 'Config': - assert ids[ii] in [None,'wall'] + assert returnas[ii] in [None, 'Config'] + returnas[ii] = 'Config' + if returnas[ii] == 'Config': + assert ids[ii] in [None, 'wall'] # Plasma2D lids = imas2tofu.MultiIDSLoader._lidsplasma if ids[ii] in lids: - assert out[ii] in [None,'Plasma2D'] - out[ii] = 'Plasma2D' - if out[ii] == 'Plasma2D': + assert returnas[ii] in [None, 'Plasma2D'] + returnas[ii] = 'Plasma2D' + if returnas[ii] == 'Plasma2D': assert ids[ii] in lids # Cam or Data lids = imas2tofu.MultiIDSLoader._lidsdiag if ids[ii] in lids: - assert out[ii] in [None,'Cam','Data'] - if out[ii] is None: - out[ii] = 'Data' - if out[ii] in ['Cam','Data']: + assert returnas[ii] in [None, 'Cam', 'Data'] + if returnas[ii] is None: + returnas[ii] = 'Data' + if returnas[ii] in ['Cam', 'Data']: assert ids[ii] in lids - dout = {shot[jj]: {oo:[] for oo in set(out)} for jj in range(0,nshot)} + dout = {shot[jj]: {oo: [] for oo in set(returnas)} + for jj in range(0, nshot)} # ------------------- # 
Prepare plot_ and complement ids - lPla = [ii for ii in range(0,nids) if out[ii] == 'Plasma2D'] - lCam = [ii for ii in range(0,nids) if out[ii] == 'Cam'] - lDat = [ii for ii in range(0,nids) if out[ii] == 'Data'] + lPla = [ii for ii in range(0, nids) if returnas[ii] == 'Plasma2D'] + lCam = [ii for ii in range(0, nids) if returnas[ii] == 'Cam'] + lDat = [ii for ii in range(0, nids) if returnas[ii] == 'Data'] nPla, nCam, nDat = len(lPla), len(lCam), len(lDat) if nDat > 1: plot_ = False @@ -960,23 +971,23 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, # export to instances for ii in range(0,nids): - if out[ii] == 'Config': - dout[ss]['Config'].append(multi.to_Config(Name=Name, occ=occ, - indDescription=indDescription, - plot=False)) + if returnas[ii] == 'Config': + dout[ss]['Config'].append(multi.to_Config( + Name=Name, occ=occ, + description_2d=description_2d, plot=False)) - elif out[ii] == 'Plasma2D': + elif returnas[ii] == 'Plasma2D': dout[ss]['Plasma2D'].append(multi.to_Plasma2D(Name=Name, occ=occ, tlim=tlim, dsig=dsig, t0=t0, plot=False, plot_sig=plot_sig, dextra=dextra, plot_X=plot_X, config=config, bck=bck)) - elif out[ii] == 'Cam': + elif returnas[ii] == 'Cam': dout[ss]['Cam'].append(multi.to_Cam(Name=Name, occ=occ, ids=lids[ii], indch=indch, config=config, plot=False)) - elif out[ii] == "Data": + elif returnas[ii] == "Data": dout[ss]['Data'].append(multi.to_Data(Name=Name, occ=occ, ids=lids[ii], tlim=tlim, dsig=dsig, config=config, data=data, X=X, indch=indch, @@ -990,7 +1001,7 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, # Config & Cam for ss in shot: - for k0 in set(['Config','Cam']).intersection(out): + for k0 in set(['Config', 'Cam']).intersection(returnas): for ii in range(0, len(dout[ss][k0])): dout[ss][k0][ii].plot() @@ -1020,13 +1031,13 @@ def load_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, dout = dout[shot[0]]['Data'][0] elif nshot == 1 and nPla == 1: 
dout = dout[shot[0]]['Plasma2D'][0] - return out + return dout def calc_from_imas(shot=None, run=None, user=None, tokamak=None, version=None, ids=None, Name=None, out=None, tlim=None, config=None, - occ=None, indch=None, indDescription=None, equilibrium=None, + occ=None, indch=None, description_2d=None, equilibrium=None, dsig=None, data=None, X=None, t0=None, dextra=None, Brightness=None, res=None, interp_t=None, plot=True, plot_compare=True, sharex=False, @@ -1625,7 +1636,7 @@ def to_dict(self, strip=None, sep=None, deep='ref'): # Call class-specific dd = self._to_dict() # --------------------- - dd['dId'] = self._get_dId() + dd['dId'] = self._get_dId(sep=sep) dd['dstrip'] = {'dict':self._dstrip, 'lexcept':None} dout = {} @@ -1646,7 +1657,7 @@ def to_dict(self, strip=None, sep=None, deep='ref'): dout = flatten_dict(dout, parent_key='', sep=sep, deep=deep) return dout - def _get_dId(self): + def _get_dId(self, sep=None): """ To be overloaded """ return {'dict':{}} @@ -1875,8 +1886,8 @@ def Id(self): """ return self._Id - def _get_dId(self): - return {'dict':self.Id.to_dict()} + def _get_dId(self, sep=None): + return {'dict': self.Id.to_dict(sep=sep)} def _reset(self): if hasattr(self,'_Id'): diff --git a/tofu/version.py b/tofu/version.py index 0ffec04af..cd4ac7d9e 100644 --- a/tofu/version.py +++ b/tofu/version.py @@ -1,2 +1,2 @@ # Do not edit, pipeline versioning governed by git tags! -__version__ = '1.4.2-a4-11-g4a3c745' +__version__ = '1.4.2b13-80-g038ef357'