diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..e27c05fcfa --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +**/*.py -text diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 8fdfbcd48f..7285607aae 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -4,10 +4,13 @@ on: push: branches: - main + tags: + - 'v*' pull_request: branches: - main - +env: + MACOSX_DEPLOYMENT_TARGET: 14.0 jobs: Build: @@ -19,51 +22,69 @@ jobs: os: ["ubuntu-latest", "macos-latest", "windows-2019"] python-version: ["3.10"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: + submodules: true fetch-depth: 0 - - uses: conda-incubator/setup-miniconda@v2 + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - name: Cache conda + uses: actions/cache@v3 + env: + CACHE_NUMBER: 0 + with: + path: ~/conda_pkgs_dir + key: + ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('ci/environment.yml') }} + + - uses: mamba-org/setup-micromamba@v1.9.0 with: - auto-update-conda: true environment-file: ci/environment.yml - python-version: ${{ matrix.python-version }} - auto-activate-base: false + create-args: >- + python=${{ matrix.python-version }} + cmake=3.30.0 + nodejs=18.20.5 - name: Install Windows Conda Packages if: contains(matrix.os, 'windows') - shell: bash -l {0} - run: conda install m2-bison=3.0.4 ninja + shell: bash -e -l {0} + run: micromamba install --freeze-installed m2-bison=3.0.4 m2-filesystem - name: Install Linux / macOS Conda Packages if: contains(matrix.os, 'ubuntu') || contains(matrix.os, 'macos') - shell: bash -l {0} - run: conda install bison=3.4 + shell: bash -e -l {0} + run: micromamba install --freeze-installed bison=3.4 - name: Conda info - shell: bash -l {0} + shell: bash -e -l {0} run: | - conda info - conda list + micromamba info + micromamba list - name: Setup Platform (Linux) if: contains(matrix.os, 'ubuntu') - shell: bash -l {0} + shell: bash -e -l {0} run: | echo "LFORTRAN_CMAKE_GENERATOR=Unix Makefiles" >> $GITHUB_ENV echo "WIN=0" >> $GITHUB_ENV echo "MACOS=0" >> $GITHUB_ENV + echo "ENABLE_RUNTIME_STACKTRACE=yes" >> $GITHUB_ENV - name: Setup Platform (macOS) if: contains(matrix.os, 'macos') - shell: bash -l {0} + shell: bash -e -l {0} run: | echo "LFORTRAN_CMAKE_GENERATOR=Unix Makefiles" >> $GITHUB_ENV echo "WIN=0" >> $GITHUB_ENV echo "MACOS=1" >> $GITHUB_ENV + echo "ENABLE_RUNTIME_STACKTRACE=yes" >> $GITHUB_ENV - name: Build (Linux / macOS) - shell: bash -l {0} + shell: bash -e -l {0} if: contains(matrix.os, 'ubuntu') || contains(matrix.os, 'macos') run: | xonsh ci/build.xsh @@ -72,17 +93,18 @@ jobs: if: contains(matrix.os, 'windows') shell: cmd run: | - set CONDA_INSTALL_LOCN=C:\\Miniconda - call %CONDA_INSTALL_LOCN%\Scripts\activate.bat - call conda activate test + set MAMBA_INSTALL_LOCN=C:\\Users\runneradmin\micromamba + call %MAMBA_INSTALL_LOCN%\Scripts\activate.bat + call micromamba activate lp set LFORTRAN_CMAKE_GENERATOR=Ninja set WIN=1 set MACOS=0 + set ENABLE_RUNTIME_STACKTRACE=no call "C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/Auxiliary/Build/vcvars64.bat" xonsh ci\build.xsh - name: Test (Linux / macOS) - shell: bash -l {0} + shell: bash -e -l {0} if: contains(matrix.os, 'ubuntu') || contains(matrix.os, 'macos') run: | xonsh ci/test.xsh @@ -91,11 +113,572 @@ jobs: if: contains(matrix.os, 'windows') shell: cmd run: | - set CONDA_INSTALL_LOCN=C:\\Miniconda - call %CONDA_INSTALL_LOCN%\Scripts\activate.bat - call 
conda activate test + set MAMBA_INSTALL_LOCN=C:\\Users\runneradmin\micromamba + call %MAMBA_INSTALL_LOCN%\Scripts\activate.bat + call micromamba activate lp set LFORTRAN_CMAKE_GENERATOR=Ninja set WIN=1 set MACOS=0 call "C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/Auxiliary/Build/vcvars64.bat" xonsh ci\test.xsh + + build_to_wasm: + name: Build LPython to WASM + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name : Remove existing node + shell: bash -l {0} + run : | + which node + node -v + sudo rm -rf /usr/local/bin/node /usr/local/bin/npm + + - name: Install Emscripten from Git + shell: bash -l {0} + run: | + mkdir $HOME/ext + cd $HOME/ext + + git clone https://github.com/emscripten-core/emsdk.git + cd emsdk + + ./emsdk install 3.1.35 + ./emsdk activate 3.1.35 + ./emsdk install node-18.20.3-64bit + ./emsdk activate node-18.20.3-64bit + + - name: Show Emscripten and Node Info + shell: bash -l {0} + run: | + set -ex + # Activate PATH and other environment variables in the current terminal + source $HOME/ext/emsdk/emsdk_env.sh + emcc -v + em++ -v + which node + node -v + + - name: Build to WASM + shell: bash -l {0} + run: | + set -ex + source $HOME/ext/emsdk/emsdk_env.sh # Activate Emscripten + ./build_to_wasm.sh + + - name: Test built lpython.wasm + shell: bash -l {0} + run: | + set -ex + source $HOME/ext/emsdk/emsdk_env.sh # Activate Emscripten + which node + node -v + node src/lpython/tests/test_lpython.js + + test_pip_pkgs: + name: Test PIP Installable Packages + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build Linux + shell: bash -e -l {0} + run: | + ./build0.sh + cmake . -GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLFORTRAN_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=sccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache + + cmake --build . 
-j16 --target install + + - name: PIP show version + shell: bash -e -l {0} + run: | + python -m pip -V + + - name: PIP install required packages + shell: bash -e -l {0} + run: | + # Package lpynn has lpython_emulation as dependency + # Hence, it should by default install lpython_emulation + python -m pip install lpython_emulation==0.0.1.9 lpynn==0.0.1.4 numpy==1.24.3 + + - name: PIP show installed packages + shell: bash -e -l {0} + run: | + python -m pip list + + - name: Test PIP Packages with Python + shell: bash -e -l {0} + run: | + python integration_tests/test_pip_import_01.py + + # - name: Test PIP Packages with LPython + # shell: bash -e -l {0} + # run: | + # pip_pkg_path=$(python -c "import site; print(site.getsitepackages()[0])") + # echo $pip_pkg_path + # ./src/bin/lpython integration_tests/test_pip_import_01.py -I $pip_pkg_path + + debug: + name: Check Debug build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build Linux + shell: bash -e -l {0} + run: | + ./build0.sh + export CXXFLAGS="-Werror" + cmake . -GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLFORTRAN_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=sccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache + + cmake --build . -j16 --target install + + - name: Test Linux + shell: bash -e -l {0} + run: | + ctest + ./run_tests.py -s + cd integration_tests + ./run_tests.py -b llvm c + ./run_tests.py -b llvm c -f + + release: + name: Check Release build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build Linux + shell: bash -e -l {0} + run: | + ./build0.sh + export CXXFLAGS="-Werror" + cmake . -GNinja \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS_RELEASE="-Wall -Wextra -O3 -funroll-loops -DNDEBUG" \ + -DWITH_LLVM=yes \ + -DLFORTRAN_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=sccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache + + cmake --build . 
-j16 --target install + + - name: Test Linux + shell: bash -e -l {0} + run: | + ctest --rerun-failed --output-on-failure + ./run_tests.py -s + cd integration_tests + ./run_tests.py -b llvm c + ./run_tests.py -b llvm c -f + + cpython_interop: + name: Test CPython Interop (@pythoncall) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build Linux + shell: bash -e -l {0} + run: | + ./build0.sh + cmake . -GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLFORTRAN_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=sccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache + + cmake --build . -j16 --target install + + - name: Test Linux + shell: bash -e -l {0} + run: | + cd integration_tests + ./run_tests.py -b cpython c_py llvm_py + ./run_tests.py -b cpython c_py llvm_py -f + + sympy: + name: Run SymPy tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + bison=3.4 + symengine=0.12.0 + sympy=1.11.1 + + - uses: hendrikmuhs/ccache-action@main + with: + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build + shell: bash -e -l {0} + run: | + ./build0.sh + cmake . -G"Unix Makefiles" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLPYTHON_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=no \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache + + cmake --build . -j16 --target install + + - name: Test + shell: bash -e -l {0} + run: | + cd integration_tests + ./run_tests.py -b c_sym cpython_sym llvm_sym llvm_jit + ./run_tests.py -b c_sym cpython_sym llvm_sym llvm_jit -f + + integration_tests_cpython: + name: Run Integration tests with Python ${{ matrix.python-version }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-name: lp + condarc: | + channels: + - conda-forge + create-args: >- + llvmdev=11.1.0 + bison=3.4 + re2c=2.2 + zlib=1.3.1 + cmake=3.30.0 + make=4.3 + python=${{ matrix.python-version }} + numpy=1.26.4 + + - uses: hendrikmuhs/ccache-action@main + with: + key: ${{ github.job }}-${{ matrix.python-version }} + + - name: Show Python Info + shell: bash -e -l {0} + run: | + which python + python -m pip -V + python -m pip list + python --version + + - name: Build + shell: bash -e -l {0} + run: | + ./build0.sh + cmake . 
-G"Unix Makefiles" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLPYTHON_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=no \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache + + cmake --build . -j16 --target install + + - name: Test + shell: bash -e -l {0} + run: | + cd integration_tests + ./run_tests.py -b cpython c_py + + test_llvm: + name: Test LLVM ${{ matrix.llvm-version }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + llvm-version: ["10", "15", "16"] + steps: + - uses: actions/checkout@v4 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1.8.0 + with: + environment-file: ci/environment_linux_llvm.yml + create-args: >- + llvmdev=${{ matrix.llvm-version }} + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.llvm-version }} + + - name: Build Linux + shell: bash -e -l {0} + run: | + ./build0.sh + export CXXFLAGS="-Werror" + cmake . -G"Unix Makefiles" \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DLFORTRAN_BUILD_ALL=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + -DCMAKE_C_COMPILER_LAUNCHER=sccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache + + cmake --build . -j16 --target install + + - name: Test Linux LLVM ${{ matrix.llvm-version }} + shell: bash -e -l {0} + run: | + ctest --output-on-failure + cd integration_tests + ./run_tests.py -b llvm llvm_jit + ./run_tests.py -b llvm llvm_jit -f + + build_jupyter_kernel: + name: Build Jupyter Kernel + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + jupyter=1.0.0 + python=3.10 + bison=3.4 + + - uses: hendrikmuhs/ccache-action@main + with: + variant: sccache + key: ${{ github.job }}-${{ matrix.os }} + + - name: Build LPython with Kernel + shell: bash -e -l {0} + run: | + ./build0.sh + export CXXFLAGS="-Werror" + cmake . 
-GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_LLVM=yes \ + -DWITH_XEUS=yes \ + -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" + + ninja install + ctest --output-on-failure + jupyter kernelspec list --json + + - name: Test Kernel + shell: bash -e -l {0} + run: | + ctest --output-on-failure + + upload_tarball: + name: Upload Tarball + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: true + fetch-depth: 0 + + - name: Git Submodule Update + shell: bash -e -l {0} + run: | + git submodule update --init --recursive + + - uses: mamba-org/setup-micromamba@v1 + with: + environment-file: ci/environment.yml + create-args: >- + python=3.10 + + - name: Create Source Tarball + shell: bash -e -l {0} + run: | + ./build0.sh + lpython_version=$( version + +# Generate a Python AST from Python.asdl (Python) +python grammar/asdl_py.py +# Generate a Python AST from Python.asdl (C++) +python libasr/src/libasr/asdl_cpp.py grammar/Python.asdl src/lpython/python_ast.h +# Generate a Fortran ASR from ASR.asdl (C++) +python libasr/src/libasr/asdl_cpp.py libasr/src/libasr/ASR.asdl libasr/src/libasr/asr.h +# Generate a wasm_visitor.h from libasr/src/libasr/wasm_instructions.txt (C++) +python libasr/src/libasr/wasm_instructions_visitor.py +# Generate the intrinsic_function_registry_util.h (C++) +python libasr/src/libasr/intrinsic_func_registry_util_gen.py + +# Generate the tokenizer and parser +pushd src/lpython/parser && re2c -W -b tokenizer.re -o tokenizer.cpp && popd +pushd src/lpython/parser && bison -Wall -d -r all parser.yy && popd diff --git a/build1.bat b/build1.bat new file mode 100644 index 0000000000..93f92be9c2 --- /dev/null +++ b/build1.bat @@ -0,0 +1,10 @@ +cmake ^ + -DCMAKE_BUILD_TYPE=Release ^ + -DWITH_LLVM=yes ^ + -DLPYTHON_BUILD_ALL=yes ^ + -DWITH_STACKTRACE=no ^ + -DCMAKE_PREFIX_PATH="%CONDA_PREFIX%" ^ + -DCMAKE_INSTALL_PREFIX=%cd%/inst ^ + . -G "Ninja" + +cmake --build . 
--config Release -j8 --target install diff --git a/build1.sh b/build1.sh index 7dd7b8325d..8bf7476431 100755 --- a/build1.sh +++ b/build1.sh @@ -8,6 +8,8 @@ cmake \ -DWITH_LLVM=yes \ -DLPYTHON_BUILD_ALL=yes \ -DWITH_STACKTRACE=yes \ + -DWITH_RUNTIME_STACKTRACE=yes \ + -DWITH_LSP=no \ -DWITH_LFORTRAN_BINARY_MODFILES=no \ -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH_LPYTHON;$CONDA_PREFIX" \ -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ diff --git a/build1_win.sh b/build1_win.sh deleted file mode 100644 index 90acc2fdc3..0000000000 --- a/build1_win.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -bash ci/version.sh -# Generate a Python AST from Python.asdl (Python) -python grammar/asdl_py.py -# Generate a Python AST from Python.asdl (C++) -python grammar/asdl_cpp.py grammar/Python.asdl src/lpython/python_ast.h -# Generate a Fortran ASR from ASR.asdl (C++) -python grammar/asdl_cpp.py src/libasr/ASR.asdl src/libasr/asr.h diff --git a/build_to_wasm.sh b/build_to_wasm.sh new file mode 100755 index 0000000000..2d11afb250 --- /dev/null +++ b/build_to_wasm.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +mkdir -p src/bin/asset_dir +cp src/runtime/*.py src/bin/asset_dir +cp -r src/runtime/lpython src/bin/asset_dir + +./build0.sh +emcmake cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS_DEBUG="-Wall -Wextra -fexceptions" \ + -DCMAKE_CXX_FLAGS_RELEASE="-Wall -Wextra -fexceptions" \ + -DWITH_LLVM=no \ + -DLPYTHON_BUILD_ALL=yes \ + -DLPYTHON_BUILD_TO_WASM=yes \ + -DWITH_STACKTRACE=no \ + -DWITH_LSP=no \ + -DWITH_LFORTRAN_BINARY_MODFILES=no \ + -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH_LFORTRAN;$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX=`pwd`/inst \ + . +cmake --build . -j16 diff --git a/ci/azure_install_macos.sh b/ci/azure_install_macos.sh deleted file mode 100755 index 5012f14ae7..0000000000 --- a/ci/azure_install_macos.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -conda config --set always_yes yes --set changeps1 no -conda info -a -conda update -q conda -conda install -c conda-forge python=3.8 re2c bison=3.4 m4 xonsh llvmdev=11.0.1 toml cmake=3.17.0 jupyter pytest xeus=1.0.1 xtl nlohmann_json cppzmq jupyter_kernel_test -export MACOSX_DEPLOYMENT_TARGET="10.12" -export CONDA_PREFIX=/usr/local/miniconda -export LFORTRAN_CMAKE_GENERATOR="Unix Makefiles" -export WIN=0 -export MACOS=1 -xonsh ci/build.xsh diff --git a/ci/build.xsh b/ci/build.xsh index 5f8f6469bb..bdab77d1b9 100755 --- a/ci/build.xsh +++ b/ci/build.xsh @@ -27,17 +27,21 @@ llvm-config --components bash ci/version.sh # Generate a Fortran ASR from ASR.asdl (C++) -python grammar/asdl_cpp.py src/libasr/ASR.asdl src/libasr/asr.h +python libasr/src/libasr/asdl_cpp.py libasr/src/libasr/ASR.asdl libasr/src/libasr/asr.h # Generate a Python AST from Python.asdl (C++) -python grammar/asdl_cpp.py grammar/Python.asdl src/lpython/python_ast.h +python libasr/src/libasr/asdl_cpp.py grammar/Python.asdl src/lpython/python_ast.h # Generate a Python AST from Python.asdl (Python) python grammar/asdl_py.py +# Generate a wasm_visitor.h from libasr/src/libasr/wasm_instructions.txt (C++) +python libasr/src/libasr/wasm_instructions_visitor.py +# Generate the intrinsic_function_registry_util.h (C++) +python libasr/src/libasr/intrinsic_func_registry_util_gen.py # Generate the tokenizer and parser pushd src/lpython/parser && re2c -W -b tokenizer.re -o tokenizer.cpp && popd pushd src/lpython/parser && bison -Wall -d -r all parser.yy && popd -$lpython_version=$(cat version).strip() +$lpython_version=$(cat lp_version).strip() 
$dest="lpython-" + $lpython_version bash ci/create_source_tarball0.sh tar xzf dist/lpython-$lpython_version.tar.gz @@ -49,11 +53,11 @@ cd test-bld # compiled in Release mode and we get link failures if we mix and match build # modes: BUILD_TYPE = "Release" -cmake -G $LFORTRAN_CMAKE_GENERATOR -DCMAKE_VERBOSE_MAKEFILE=ON -DWITH_LLVM=yes -DWITH_XEUS=yes -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DWITH_LFORTRAN_BINARY_MODFILES=no -DCMAKE_BUILD_TYPE=@(BUILD_TYPE) .. -cmake --build . --target install +cmake -G $LFORTRAN_CMAKE_GENERATOR -DCMAKE_VERBOSE_MAKEFILE=ON -DWITH_LLVM=yes -DWITH_LSP=yes -DWITH_XEUS=yes -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DWITH_LFORTRAN_BINARY_MODFILES=no -DCMAKE_BUILD_TYPE=@(BUILD_TYPE) -DWITH_RUNTIME_STACKTRACE=$ENABLE_RUNTIME_STACKTRACE .. +cmake --build . --target install -j16 ./src/lpython/tests/test_lpython #./src/bin/lpython < ../src/bin/example_input.txt -ctest --output-on-failure +ctest --output-on-failure -j16 cpack -V cd ../.. diff --git a/ci/create_source_tarball.sh b/ci/create_source_tarball.sh index f1bfb4b4b7..aaf9b8e5ee 100755 --- a/ci/create_source_tarball.sh +++ b/ci/create_source_tarball.sh @@ -2,6 +2,6 @@ set -ex -lfortran_version=$1 -export dest=lfortran-$lfortran_version +lpython_version=$1 +export dest=lpython-$lpython_version bash -x -o errexit ci/create_source_tarball0.sh diff --git a/ci/create_source_tarball0.sh b/ci/create_source_tarball0.sh index bffa0f3584..ff8d6ab7db 100755 --- a/ci/create_source_tarball0.sh +++ b/ci/create_source_tarball0.sh @@ -9,9 +9,10 @@ cmake -E copy_directory src $dest/src cmake -E copy_directory share $dest/share cmake -E copy_directory cmake $dest/cmake cmake -E copy_directory examples $dest/examples +cmake -E copy_directory libasr/src/libasr $dest/libasr/src/libasr # Copy Files: -cmake -E copy CMakeLists.txt README.md LICENSE version $dest +cmake -E copy CMakeLists.txt README.md LICENSE lp_version $dest # Create the tarball cmake -E make_directory dist diff --git a/ci/environment.yml b/ci/environment.yml index f71f06dbf0..db5ded2261 100644 --- a/ci/environment.yml +++ b/ci/environment.yml @@ -4,16 +4,19 @@ channels: - defaults dependencies: - llvmdev=11.1.0 - - toml - - pytest - - jupyter - - xeus=1.0.1 - - xtl - - nlohmann_json - - cppzmq - - jupyter_kernel_test - - xonsh - - re2c - - numpy + - toml=0.10.2 + - pytest=7.2.0 + - jupyter=1.0.0 + - xeus=5.1.0 + - xeus-zmq=3.0.0 + - nlohmann_json=3.11.3 + - jupyter_kernel_test=0.4.4 + - xonsh=0.13.3 + - re2c=2.2 + - numpy=1.26.4 + - zlib=1.3.1 + - zstd=1.5.6 + - ninja=1.11.0 + - rapidjson=1.1.0 # - bison=3.4 [not win] # - m2-bison=3.4 [win] diff --git a/ci/environment_linux_llvm.yml b/ci/environment_linux_llvm.yml new file mode 100644 index 0000000000..ffe4ddd2a9 --- /dev/null +++ b/ci/environment_linux_llvm.yml @@ -0,0 +1,20 @@ +name: lp +channels: + - conda-forge + - defaults +dependencies: + - git + - pip + - make + - re2c + - toml + - zlib + - cmake + - numpy + - flake8 + - setuptools + - bison=3.4 + - python=3.10.2 + - zstd-static=1.5 + - symengine=0.12.0 + - sympy=1.11.1 diff --git a/ci/test.xsh b/ci/test.xsh index 790318a3f7..23285e1ed9 100644 --- a/ci/test.xsh +++ b/ci/test.xsh @@ -13,8 +13,21 @@ src/bin/lpython -o expr2 expr2.o # Test the new Python frontend, manually for now: src/bin/lpython --show-ast tests/doconcurrentloop_01.py src/bin/lpython --show-asr tests/doconcurrentloop_01.py -src/bin/lpython --show-cpp tests/doconcurrentloop_01.py +# src/bin/lpython --show-cpp 
tests/doconcurrentloop_01.py -if $WIN != "1": +if $WIN == "1": + python run_tests.py --skip-run-with-dbg --no-color +else: python run_tests.py - python integration_tests/run_tests.py + src/bin/lpython examples/expr2.py + src/bin/lpython --backend=c examples/expr2.py + cd integration_tests + + if $(uname).strip() == "Linux": + python run_tests.py -j16 -b llvm cpython c wasm + python run_tests.py -j16 -b llvm cpython c wasm -f + python run_tests.py -j16 -b x86 wasm_x86 wasm_x64 + python run_tests.py -j16 -b x86 wasm_x86 wasm_x64 -f + else: + python run_tests.py -j1 -b llvm cpython c wasm + python run_tests.py -j1 -b llvm cpython c wasm -f diff --git a/ci/upload_tarball_to_release.sh b/ci/upload_tarball_to_release.sh new file mode 100755 index 0000000000..cc8f1e1c32 --- /dev/null +++ b/ci/upload_tarball_to_release.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -ex + +lpython_version=$( version +echo $version > lp_version diff --git a/cmake/UserOverride.cmake b/cmake/UserOverride.cmake index 3139f24c87..5c50301187 100644 --- a/cmake/UserOverride.cmake +++ b/cmake/UserOverride.cmake @@ -9,7 +9,12 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # g++ set(common "-Wall -Wextra") - set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 -march=native -funroll-loops -DNDEBUG") + if (CMAKE_SYSTEM_PROCESSOR MATCHES "ppc|powerpc") + set(native "-mtune=native") + else () + set(native "-march=native") + endif () + set(CMAKE_CXX_FLAGS_RELEASE_INIT "${common} -O3 ${native} -funroll-loops -DNDEBUG") set(CMAKE_CXX_FLAGS_DEBUG_INIT "${common} -g -ggdb") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") # icpc diff --git a/compiler_tester/tester.py b/compiler_tester/tester.py deleted file mode 100644 index ad038a29f7..0000000000 --- a/compiler_tester/tester.py +++ /dev/null @@ -1,274 +0,0 @@ -import hashlib -import json -import os -import pathlib -import shutil -import subprocess -from typing import Mapping, Any, List - -class RunException(Exception): - pass - -class ExecuteException(Exception): - pass - -class style: - reset = 0 - bold = 1 - dim = 2 - italic = 3 - underline = 4 - blink = 5 - rblink = 6 - reversed = 7 - conceal = 8 - crossed = 9 - - -class fg: - black = 30 - red = 31 - green = 32 - yellow = 33 - blue = 34 - magenta = 35 - cyan = 36 - gray = 37 - reset = 39 - -def color(value): - return "\033[" + str(int(value)) + "m"; - -def print_check(): - print("%s✓ %s" % (color(fg.green)+color(style.bold), - color(fg.reset)+color(style.reset))) - -def bname(base, cmd, filename): - hstring = cmd - if filename: - hstring += filename - h = hashlib.sha224(hstring.encode()).hexdigest()[:7] - if filename: - bname = os.path.basename(filename) - bname, _ = os.path.splitext(bname) - return "%s-%s-%s" % (base, bname, h) - else: - return "%s-%s" % (base, h) - -def _compare_eq_dict( - left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0 -) -> List[str]: - explanation = [] # type: List[str] - set_left = set(left) - set_right = set(right) - common = set_left.intersection(set_right) - same = {k: left[k] for k in common if left[k] == right[k]} - if same and verbose < 2: - explanation += ["Omitting %s identical items" % len(same)] - elif same: - explanation += ["Common items:"] - explanation += pprint.pformat(same).splitlines() - diff = {k for k in common if left[k] != right[k]} - if diff: - explanation += ["Differing items:"] - for k in diff: - explanation += [repr({k: left[k]}) + " != " + repr({k: right[k]})] - extra_left = set_left - set_right - len_extra_left = len(extra_left) - if len_extra_left: - 
explanation.append( - "Left contains %d more item%s:" - % (len_extra_left, "" if len_extra_left == 1 else "s") - ) - explanation.extend( - pprint.pformat({k: left[k] for k in extra_left}).splitlines() - ) - extra_right = set_right - set_left - len_extra_right = len(extra_right) - if len_extra_right: - explanation.append( - "Right contains %d more item%s:" - % (len_extra_right, "" if len_extra_right == 1 else "s") - ) - explanation.extend( - pprint.pformat({k: right[k] for k in extra_right}).splitlines() - ) - return explanation - -def fixdir(s): - local_dir = os.getcwd() - return s.replace(local_dir.encode(), "$DIR".encode()) - -def run(basename, cmd, out_dir, infile=None, extra_args=None): - """ - Runs the `cmd` and collects stdout, stderr, exit code. - - The stdout, stderr and outfile are saved in the `out_dir` directory and - all metadata is saved in a json file, whose path is returned from the - function. - - The idea is to use this function to test the compiler by running it with - an option to save the AST, ASR or LLVM IR or binary, and then ensure that - the output does not change. - - Arguments: - - basename ... name of the run - cmd ........ command to run, can use {infile} and {outfile} - out_dir .... output directory to store output - infile ..... optional input file. If present, it will check that it exists - and hash it. - extra_args . extra arguments, not part of the hash - - Examples: - - >>> run("cat2", "cat tests/cat.txt > {outfile}", "output", "tests/cat.txt") - >>> run("ls4", "ls --wrong-option", "output") - - """ - assert basename is not None and basename != "" - pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True) - if infile and not os.path.exists(infile): - raise RunException("The input file does not exist") - outfile = os.path.join(out_dir, basename + "." + "out") - cmd2 = cmd.format(infile=infile, outfile=outfile) - if extra_args: - cmd2 += " " + extra_args - r = subprocess.run(cmd2, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - if not os.path.exists(outfile): - outfile = None - if len(r.stdout): - stdout_file = os.path.join(out_dir, basename + "." + "stdout") - open(stdout_file, "wb").write(fixdir(r.stdout)) - else: - stdout_file = None - if len(r.stderr): - stderr_file = os.path.join(out_dir, basename + "." + "stderr") - open(stderr_file, "wb").write(fixdir(r.stderr)) - else: - stderr_file = None - - if infile: - infile_hash = hashlib.sha224(open(infile, "rb").read()).hexdigest() - else: - infile_hash = None - if outfile: - outfile_hash = hashlib.sha224(open(outfile, "rb").read()).hexdigest() - outfile = os.path.basename(outfile) - else: - outfile_hash = None - if stdout_file: - stdout_hash = hashlib.sha224(open(stdout_file, "rb").read()).hexdigest() - stdout_file = os.path.basename(stdout_file) - else: - stdout_hash = None - if stderr_file: - stderr_hash = hashlib.sha224(open(stderr_file, "rb").read()).hexdigest() - stderr_file = os.path.basename(stderr_file) - else: - stderr_hash = None - data = { - "basename": basename, - "cmd": cmd, - "infile": infile, - "infile_hash": infile_hash, - "outfile": outfile, - "outfile_hash": outfile_hash, - "stdout": stdout_file, - "stdout_hash": stdout_hash, - "stderr": stderr_file, - "stderr_hash": stderr_hash, - "returncode": r.returncode, - } - json_file = os.path.join(out_dir, basename + "." 
+ "json") - json.dump(data, open(json_file, "w"), indent=4) - return json_file - -def run_test(basename, cmd, infile=None, update_reference=False, - extra_args=None): - """ - Runs the test `cmd` and compare against reference results. - - The `cmd` is executed via `run` (passing in `basename` and `infile`) and - the output is saved in the `output` directory. The generated json file is - then compared against reference results and if it differs, the - RunException is thrown. - - Arguments: - - basename ........... name of the run - cmd ................ command to run, can use {infile} and {outfile} - infile ............. optional input file. If present, it will check that - it exists and hash it. - update_reference ... if True, it will copy the output into the reference - directory as reference results, overwriting old ones - extra_args ......... Extra arguments to append to the command that are not - part of the hash - - Examples: - - >>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt", - ... update_reference=True) - >>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt") - """ - s = " * %-6s " % basename - print(s, end="") - basename = bname(basename, cmd, infile) - if infile: - infile = os.path.join("tests", infile) - jo = run(basename, cmd, os.path.join("tests", "output"), infile=infile, - extra_args=extra_args) - jr = os.path.join("tests", "reference", os.path.basename(jo)) - do = json.load(open(jo)) - if update_reference: - shutil.copyfile(jo, jr) - for f in ["outfile", "stdout", "stderr"]: - if do[f]: - f_o = os.path.join(os.path.dirname(jo), do[f]) - f_r = os.path.join(os.path.dirname(jr), do[f]) - shutil.copyfile(f_o, f_r) - return - if not os.path.exists(jr): - raise RunException("The reference json file '%s' does not exist" % jr) - dr = json.load(open(jr)) - if do != dr: - e = _compare_eq_dict(do, dr) - print("The JSON metadata differs against reference results") - print("Reference JSON:", jr) - print("Output JSON: ", jo) - print("\n".join(e)) - if do["outfile_hash"] != dr["outfile_hash"]: - if do["outfile_hash"] is not None and dr["outfile_hash"] is not None: - fo = os.path.join("tests", "output", do["outfile"]) - fr = os.path.join("tests", "reference", dr["outfile"]) - if os.path.exists(fr): - print("Diff against: %s" % fr) - os.system("diff %s %s" % (fr, fo)) - else: - print("Reference file '%s' does not exist" % fr) - if do["stdout_hash"] != dr["stdout_hash"]: - if do["stdout_hash"] is not None and dr["stdout_hash"] is not None: - fo = os.path.join("tests", "output", do["stdout"]) - fr = os.path.join("tests", "reference", dr["stdout"]) - if os.path.exists(fr): - print("Diff against: %s" % fr) - os.system("diff %s %s" % (fr, fo)) - else: - print("Reference file '%s' does not exist" % fr) - if do["stderr_hash"] != dr["stderr_hash"]: - if do["stderr_hash"] is not None and dr["stderr_hash"] is not None: - fo = os.path.join("tests", "output", do["stderr"]) - fr = os.path.join("tests", "reference", dr["stderr"]) - if os.path.exists(fr): - print("Diff against: %s" % fr) - os.system("diff %s %s" % (fr, fo)) - else: - print("Reference file '%s' does not exist" % fr) - elif do["stderr_hash"] is not None and dr["stderr_hash"] is None: - fo = os.path.join("tests", "output", do["stderr"]) - print("No reference stderr output exists. 
Stderr:") - os.system("cat %s" % fo) - raise RunException("The reference result differs") - print_check() diff --git a/doc/mkdocs.yml b/doc/mkdocs.yml index 99f03e9c2e..2a1b4504b5 100644 --- a/doc/mkdocs.yml +++ b/doc/mkdocs.yml @@ -10,6 +10,7 @@ nav: - developer_tutorial.md - AST and ASR.md - CONTRIBUTING.md + - rebasing.md # Until https://gitlab.com/lfortran/lfortran/issues/89 is fixed, we disable the # "Edit on GitLab" link: diff --git a/doc/src/asr/asr_nodes/expression_nodes/IntrinsicFunction.md b/doc/src/asr/asr_nodes/expression_nodes/IntrinsicFunction.md new file mode 100644 index 0000000000..f5c53c6c7b --- /dev/null +++ b/doc/src/asr/asr_nodes/expression_nodes/IntrinsicFunction.md @@ -0,0 +1,107 @@ +# IntrinsicFunction + +An intrinsic function. An **expr** node. + +## Declaration + +### Syntax + +``` +IntrinsicFunction(expr* args, int intrinsic_id, int overload_id, + ttype type, expr? value) +``` + +### Arguments + +* `args` represents all arguments passed to the function +* `intrinsic_id` is the unique ID of the generic intrinsic function +* `overload_id` is the ID of the signature within the given generic function +* `type` represents the type of the output +* `value` is an optional compile time value + +### Return values + +The return value is the expression that the `IntrinsicFunction` represents. + +## Description + +**IntrinsicFunction** represents an intrinsic function (such as `Abs`, +`Modulo`, `Sin`, `Cos`, `LegendreP`, `FlipSign`, ...) that either the backend +or the middle-end (optimizer) needs to have some special logic for. Typically a +math function, but does not have to be. + +IntrinsicFunction is both side-effect-free (no writes to global variables) and +deterministic (no reads from global variables). They are also elemental: can be +vectorized over any argument(s). They can be used inside parallel code and +cached. + +The `intrinsic_id` determines the generic function uniquely (`Sin` and `Abs` +have different number, but `IntegerAbs` and `RealAbs` share the number) and +`overload_id` uniquely determines the signature starting from 0 for each +generic function (e.g., `IntegerAbs`, `RealAbs` and `ComplexAbs` can have +`overload_id` equal to 0, 1 and 2, and `RealSin`, `ComplexSin` can be 0, 1). + +Backend use cases: Some architectures have special hardware instructions for +operations like Sqrt or Sin and if they are faster than a software +implementation, the backend will use it. This includes the `FlipSign` function +which is our own "special function" that the optimizer emits for certain +conditional floating point operations, and the backend emits an efficient bit +manipulation implementation for architectures that support it. + +Middle-end use cases: the middle-end can use the high level semantics to +simplify, such as `sin(e)**2 + cos(e)**2 -> 1`, or it could approximate +expressions like `if (abs(sin(x) - 0.5) < 0.3)` with a lower accuracy version +of `sin`. + +We provide ASR -> ASR lowering transformations that substitute the given +intrinsic function with an ASR implementation using more primitive ASR nodes, +typically implemented in the surface language (say a `sin` implementation using +argument reduction and a polynomial fit, or a `sqrt` implementation using a +general power formula `x**(0.5)`, or `LegendreP(2,x)` implementation using a +formula `(3*x**2-1)/2`). 
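+
+As a hedged sketch (the function name `legendre_p2` below is illustrative, not
+the actual symbol emitted by the lowering pass), the `LegendreP(2, x)`
+substitution mentioned above could be written in the surface language as:
+
+```python
+from lpython import f64
+
+def legendre_p2(x: f64) -> f64:
+    # Surface-language body substituted for LegendreP(2, x):
+    # P_2(x) = (3*x**2 - 1) / 2
+    return (3.0*x*x - 1.0)/2.0
+```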
+ +This design also makes it possible to allow selecting using command line +options how certain intrinsic functions should be implemented, for example if +trigonometric functions should be implemented using our own fast +implementation, `libm` accurate implementation, we could also call into other +libraries. These choices should happen at the ASR level, and then the result +further optimized (such as inlined) as needed. + +## Types + +The argument types in `args` have the types of the corresponding signature as +determined by `intrinsic_id`. For example `IntegerAbs` accepts an integer, but +`RealAbs` accepts a real. + +## Examples + +The following example code creates `IntrinsicFunction` ASR node: + +```fortran +sin(0.5) +``` + +ASR: + +``` +(TranslationUnit + (SymbolTable + 1 + { + }) + [(IntrinsicFunction + [(RealConstant + 0.500000 + (Real 4 []) + )] + 0 + 0 + (Real 4 []) + (RealConstant 0.479426 (Real 4 [])) + )] +) +``` + +## See Also + +[FunctionCall]() \ No newline at end of file diff --git a/doc/src/built-in functions.md b/doc/src/built-in functions.md new file mode 100644 index 0000000000..b9804b5c28 --- /dev/null +++ b/doc/src/built-in functions.md @@ -0,0 +1,147 @@ +# Built-in Functions + +LPython has a variety of functions and types built into it that are always available. + +### abs(x) + +- **Parameter** + - x : integer (i8, i16, i32, i64), floating point number (f32, f64), complex number (c32, c64) or bool +- **Returns** : integer (i8, i16, i32, i64), floating point number (f32, f64) + +Returns the absolute value of a number. If the argument is a complex number, its magnitude is returned. + + +### bin(n) + +- **Parameters** + - n : integer (i32) +- **Returns** : str + +Returns the binary representation of n as a '0b' prefixed string. + + +### complex(x=0, y=0) + +- **Parameters** + - x : integer (i32, i64) or floating point number (f32, f64) + - y : integer (i32, i64) or floating point number (f32, f64) +- **Returns** : complex number (c32, c64) + +Returns a complex number with the provided real and imaginary parts. Both x and y should be of the same type. However, using both the 32-bit and 64-bit versions of the same type together is allowed. In that case, the returned complex number is of 64-bit type. + +Example: + +```python +real: i32 = 10 +imag: i64 = 22 +c: c64 = complex(real, imag) +``` + +### divmod(x, y) + +- **Parameters** + - x : integer (i32) + - y : integer (i32) +- **Returns** : tuple[i32, i32] + +Returns the tuple (x // y, x % y). + + +### exp(x) + +- ****Parameter**** + - x : floating point number (f32, f64) +- **Returns** : floating point number (f32, f64) between [0.0, inf] + +Returns the base-e exponential of x (ex), where e is the Euler's number (2.71828). For a very large output, the function returns **inf** indicating overflow. + + +### hex(n) + +- **Parameters** + - n : integer (i32) +- **Returns** : str + +Returns the hexadecimal representation of n as a '0x' prefixed string. + + +### len(s) + +- **Parameters** + - s : sequence (such as string, tuple, list or range) or collection (such as a dictionary or set) +- **Returns** : integer (i32) + +Returns the number of items present in an object. + + +### max(x, y) + +- **Parameters** + - x : integer (i32) or floating point number (f64) + - y : integer (i32) or floating point number (f64) +- **Returns** : integer (i32) or floating point number (f64) + +Returns the greater value between x and y. Both x and y should be of the same type. 
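+
+A short usage sketch, assuming the `from lpython import i32` convention used
+elsewhere in LPython code (the printed values follow from the definitions
+above):
+
+```python
+from lpython import i32
+
+x: i32 = 7
+y: i32 = 3
+print(max(x, y))   # 7
+print(bin(x))      # 0b111
+print(hex(x))      # 0x7
+```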
+ + +### min(x, y) + +- **Parameters** + - x : integer (i32) or floating point number (f64) + - y : integer (i32) or floating point number (f64) +- **Returns** : integer (i32) or floating point number (f64) + +Returns the smaller value between x and y. Both x and y should be of the same type. + + +### mod(x, y) + +- **Parameters** + - x : integer (i32, i64) or floating point number (f32, f64) + - y : integer (i32, i64) or floating point number (f32, f64) +- **Returns** : integer (i32, i64) or floating point number (f32, f64) + +Returns the remainder of x / y, or x when x is smaller than y. Both x and y should be of the same type. + + +### pow(x, y) + +- **Parameters** + - x : integer (i32, i64), floating point number (f32, f64), complex number (c32) or bool + - y: integer (i32, i64), floating point number (f32, f64) or bool +- **Returns** : integer (i32), floating point number (f32, f64) or a complex number + +Returns xy. When x is of type bool, y must also be of the same type. If x is 32-bit complex number (c32), y can only be a 32-bit integer (i32). + +**Note** : `x ** y` is the recommended method for doing the above calculation. + + +### round(x) + +- **Parameters** + - x : integer (i8, i16, i32, i64), floating point number (f32, f64) or bool +- **Returns** : integer (i8, i16, i32, i64) + +Returns the integer nearest to x. + + +### sum(arr) + +- **Parameters** + - arr : list of integers (list[i32], list[i64]) or floating point numbers (list[i32], list[f64]) +- **Returns** : integer (i32, i64) or floating point number (f32, f64) + +Returns the sum of all elements present in the list. + + +### oct(n) + +- **Parameters** + - n : integer (i32) +- **Returns** : str + +Returns the octal representation of n as a '0o' prefixed string. + + + + diff --git a/doc/src/design.md b/doc/src/design.md index d7510e6c29..d477039a0b 100644 --- a/doc/src/design.md +++ b/doc/src/design.md @@ -91,7 +91,7 @@ ASR is the simplest way to generate Fortran code, as one does not have to worry about the detailed syntax (as in AST) about how and where things are declared. One specifies the symbol table for a module, then for each symbol (functions, global variables, types, ...) 
one specifies the local -variables and if this is an interface then one needs to specify where one can +variables and if this is an interface then one must specify where one can find an implementation, otherwise a body is supplied with statements, those nodes are almost the same as in AST, except that each variable is just a reference to a symbol in the symbol table (so by construction one cannot have diff --git a/doc/src/developers_example.ipynb b/doc/src/developers_example.ipynb new file mode 100644 index 0000000000..c63754c8f0 --- /dev/null +++ b/doc/src/developers_example.ipynb @@ -0,0 +1,67 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c86338ac-53ca-4115-8c5a-8bf8a5c7113e", + "metadata": {}, + "outputs": [], + "source": [ + "%%showast\n", + "def add(x: i32, y: i32) -> i32:\n", + " return x + y" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23834b08-2f3f-45e7-a1ce-21a9fd4e5117", + "metadata": {}, + "outputs": [], + "source": [ + "%%showasr\n", + "def add(x: i32, y: i32) -> i32:\n", + " return x + y" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec7426b4-e2e5-416c-bcae-9bb9c8926c9b", + "metadata": {}, + "outputs": [], + "source": [ + "%%showllvm\n", + "def sub(x: i32, y: i32) -> i32:\n", + " return add(x, -y)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "716c56ef-8210-4daf-aa23-96b385801014", + "metadata": {}, + "outputs": [], + "source": [ + "%%showasm\n", + "def mul(x: i32, y: i32) -> i32:\n", + " return x * y" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "LPython", + "language": "python", + "name": "lpython" + }, + "language_info": { + "file_extension": ".f90", + "mimetype": "text/x-python", + "name": "python", + "version": "2018" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/src/installation.md b/doc/src/installation.md index 505e406a25..dff2027ff7 100644 --- a/doc/src/installation.md +++ b/doc/src/installation.md @@ -1,146 +1,209 @@ # Installation -All the instructions below work on Linux, macOS and Windows. +Follow the steps below to install and run LPython on Linux, Windows or macOS. + +## Prerequisites +- ### Install Conda + Follow the instructions provided [here](https://github.com/conda-forge/miniforge/#download) to install Conda on your platform (Linux, macOS and Windows) using a conda-forge distribution called Miniforge. + + For Windows, these are additional requirements: + - Miniforge Prompt + - Visual Studio (with "Desktop Development with C++" workload) + +- ### Set up your system + - Linux + - Run the following command to install some global build dependencies: + + ```bash + sudo apt-get install build-essential binutils-dev clang zlib1g-dev + ``` + - Windows + - Download and install [Microsoft Visual Studio Community](https://visualstudio.microsoft.com/downloads/) for free. + + - Run the Visual Studio Installer. Download and install the "Desktop Development with C++" workload which will install the Visual C++ Compiler (MSVC). + + - Launch the Miniforge prompt from the Desktop. It is recommended to use MiniForge instead of Powershell as the main terminal to build and write code for LPython. In the MiniForge Prompt, initialize the MSVC compiler using the below command: + + ```bash + call "C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\Tools\VsDevCmd" -arch=x64 + ``` + + You can optionally test MSVC via: + + ```bash + cl /? + link /? + ``` + + Both commands must print several pages of help text. 
+ + - Windows with WSL + - Install Miniforge Prompt and add it to path: + ```bash + wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O miniconda.sh + bash miniconda.sh -b -p $HOME/conda_root + export PATH="$HOME/conda_root/bin:$PATH" + conda init bash # (shell name) + ``` + - Open a new terminal window and run the following commands to install dependencies: + ```bash + conda create -n lp -c conda-forge llvmdev=11.0.1 bison=3.4 re2c python cmake make toml clangdev git + ``` + + - Optionally, you can change the directory to a Windows location using `cd /mnt/[drive letter]/[windows location]`. For e.g. - `cd mnt/c/Users/name/source/repos/`. + + +- ### Clone the LPython repository + Make sure you have `git` installed. Type the following command to clone the repository: + + ```bash + git clone https://github.com/lcompilers/lpython.git + cd lpython + ``` + + You may also use GitHub Desktop to do the same. + +## Building LPython +- ### Linux and macOS + - Create a Conda environment: -## Binaries + ```bash + conda env create -f environment_unix.yml + conda activate lp + ``` -The recommended way to install LFortran is using Conda. -Install Conda for example by installing the -[Miniconda](https://conda.io/en/latest/miniconda.html) installation by following instructions there for your platform. -Then create a new environment (you can choose any name, here we chose `lf`) and -activate it: -```bash -conda create -n lp -conda activate lp -``` -Then install LFortran by: -```bash -conda install lfortran -c conda-forge -``` -Now the `lf` environment has the `lfortran` compiler available, you can start the -interactive prompt by executing `lfortran`, or see the command line options using -`lfortran -h`. + - Generate the prerequisite files and build in Debug Mode: -The Jupyter kernel is automatically installed by the above command, so after installing Jupyter itself: -```bash -conda install jupyter -c conda-forge -``` -You can create a Fortran based Jupyter notebook by executing: -```bash -jupyter notebook -``` -and selecting `New->Fortran`. + ```bash + # if you are developing on top of a forked repository; please run following command first + # ./generate_default_tag.sh -## Build From a Source Tarball + ./build0.sh + ./build1.sh + ``` -This method is the recommended method if you just want to install LFortran, either yourself or in a package manager (Spack, Conda, Debian, etc.). The source tarball has all the generated files included and has minimal dependencies. +- ### Windows + - Create a Conda environment using the pre-existing file: -First we have to install dependencies, for example using Conda: -```bash -conda create -n lf python cmake llvmdev -conda activate lf -``` -Then download a tarball from -[https://lfortran.org/download/](https://lfortran.org/download/), -e.g.: -```bash -wget https://lfortran.github.io/tarballs/dev/lfortran-0.9.0.tar.gz -tar xzf lfortran-0.9.0.tar.gz -cd lfortran-0.9.0 -``` -And build: -``` -cmake -DWITH_LLVM=yes -DCMAKE_INSTALL_PREFIX=`pwd`/inst . -make -j8 -make install -``` -This will install the `lfortran` into the `inst/bin`. + ```bash + conda env create -f environment_win.yml + conda activate lp + ``` -## Build From Git + - Generate the prerequisite files and build in Release Mode: -We assume you have C++ compilers installed, as well as `git` and `wget`. -In Ubuntu, you can also install `binutils-dev` for stacktraces. 
+ ```bash + call build0.bat + call build1.bat + ``` +- ### Windows with WSL -If you do not have Conda installed, you can do so on Linux (and similarly on -other platforms): -```bash -wget --no-check-certificate https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh -bash miniconda.sh -b -p $HOME/conda_root -export PATH="$HOME/conda_root/bin:$PATH" -``` -Then prepare the environment: -```bash -conda create -n lp -c conda-forge llvmdev=11.0.1 bison=3.4 re2c python cmake make toml numpy -conda activate lp -``` -Clone the LFortran git repository: -``` -git clone https://gitlab.com/lfortran/lfortran.git -cd lfortran -``` -Generate files that are needed for the build (this step depends on `re2c`, `bison` and `python`): -```bash -./build0.sh -``` -Now the process is the same as installing from the source tarball. For example to build in Debug mode: -``` -cmake -DCMAKE_BUILD_TYPE=Debug -DWITH_LLVM=yes -DCMAKE_INSTALL_PREFIX=`pwd`/inst . -make -j8 -``` + - Activate the Conda environment: + ```bash + conda activate lp + ``` -Run tests: -```bash -ctest -./run_tests.py -``` -Run an interactive prompt: -```bash -./src/bin/lfortran -``` + - Run the following commands to build the project: + ```bash + ./build0.sh + cmake -DCMAKE_BUILD_TYPE=Debug -DWITH_LLVM=yes -DCMAKE_INSTALL_PREFIX=`pwd`/inst .\ + make -j8 + ``` -## Build on Windows with Visual Studio +## Tests -Install Conda for example by installing the Miniconda installation by following instructions there for your platform. If not already done, activate the Conda-Installation (cf. Conda installation instructions). +- ### Linux and macOS -First, clone the repo to a local folder. + - Run tests: -Launch a Conda command interpreter and run the following commands: -```bash -conda update -q conda -conda install -c conda-forge python=3.7 re2c m2-bison xonsh llvmdev=11.1.0 jupyter xeus=1.0.1 xtl nlohmann_json cppzmq jupyter_kernel_test pytest -``` -Next, `cd` to the root of the repository and run -```bash -.\build0.bat -``` + ```bash + ctest + ./run_tests.py + ``` + + - Update test references: + ``` + ./run_tests.py -u + ``` + + - Run integration tests: + + ```bash + cd integration_tests + ./run_tests.py + ``` + - In case you have recently updated macOS, you may get a warning like below in some test cases: + ```bash + ld: warning: object file (test_list_index2.out.tmp.o) was built for newer macOS version (14.0) than being linked (13.3) + ``` + This leads to mismatch of hashes with expected output in some test cases, this can be resolved by updating command line tools: + + ```bash + git clean -dfx + sudo rm -rf /Library/Developer/CommandLineTools # make sure you know what you're doing here + sudo xcode-select --install + ./build.sh + ./run_tests.py + ``` + + - Speed up Integration Tests on macOS + + Integration tests run slowly because Apple checks the hash of each + executable online before running. + + You can turn off that feature in the Privacy tab of the Security and Privacy item of System Preferences > Developer Tools > Terminal.app > "allow the apps below to run software locally that does not meet the system's security + policy." + + +- ### Windows + + - Run integration tests -Now, you can launch Visual Studio and open the LFortran folder. -Before the first build you have to set up the `ZLib`-pathes: Go to the CMake-Settings (Project -> CMake Setttings for lfortran) and check `Show advanced variables`. 
Scroll to the `ZLIB_...` variables and set: -- `ZLIB_INCLUDE_DIR` = \\Library\include -- `ZLIB_LIBRARY_[DEBUG|RELEASE]` = \\Library\lib\zlib.lib + ```bash + python run_tests.py --skip-run-with-dbg + ``` -Then you can generate the CMake-Cache and build the project. + - Update reference tests + + ```bash + python run_tests.py -u --skip-run-with-dbg + ``` + +## Examples (Linux and macOS) + +You can run the following examples manually in a terminal: + +```bash +./src/bin/lpython examples/expr2.py +./src/bin/lpython examples/expr2.py -o expr +./expr +./src/bin/lpython --show-ast examples/expr2.py +./src/bin/lpython --show-asr examples/expr2.py +./src/bin/lpython --show-cpp examples/expr2.py +./src/bin/lpython --show-llvm examples/expr2.py +./src/bin/lpython --show-c examples/expr2.py +``` ## Enabling the Jupyter Kernel To install the Jupyter kernel, install the following Conda packages also: ``` -conda install xeus xtl nlohmann_json cppzmq +conda install xeus=5.1.0 xeus-zmq=3.0.0 nlohmann_json ``` and enable the kernel by `-DWITH_XEUS=yes` and install into `$CONDA_PREFIX`. For example: ``` -cmake \ +cmake . -GNinja \ -DCMAKE_BUILD_TYPE=Debug \ -DWITH_LLVM=yes \ -DWITH_XEUS=yes \ -DCMAKE_PREFIX_PATH="$CONDA_PREFIX" \ - -DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" \ + -DCMAKE_INSTALL_PREFIX="$CONDA_PREFIX" . -cmake --build . -j4 --target install +ninja install ``` -To use it, install Jupyter (`conda install jupyter`) and test that the LFortran +To use it, install Jupyter (`conda install jupyter`) and test that the LPython kernel was found: ``` jupyter kernelspec list --json @@ -149,111 +212,14 @@ Then launch a Jupyter notebook as follows: ``` jupyter notebook ``` -Click `New->Fortran`. To launch a terminal jupyter LFortran console: -``` -jupyter console --kernel=fortran -``` - - -## Build From Git with Nix - -One of the ways to ensure exact environment and dependencies is with `nix`. This will ensure that system dependencies do not interfere with the development environment. If you want, you can report bugs in a `nix-shell` environment to make it easier for others to reproduce. - -### With Root - -We start by getting `nix`. The following multi-user installation will work on any machine with a Linux distribution, MacOS or Windows (via WSL): -```bash -sh <(curl -L https://nixos.org/nix/install) --daemon -``` -### Without Root - -If you would like to not provide `nix` with root access to your machine, on Linux distributions we can use [nix-portable](https://github.com/DavHau/nix-portable). -```bash -wget https://github.com/DavHau/nix-portable/releases/download/v003/nix-portable +Click `New->LPython`. To launch a terminal jupyter LPython console: ``` -Now just prepend all `nix-shell` commands with `NP_RUNTIME=bwrap ./nix-portable `. So: -```bash -# Do not -nix-shell --run "bash" -# Do -NP_RUNTIME=bwrap ./nix-portable nix-shell --run "bash" +jupyter console --kernel=lpython ``` -### Development +## Found a bug? +Please report any bugs you find at our issue tracker [here](https://github.com/lcompilers/lpython/issues). Or, even better, fork the repository on GitHub and create a Pull Request (PR). -Now we can enter the development environment: -```bash -nix-shell --run "bash" --cores 4 -j4 --pure ci/shell.nix -``` -The `--pure` flag ensures no system dependencies are used in the environment. +We welcome all changes, big or small. We will help you make a PR if you are new to git. 
-The build steps are the same as with the `ci`: -```bash -./build0.sh -./build1.sh -``` - -To change the compilation environment from `gcc` (default) to `clang` we can use `--argstr`: -```bash -nix-shell --run "bash" --cores 4 -j4 --pure ci/shell.nix --argstr clangOnly "yes" -``` - -## Note About Dependencies - -End users (and distributions) are encouraged to use the tarball -from [https://lfortran.org/download/](https://lfortran.org/download/), -which only depends on LLVM, CMake and a C++ compiler. - -The tarball is generated automatically by our CI (continuous integration) and -contains some autogenerated files: the parser, the AST and ASR nodes, which is generated by an ASDL -translator (requires Python). - -The instructions from git are to be used when developing LFortran itself. - -## Note for users who do not use Conda - -Following are the dependencies necessary for installing this -repository in development mode, - -- [Bison - 3.5.1](https://ftp.gnu.org/gnu/bison/bison-3.5.1.tar.xz) -- [LLVM - 11.0.1](https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/llvm-11.0.1.src.tar.xz) -- [re2c - 2.0.3](https://re2c.org/install/install.html) -- [binutils - 2.31.90](ftp://sourceware.org/pub/binutils/snapshots/binutils-2.31.90.tar.xz) - Make sure that you should enable the required options related to this dependency to build the dynamic libraries (the ones ending with `.so`). - -## Stacktraces - -LFortran can print stacktraces when there is an unhandled exception, as well as -on any compiler error with the `--show-stacktrace` option. This is very helpful -for developing the compiler itself to see where in LFortran the problem is. The -stacktrace support is turned off by default, to enable it, -compile LFortran with the `-DWITH_STACKTRACE=yes` cmake option after installing -the prerequisites on each platform per the instructions below. - -### Ubuntu - -In Ubuntu, `apt install binutils-dev`. - -### macOS - -If you use the default Clang compiler on macOS, then the stacktraces should -just work on both Intel and M1 based macOS (the CMake build system -automatically invokes the `dsymtuil` tool and our Python scripts to store the -debug information, see `src/bin/CMakeLists.txt` for more details). If it does -not work, please report a bug. - -If you do not like the default way, an alternative is to use bintutils. For -that, first install -[Spack](https://spack.io/), then: -``` -spack install binutils -spack find -p binutils -``` -The last command will show a full path to the installed `binutils` package. Add -this path to your shell config file, e.g.: -``` -export CMAKE_PREFIX_PATH_LFORTRAN=/Users/ondrej/repos/spack/opt/spack/darwin-catalina-broadwell/apple-clang-11.0.0/binutils-2.36.1-wy6osfm6bp2323g3jpv2sjuttthwx3gd -``` -and compile LFortran with the -`-DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH_LFORTRAN;$CONDA_PREFIX"` cmake option. -The `$CONDA_PREFIX` is there if you install some other dependencies (such as -`llvm`) using Conda, otherwise you can remove it. +If you have any questions or need help, please ask us at [Zulip](https://lfortran.zulipchat.com/) or on our [mailinglist](https://groups.io/g/lfortran). diff --git a/doc/src/rebasing.md b/doc/src/rebasing.md new file mode 100644 index 0000000000..26cefa3355 --- /dev/null +++ b/doc/src/rebasing.md @@ -0,0 +1,246 @@ + +You should clean your branch's commits, and we have two approaches for this. 
+ +# Rebasing +```bash +$ git log +commit 663edf45b80128f95b9e2b3d66f062bd29ca2e0b (HEAD -> test) +Author: Abdelrahman Khaled +Date: Sun Jul 31 15:33:40 2022 +0200 + + refactor + +commit 122a02bf3dc4d656e230e722933dcb7f87ded553 +Author: Abdelrahman Khaled +Date: Sun Jul 31 15:33:15 2022 +0200 + + refactor + +commit f6e7369d0771ac9a15cb8c3a4f9403fce16bdb3d +Author: Abdelrahman Khaled +Date: Sun Jul 31 15:31:57 2022 +0200 + + Added add4.py + +commit 1db4d82e8c71d8b89e8bed29689de4059c7d2f99 +Author: Abdelrahman Khaled +Date: Sun Jul 31 15:30:34 2022 +0200 + + Refactor + +commit c9aae5771ef20b165ad27d013c189326bca7e3b8 +Author: Abdelrahman Khaled +Date: Sat Jul 30 23:38:05 2022 +0200 + + Added add3.py + +commit 75556f15c2d01a10397cc1af8bc74dbb3e958061 +Author: Abdelrahman Khaled +Date: Sat Jul 30 23:37:04 2022 +0200 + + Added add2.py + +commit 132b89e0c0d4dc174a228171f01e111e191275c6 +Author: Abdelrahman Khaled +Date: Sat Jul 30 23:36:27 2022 +0200 + + Added add.py + +commit d9ade637a48186ae8b46ba305443c21121ceb5bd (origin/main, rebasing, main) +Merge: fd17b7de6 897d74763 +Author: Gagandeep Singh +Date: Sat Jul 30 15:46:28 2022 +0530 + + Merge pull request #851 from czgdp1807/tuple01 + + Implementing tuples in LLVM backend + +``` +here, we want to make our commits as a bunch of batches. + +we will rebase with main with the interactive option `git rebase main -i`. + +but first create a backup branch before doing this interactive rebase. + +history of your commits will show up on your editor, you have options to do with commits and they are written below commits history. +```bash +pick 132b89e0c Added add.py +pick 75556f15c Added add2.py +pick c9aae5771 Added add3.py +pick 1db4d82e8 Refactor +pick f6e7369d0 Added add4.py +pick 122a02bf3 refactor +pick 663edf45b refactor + +# Rebase d9ade637a..663edf45b onto d9ade637a (7 commands) +# +# Commands: +# p, pick = use commit +# r, reword = use commit, but edit the commit message +# e, edit = use commit, but stop for amending +# s, squash = use commit, but meld into previous commit +# f, fixup [-C | -c] = like "squash" but keep only the previous +# commit's log message, unless -C is used, in which case +# keep only this commit's message; -c is same as -C but +# opens the editor +# x, exec = run command (the rest of the line) using shell +# b, break = stop here (continue rebase later with 'git rebase --continue') +# d, drop = remove commit +# l, label