diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..fd64c09b3 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,166 @@ +_skbuild/ + +.envrc + +models/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+.idea/ diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..eb0fb9662 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,96 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +# Prerequisites + +Please answer the following questions for yourself before submitting an issue. + +- [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now. +- [ ] I carefully followed the [README.md](https://github.com/abetlen/llama-cpp-python/blob/main/README.md). +- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed). +- [ ] I reviewed the [Discussions](https://github.com/abetlen/llama-cpp-python/discussions), and have a new bug or useful enhancement to share. + +# Expected Behavior + +Please provide a detailed written description of what you were trying to do, and what you expected `llama-cpp-python` to do. + +# Current Behavior + +Please provide a detailed written description of what `llama-cpp-python` did, instead. + +# Environment and Context + +Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except under certain specific conditions. + +* Physical (or virtual) hardware you are using, e.g. for Linux: + +`$ lscpu` + +* Operating System, e.g. for Linux: + +`$ uname -a` + +* SDK version, e.g. for Linux: + +``` +$ python3 --version +$ make --version +$ g++ --version +``` + +# Failure Information (for bugs) + +Please help provide information about the failure if this is a bug. If it is not a bug, please remove the rest of this template. + +# Steps to Reproduce + +Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better. + +1. step 1 +2. step 2 +3. step 3 +4. etc. + +**Note: Many issues seem to be regarding functional or performance issues / differences with `llama.cpp`. In these cases we need to confirm that you're comparing against the version of `llama.cpp` that was built with your python package, and which parameters you're passing to the context.** + +Try the following: + +1. `git clone https://github.com/abetlen/llama-cpp-python` +2. `cd llama-cpp-python` +3. `rm -rf _skbuild/` # delete any old builds +4. `python -m pip install .` +5. `cd ./vendor/llama.cpp` +6. Follow [llama.cpp's instructions](https://github.com/ggerganov/llama.cpp#build) to `cmake` llama.cpp +7. Run llama.cpp's `./main` with the same arguments you previously passed to llama-cpp-python and see if you can reproduce the issue. If you can, [log an issue with llama.cpp](https://github.com/ggerganov/llama.cpp/issues) + +# Failure Logs + +Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes. + +Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [GitHub's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability. 
+ +Example environment info: +``` +llama-cpp-python$ git log | head -1 +commit 47b0aa6e957b93dbe2c29d53af16fbae2dd628f2 + +llama-cpp-python$ python3 --version +Python 3.10.10 + +llama-cpp-python$ pip list | egrep "uvicorn|fastapi|sse-starlette|numpy" +fastapi 0.95.0 +numpy 1.24.3 +sse-starlette 1.3.3 +uvicorn 0.21.1 + +llama-cpp-python/vendor/llama.cpp$ git log | head -3 +commit 66874d4fbcc7866377246efbcee938e8cc9c7d76 +Author: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> +Date: Thu May 25 20:18:01 2023 -0600 +``` diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..bbcbbe7d6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..6bf90273a --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,19 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "daily" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/workflows/build-and-release.yaml b/.github/workflows/build-and-release.yaml new file mode 100644 index 000000000..7307c85ab --- /dev/null +++ b/.github/workflows/build-and-release.yaml @@ -0,0 +1,144 @@ +name: Build Release + +on: workflow_dispatch + +permissions: + contents: write + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04, windows-2019, macos-13] + + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + # Used to host cibuildwheel + - uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install -e .[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + shell: cmd + + - name: Build wheels + uses: pypa/cibuildwheel@v2.22.0 + env: + # disable repair + CIBW_REPAIR_WHEEL_COMMAND: "" + with: + package-dir: . 
+ output-dir: wheelhouse + + - uses: actions/upload-artifact@v4 + with: + name: wheels-${{ matrix.os }} + path: ./wheelhouse/*.whl + + build_wheels_arm64: + name: Build arm64 wheels + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: linux/arm64 + + - name: Build wheels + uses: pypa/cibuildwheel@v2.22.0 + env: + CIBW_SKIP: "*musllinux* pp*" + CIBW_REPAIR_WHEEL_COMMAND: "" + CIBW_ARCHS: "aarch64" + CIBW_BUILD: "cp38-* cp39-* cp310-* cp311-* cp312-*" + with: + output-dir: wheelhouse + + - name: Upload wheels as artifacts + uses: actions/upload-artifact@v4 + with: + name: wheels_arm64 + path: ./wheelhouse/*.whl + + build_sdist: + name: Build source distribution + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install -e .[all] --verbose + python -m uv pip install build + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + python -m uv pip install build + shell: cmd + + - name: Build source distribution + run: | + python -m build --sdist + + - uses: actions/upload-artifact@v4 + with: + name: sdist + path: ./dist/*.tar.gz + + release: + name: Release + needs: [build_wheels, build_wheels_arm64, build_sdist] + runs-on: ubuntu-latest + + steps: + - uses: actions/download-artifact@v4 + with: + merge-multiple: true + path: dist + + - uses: softprops/action-gh-release@v2 + with: + files: dist/* + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/build-docker.yaml b/.github/workflows/build-docker.yaml new file mode 100644 index 000000000..b5c7346db --- /dev/null +++ b/.github/workflows/build-docker.yaml @@ -0,0 +1,50 @@ +name: Build Docker + +on: workflow_dispatch + +permissions: + contents: write + packages: write + +jobs: + docker: + name: Build and push Docker image + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: "recursive" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push + id: docker_build + uses: docker/build-push-action@v6 + with: + context: . 
+ file: "docker/simple/Dockerfile" + push: ${{ startsWith(github.ref, 'refs/tags/') }} + pull: true + platforms: linux/amd64,linux/arm64 + tags: | + ghcr.io/abetlen/llama-cpp-python:latest + ghcr.io/abetlen/llama-cpp-python:${{ github.ref_name }} + build-args: | + BUILDKIT_INLINE_CACHE=1 + + - name: Publish to GitHub Tag + if: steps.docker_build.outputs.digest && startsWith(github.ref, 'refs/tags/') + run: | + echo "Docker image published for tag: ${{ github.ref_name }}" diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml new file mode 100644 index 000000000..745b2e602 --- /dev/null +++ b/.github/workflows/build-wheels-cuda.yaml @@ -0,0 +1,136 @@ +name: Build Wheels (CUDA) + +on: workflow_dispatch + +permissions: + contents: write + +jobs: + define_matrix: + name: Define Build Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + defaults: + run: + shell: pwsh + + steps: + - name: Define Job Output + id: set-matrix + run: | + $matrix = @{ + 'os' = @('ubuntu-latest', 'windows-2019') + 'pyver' = @("3.9", "3.10", "3.11", "3.12") + 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1") #, "12.5.1", "12.6.1") + 'releasetag' = @("basic") + } + + $matrixOut = ConvertTo-Json $matrix -Compress + Write-Output ('matrix=' + $matrixOut) >> $env:GITHUB_OUTPUT + + build_wheels: + name: Build Wheel ${{ matrix.os }} ${{ matrix.pyver }} ${{ matrix.cuda }} ${{ matrix.releasetag == 'wheels' && 'AVX2' || matrix.releasetag }} + needs: define_matrix + runs-on: ${{ matrix.os }} + strategy: + matrix: ${{ fromJSON(needs.define_matrix.outputs.matrix) }} + defaults: + run: + shell: pwsh + env: + CUDAVER: ${{ matrix.cuda }} + AVXVER: ${{ matrix.releasetag }} + + steps: + - name: Add MSBuild to PATH + if: runner.os == 'Windows' + uses: microsoft/setup-msbuild@v2 + with: + vs-version: '[16.11,16.12)' + + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.pyver }} + cache: 'pip' + + - name: Setup Mamba + uses: conda-incubator/setup-miniconda@v3.1.0 + with: + activate-environment: "llamacpp" + python-version: ${{ matrix.pyver }} + miniforge-version: latest + add-pip-as-python-dependency: true + auto-activate-base: false + + - name: VS Integration Cache + id: vs-integration-cache + if: runner.os == 'Windows' + uses: actions/cache@v4 + with: + path: ./MSBuildExtensions + key: cuda-${{ matrix.cuda }}-vs-integration + + - name: Get Visual Studio Integration + if: runner.os == 'Windows' && steps.vs-integration-cache.outputs.cache-hit != 'true' + run: | + if ($env:CUDAVER -eq '12.1.1') {$x = '12.1.0'} else {$x = $env:CUDAVER} + $links = (Invoke-RestMethod 'https://raw.githubusercontent.com/Jimver/cuda-toolkit/master/src/links/windows-links.ts').Trim().split().where({$_ -ne ''}) + for ($i=$q=0;$i -lt $links.count -and $q -lt 2;$i++) {if ($links[$i] -eq "'$x',") {$q++}} + Invoke-RestMethod $links[$i].Trim("'") -OutFile 'cudainstaller.zip' + & 'C:\Program Files\7-Zip\7z.exe' e cudainstaller.zip -oMSBuildExtensions -r *\MSBuildExtensions\* > $null + Remove-Item 'cudainstaller.zip' + + - name: Install Visual Studio Integration + if: runner.os == 'Windows' + run: | + $y = (gi '.\MSBuildExtensions').fullname + '\*' + (gi 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\MSBuild\Microsoft\VC\*\BuildCustomizations').fullname.foreach({cp $y $_}) + $cupath = 'CUDA_PATH_V' + $env:CUDAVER.Remove($env:CUDAVER.LastIndexOf('.')).Replace('.','_') + echo 
"$cupath=$env:CONDA_PREFIX" >> $env:GITHUB_ENV + + - name: Install Dependencies + env: + MAMBA_DOWNLOAD_FAILFAST: "0" + MAMBA_NO_LOW_SPEED_LIMIT: "1" + run: | + $cudaVersion = $env:CUDAVER + mamba install -y 'cuda' -c nvidia/label/cuda-$cudaVersion + python -m pip install build wheel + + - name: Build Wheel + run: | + $cudaVersion = $env:CUDAVER.Remove($env:CUDAVER.LastIndexOf('.')).Replace('.','') + $env:CUDA_PATH = $env:CONDA_PREFIX + $env:CUDA_HOME = $env:CONDA_PREFIX + $env:CUDA_TOOLKIT_ROOT_DIR = $env:CONDA_PREFIX + if ($IsLinux) { + $env:LD_LIBRARY_PATH = $env:CONDA_PREFIX + '/lib:' + $env:LD_LIBRARY_PATH + } + $env:VERBOSE = '1' + $env:CMAKE_ARGS = '-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=all' + $env:CMAKE_ARGS = "-DGGML_CUDA_FORCE_MMQ=ON $env:CMAKE_ARGS" + # if ($env:AVXVER -eq 'AVX') { + $env:CMAKE_ARGS = $env:CMAKE_ARGS + ' -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off' + # } + # if ($env:AVXVER -eq 'AVX512') { + # $env:CMAKE_ARGS = $env:CMAKE_ARGS + ' -DGGML_AVX512=on' + # } + # if ($env:AVXVER -eq 'basic') { + # $env:CMAKE_ARGS = $env:CMAKE_ARGS + ' -DGGML_AVX=off -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off' + # } + python -m build --wheel + # write the build tag to the output + Write-Output "CUDA_VERSION=$cudaVersion" >> $env:GITHUB_ENV + + - uses: softprops/action-gh-release@v2 + with: + files: dist/* + # Set tag_name to -cu + tag_name: ${{ github.ref_name }}-cu${{ env.CUDA_VERSION }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/build-wheels-metal.yaml b/.github/workflows/build-wheels-metal.yaml new file mode 100644 index 000000000..9b97bf2f5 --- /dev/null +++ b/.github/workflows/build-wheels-metal.yaml @@ -0,0 +1,79 @@ +name: Build Wheels (Metal) + +on: workflow_dispatch + +permissions: + contents: write + +jobs: + build_wheels: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-13, macos-14, macos-15] + + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + # Used to host cibuildwheel + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + cache: 'pip' + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install -e .[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + shell: cmd + + - name: Build wheels + uses: pypa/cibuildwheel@v2.22.0 + env: + # disable repair + CIBW_REPAIR_WHEEL_COMMAND: "" + CIBW_ARCHS: "arm64" + CIBW_ENVIRONMENT: CMAKE_ARGS="-DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_APPLE_SILICON_PROCESSOR=arm64 -DGGML_METAL=on" + CIBW_BUILD: "cp39-* cp310-* cp311-* cp312-*" + with: + package-dir: . 
+ output-dir: wheelhouse2 + + - uses: actions/upload-artifact@v4 + with: + name: wheels-mac_${{ matrix.os }} + path: ./wheelhouse2/*.whl + + release: + name: Release + needs: [build_wheels] + runs-on: ubuntu-latest + + steps: + - uses: actions/download-artifact@v4 + with: + merge-multiple: true + path: dist2 + + - uses: softprops/action-gh-release@v2 + with: + files: dist2/* + # set release name to -metal + tag_name: ${{ github.ref_name }}-metal + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/generate-index-from-release.yaml b/.github/workflows/generate-index-from-release.yaml new file mode 100644 index 000000000..255ee67d6 --- /dev/null +++ b/.github/workflows/generate-index-from-release.yaml @@ -0,0 +1,57 @@ +name: Wheels Index + +on: + # Trigger on new release + workflow_run: + workflows: ["Release", "Build Wheels (CUDA)", "Build Wheels (Metal)"] + types: + - completed + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + # Single deploy job since we're just deploying + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Pages + uses: actions/configure-pages@v5 + - name: Build + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + ./scripts/get-releases.sh + ./scripts/releases-to-pep-503.sh index/whl/cpu '^[v]?[0-9]+\.[0-9]+\.[0-9]+$' + ./scripts/releases-to-pep-503.sh index/whl/cu121 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu121$' + ./scripts/releases-to-pep-503.sh index/whl/cu122 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu122$' + ./scripts/releases-to-pep-503.sh index/whl/cu123 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu123$' + ./scripts/releases-to-pep-503.sh index/whl/cu124 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + # ./scripts/releases-to-pep-503.sh index/whl/cu125 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + # ./scripts/releases-to-pep-503.sh index/whl/cu126 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + ./scripts/releases-to-pep-503.sh index/whl/metal '^[v]?[0-9]+\.[0-9]+\.[0-9]+-metal$' + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + # Upload entire repository + path: 'index' + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/publish-to-test-pypi.yaml b/.github/workflows/publish-to-test-pypi.yaml deleted file mode 100644 index 90fe19fd5..000000000 --- a/.github/workflows/publish-to-test-pypi.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Based on: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ - -name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI - -on: workflow_dispatch - -jobs: - build-n-publish: - name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m pip install --upgrade pip pytest cmake scikit-build setuptools - 
- name: Build source distribution - run: | - python setup.py sdist - # - name: Install pypa/build - # run: >- - # python -m - # pip install - # build - # --user - # - name: Build a binary wheel and a source tarball - # run: >- - # python -m - # build - # --sdist - # --wheel - # --outdir dist/ - # . - - name: Publish distribution 📦 to Test PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.TEST_PYPI_API_TOKEN }} - repository-url: https://test.pypi.org/legacy/ - # - name: Publish distribution 📦 to PyPI - # if: startsWith(github.ref, 'refs/tags') - # uses: pypa/gh-action-pypi-publish@release/v1 - # with: - # password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/publish-to-test.yaml b/.github/workflows/publish-to-test.yaml new file mode 100644 index 000000000..de3ae42aa --- /dev/null +++ b/.github/workflows/publish-to-test.yaml @@ -0,0 +1,62 @@ +# Based on: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ + +name: Publish to TestPyPI + +on: + workflow_dispatch: + inputs: + dev_version: + description: 'Dev version N' + required: true + + +jobs: + build-n-publish: + name: Build and publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: 'pip' + + - name: Append Dev Version to __version__ + run: | + DEV_VERSION=${{ github.event.inputs.dev_version }} + CURRENT_VERSION=$(awk -F= '/__version__ =/ {print $2}' llama_cpp/__init__.py | tr -d ' "') + NEW_VERSION="${CURRENT_VERSION}.dev${DEV_VERSION}" + sed -i 's/__version__ = \".*\"/__version__ = \"'"${NEW_VERSION}"'\"/' llama_cpp/__init__.py + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install -e .[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + shell: cmd + + - name: Build source distribution + run: | + python -m build --sdist + + - name: Publish to Test PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.TEST_PYPI_API_TOKEN }} + repository-url: https://test.pypi.org/legacy/ diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 000000000..bb76f5394 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,51 @@ +name: Publish to PyPI + +# Based on: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ + +on: workflow_dispatch + +jobs: + build-n-publish: + name: Build and publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install -e .[all] --verbose + python -m uv pip install build + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + 
python -m pip install uv + python -m uv pip install -e .[all] --verbose + python -m uv pip install build + shell: cmd + + - name: Build source distribution + run: | + python -m build --sdist + + - name: Publish distribution to PyPI + # TODO: move to tag based releases + # if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/test-pypi.yaml b/.github/workflows/test-pypi.yaml new file mode 100644 index 000000000..335033bba --- /dev/null +++ b/.github/workflows/test-pypi.yaml @@ -0,0 +1,112 @@ +name: Tests for PyPI package + +on: workflow_dispatch + +jobs: + build-linux: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install llama-cpp-python[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install llama-cpp-python[all] --verbose + shell: cmd + + - name: Test with pytest + run: | + python -c "import llama_cpp" + + build-windows: + + runs-on: windows-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install llama-cpp-python[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install llama-cpp-python[all] --verbose + shell: cmd + + - name: Test with pytest + run: | + python -c "import llama_cpp" + + build-macos: + + runs-on: macos-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: Install dependencies (Linux/MacOS) + if: runner.os != 'Windows' + run: | + python -m pip install --upgrade pip + python -m pip install uv + RUST_LOG=trace python -m uv pip install llama-cpp-python[all] --verbose + shell: bash + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + env: + RUST_LOG: trace + run: | + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install llama-cpp-python[all] --verbose + shell: cmd + + - name: Test with pytest + run: | + python -c "import llama_cpp" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index e8beb9ad7..95f6e5a27 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,76 +1,171 @@ name: Tests - on: + pull_request: + branches: + - main push: branches: - main +env: + REPO_ID: Qwen/Qwen2-0.5B-Instruct-GGUF + MODEL_FILE: qwen2-0_5b-instruct-q8_0.gguf + jobs: - build-linux: 
+ download-model: + runs-on: ubuntu-latest + steps: + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + - name: Install huggingface-hub + run: pip install huggingface-hub + - name: Download model + run: huggingface-cli download ${{ env.REPO_ID }} ${{ env.MODEL_FILE }} + - name: Cache model + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub + key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} + build-linux: + needs: download-model runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - submodules: "true" + submodules: "recursive" + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + cache: 'pip' + - name: Restore model cache + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub + key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} + - name: Install dependencies (Linux/MacOS) run: | - python -m pip install --upgrade pip pytest cmake scikit-build setuptools - pip install . -v + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + shell: bash - name: Test with pytest run: | - pytest + python -m pytest build-windows: - + needs: download-model runs-on: windows-latest strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - submodules: "true" + submodules: "recursive" + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + cache: 'pip' + + - name: Restore model cache + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub + key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} + + - name: Install dependencies (Windows) run: | - python -m pip install --upgrade pip pytest cmake scikit-build setuptools - pip install . 
-v + python -m pip install --upgrade pip + python -m pip install uv + python -m uv pip install -e .[all] --verbose + shell: cmd + - name: Test with pytest run: | - pytest + python -m pytest build-macos: - - runs-on: macos-latest + needs: download-model + runs-on: macos-13 strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - + python-version: ["3.9", "3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - submodules: "true" + submodules: "recursive" + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + + - name: System Info + run: | + uname -a + sysctl -n machdep.cpu.brand_string + python3 -c "import platform; print(platform.machine(), platform.architecture())" + + - name: Restore model cache + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub + key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} + + - name: Install dependencies (Linux/MacOS) + run: | + python3 -m pip install --upgrade pip + python3 -m pip install uv + python3 -m uv pip install -e .[all] --verbose + CMAKE_ARGS="-DLLAMA_METAL=off" python3 -m uv pip install .[all] --verbose + shell: bash + + - name: Test with pytest + run: | + python3 -m pytest + + build-macos-metal: + needs: download-model + runs-on: macos-13 + steps: + - uses: actions/checkout@v4 + with: + submodules: "recursive" + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: System Info + run: | + uname -a + sysctl -n machdep.cpu.brand_string + python3 -c "import platform; print(platform.machine(), platform.architecture())" + + - name: Restore model cache + uses: actions/cache@v4 + with: + path: ~/.cache/huggingface/hub + key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} + - name: Install dependencies run: | - python -m pip install --upgrade pip pytest cmake scikit-build setuptools - pip install . 
-v + python3 -m pip install --upgrade pip + CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install .[all] --verbose + shell: bash + - name: Test with pytest run: | - pytest \ No newline at end of file + python3 -m pytest diff --git a/.github/workflows/wheels.yaml b/.github/workflows/wheels.yaml deleted file mode 100644 index e49dad553..000000000 --- a/.github/workflows/wheels.yaml +++ /dev/null @@ -1,39 +0,0 @@ -name: Build - -on: workflow_dispatch - # push: - # branches: - # - main - -jobs: - build_wheels: - name: Build wheels on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-20.04, windows-2019, macOS-11] - - steps: - - uses: actions/checkout@v3 - with: - submodules: "true" - - # Used to host cibuildwheel - - uses: actions/setup-python@v3 - - - name: Install cibuildwheel - run: python -m pip install cibuildwheel==2.12.1 - - - name: Install dependencies - run: | - python -m pip install --upgrade pip pytest cmake scikit-build setuptools - - - name: Build wheels - run: python -m cibuildwheel --output-dir wheelhouse - # to supply options, put them in 'env', like: - # env: - # CIBW_SOME_OPTION: value - - - uses: actions/upload-artifact@v3 - with: - path: ./wheelhouse/*.whl diff --git a/.gitignore b/.gitignore index fd64c09b3..9d68dbcd9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,13 @@ +*.local + +.python-version + +.vscode/ + _skbuild/ .envrc +.direnv models/ @@ -10,7 +17,11 @@ __pycache__/ *$py.class # C extensions -*.so +llama_cpp/*.so +llama_cpp/*.dylib +llama_cpp/*.metal +llama_cpp/*.dll +llama_cpp/*.lib # Distribution / packaging .Python @@ -164,3 +175,6 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. .idea/ + +# downloaded model .bin files +docker/open_llama/*.bin diff --git a/.gitmodules b/.gitmodules index 6267b0930..7edf0975d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "vendor/llama.cpp"] path = vendor/llama.cpp - url = git@github.com:ggerganov/llama.cpp.git + url = https://github.com/ggerganov/llama.cpp.git diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..ff3e950cd --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,24 @@ +# Read the Docs configuration file for MkDocs projects +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + +mkdocs: + configuration: mkdocs.yml + +python: + install: + - method: pip + path: . + - requirements: docs/requirements.txt + +submodules: + include: all + recursive: true \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..affbd5db7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,842 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [0.3.9] + +- feat: Update llama.cpp to ggerganov/llama.cpp@8733e0cf6eefc7c7752297cc22d0836706f4222c + +## [0.3.8] + +- feat: Update llama.cpp to ggerganov/llama.cpp@7841fc723e059d1fd9640e5c0ef19050fcc7c698 + +## [0.3.7] + +- feat: Update llama.cpp to ggerganov/llama.cpp@794fe23f29fb40104975c91fe19f23798f7c726e +- fix(ci): Fix the CUDA workflow by @oobabooga in #1894 +- fix: error showing time spent in llama perf context print, adds `no_perf` flag to `Llama` class by @shakalaca in #1898 + +## [0.3.6] + +- feat: Update llama.cpp to ggerganov/llama.cpp@f7cd13301c2a88f97073fd119072b4cc92c08df1 +- fix(server): streaming resource lock by @gjpower in #1879 + +## [0.3.5] + +- feat: Update llama.cpp to ggerganov/llama.cpp@26a8406ba9198eb6fdd8329fa717555b4f77f05f +- fix(ci): Fix release by updating macos runner image to non-deprecated version by @abetlen in afedfc888462f9a6e809dc9455eb3b663764cc3f +- fix(server): add missing await statements for async exit_stack handling by @gjpower in #1858 + +## [0.3.4] + +- fix(ci): Build wheels for macos 13-15, cuda 12.1-12.4 by @abetlen in ca808028bd16b8327bd84128d48015a4b1304690 + +## [0.3.3] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ce8784bdb153ff7794dde5a50b0ebfa51baa6171 +- fix: chat API logprobs format by @domdomegg in #1788 +- feat: Add support for CUDA 12.6, fix CUDA 12.5 by @Smartappli in #1775 +- fix: Make content not required in ChatCompletionRequestAssistantMessage by @feloy in #1807 +- fix: Fix pickling of Llama class by setting seed from _seed member by @abetlen in 2523472c3eccb9ab9277117cc4ff705212b6888a +- fix: Fix logit-bias type hint by @ddh0 in #1802 +- fix(server): Avoid thread starvation on many concurrent requests by making use of asyncio to lock llama_proxy context by @gjpower in #1798 +- fix(server): Added missing exit_stack.close() to /v1/chat/completions by @Ian321 in #1796 +- fix(examples): Refactor Batching notebook to use new sampler chain API by @lukestanley in #1793 +- fix(docs): Update development instructions by @Florents-Tselai in #1833 +- fix(docs): Remove ref to llama_eval in llama_cpp.py docs by @richdougherty in #1819 + +## [0.3.2] + +- feat: Update llama.cpp to ggerganov/llama.cpp@74d73dc85cc2057446bf63cc37ff649ae7cebd80 + +## [0.3.1] + +- feat: Update llama.cpp to ggerganov/llama.cpp@c919d5db39c8a7fcb64737f008e4b105ee0acd20 +- feat: Expose libggml in internal APIs by @abetlen in #1761 +- fix: Fix speculative decoding by @abetlen in 9992c5084a3df2f533e265d10f81d4269b97a1e6 and e975dabf74b3ad85689c9a07719cbb181313139b +- misc: Rename all_text to remaining_text by @xu-song in #1658 + +## [0.3.0] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ea9c32be71b91b42ecc538bd902e93cbb5fb36cb +- feat: Enable detokenizing special tokens with special=True by @benniekiss in #1596 +- feat(ci): Speed up CI workflows using uv, add support for CUDA 12.5 wheels by @Smartappli in e529940f45d42ed8aa31334123b8d66bc67b0e78 +- feat: Add loading sharded GGUF files from HuggingFace with Llama.from_pretrained(additional_files=[...]) by @Gnurro in 84c092063e8f222758dd3d60bdb2d1d342ac292e +- feat: Add option to configure n_ubatch by @abetlen in 6c44a3f36b089239cb6396bb408116aad262c702 +- feat: Update sampling API for llama.cpp. Sampling now uses sampler chain by @abetlen in f8fcb3ea3424bcfba3a5437626a994771a02324b +- fix: Don't store scores internally unless logits_all=True. 
Reduces memory requirements for large context by @abetlen in 29afcfdff5e75d7df4c13bad0122c98661d251ab +- fix: Fix memory allocation of ndarray by @xu-song in #1704 +- fix: Use system message in og qwen format by @abetlen in 98eb092d3c6e7c142c4ba2faaca6c091718abbb3 + + +## [0.2.90] + +- feat: Update llama.cpp to ggerganov/llama.cpp@1d1ccce67613674c75c9c7e3fa4c1e24e428ba48 +- feat: Add support for `MiniCPMv26ChatHandler` and `minicpm-v-26` in server by @abetlen in f70df824985d875226793b94dacc0c302a4256b2 + +## [0.2.89] + +- feat: Update llama.cpp to ggerganov/llama.cpp@cfac111e2b3953cdb6b0126e67a2487687646971 +- fix: Llama.close didn't free lora adapter by @jkawamoto in #1679 +- fix: missing dependencies for test by @jkawamoto in #1680 + +## [0.2.88] + +- feat: Update llama.cpp to ggerganov/llama.cpp@fc4ca27b25464a11b3b86c9dbb5b6ed6065965c2 +- fix: only print 'cache saved' in verbose mode by @lsorber in #1668 +- fix: Added back from_file method to LlamaGrammar by @ExtReMLapin in #1673 +- fix: grammar prints on each call by @abetlen in 0998ea0deea076a547d54bd598d6b413b588ee2b +- feat: Enable recursive search of HFFS.ls when using from_pretrained by @benHeidabetlen in #1656 +- feat: Add more detailed log for prefix-match by @xu-song in #1659 + +## [0.2.87] + +- feat: Update llama.cpp to ggerganov/llama.cpp@be55695eff44784a141a863f273661a6bce63dfc +- fix: Include all llama.cpp source files and subdirectories by @abetlen in 9cad5714ae6e7c250af8d0bbb179f631368c928b +- feat(ci): Re-build wheel index automatically when releases are created by @abetlen in 198f47dc1bd202fd2b71b29e041a9f33fe40bfad + +## [0.2.86] + +- feat: Update llama.cpp to ggerganov/llama.cpp@398ede5efeb07b9adf9fbda7ea63f630d476a792 +- feat: Ported back new grammar changes from C++ to Python implementation by @ExtReMLapin in (#1637) +- fix: llama_grammar_accept_token arg order by @tc-wolf in (#1649) + +## [0.2.85] + +- feat: Update llama.cpp to ggerganov/llama.cpp@398ede5efeb07b9adf9fbda7ea63f630d476a792 +- fix: Missing LoRA adapter after API change by @shamitv in #1630 +- fix(docker): Update Dockerfile BLAS options by @olivierdebauche in #1632 +- fix(docker): Fix GGML_CUDA param by @olivierdebauche in #1633 +- fix(docker): Update Dockerfile build options from `LLAMA_` to `GGML_` by @olivierdebauche in #1634 +- feat: FreeBSD compatibility by @yurivict in #1635 + +## [0.2.84] + +- feat: Update llama.cpp to ggerganov/llama.cpp@4730faca618ff9cee0780580145e3cbe86f24876 +- fix: Correcting run.sh filepath in Simple Docker implementation by @mashuk999 in #1626 + +## [0.2.83] + +- feat: Update llama.cpp to ggerganov/llama.cpp@081fe431aa8fb6307145c4feb3eed4f48cab19f8 +- feat: Add 'required' literal to ChatCompletionToolChoiceOption by @mjschock in #1597 +- fix: Change repeat_penalty to 1.0 to match llama.cpp defaults by @ddh0 in #1590 +- fix(docs): Update README.md typo by @ericcurtin in #1589 +- fix(server): Use split_mode from model settings by @grider-withourai in #1594 +- feat(ci): Dockerfile update base images and post-install cleanup by @Smartappli in #1530 + +## [0.2.82] + +- feat: Update llama.cpp to ggerganov/llama.cpp@7fdb6f73e35605c8dbc39e9f19cd9ed84dbc87f2 + +## [0.2.81] + +- feat: Update llama.cpp to ggerganov/llama.cpp@968967376dc2c018d29f897c4883d335bbf384fb +- fix(ci): Fix CUDA wheels, use LLAMA_CUDA instead of removed LLAMA_CUBLAS by @abetlen in 4fb6fc12a02a68884c25dd9f6a421cacec7604c6 +- fix(ci): Fix MacOS release, use macos-12 image instead of removed macos-11 by @abetlen in 
3a551eb5263fdbd24b36d7770856374c04e92788 + +## [0.2.80] + +- feat: Update llama.cpp to ggerganov/llama.cpp@023b8807e10bc3ade24a255f01c1ad2a01bb4228 +- fix(server): Fix bug in FastAPI streaming response where dependency was released before request completes causing SEGFAULT by @abetlen in 296304b60bb83689659883c9cc24f4c074dd88ff +- fix(server): Update default config value for embeddings to False to fix error in text generation where logits were not allocated by llama.cpp by @abetlen in bf5e0bb4b151f4ca2f5a21af68eb832a96a79d75 +- fix(ci): Fix the CUDA workflow by @oobabooga in #1551 +- docs: Update readme examples to use newer Qwen2 model by @jncraton in #1544 + +## [0.2.79] + +- feat: Update llama.cpp to ggerganov/llama.cpp@9c77ec1d74874ee22bdef8f110e8e8d41389abf2 +- feat(ci): Update workflows and pre-built wheels by @Smartappli in #1416 +- feat: Add .close() method to Llama class to explicitly free model from memory by @jkawamoto in #1513 +- feat: Support SPM infill by @CISC in #1492 + +## [0.2.78] + +- feat: Update llama.cpp to ggerganov/llama.cpp@fd5ea0f897ecb3659d6c269ef6f3d833e865ead7 +- fix: Avoid duplicate special tokens in chat formats by @CISC in #1439 +- fix: fix logprobs when BOS is not present by @ghorbani in #1471 +- feat: adding rpc_servers parameter to Llama class by @chraac in #1477 + +## [0.2.77] + +- feat: Update llama.cpp to ggerganov/llama.cpp@bde7cd3cd949c1a85d3a199498ac98e78039d46f +- fix: string value kv_overrides by @abetlen in df45a4b3fe46e72664bda87301b318210c6d4782 +- fix: Fix typo in Llama3VisionAlphaChatHandler by @abetlen in 165b4dc6c188f8fda2fc616154e111f710484eba +- fix: Use numpy recarray for candidates data, fixes bug with temp < 0 by @abetlen in af3ed503e9ce60fe6b5365031abad4176a3536b3 +- fix: Disable Windows+CUDA workaround when compiling for HIPBLAS by @Engininja2 in #1493 + +## [0.2.76] + +- feat: Update llama.cpp to ggerganov/llama.cpp@0df0aa8e43c3378975269a51f9b876c8692e70da +- feat: Improve Llama.eval performance by avoiding list conversion by @thoughtp0lice in #1476 +- example: LLM inference with Ray Serve by @rgerganov in #1465 + +## [0.2.75] + +- feat: Update llama.cpp to ggerganov/llama.cpp@13ad16af1231ab2d245d35df3295bcfa23de1305 +- fix: segfault for models without eos / bos tokens by @abetlen in d99a6ba607a4885fb00e63e967964aa41bdbbbcb +- feat: add MinTokensLogitProcessor and min_tokens argument to server by @twaka in #1333 +- misc: Remove unnecessary metadata lookups by @CISC in #1448 + +## [0.2.74] + +- feat: Update llama.cpp to ggerganov/llama.cpp@b228aba91ac2cd9eb90e9d423ba1d0d20e0117e2 +- fix: Enable CUDA backend for llava by @abetlen in 7f59856fa6f3e23f07e12fc15aeb9359dc6c3bb4 +- docs: Fix typo in README.md by @yupbank in #1444 + +## [0.2.73] + +- feat: Update llama.cpp to ggerganov/llama.cpp@25c6e82e7a1ad25a42b0894e87d9b5c557409516 +- fix: Clear kv cache at beginning of image chat formats to avoid bug when image is evaluated first by @abetlen in ac55d0a175115d1e719672ce1cb1bec776c738b1 + +## [0.2.72] + +- fix(security): Remote Code Execution by Server-Side Template Injection in Model Metadata by @retr0reg in b454f40a9a1787b2b5659cd2cb00819d983185df +- fix(security): Update remaining jinja chat templates to use immutable sandbox by @CISC in #1441 + +## [0.2.71] + +- feat: Update llama.cpp to ggerganov/llama.cpp@911b3900dded9a1cfe0f0e41b82c7a29baf3a217 +- fix: Make leading bos_token optional for image chat formats, fix nanollava system message by @abetlen in 77122638b4153e31d9f277b3d905c2900b536632 +- fix: free last image embed in llava chat 
handler by @abetlen in 3757328b703b2cd32dcbd5853271e3a8c8599fe7 + +## [0.2.70] + +- feat: Update llama.cpp to ggerganov/llama.cpp@c0e6fbf8c380718102bd25fcb8d2e55f8f9480d1 +- feat: fill-in-middle support by @CISC in #1386 +- fix: adding missing args in create_completion for functionary chat handler by @skalade in #1430 +- docs: update README.md by @eltociear in #1432 +- fix: chat_format log where auto-detected format prints None by @balvisio in #1434 +- feat(server): Add support for setting root_path by @abetlen in 0318702cdc860999ee70f277425edbbfe0e60419 +- feat(ci): Add docker checks and check deps more frequently by @Smartappli in #1426 +- fix: detokenization case where first token does not start with a leading space by @noamgat in #1375 +- feat: Implement streaming for Functionary v2 + Bug fixes by @jeffrey-fong in #1419 +- fix: Use memmove to copy str_value kv_override by @abetlen in 9f7a85571ae80d3b6ddbd3e1bae407b9f1e3448a +- feat(server): Remove temperature bounds checks for server by @abetlen in 0a454bebe67d12a446981eb16028c168ca5faa81 +- fix(server): Propagate flash_attn to model load by @dthuerck in #1424 + +## [0.2.69] + +- feat: Update llama.cpp to ggerganov/llama.cpp@6ecf3189e00a1e8e737a78b6d10e1d7006e050a2 +- feat: Add llama-3-vision-alpha chat format by @abetlen in 31b1d95a6c19f5b615a3286069f181a415f872e8 +- fix: Change default value of verbose in image chat format handlers to True to match Llama by @abetlen in 4f01c452b6c738dc56eacac3758119b12c57ea94 +- fix: Suppress all logs when verbose=False, use hardcoded fileno's to work in colab notebooks by @abetlen in f116175a5a7c84569c88cad231855c1e6e59ff6e +- fix: UTF-8 handling with grammars by @jsoma in #1415 + +## [0.2.68] + +- feat: Update llama.cpp to ggerganov/llama.cpp@77e15bec6217a39be59b9cc83d6b9afb6b0d8167 +- feat: Add option to enable flash_attn to Llama params and ModelSettings by @abetlen in 22d77eefd2edaf0148f53374d0cac74d0e25d06e +- fix(ci): Fix build-and-release.yaml by @Smartappli in #1413 + +## [0.2.67] + +- fix: Ensure image renders before text in chat formats regardless of message content order by @abetlen in 3489ef09d3775f4a87fb7114f619e8ba9cb6b656 +- fix(ci): Fix bug in use of upload-artifact failing to merge multiple artifacts into a single release by @abetlen in d03f15bb73a1d520970357b702a9e7d4cc2a7a62 + +## [0.2.66] + +- feat: Update llama.cpp to ggerganov/llama.cpp@8843a98c2ba97a25e93319a104f9ddfaf83ce4c4 +- feat: Generic Chat Formats, Tool Calling, and Huggingface Pull Support for Multimodal Models (Obsidian, LLaVA1.6, Moondream) by @abetlen in #1147 +- ci(fix): Workflow actions updates and fix arm64 wheels not included in release by @Smartappli in #1392 +- ci: Add support for pre-built cuda 12.4.1 wheels by @Smartappli in #1388 +- feat: Add support for str type kv_overrides by @abetlen in a411612b385cef100d76145da1fbd02a7b7cc894 +- fix: Functionary bug fixes by @jeffrey-fong in #1385 +- examples: fix quantize example by @iyubondyrev in #1387 +- ci: Update dependabot.yml by @Smartappli in #1391 + +## [0.2.65] + +- feat: Update llama.cpp to ggerganov/llama.cpp@46e12c4692a37bdd31a0432fc5153d7d22bc7f72 +- feat: Allow for possibly non-pooled embeddings by @iamlemec in #1380 + +## [0.2.64] + +- feat: Update llama.cpp to ggerganov/llama.cpp@4e96a812b3ce7322a29a3008db2ed73d9087b176 +- feat: Add `llama-3` chat format by @andreabak in #1371 +- feat: Use new llama_token_is_eog in create_completions by @abetlen in d40a250ef3cfaa8224d12c83776a2f1de96ae3d1 +- feat(server): Provide ability to dynamically allocate 
all threads if desired using -1 by @sean-bailey in #1364 +- ci: Build arm64 wheels by @gaby in 611781f5319719a3d05fefccbbf0cc321742a026 +- fix: Update scikit-build-core build dependency to avoid bug in 0.9.1 by @evelkey in #1370 + +## [0.2.63] + +- feat: Update llama.cpp to ggerganov/llama.cpp@0e4802b2ecbaab04b4f829fde4a3096ca19c84b5 +- feat: Add stopping_criteria to ChatFormatter, allow stopping on arbitrary token ids, fixes llama3 instruct by @abetlen in cc81afebf04d26ca1ac3cf72f23f18da6ab58588 + +## [0.2.62] + +- feat: Update llama.cpp to ggerganov/llama.cpp@3b8f1ec4b18770531d0b1d792f3edf08254e4f0c +- feat: update grammar schema converter to match llama.cpp by @themrzmaster in #1353 +- feat: add disable_ping_events flag by @khimaros in #1257 +- feat: Make saved state more compact on-disk by @tc-wolf in #1296 +- feat: Use all available CPUs for batch processing by @ddh0 in #1345 + +## [0.2.61] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ba5e134e073ec6837078c874aba44a702944a676 +- fix: pass correct type to chat handlers for chat completion logprobs by @abetlen in bb65b4d76411112c6fb0bf759efd746f99ef3c6b +- feat: Add support for yaml based server configs by @abetlen in 060bfa64d529ade2af9b1f4e207a3937bbc4138f +- feat: Add typechecking for ctypes structure attributes by @abetlen in 1347e1d050fc5a9a32ffe0bb3e22858da28003bd + +## [0.2.60] + +- feat: Update llama.cpp to ggerganov/llama.cpp@75cd4c77292034ecec587ecb401366f57338f7c0 +- fix: Always embed metal library by @abetlen in b3bfea6dbfb6ed9ce18f9a2723e0a9e4bd1da7ad +- fix: missing logprobs in response, incorrect response type for functionary by @abetlen in 1ae3abbcc3af7f4a25a3ffc40b246f18039565e8 +- fix(docs): incorrect tool_choice example by @CISC in #1330 + +## [0.2.59] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ba0c7c70ab5b15f1f2be7fb0dfbe0366dda30d6c +- feat: Binary wheels for CPU, CUDA (12.1 - 12.3), Metal by @abetlen, @jllllll, and @oobabooga in #1247 +- fix: segfault when logits_all=False by @abetlen in 8649d7671bd1a7c0d9cc6a5ad91c6ca286512ab3 +- fix: last tokens passing to sample_repetition_penalties function by @ymikhailov in #1295 + +## [0.2.58] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ba0c7c70ab5b15f1f2be7fb0dfbe0366dda30d6c +- feat: add support for KV cache quantization options by @Limour-dev in #1307 +- feat: Add logprobs support to chat completions by @windspirit95 in #1311 +- fix: set LLAMA_METAL_EMBED_LIBRARY=on on MacOS arm64 by @bretello in #1289 +- feat: Add tools/functions variables to Jinja2ChatFormatter, add function response formatting for all simple chat formats by @CISC in #1273 +- fix: Changed local API doc references to hosted by @lawfordp2017 in #1317 + +## [0.2.57] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ac9ee6a4ad740bc1ee484ede43e9f92b5af244c1 +- fix: set default embedding pooling type to unspecified by @abetlen in 4084aabe867b8ec2aba1b22659e59c9318b0d1f3 +- fix: Fix and optimize functionary chat handler by @jeffrey-fong in #1282 +- fix: json mode for basic chat formats by @abetlen in 20e6815252d0efd9f015f7adbf108faaf36e3f3c + +## [0.2.56] + +- feat: Update llama.cpp to ggerganov/llama.cpp@c2101a2e909ac7c08976d414e64e96c90ee5fa9e +- feat(server): Add endpoints for tokenize, detokenize and count tokens by @felipelo in #1136 +- feat: Switch embed to llama_get_embeddings_seq by @iamlemec in #1263 +- fix: Fixed json strings grammar by blacklisting character control set by @ExtReMLapin in d02a9cf16ff88ad011e2eb1ce29f4d9400f13cd1 +- fix: Check for existence of clip model path by 
@kejcao in #1264 + +## [0.2.55] + +- feat: Update llama.cpp to ggerganov/llama.cpp@9731134296af3a6839cd682e51d9c2109a871de5 +- docs: fix small typo in README: 'model know how' -> 'model knows how' by @boegel in #1244 + +## [0.2.54] + +- feat: Update llama.cpp to ggerganov/llama.cpp@cb49e0f8c906e5da49e9f6d64a57742a9a241c6a +- docs: fix typo in README.md embeddings example by @iamlemec in #1232 + +## [0.2.53] + +- feat: Update llama.cpp to ggerganov/llama.cpp@cb49e0f8c906e5da49e9f6d64a57742a9a241c6a +- fix: eos/bos_token set correctly for Jinja2ChatFormatter and automatic chat formatter by @CISC in #1230 + +## [0.2.52] + +- feat: Update llama.cpp to ggerganov/llama.cpp@a33e6a0d2a66104ea9a906bdbf8a94d050189d91 +- fix: Llava15ChatHandler (this function takes at least 4 arguments) by @abetlen in 8383a9e5620f5df5a88f62da16813eac200dd706 + +## [0.2.51] + +- feat: Update llama.cpp to ggerganov/llama.cpp@c39373398803c669056304090050fe3f44b41bf9 +- fix: Restore type hints for low-level api by @abetlen in 19234aa0dbd0c3c87656e65dd2b064665371925b + +## [0.2.50] + +- docs: Update Functionary OpenAI Server Readme by @jeffrey-fong in #1193 +- fix: LlamaHFTokenizer now receives pre_tokens by @abetlen in 47bad30dd716443652275099fa3851811168ff4a + +## [0.2.49] + +- fix: module 'llama_cpp.llama_cpp' has no attribute 'c_uint8' in Llama.save_state by @abetlen in db776a885cd4c20811f22f8bd1a27ecc71dba927 +- feat: Auto detect Mixtral's slightly different format by @lukestanley in #1214 + +## [0.2.48] + +- feat: Update llama.cpp to ggerganov/llama.cpp@15499eb94227401bdc8875da6eb85c15d37068f7 +- feat: Add Google's Gemma formatting via chat_format="gemma" by @alvarobartt in #1210 +- feat: support minItems/maxItems in JSON grammar converter by @nopperl in 3921e10770996d95a9eb22c8248bacef39f69365 +- fix: Update from_pretrained defaults to match hf_hub_download and pull to local cache folder by @abetlen in e6d6260a91b7831733f7d1f73c7af46a3e8185ed +- fix: Raise exceptions when llama model or context fails to load by @abetlen in dd22010e85265ae840c76ec835d67a29ed852722 +- docs: Update README.md to fix pip install llama cpp server by @audip in #1187 + +## [0.2.47] + +- feat: Update llama.cpp to ggerganov/llama.cpp@973053d8b0d04809836b3339a50f68d9c842de90 + +## [0.2.46] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ba2135ccae7462470b3865c6e41d2e1d734eac05 +- feat: Pull models directly from huggingface by @abetlen in #1206 +- feat(low-level-api): Improve API static type-safety and performance. Low level api functions are positional args only now. 
by @abetlen in #1205 + +## [0.2.45] + +- feat: Update llama.cpp to ggerganov/llama.cpp@89febfed9322c8849520dc63c93ee4f5fd72556e + +## [0.2.44] + +- feat: Update llama.cpp to ggerganov/llama.cpp@4524290e87b8e107cc2b56e1251751546f4b9051 +- fix: create_embedding broken response for input type str by @abetlen in 0ce66bc080fe537590b05b24bf442480bf2dd045 +- fix: Use '\n' seperator for EventSourceResponse by @khimaros in #1188 +- fix: Incorporate embedding pooling layer fixes by @iamlemec in #1194 + +## [0.2.43] + +- feat: Update llama.cpp to ggerganov/llama.cpp@8084d554406b767d36b3250b3b787462d5dd626f +- feat: Support batch embeddings by @iamlemec in #1186 +- fix: submodule kompute is not included in sdist by @abetlen in 7dbbfdecadebe7750be650d9409959640ff9a460 +- fix: fix: Update openbuddy prompt format by @abetlen in 07a783779a62a4aac0b11161c7e0eb983ff215f8 + +## [0.2.42] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ea9c8e11436ad50719987fa23a289c74b7b40d40 +- fix: sample idx off-by-one error for logit_processors by @lapp0 in #1179 +- fix: chat formatting bugs in `chatml-function-calling` by @abetlen in 4b0e3320bd8c2c209e29978d0b21e2e471cc9ee3 and 68fb71b6a26a1e57331868f959b47ab4b87851e1 + +## [0.2.41] + +- feat: Update llama.cpp to ggerganov/llama.cpp@895407f31b358e3d9335e847d13f033491ec8a5b +- fix: Don't change order of json schema object properties in generated grammar unless prop_order is passed by @abetlen in d1822fed6b706f38bd1ff0de4dec5baaa3cf84fa + +## [0.2.40] + +- feat: Update llama.cpp to ggerganov/llama.cpp@3bdc4cd0f595a6096cca4a64aa75ffa8a3503465 +- feat: Generic chatml Function Calling using chat_format="chatml-function-calling"` by @abetlen in #957 +- fix: Circular dependancy preventing early Llama object free by @notwa in #1176 +- docs: Set the correct command for compiling with syscl support by @akarshanbiswas in #1172 +- feat: use gpu backend for clip if available by @iamlemec in #1175 + +## [0.2.39] + +- feat: Update llama.cpp to ggerganov/llama.cpp@b08f22c882a1443e6b97081f3ce718a4d1a741f8 +- fix: Fix destructor logging bugs by using llama_log_callback to avoid suppress_stdout_stderr by @abetlen in 59760c85eddc72dfcc1839f43760ef72c23d6874 + +## [0.2.38] + +- feat: Update llama.cpp to ggerganov/llama.cpp@1cfb5372cf5707c8ec6dde7c874f4a44a6c4c915 +- feat: Add speculative decoding by @abetlen in #1120 +- fix: Pass raise_exception and add_generation_prompt to jinja2 chat template by @abetlen in 078cca0361bf5a94d2cf52ed04980d20e32d6f95 + +## [0.2.37] + +- feat: Update llama.cpp to ggerganov/llama.cpp@fea4fd4ba7f6b754ac795387b275e1a014a77bde +- feat: Automatically set chat format from gguf by @abetlen in #1110 + +## [0.2.36] + +- feat: Update llama.cpp to ggerganov/llama.cpp@2aed77eb06a329f0d82bb1c467f4244904d4073f +- feat: Add mistral instruct chat format as "mistral-instruct" by @Rafaelblsilva in #799 + +## [0.2.35] + +- feat: Update llama.cpp to ggerganov/llama.cpp@d2f650cb5b04ee2726663e79b47da5efe196ce00 + +## [0.2.34] + +- feat: Update llama.cpp to ggerganov/llama.cpp@6db2b41a76ee78d5efdd5c3cddd5d7ad3f646855 +- feat: Add json schema mode by @abetlen in #1122 + +## [0.2.33] + +- feat: Update llama.cpp to ggerganov/llama.cpp@faa3526a1eba458120987ed8269e5616385a76f4 +- feat(server): include llama-cpp-python version in openapi spec by @abetlen in cde7514c3d28e6d52f272614e9957208c344dde5 +- fix: use both eos and bos tokens as stop sequences for hf-tokenizer-config chat format. 
by @abetlen in 5b982d0f8c6f35242c8862ffdce00e17cea0b44f +- fix: GGUF metadata KV overrides, re #1011 by @phiharri in #1116 +- fix: llama_log_set should be able to accept null pointer by @abetlen in c970d41a85381fd55235136f123422df0bf0c7e7 + +## [0.2.32] + +- feat: Update llama.cpp to ggerganov/llama.cpp@504dc37be8446fb09b1ede70300250ad41be32a2 +- fix: from_json_schema oneof/anyof bug by @jndiogo in d3f5528ca8bcb9d69d4f27e21631e911f1fb9bfe +- fix: pass chat handler not chat formatter for huggingface autotokenizer and tokenizer_config formats by @abetlen in 24f39454e91cf5dddbc4b6041aead4accc7c7a2d +- feat: Add add_generation_prompt option for jinja2chatformatter by @abetlen in 7f3209b1eb4ad3260ba063801fab80a8c25a2f4c +- feat: Add Jinja2ChatFormatter by @abetlen in be09318c26add8674ce494ae7cc480cce72a4146 +- feat: Expose gguf model metadata in metadata property by @abetlen in 5a34c57e5479e50c99aba9b38218cc48e6560b81 + +## [0.2.31] + +- feat: Update llama.cpp to ggerganov/llama.cpp@a5cacb22b2114fd9adf61c00cbb237384d86bced +- fix: Mirostat sampling now passes correct type to ctypes and tracks state during generation by @abetlen in 3babe3512cb95743108f2b595210c38ed6f1b904 +- fix: Python3.8 support in server by @abetlen in 141293a75b564a8699e0acba1da24d9aa1cf0ab1 + +## [0.2.30] + +- feat: Update llama.cpp to ggerganov/llama.cpp@57e2a7a52a819883f40dada8a2edc24ecf48186b +- feat(server): Add ability to load chat format from huggingface autotokenizer or tokenizer_config.json files by @abetlen in b8fc1c7d83ad4a9207c707ba1d954fe580286a01 +- feat: Integration of Jinja2 Templating for chat formats by @teleprint-me in #875 +- fix: Offload KQV by default by @abetlen in 48c3b77e6f558a9899de0e1155c7dc0c7958d8e8 +- fix: Support Accept text/event-stream in chat and completion endpoints, resolves #1083 by @aniljava in #1088 +- fix(cli): allow passing n_ctx=0 to openAI API server args to use model n_ctx_train field per #1015 by @K-Mistele in #1093 + +## [0.2.29] + +- feat: Update llama.cpp to ggerganov/llama.cpp@4483396751c79dea540808b9cb9238245d06da2b +- feat: Add split_mode option by @abetlen in 84615adbc6855c8384807c42f0130f9a1763f99d +- feat: Implement GGUF metadata KV overrides by @phiharri in #1011 +- fix: Avoid "LookupError: unknown encoding: ascii" when open() called in a destructor by @yieldthought in #1012 +- fix: Fix low_level_api_chat_cpp example to match current API by @aniljava in #1086 +- fix: Fix Pydantic model parsing by @DeNeutoy in #1087 + +## [0.2.28] + +- feat: Update llama.cpp to ggerganov/llama.cpp@6efb8eb30e7025b168f3fda3ff83b9b386428ad6 +- feat: Add ability to pass in penalize_nl param by @shankinson in #1068 +- fix: print_grammar to stderr by @turian in #1052 + +## [0.2.27] + +- feat: Update llama.cpp to ggerganov/llama.cpp@b3a7c20b5c035250257d2b62851c379b159c899a +- feat: Add `saiga` chat format by @femoiseev in #1050 +- feat: Added `chatglm3` chat format by @xaviviro in #1059 +- fix: Correct typo in README.md by @qeleb in (#1058) + +## [0.2.26] + +- feat: Update llama.cpp to ggerganov/llama.cpp@f6793491b5af6da75edad34d6f503ef86d31b09f + +## [0.2.25] + +- feat(server): Multi model support by @D4ve-R in #931 +- feat(server): Support none defaulting to infinity for completions by @swg in #111 +- feat(server): Implement openai api compatible authentication by @docmeth2 in #1010 +- fix: text_offset of multi-token characters by @twaka in #1037 +- fix: ctypes bindings for kv override by @phiharri in #1011 +- fix: ctypes definitions of llama_kv_cache_view_update and llama_kv_cache_view_free. 
by @e-c-d in #1028 + +## [0.2.24] + +- feat: Update llama.cpp to ggerganov/llama.cpp@0e18b2e7d0b5c0a509ea40098def234b8d4a938a +- feat: Add offload_kqv option to llama and server by @abetlen in 095c65000642a3cf73055d7428232fb18b73c6f3 +- feat: n_ctx=0 now uses the n_ctx_train of the model by @DanieleMorotti in #1015 +- feat: logits_to_logprobs supports both 2-D and 3-D logits arrays by @kddubey in #1002 +- fix: Remove f16_kv, add offload_kqv fields in low level and llama apis by @brandonrobertz in #1019 +- perf: Don't convert logprobs arrays to lists by @kddubey in #1021 +- docs: Fix README.md functionary demo typo by @evelynmitchell in #996 +- examples: Update low_level_api_llama_cpp.py to match current API by @jsoma in #1023 + +## [0.2.23] + +- Update llama.cpp to ggerganov/llama.cpp@948ff137ec37f1ec74c02905917fa0afc9b97514 +- Add qwen chat format by @yhfgyyf in #1005 +- Add support for running the server with SSL by @rgerganov in #994 +- Replace logits_to_logprobs implementation with numpy equivalent to llama.cpp by @player1537 in #991 +- Fix UnsupportedOperation: fileno in suppress_stdout_stderr by @zocainViken in #961 +- Add Pygmalion chat format by @chiensen in #986 +- README.md multimodal params fix by @zocainViken in #967 +- Fix minor typo in README by @aniketmaurya in #958 + +## [0.2.22] + +- Update llama.cpp to ggerganov/llama.cpp@8a7b2fa528f130631a5f43648481596ab320ed5a +- Fix conflict with transformers library by kddubey in #952 + +## [0.2.21] + +- Update llama.cpp to ggerganov/llama.cpp@64e64aa2557d97490b2fe1262b313e2f4a1607e3 +- Make building llava optional by setting `CMAKE_ARGS="-DLLAVA_BUILD=OFF"` and using `LLAVA_CPP_LIB` to specify alternative path to shared library by @abetlen in e3941d9c674dbd9891dc3ceda390daeb21f05fd1 + +## [0.2.20] + +- Update llama.cpp to ggerganov/llama.cpp@b38a16dfcff88d547f78f52d1bea31b84a05aff7 +- Add `zephyr` chat format by @fakerybakery in #938 +- Add `baichuan` chat format by @caiyesd in #938 +- Add `baichuan-2` chat format by @caiyesd in #936 +- Improve documentation for server chat formats by @jooray in #934 +- Fix typo in README by @antonvice in 940 +- Fix typo in the Open Orca chat format by @gardner in #947 + +## [0.2.19] + +- Update llama.cpp to ggerganov/llama.cpp@0b871f1a04ef60e114bbe43004fd9c21114e802d +- Fix #569: stop parameter in chat completion api should accept str by @abetlen in 128dc4731fa846ead7e684a137ca57d8931b8899 +- Document server host and port parameters by @jamesbraza in #768 +- Do not set grammar to None when initializing LlamaGrammar by @mthuurne in #834 +- Add mistrallite, intel, and openchat formats by @fakerybakery in #927 +- Add support for min_p parameter by @tk-master in #921 +- Fix #929: tokenizer adding leading space when generating from empty prompt by @abetlen in a34d48014192771d2e308a76c22f33bc0318d983 +- Fix low level api example by @zocainViken in #925 +- Fix missing package in openblas docker image by @ZisisTsatsas in #920 + +## [0.2.18] + +- Update llama.cpp to ggerganov/llama.cpp@6bb4908a17150b49373b5f977685b2e180a04f6f + +## [0.2.17] + +- Update llama.cpp to ggerganov/llama.cpp@df9d1293defe783f42bc83af732d3c670552c541 +- Hotfix: Set `CUDA_ARCHITECTURES=OFF` for `llava_shared` target on Windows by @abetlen in 4388f3341413110217b98c4f097ac5c590bdf40b + +## [0.2.16] + +- Update llama.cpp to ggerganov/llama.cp@a75fa576abba9d37f463580c379e4bbf1e1ad03c +- Add `set_seed` to `Llama` class by @abetlen in fd41ed3a908761d286102a019a34c2938a15118d +- Fix server doc arguments by @kjunggithub in #892 +- Fix 
response_format handler in llava chat handler by @abetlen in b62c44983921197ed10a7d29dc4ba920e9979380 +- Fix default max_tokens, chat completion is now unlimited (to context length) and completion is 16 tokens to match OpenAI defaults by @abetlen in e7962d2c733cbbeec5a37392c81f64185a9a39e8 +- Fix json_schema_to_gbnf helper so that it takes a json schema string as input instead by @abetlen in faeae181b1e868643c0dc28fcf039f077baf0829 +- Add support for $ref and $def in json_schema_to_gbnf to handle more complex function schemas by @abetlen in 770df344369c0630df1be14be9f9e301e7c56d24 +- Update functionary chat handler for new OpenAI api by abetlen in 1b376c62b775b401653facf25a519d116aafe99a +- Fix add default stop sequence to chatml chat format by @abetlen in b84d76a844149216d511cfd8cdb9827148a1853c +- Fix sampling bug when logits_all=False by @abetlen in 6f0b0b1b840af846938ed74d0e8170a91c40e617 + +## [0.2.15] + +- Update llama.cpp to ggerganov/llama.cpp@0a7c980b6f94a049cb804573df2d8092a34df8e4 +- Add support for Llava1.5 multimodal models by @damian0815 and @abetlen in #821 +- Update OpenAI API compatibility to match dev day update by @abetlen in #821 +- Add seed parameter to completion and chat_completion functions of Llama class by @abetlen in 86aeb9f3a14808575d2bb0076e6acb4a30907e6a +- Add JSON mode support to constrain chat completion to JSON objects by @abetlen in b30b9c338bf9af316d497ea501d39f5c246900db + +## [0.2.14] + +- Update llama.cpp to ggerganov/llama.cpp@f0b30ef7dc1360922ccbea0a8cd3918ecf15eaa7 +- Add support for Huggingface Autotokenizer Chat Formats by @bioshazard and @abetlen in #790 and bbffdaebaa7bb04b543dbf683a07276087251f86 +- Fix llama-2 chat format by @earonesty in #869 +- Add support for functionary chat format by @abetlen in #784 +- Migrate inference from deprecated `llama_eval`API to `llama_batch` and `llama_decode` by @abetlen in #795 + +## [0.2.13] + +- Update llama.cpp to ggerganov/llama.cpp@51b2fc11f7f605fff49725a4540e9a6ef7b51b70 +- Fix name 'open' is not defined exception when deleting model by @abetlen in 011b95d7f34cbfc528af75a892757bd9a20838ab +- Fix tokenization of special characters by @antoine-lizee in #850 + +## [0.2.12] + +- Update llama.cpp to ggerganov/llama.cpp@50337961a678fce4081554b24e56e86b67660163 +- Fix missing `n_seq_id` in `llama_batch` by @NickAlgra in #842 +- Fix for shared libraries on Windows that start with `lib` prefix by @sujeendran in #848 +- Fix exception raised in `__del__` when freeing models by @cebtenzzre in #846 +- Performance improvement for logit bias by @zolastro in #851 +- Fix suffix check arbitrary code execution bug by @mtasic85 in #854 +- Fix typo in `function_call` parameter in `llama_types.py` by @akatora28 in #849 +- Fix streaming not returning `finish_reason` by @gmcgoldr in #798 +- Fix `n_gpu_layers` check to allow values less than 1 for server by @hxy9243 in #826 +- Supppress stdout and stderr when freeing model by @paschembri in #803 +- Fix `llama2` chat format by @delock in #808 +- Add validation for tensor_split size by @eric1932 #820 +- Print stack trace on server error by @abetlen in d6a130a052db3a50975a719088a9226abfebb266 +- Update docs for gguf by @johnccshen in #783 +- Add `chatml` chat format by @abetlen in 305482bd4156c70802fc054044119054806f4126 + +## [0.2.11] + +- Fix bug in `llama_model_params` object has no attribute `logits_all` by @abetlen in d696251fbe40015e8616ea7a7d7ad5257fd1b896 + +## [0.2.10] + +- Fix bug 'llama_model_params' object has no attribute 'embedding' by @abetlen in 
42bb721d64d744242f9f980f2b89d5a6e335b5e4 + +## [0.2.9] + +- Fix critical bug in pip installation of v0.2.8 due to `.git` directory in ac853e01e1a217a578080a4e1b851d2d08450adf + +## [0.2.8] + +- Update llama.cpp to ggerganov/llama.cpp@40e07a60f9ce06e79f3ccd4c903eba300fb31b5e +- Add configurable chat formats by @abetlen in #711 +- Fix rope scaling bug by @Josh-XT in #767 +- Fix missing numa parameter in server by @abetlen in d9bce17794d0dd6f7962d10aad768fedecf3ab89 + +## [0.2.7] + +- Update llama.cpp to ggerganov/llama.cpp@a98b1633d5a94d0aa84c7c16e1f8df5ac21fc850 +- Install required runtime dlls to package directory on windows by @abetlen in 8d75016549e2ff62a511b1119d966ffc0df5c77b +- Add openai-processing-ms to server response header by @Tradunsky in #748 +- Bump minimum version of scikit-build-core to 0.5.1 to fix msvc cmake issue by @abetlen in 1ed0f3ebe16993a0f961155aa4b2c85f1c68f668 +- Update `llama_types.py` to better match the openai api, old names are aliased to new ones by @abetlen in dbca136feaaf7f8b1182c4c3c90c32918b1d0bb3 + +## [0.2.6] + +- Update llama.cpp to 80291a1d02a07f7f66666fb576c5b1e75aa48b46 + +## [0.2.5] + +- Fix docker images missing starlette-context dependency by @abetlen in 22917989003c5e67623d54ab45affa1e0e475410 +- Fix loading dll in Windows Isolation Containers by @abetlen in 847466562573191efa655753d9252f308c4fbdb0 +- Fix build issue on m1 macs by @abetlen in dbd3a6d1ed8416a8fd800127251e730153afa305 +- Update docs to gguf and add hw acceleration docs for server by @jasonacox in #688 + +## [0.2.4] + +- Add NUMA support. **NOTE** low level api users must call llama_backend_init at the start of their programs by abetlen in f4090a0bb2a2a25acfe28d31c82cc1aa273bedee +- Fix tensor_split server cli argument by @abetlen in c4c440ba2dc86d9de728a751311fdd1c8e3756fa +- Made all `Llama` init parameters into keyword-only parameters by @abetlen in c8f9b8a734b5b040379bbd93995ba177affab1fe +- Added server params for `low_vram`, `main_gpu`, `lora_base`, and `lora_path` by @abetlen in 2920c4bf7ee1412d6bba7846e0e1b7ef6d34043b +- Removed server params for `rms_norm_eps` and `n_gqa` by @abetlen in 2920c4bf7ee1412d6bba7846e0e1b7ef6d34043b +- Fix boolean cli options by @abetlen in c999325e8e4507f6c6249dd2fb8de7f8bf57f71e and 0449d29b9f940e437231a07b9d56550226558bac +- Silence Pydantic Settings warnings about `model_alias` setting by @earonesty in #705 + +## [0.2.3] + +- Update llama.cpp to ggerganov/llama.cpp@71ca2fad7d6c0ef95ef9944fb3a1a843e481f314 +- Add X-Request-ID request header for mirroring custom IDs by @devrimcavusoglu in #703 +- Add pyproject extra for scikit-build-core to ensure compatible pathspec version by @abetlen in 6cfc54284b99ef1bff8193e2d5e483dbd89ada02 +- Fix issue with Literal and Optional cli arguments not working by @abetlen in #702 + +## [0.2.2] + +- Fix bug in pip install of v0.2.1 due to scikit-build-core removing all `.metal` files in the source distribution (see #701) + +## [0.2.1] + +- Fix bug in pip install of v0.2.0 due to .git folder being included in the source distribution (see #701) + +## [0.2.0] + +- Migrated to scikit-build-core build system by @abetlen in #499 +- Use `numpy` views for `LogitsProcessor` and `StoppingCriteria` instead of python lists by @abetlen in #499 +- Drop support for end-of-life Python3.7 by @abetlen in #499 +- Convert low level `llama.cpp` constants to use basic python types instead of `ctypes` types by @abetlen in #499 + +## [0.1.85] + +- Add `llama_cpp.__version__` attribute by @janvdp in #684 +- Fix low level api examples 
by @jbochi in #680 + +## [0.1.84] + +- Update llama.cpp + +## [0.1.83] + +- Update llama.cpp + +## [0.1.82] + +- Update llama.cpp + +## [0.1.81] + +- Update llama.cpp + +## [0.1.80] + +- Update llama.cpp + +## [0.1.79] + +- GGUF Support (breaking change requiring new model format) + +## [0.1.78] + +- Grammar based sampling via LlamaGrammar which can be passed to completions +- Make n_gpu_layers == -1 offload all layers + +## [0.1.77] + +- (llama.cpp) Update llama.cpp add support for LLaMa 2 70B +- (server) Add temporary n_gqa and rms_norm_eps parameters required for LLaMa 2 70B + +## [0.1.76] + +- (llama.cpp) Update llama.cpp add support for LLaMa 2 70B + +## [0.1.75] + +- Update llama.cpp + +## [0.1.74] + +- (server) OpenAI style error responses + +## [0.1.73] + +- (server) Add rope parameters to server settings + +## [0.1.72] + +- (llama.cpp) Update llama.cpp added custom_rope for extended context lengths + +## [0.1.71] + +- (llama.cpp) Update llama.cpp + +- (server) Fix several pydantic v2 migration bugs + +## [0.1.70] + +- (Llama.create_completion) Revert change so that `max_tokens` is not truncated to `context_size` in `create_completion` +- (server) Fixed changed settings field names from pydantic v2 migration + +## [0.1.69] + +- (server) Streaming requests can are now interrupted pre-maturely when a concurrent request is made. Can be controlled with the `interrupt_requests` setting. +- (server) Moved to fastapi v0.100.0 and pydantic v2 +- (docker) Added a new "simple" image that builds llama.cpp from source when started. +- (server) performance improvements by avoiding unnecessary memory allocations during sampling + +## [0.1.68] + +- (llama.cpp) Update llama.cpp + +## [0.1.67] + +- Fix performance bug in Llama model by pre-allocating memory tokens and logits. +- Fix bug in Llama model where the model was not free'd after use. + +## [0.1.66] + +- (llama.cpp) New model API + +- Performance issue during eval caused by looped np.concatenate call +- State pickling issue when saving cache to disk + +## [0.1.65] + +- (llama.cpp) Fix struct misalignment bug + +## [0.1.64] + +- (llama.cpp) Update llama.cpp +- Fix docs for seed. Set -1 for random. + +## [0.1.63] + +- (llama.cpp) Add full gpu utilisation in CUDA +- (llama.cpp) Add get_vocab +- (llama.cpp) Add low_vram parameter +- (server) Add logit_bias parameter + +## [0.1.62] + +- Metal support working +- Cache re-enabled + +## [0.1.61] + +- Fix broken pip installation + +## [0.1.60] + +NOTE: This release was deleted due to a bug with the packaging system that caused pip installations to fail. + +- Truncate max_tokens in create_completion so requested tokens doesn't exceed context size. +- Temporarily disable cache for completion requests + +## [v0.1.59] + +- (llama.cpp) k-quants support +- (server) mirostat sampling parameters to server +- Support both `.so` and `.dylib` for `libllama` on MacOS + +## [v0.1.58] + +- (llama.cpp) Metal Silicon support + +## [v0.1.57] + +- (llama.cpp) OpenLlama 3B support + +## [v0.1.56] + +- (misc) Added first version of the changelog +- (server) Use async routes +- (python-api) Use numpy for internal buffers to reduce memory usage and improve performance. +- (python-api) Performance bug in stop sequence check slowing down streaming. 
diff --git a/CMakeLists.txt b/CMakeLists.txt index 27e06ac1f..b9178e856 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,27 +1,180 @@ -cmake_minimum_required(VERSION 3.4...3.22) +cmake_minimum_required(VERSION 3.21) project(llama_cpp) -if (UNIX) - add_custom_command( - OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so - COMMAND make libllama.so - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp - ) - add_custom_target( - run ALL - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so +option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python package" ON) +option(LLAVA_BUILD "Build llava shared library and install alongside python package" ON) + +function(llama_cpp_python_install_target target) + if(NOT TARGET ${target}) + return() + endif() + + install( + TARGETS ${target} + LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + RUNTIME DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + ARCHIVE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + FRAMEWORK DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + RESOURCE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib ) install( - FILES ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/libllama.so - DESTINATION llama_cpp + TARGETS ${target} + LIBRARY DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + RUNTIME DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + ARCHIVE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + FRAMEWORK DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + RESOURCE DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib ) -else() + set_target_properties(${target} PROPERTIES + INSTALL_RPATH "$ORIGIN" + BUILD_WITH_INSTALL_RPATH TRUE + ) + if(UNIX) + if(APPLE) + set_target_properties(${target} PROPERTIES + INSTALL_RPATH "@loader_path" + BUILD_WITH_INSTALL_RPATH TRUE + ) + else() + set_target_properties(${target} PROPERTIES + INSTALL_RPATH "$ORIGIN" + BUILD_WITH_INSTALL_RPATH TRUE + ) + endif() + endif() +endfunction() + +if (LLAMA_BUILD) set(BUILD_SHARED_LIBS "On") + + set(CMAKE_SKIP_BUILD_RPATH FALSE) + + # When building, don't use the install RPATH already + # (but later on when installing) + set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) + + # Add the automatically determined parts of the RPATH + # which point to directories outside the build tree to the install RPATH + set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + set(CMAKE_SKIP_RPATH FALSE) + + # Enable building of the common library + set(LLAMA_BUILD_COMMON ON CACHE BOOL "Build llama.cpp common library" FORCE) + + # Disable building curl support + set(LLAMA_CURL OFF CACHE BOOL "llama.cpp: enable curl" FORCE) + + # Architecture detection and settings for Apple platforms + if (APPLE) + # Get the target architecture + execute_process( + COMMAND uname -m + OUTPUT_VARIABLE HOST_ARCH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + # If CMAKE_OSX_ARCHITECTURES is not set, use the host architecture + if(NOT CMAKE_OSX_ARCHITECTURES) + set(CMAKE_OSX_ARCHITECTURES ${HOST_ARCH} CACHE STRING "Build architecture for macOS" FORCE) + endif() + + message(STATUS "Host architecture: ${HOST_ARCH}") + message(STATUS "Target architecture: ${CMAKE_OSX_ARCHITECTURES}") + + # Configure based on target architecture + if(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64") + # Intel Mac settings + set(GGML_AVX "OFF" CACHE BOOL "ggml: enable AVX" FORCE) + set(GGML_AVX2 "OFF" CACHE BOOL "ggml: enable AVX2" FORCE) + set(GGML_FMA "OFF" CACHE BOOL "ggml: enable FMA" FORCE) + set(GGML_F16C "OFF" CACHE BOOL "ggml: enable F16C" FORCE) + endif() 
+ + # Metal settings (enable for both architectures) + set(GGML_METAL "ON" CACHE BOOL "ggml: enable Metal" FORCE) + set(GGML_METAL_EMBED_LIBRARY "ON" CACHE BOOL "ggml: embed metal library" FORCE) + endif() + add_subdirectory(vendor/llama.cpp) - install( - TARGETS llama - LIBRARY DESTINATION llama_cpp - RUNTIME DESTINATION llama_cpp - ) -endif(UNIX) + llama_cpp_python_install_target(llama) + llama_cpp_python_install_target(ggml) + + llama_cpp_python_install_target(ggml-base) + + llama_cpp_python_install_target(ggml-amx) + llama_cpp_python_install_target(ggml-blas) + llama_cpp_python_install_target(ggml-can) + llama_cpp_python_install_target(ggml-cpu) + llama_cpp_python_install_target(ggml-cuda) + llama_cpp_python_install_target(ggml-hip) + llama_cpp_python_install_target(ggml-kompute) + llama_cpp_python_install_target(ggml-metal) + llama_cpp_python_install_target(ggml-musa) + llama_cpp_python_install_target(ggml-rpc) + llama_cpp_python_install_target(ggml-sycl) + llama_cpp_python_install_target(ggml-vulkan) + + # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563 + if (WIN32) + install( + FILES $ + DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + ) + install( + FILES $ + DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + ) + install( + FILES $ + DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + ) + install( + FILES $ + DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + ) + endif() + + if (LLAVA_BUILD) + if (LLAMA_CUBLAS OR LLAMA_CUDA) + add_compile_definitions(GGML_USE_CUBLAS) + add_compile_definitions(GGML_USE_CUDA) + endif() + + if (LLAMA_METAL) + add_compile_definitions(GGML_USE_METAL) + endif() + + # Building llava + add_subdirectory(vendor/llama.cpp/tools/mtmd) + set_target_properties(llava_shared PROPERTIES OUTPUT_NAME "llava") + + if (WIN32) + set_target_properties(llava_shared PROPERTIES CUDA_ARCHITECTURES OFF) + endif() + llama_cpp_python_install_target(llava_shared) + if (WIN32) + install( + FILES $ + DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib + ) + install( + FILES $ + DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib + ) + endif() + + # Fix for llava build: Add include directory for llama.h + # Move these commands after the add_subdirectory call + target_include_directories(llava PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include) + target_include_directories(llava PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include) + + if (BUILD_SHARED_LIBS) + target_include_directories(llava_shared PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include) + target_include_directories(llava_shared PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include) + endif() + + target_include_directories(llama-llava-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include) + target_include_directories(llama-minicpmv-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include) + endif() +endif() diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..26ddf2c7a --- /dev/null +++ b/Makefile @@ -0,0 +1,97 @@ +update: + poetry install + git submodule update --init --recursive + +update.vendor: + cd vendor/llama.cpp && git pull origin master + +deps: + python3 -m pip install --upgrade pip + python3 -m pip install -e ".[all]" + +build: + python3 -m pip install --verbose -e . 
+ +build.debug: + python3 -m pip install \ + --verbose \ + --config-settings=cmake.verbose=true \ + --config-settings=logging.level=INFO \ + --config-settings=install.strip=false \ + --config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-ggdb -O0';-DCMAKE_CXX_FLAGS='-ggdb -O0'" \ + --editable . + +build.debug.extra: + python3 -m pip install \ + --verbose \ + --config-settings=cmake.verbose=true \ + --config-settings=logging.level=INFO \ + --config-settings=install.strip=false \ + --config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-fsanitize=address -ggdb -O0';-DCMAKE_CXX_FLAGS='-fsanitize=address -ggdb -O0'" \ + --editable . + +build.cuda: + CMAKE_ARGS="-DGGML_CUDA=on" python3 -m pip install --verbose -e . + +build.openblas: + CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" python3 -m pip install --verbose -e . + +build.blis: + CMAKE_ARGS="-DGGML_BLAS=on -DGGML_BLAS_VENDOR=FLAME" python3 -m pip install --verbose -e . + +build.metal: + CMAKE_ARGS="-DGGML_METAL=on" python3 -m pip install --verbose -e . + +build.vulkan: + CMAKE_ARGS="-DGGML_VULKAN=on" python3 -m pip install --verbose -e . + +build.kompute: + CMAKE_ARGS="-DGGML_KOMPUTE=on" python3 -m pip install --verbose -e . + +build.sycl: + CMAKE_ARGS="-DGGML_SYCL=on" python3 -m pip install --verbose -e . + +build.rpc: + CMAKE_ARGS="-DGGML_RPC=on" python3 -m pip install --verbose -e . + +build.sdist: + python3 -m build --sdist --verbose + +deploy.pypi: + python3 -m twine upload dist/* + +deploy.gh-docs: + mkdocs build + mkdocs gh-deploy + +test: + python3 -m pytest --full-trace -v + +docker: + docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile . + +run-server: + python3 -m llama_cpp.server --model ${MODEL} + +clean: + - cd vendor/llama.cpp && make clean + - cd vendor/llama.cpp && rm libllama.so + - rm -rf _skbuild + - rm llama_cpp/lib/*.so + - rm llama_cpp/lib/*.dylib + - rm llama_cpp/lib/*.metal + - rm llama_cpp/lib/*.dll + - rm llama_cpp/lib/*.lib + +.PHONY: \ + update \ + update.vendor \ + build \ + build.cuda \ + build.opencl \ + build.openblas \ + build.sdist \ + deploy.pypi \ + deploy.gh-docs \ + docker \ + clean diff --git a/README.md b/README.md index 0c84c1f9d..e00456580 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,302 @@ -# 🦙 Python Bindings for `llama.cpp` +

+ +

-[![Documentation](https://img.shields.io/badge/docs-passing-green.svg)](https://abetlen.github.io/llama-cpp-python) +# Python Bindings for [`llama.cpp`](https://github.com/ggerganov/llama.cpp) + +[![Documentation Status](https://readthedocs.org/projects/llama-cpp-python/badge/?version=latest)](https://llama-cpp-python.readthedocs.io/en/latest/?badge=latest) [![Tests](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml/badge.svg?branch=main)](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml) [![PyPI](https://img.shields.io/pypi/v/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) [![PyPI - License](https://img.shields.io/pypi/l/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) +[![Github All Releases](https://img.shields.io/github/downloads/abetlen/llama-cpp-python/total.svg?label=Github%20Downloads)]() Simple Python bindings for **@ggerganov's** [`llama.cpp`](https://github.com/ggerganov/llama.cpp) library. This package provides: - Low-level access to C API via `ctypes` interface. - High-level Python API for text completion - - OpenAI-like API - - LangChain compatibility + - OpenAI-like API + - [LangChain compatibility](https://python.langchain.com/docs/integrations/llms/llamacpp) + - [LlamaIndex compatibility](https://docs.llamaindex.ai/en/stable/examples/llm/llama_2_llama_cpp.html) +- OpenAI compatible web server + - [Local Copilot replacement](https://llama-cpp-python.readthedocs.io/en/latest/server/#code-completion) + - [Function Calling support](https://llama-cpp-python.readthedocs.io/en/latest/server/#function-calling) + - [Vision API support](https://llama-cpp-python.readthedocs.io/en/latest/server/#multimodal-models) + - [Multiple Models](https://llama-cpp-python.readthedocs.io/en/latest/server/#configuration-and-multi-model-support) + +Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest). ## Installation -Install from PyPI: +Requirements: + + - Python 3.8+ + - C compiler + - Linux: gcc or clang + - Windows: Visual Studio or MinGW + - MacOS: Xcode + +To install the package, run: ```bash pip install llama-cpp-python ``` +This will also build `llama.cpp` from source and install it alongside this python package. + +If this fails, add `--verbose` to the `pip install` see the full cmake build log. + +**Pre-built Wheel (New)** + +It is also possible to install a pre-built wheel with basic CPU support. + +```bash +pip install llama-cpp-python \ + --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu +``` + +### Installation Configuration + +`llama.cpp` supports a number of hardware acceleration backends to speed up inference as well as backend specific options. See the [llama.cpp README](https://github.com/ggerganov/llama.cpp#build) for a full list. + +All `llama.cpp` cmake build options can be set via the `CMAKE_ARGS` environment variable or via the `--config-settings / -C` cli flag during installation. + +
+<details>
+<summary>Environment Variables</summary>
+
+```bash
+# Linux and Mac
+CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" \
+  pip install llama-cpp-python
+```
+
+```powershell
+# Windows
+$env:CMAKE_ARGS = "-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS"
+pip install llama-cpp-python
+```
+</details>
+ +
+<details>
+<summary>CLI / requirements.txt</summary>
+
+They can also be set via the `pip install -C / --config-settings` command and saved to a `requirements.txt` file:
+
+```bash
+pip install --upgrade pip # ensure pip is up to date
+pip install llama-cpp-python \
+  -C cmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS"
+```
+
+```txt
+# requirements.txt
+
+llama-cpp-python -C cmake.args="-DGGML_BLAS=ON;-DGGML_BLAS_VENDOR=OpenBLAS"
+```
+
+</details>
+ +### Supported Backends + +Below are some common backends, their build commands and any additional environment variables required. + +
+<details>
+<summary>OpenBLAS (CPU)</summary>
+
+To install with OpenBLAS, set the `GGML_BLAS` and `GGML_BLAS_VENDOR` environment variables before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
+```
+</details>
+ +
+<details>
+<summary>CUDA</summary>
+
+To install with CUDA support, set the `GGML_CUDA=on` environment variable before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_CUDA=on" pip install llama-cpp-python
+```
+
+**Pre-built Wheel (New)**
+
+It is also possible to install a pre-built wheel with CUDA support, as long as your system meets these requirements:
+
+- CUDA Version is 12.1, 12.2, 12.3, 12.4 or 12.5
+- Python Version is 3.10, 3.11 or 3.12
+
+```bash
+pip install llama-cpp-python \
+  --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/<cuda-version>
+```
+
+Where `<cuda-version>` is one of the following:
+- `cu121`: CUDA 12.1
+- `cu122`: CUDA 12.2
+- `cu123`: CUDA 12.3
+- `cu124`: CUDA 12.4
+- `cu125`: CUDA 12.5
+
+For example, to install the CUDA 12.1 wheel:
+
+```bash
+pip install llama-cpp-python \
+  --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
+```
+
+Once installed, see the short sketch just below this section for offloading model layers to the GPU.
+</details>
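+After a CUDA-enabled build is installed, layers are offloaded to the GPU at load time via the `n_gpu_layers` parameter of the `Llama` class (as also noted in the high-level API example). A minimal sketch; the model path is a placeholder:
+
+```python
+from llama_cpp import Llama
+
+# -1 offloads all model layers to the GPU; smaller values offload fewer layers.
+llm = Llama(
+    model_path="./models/7B/llama-model.gguf",  # placeholder path
+    n_gpu_layers=-1,
+)
+```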
+ +
+<details>
+<summary>Metal</summary>
+
+To install with Metal (MPS), set the `GGML_METAL=on` environment variable before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_METAL=on" pip install llama-cpp-python
+```
+
+**Pre-built Wheel (New)**
+
+It is also possible to install a pre-built wheel with Metal support, as long as your system meets these requirements:
+
+- MacOS Version is 11.0 or later
+- Python Version is 3.10, 3.11 or 3.12
+
+```bash
+pip install llama-cpp-python \
+  --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/metal
+```
+
+</details>
+ +
+<details>
+<summary>hipBLAS (ROCm)</summary>
+
+To install with hipBLAS / ROCm support for AMD cards, set the `GGML_HIPBLAS=on` environment variable before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_HIPBLAS=on" pip install llama-cpp-python
+```
+
+</details>
+ +
+<details>
+<summary>Vulkan</summary>
+
+To install with Vulkan support, set the `GGML_VULKAN=on` environment variable before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_VULKAN=on" pip install llama-cpp-python
+```
+
+</details>
+ +
+<details>
+<summary>SYCL</summary>
+
+To install with SYCL support, set the `GGML_SYCL=on` environment variable before installing:
+
+```bash
+source /opt/intel/oneapi/setvars.sh
+CMAKE_ARGS="-DGGML_SYCL=on -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx" pip install llama-cpp-python
+```
+</details>
+ +
+<details>
+<summary>RPC</summary>
+
+To install with RPC support, set the `GGML_RPC=on` environment variable before installing:
+
+```bash
+CMAKE_ARGS="-DGGML_RPC=on" pip install llama-cpp-python
+```
+</details>
+ + +### Windows Notes + +
+<details>
+<summary>Error: Can't find 'nmake' or 'CMAKE_C_COMPILER'</summary>
+
+If you run into issues where it complains it can't find `'nmake'` or `CMAKE_C_COMPILER`, you can extract w64devkit as [mentioned in the llama.cpp repo](https://github.com/ggerganov/llama.cpp#openblas) and add those paths manually to `CMAKE_ARGS` before running `pip install`:
+
+```ps
+$env:CMAKE_GENERATOR = "MinGW Makefiles"
+$env:CMAKE_ARGS = "-DGGML_OPENBLAS=on -DCMAKE_C_COMPILER=C:/w64devkit/bin/gcc.exe -DCMAKE_CXX_COMPILER=C:/w64devkit/bin/g++.exe"
+```
+
+See the above instructions and set `CMAKE_ARGS` to the BLAS backend you want to use.
+</details>
+ +### MacOS Notes + +Detailed MacOS Metal GPU install documentation is available at [docs/install/macos.md](https://llama-cpp-python.readthedocs.io/en/latest/install/macos/) + +
+<details>
+<summary>M1 Mac Performance Issue</summary>
+
+Note: If you are using an Apple Silicon (M1) Mac, make sure you have installed a version of Python that supports the arm64 architecture. For example:
+
+```bash
+wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh
+bash Miniforge3-MacOSX-arm64.sh
+```
+
+Otherwise, the installation will build the x86_64 version of llama.cpp, which will be 10x slower on Apple Silicon (M1) Macs. You can confirm which architecture your Python build targets with the snippet just below this section.
+</details>
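+A quick way to check which architecture your Python build targets, using only the standard library:
+
+```python
+import platform
+
+# Prints "arm64" for an Apple Silicon-native Python build;
+# "x86_64" means Python is running under Rosetta and llama.cpp will be built for x86.
+print(platform.machine())
+```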
+ +
+<details>
+<summary>M Series Mac Error: `(mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64'))`</summary>
+
+Try installing with:
+
+```bash
+CMAKE_ARGS="-DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_APPLE_SILICON_PROCESSOR=arm64 -DGGML_METAL=on" pip install --upgrade --verbose --force-reinstall --no-cache-dir llama-cpp-python
+```
+</details>
+ +### Upgrading and Reinstalling + +To upgrade and rebuild `llama-cpp-python` add `--upgrade --force-reinstall --no-cache-dir` flags to the `pip install` command to ensure the package is rebuilt from source. + ## High-level API +[API Reference](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#high-level-api) + +The high-level API provides a simple managed interface through the [`Llama`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama) class. + +Below is a short example demonstrating how to use the high-level API to for basic text completion: + +```python +from llama_cpp import Llama + +llm = Llama( + model_path="./models/7B/llama-model.gguf", + # n_gpu_layers=-1, # Uncomment to use GPU acceleration + # seed=1337, # Uncomment to set a specific seed + # n_ctx=2048, # Uncomment to increase the context window +) +output = llm( + "Q: Name the planets in the solar system? A: ", # Prompt + max_tokens=32, # Generate up to 32 tokens, set to None to generate up to the end of the context window + stop=["Q:", "\n"], # Stop generating just before the model would generate a new question + echo=True # Echo the prompt back in the output +) # Generate a completion, can also call create_completion +print(output) +``` + +By default `llama-cpp-python` generates completions in an OpenAI compatible format: + ```python ->>> from llama_cpp import Llama ->>> llm = Llama(model_path="models/7B/...") ->>> output = llm("Q: Name the planets in the solar system? A: ", max_tokens=32, stop=["Q:", "\n"], echo=True) ->>> print(output) { "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "object": "text_completion", "created": 1679561337, - "model": "models/7B/...", + "model": "./models/7B/llama-model.gguf", "choices": [ { "text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.", @@ -51,7 +313,342 @@ pip install llama-cpp-python } ``` -## Web Server +Text completion is available through the [`__call__`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__call__) and [`create_completion`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion) methods of the [`Llama`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama) class. + +### Pulling models from Hugging Face Hub + +You can download `Llama` models in `gguf` format directly from Hugging Face using the [`from_pretrained`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.from_pretrained) method. +You'll need to install the `huggingface-hub` package to use this feature (`pip install huggingface-hub`). + +```python +llm = Llama.from_pretrained( + repo_id="Qwen/Qwen2-0.5B-Instruct-GGUF", + filename="*q8_0.gguf", + verbose=False +) +``` + +By default [`from_pretrained`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.from_pretrained) will download the model to the huggingface cache directory, you can then manage installed model files with the [`huggingface-cli`](https://huggingface.co/docs/huggingface_hub/en/guides/cli) tool. + +### Chat Completion + +The high-level API also provides a simple interface for chat completion. + +Chat completion requires that the model knows how to format the messages into a single prompt. +The `Llama` class does this using pre-registered chat formats (ie. `chatml`, `llama-2`, `gemma`, etc) or by providing a custom chat handler object. 
+ +The model will will format the messages into a single prompt using the following order of precedence: + - Use the `chat_handler` if provided + - Use the `chat_format` if provided + - Use the `tokenizer.chat_template` from the `gguf` model's metadata (should work for most new models, older models may not have this) + - else, fallback to the `llama-2` chat format + +Set `verbose=True` to see the selected chat format. + +```python +from llama_cpp import Llama +llm = Llama( + model_path="path/to/llama-2/llama-model.gguf", + chat_format="llama-2" +) +llm.create_chat_completion( + messages = [ + {"role": "system", "content": "You are an assistant who perfectly describes images."}, + { + "role": "user", + "content": "Describe this image in detail please." + } + ] +) +``` + +Chat completion is available through the [`create_chat_completion`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_chat_completion) method of the [`Llama`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama) class. + +For OpenAI API v1 compatibility, you use the [`create_chat_completion_openai_v1`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_chat_completion_openai_v1) method which will return pydantic models instead of dicts. + + +### JSON and JSON Schema Mode + +To constrain chat responses to only valid JSON or a specific JSON Schema use the `response_format` argument in [`create_chat_completion`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_chat_completion). + +#### JSON Mode + +The following example will constrain the response to valid JSON strings only. + +```python +from llama_cpp import Llama +llm = Llama(model_path="path/to/model.gguf", chat_format="chatml") +llm.create_chat_completion( + messages=[ + { + "role": "system", + "content": "You are a helpful assistant that outputs in JSON.", + }, + {"role": "user", "content": "Who won the world series in 2020"}, + ], + response_format={ + "type": "json_object", + }, + temperature=0.7, +) +``` + +#### JSON Schema Mode + +To constrain the response further to a specific JSON Schema add the schema to the `schema` property of the `response_format` argument. + +```python +from llama_cpp import Llama +llm = Llama(model_path="path/to/model.gguf", chat_format="chatml") +llm.create_chat_completion( + messages=[ + { + "role": "system", + "content": "You are a helpful assistant that outputs in JSON.", + }, + {"role": "user", "content": "Who won the world series in 2020"}, + ], + response_format={ + "type": "json_object", + "schema": { + "type": "object", + "properties": {"team_name": {"type": "string"}}, + "required": ["team_name"], + }, + }, + temperature=0.7, +) +``` + +### Function Calling + +The high-level API supports OpenAI compatible function and tool calling. This is possible through the `functionary` pre-trained models chat format or through the generic `chatml-function-calling` chat format. + +```python +from llama_cpp import Llama +llm = Llama(model_path="path/to/chatml/llama-model.gguf", chat_format="chatml-function-calling") +llm.create_chat_completion( + messages = [ + { + "role": "system", + "content": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
The assistant calls functions with appropriate input when necessary" + + }, + { + "role": "user", + "content": "Extract Jason is 25 years old" + } + ], + tools=[{ + "type": "function", + "function": { + "name": "UserDetail", + "parameters": { + "type": "object", + "title": "UserDetail", + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "age": { + "title": "Age", + "type": "integer" + } + }, + "required": [ "name", "age" ] + } + } + }], + tool_choice={ + "type": "function", + "function": { + "name": "UserDetail" + } + } +) +``` + +
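+When the model emits a tool call (or one is forced via `tool_choice` as above), the arguments come back as a JSON-encoded string inside an OpenAI-style response dict. A minimal sketch of reading them, assuming `response` holds the return value of the `create_chat_completion` call above (the fields follow the OpenAI tool-calling layout):
+
+```python
+import json
+
+# `response` is assumed to be the dict returned by create_chat_completion(...) above.
+message = response["choices"][0]["message"]
+for tool_call in message.get("tool_calls") or []:
+    # Arguments arrive as a JSON string, e.g. '{ "name": "Jason", "age": 25 }'.
+    args = json.loads(tool_call["function"]["arguments"])
+    print(tool_call["function"]["name"], args)
+```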
+<details>
+<summary>Functionary v2</summary>
+
+The various gguf-converted files for this set of models can be found [here](https://huggingface.co/meetkai). Functionary is able to intelligently call functions and also analyze any provided function outputs to generate coherent responses. All v2 models of Functionary support **parallel function calling**. You can provide either `functionary-v1` or `functionary-v2` for the `chat_format` when initializing the Llama class.
+
+Due to discrepancies between llama.cpp and HuggingFace's tokenizers, you must provide the HF tokenizer for Functionary. The `LlamaHFTokenizer` class can be initialized and passed into the `Llama` class; this overrides the default llama.cpp tokenizer. The tokenizer files are already included in the respective HF repositories hosting the gguf files.
+
+```python
+from llama_cpp import Llama
+from llama_cpp.llama_tokenizer import LlamaHFTokenizer
+llm = Llama.from_pretrained(
+  repo_id="meetkai/functionary-small-v2.2-GGUF",
+  filename="functionary-small-v2.2.q4_0.gguf",
+  chat_format="functionary-v2",
+  tokenizer=LlamaHFTokenizer.from_pretrained("meetkai/functionary-small-v2.2-GGUF")
+)
+```
+
+**NOTE**: There is no need to provide the default system messages used in Functionary as they are added automatically in the Functionary chat handler. Thus, the messages should contain just the chat messages and/or system messages that provide additional context for the model (e.g. datetime, etc.).
+</details>
+
+### Multi-modal Models
+
+`llama-cpp-python` supports multi-modal models such as llava1.5, which allow the language model to read information from both text and images.
+
+Below are the supported multi-modal models and their respective chat handlers (Python API) and chat formats (Server API).
+
+| Model | `LlamaChatHandler` | `chat_format` |
+|:--- |:--- |:--- |
+| [llava-v1.5-7b](https://huggingface.co/mys/ggml_llava-v1.5-7b) | `Llava15ChatHandler` | `llava-1-5` |
+| [llava-v1.5-13b](https://huggingface.co/mys/ggml_llava-v1.5-13b) | `Llava15ChatHandler` | `llava-1-5` |
+| [llava-v1.6-34b](https://huggingface.co/cjpais/llava-v1.6-34B-gguf) | `Llava16ChatHandler` | `llava-1-6` |
+| [moondream2](https://huggingface.co/vikhyatk/moondream2) | `MoondreamChatHandler` | `moondream2` |
+| [nanollava](https://huggingface.co/abetlen/nanollava-gguf) | `NanollavaChatHandler` | `nanollava` |
+| [llama-3-vision-alpha](https://huggingface.co/abetlen/llama-3-vision-alpha-gguf) | `Llama3VisionAlphaChatHandler` | `llama-3-vision-alpha` |
+| [minicpm-v-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) | `MiniCPMv26ChatHandler` | `minicpm-v-2.6` |
+
+Then you'll need to use a custom chat handler to load the clip model and process the chat messages and images.
+
+```python
+from llama_cpp import Llama
+from llama_cpp.llama_chat_format import Llava15ChatHandler
+chat_handler = Llava15ChatHandler(clip_model_path="path/to/llava/mmproj.bin")
+llm = Llama(
+    model_path="./path/to/llava/llama-model.gguf",
+    chat_handler=chat_handler,
+    n_ctx=2048,  # n_ctx should be increased to accommodate the image embedding
+)
+llm.create_chat_completion(
+    messages = [
+        {"role": "system", "content": "You are an assistant who perfectly describes images."},
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "What's in this image?"},
+                {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"}}
+            ]
+        }
+    ]
+)
+```
+
+You can also pull the model from the Hugging Face Hub using the `from_pretrained` method.
+
+```python
+from llama_cpp import Llama
+from llama_cpp.llama_chat_format import MoondreamChatHandler
+
+chat_handler = MoondreamChatHandler.from_pretrained(
+    repo_id="vikhyatk/moondream2",
+    filename="*mmproj*",
+)
+
+llm = Llama.from_pretrained(
+    repo_id="vikhyatk/moondream2",
+    filename="*text-model*",
+    chat_handler=chat_handler,
+    n_ctx=2048,  # n_ctx should be increased to accommodate the image embedding
+)
+
+response = llm.create_chat_completion(
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "What's in this image?"},
+                {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"}}
+            ]
+        }
+    ]
+)
+print(response["choices"][0]["message"]["content"])
+```
+
+**Note**: Multi-modal models also support tool calling and JSON mode.
+
+<details>
+<summary>Loading a Local Image</summary>
+
+Images can be passed as base64 encoded data URIs. The following example demonstrates how to do this.
+
+```python
+import base64
+
+def image_to_base64_data_uri(file_path):
+    with open(file_path, "rb") as img_file:
+        base64_data = base64.b64encode(img_file.read()).decode('utf-8')
+        return f"data:image/png;base64,{base64_data}"
+
+# Replace 'file_path.png' with the actual path to your PNG file
+file_path = 'file_path.png'
+data_uri = image_to_base64_data_uri(file_path)
+
+messages = [
+    {"role": "system", "content": "You are an assistant who perfectly describes images."},
+    {
+        "role": "user",
+        "content": [
+            {"type": "image_url", "image_url": {"url": data_uri}},
+            {"type": "text", "text": "Describe this image in detail please."}
+        ]
+    }
+]
+```
+
+These `messages` can then be passed to `create_chat_completion` as shown in the sketch just below this section.
+</details>
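+A short usage sketch, assuming `llm` is a `Llama` instance created with one of the multi-modal chat handlers shown above and `messages` is the list built in the previous example:
+
+```python
+# `llm` is assumed to be a Llama instance with a multi-modal chat handler
+# (e.g. Llava15ChatHandler) and `messages` is the list built above.
+response = llm.create_chat_completion(messages=messages)
+print(response["choices"][0]["message"]["content"])
+```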
+ +### Speculative Decoding + +`llama-cpp-python` supports speculative decoding which allows the model to generate completions based on a draft model. + +The fastest way to use speculative decoding is through the `LlamaPromptLookupDecoding` class. + +Just pass this as a draft model to the `Llama` class during initialization. + +```python +from llama_cpp import Llama +from llama_cpp.llama_speculative import LlamaPromptLookupDecoding + +llama = Llama( + model_path="path/to/model.gguf", + draft_model=LlamaPromptLookupDecoding(num_pred_tokens=10) # num_pred_tokens is the number of tokens to predict 10 is the default and generally good for gpu, 2 performs better for cpu-only machines. +) +``` + +### Embeddings + +To generate text embeddings use [`create_embedding`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_embedding) or [`embed`](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.embed). Note that you must pass `embedding=True` to the constructor upon model creation for these to work properly. + +```python +import llama_cpp + +llm = llama_cpp.Llama(model_path="path/to/model.gguf", embedding=True) + +embeddings = llm.create_embedding("Hello, world!") + +# or create multiple embeddings at once + +embeddings = llm.create_embedding(["Hello, world!", "Goodbye, world!"]) +``` + +There are two primary notions of embeddings in a Transformer-style model: *token level* and *sequence level*. Sequence level embeddings are produced by "pooling" token level embeddings together, usually by averaging them or using the first token. + +Models that are explicitly geared towards embeddings will usually return sequence level embeddings by default, one for each input string. Non-embedding models such as those designed for text generation will typically return only token level embeddings, one for each token in each sequence. Thus the dimensionality of the return type will be one higher for token level embeddings. + +It is possible to control pooling behavior in some cases using the `pooling_type` flag on model creation. You can ensure token level embeddings from any model using `LLAMA_POOLING_TYPE_NONE`. The reverse, getting a generation oriented model to yield sequence level embeddings is currently not possible, but you can always do the pooling manually. + +### Adjusting the Context Window + +The context window of the Llama models determines the maximum number of tokens that can be processed at once. By default, this is set to 512 tokens, but can be adjusted based on your requirements. + +For instance, if you want to work with larger contexts, you can expand the context window by setting the n_ctx parameter when initializing the Llama object: + +```python +llm = Llama(model_path="./models/7B/llama-model.gguf", n_ctx=2048) +``` + +## OpenAI Compatible Web Server `llama-cpp-python` offers a web server which aims to act as a drop-in replacement for the OpenAI API. This allows you to use llama.cpp compatible models with any OpenAI compatible client (language libraries, services, etc). 
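+For example, once the server is running (see below), you can point the official `openai` Python client at it. This is a sketch only; it assumes the default host and port and that no API key has been configured, so the key value is a placeholder:
+
+```python
+from openai import OpenAI
+
+# Assumes a local server started with `python3 -m llama_cpp.server ...`
+# on the default host/port; the API key value is a placeholder.
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-no-key-required")
+
+response = client.chat.completions.create(
+    model="llama",  # informational when the server hosts a single model
+    messages=[{"role": "user", "content": "Name the planets in the solar system."}],
+)
+print(response.choices[0].message.content)
+```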
@@ -59,38 +656,142 @@ This allows you to use llama.cpp compatible models with any OpenAI compatible cl To install the server package and get started: ```bash -pip install llama-cpp-python[server] -export MODEL=./models/7B -python3 -m llama_cpp.server +pip install 'llama-cpp-python[server]' +python3 -m llama_cpp.server --model models/7B/llama-model.gguf +``` + +Similar to Hardware Acceleration section above, you can also install with GPU (cuBLAS) support like this: + +```bash +CMAKE_ARGS="-DGGML_CUDA=on" FORCE_CMAKE=1 pip install 'llama-cpp-python[server]' +python3 -m llama_cpp.server --model models/7B/llama-model.gguf --n_gpu_layers 35 ``` Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation. +To bind to `0.0.0.0` to enable remote connections, use `python3 -m llama_cpp.server --host 0.0.0.0`. +Similarly, to change the port (default is 8000), use `--port`. + +You probably also want to set the prompt format. For chatml, use + +```bash +python3 -m llama_cpp.server --model models/7B/llama-model.gguf --chat_format chatml +``` + +That will format the prompt according to how model expects it. You can find the prompt format in the model card. +For possible options, see [llama_cpp/llama_chat_format.py](llama_cpp/llama_chat_format.py) and look for lines starting with "@register_chat_format". + +If you have `huggingface-hub` installed, you can also use the `--hf_model_repo_id` flag to load a model from the Hugging Face Hub. + +```bash +python3 -m llama_cpp.server --hf_model_repo_id Qwen/Qwen2-0.5B-Instruct-GGUF --model '*q8_0.gguf' +``` + +### Web Server Features + +- [Local Copilot replacement](https://llama-cpp-python.readthedocs.io/en/latest/server/#code-completion) +- [Function Calling support](https://llama-cpp-python.readthedocs.io/en/latest/server/#function-calling) +- [Vision API support](https://llama-cpp-python.readthedocs.io/en/latest/server/#multimodal-models) +- [Multiple Models](https://llama-cpp-python.readthedocs.io/en/latest/server/#configuration-and-multi-model-support) + +## Docker image + +A Docker image is available on [GHCR](https://ghcr.io/abetlen/llama-cpp-python). To run the server: + +```bash +docker run --rm -it -p 8000:8000 -v /path/to/models:/models -e MODEL=/models/llama-model.gguf ghcr.io/abetlen/llama-cpp-python:latest +``` + +[Docker on termux (requires root)](https://gist.github.com/FreddieOliveira/efe850df7ff3951cb62d74bd770dce27) is currently the only known way to run this on phones, see [termux support issue](https://github.com/abetlen/llama-cpp-python/issues/389) + ## Low-level API -The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`. -The entire API can be found in [llama_cpp/llama_cpp.py](https://github.com/abetlen/llama-cpp-python/blob/master/llama_cpp/llama_cpp.py) and should mirror [llama.h](https://github.com/ggerganov/llama.cpp/blob/master/llama.h). +[API Reference](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#low-level-api) +The low-level API is a direct [`ctypes`](https://docs.python.org/3/library/ctypes.html) binding to the C API provided by `llama.cpp`. +The entire low-level API can be found in [llama_cpp/llama_cpp.py](https://github.com/abetlen/llama-cpp-python/blob/master/llama_cpp/llama_cpp.py) and directly mirrors the C API in [llama.h](https://github.com/ggerganov/llama.cpp/blob/master/llama.h). 
-# Documentation +Below is a short example demonstrating how to use the low-level API to tokenize a prompt: -Documentation is available at [https://abetlen.github.io/llama-cpp-python](https://abetlen.github.io/llama-cpp-python). +```python +import llama_cpp +import ctypes +llama_cpp.llama_backend_init(False) # Must be called once at the start of each program +params = llama_cpp.llama_context_default_params() +# use bytes for char * params +model = llama_cpp.llama_load_model_from_file(b"./models/7b/llama-model.gguf", params) +ctx = llama_cpp.llama_new_context_with_model(model, params) +max_tokens = params.n_ctx +# use ctypes arrays for array params +tokens = (llama_cpp.llama_token * int(max_tokens))() +n_tokens = llama_cpp.llama_tokenize(ctx, b"Q: Name the planets in the solar system? A: ", tokens, max_tokens, llama_cpp.c_bool(True)) +llama_cpp.llama_free(ctx) +``` + +Check out the [examples folder](examples/low_level_api) for more examples of using the low-level API. + +## Documentation + +Documentation is available via [https://llama-cpp-python.readthedocs.io/](https://llama-cpp-python.readthedocs.io/). If you find any issues with the documentation, please open an issue or submit a PR. -# Development +## Development This package is under active development and I welcome any contributions. -To get started, clone the repository and install the package in development mode: +To get started, clone the repository and install the package in editable / development mode: + +```bash +git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git +cd llama-cpp-python + +# Upgrade pip (required for editable mode) +pip install --upgrade pip + +# Install with pip +pip install -e . + +# if you want to use the fastapi / openapi server +pip install -e '.[server]' + +# to install all optional dependencies +pip install -e '.[all]' + +# to clear the local build cache +make clean +``` + +Now try running the tests + +```bash +pytest +``` + +There's a `Makefile` available with useful targets. +A typical workflow would look like this: ```bash -git clone git@github.com:abetlen/llama-cpp-python.git -git submodule update --init --recursive -# Will need to be re-run any time vendor/llama.cpp is updated -python3 setup.py develop +make build +make test ``` -# How does this compare to other Python bindings of `llama.cpp`? +You can also test out specific commits of `llama.cpp` by checking out the desired commit in the `vendor/llama.cpp` submodule and then running `make clean` and `pip install -e .` again. Any changes in the `llama.h` API will require +changes to the `llama_cpp/llama_cpp.py` file to match the new API (additional changes may be required elsewhere). + +## FAQ + +### Are there pre-built binaries / binary wheels available? + +The recommended installation method is to install from source as described above. +The reason for this is that `llama.cpp` is built with compiler optimizations that are specific to your system. +Using pre-built binaries would require disabling these optimizations or supporting a large number of pre-built binaries for each platform. + +That being said there are some pre-built binaries available through the Releases as well as some community provided wheels. + +In the future, I would like to provide pre-built binaries and wheels for common platforms and I'm happy to accept any useful contributions in this area. 
+This is currently being tracked in [#741](https://github.com/abetlen/llama-cpp-python/issues/741) + +### How does this compare to other Python bindings of `llama.cpp`? I originally wrote this package for my own use with two goals in mind: @@ -99,6 +800,6 @@ I originally wrote this package for my own use with two goals in mind: Any contributions and changes to this package will be made with these goals in mind. -# License +## License This project is licensed under the terms of the MIT license. diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..474503fdf --- /dev/null +++ b/docker/README.md @@ -0,0 +1,64 @@ +### Install Docker Server +> [!IMPORTANT] +> This was tested with Docker running on Linux.
If you can get it working on Windows or MacOS, please update this `README.md` with a PR!
+ +[Install Docker Engine](https://docs.docker.com/engine/install) + + +## Simple Dockerfiles for building the llama-cpp-python server with external model bin files +### openblas_simple +A simple Dockerfile for non-GPU OpenBLAS, where the model is located outside the Docker image: +``` +cd ./openblas_simple +docker build -t openblas_simple . +docker run --cap-add SYS_RESOURCE -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t openblas_simple +``` +where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system. + +### cuda_simple +> [!WARNING] +> Nvidia GPU CuBLAS support requires an Nvidia GPU with sufficient VRAM (approximately as much as the size in the table below) and Docker Nvidia support (see [container-toolkit/install-guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html))
+ +A simple Dockerfile for CUDA-accelerated CuBLAS, where the model is located outside the Docker image: + +``` +cd ./cuda_simple +docker build -t cuda_simple . +docker run --gpus=all --cap-add SYS_RESOURCE -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t cuda_simple +``` +where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system. + +-------------------------------------------------------------------------- + +### "Open-Llama-in-a-box" +Download an Apache V2.0 licensed 3B params Open LLaMA model and install it into a Docker image that runs an OpenBLAS-enabled llama-cpp-python server: +``` +$ cd ./open_llama +./build.sh +./start.sh +``` + +### Manually choose your own Llama model from Hugging Face +`python3 ./hug_model.py -a TheBloke -t llama` +You should now have a model in the current directory and `model.bin` symlinked to it for the subsequent Docker build and copy step, e.g.: +``` +docker $ ls -lh *.bin +-rw-rw-r-- 1 user user 4.8G May 23 18:30 q5_1.bin +lrwxrwxrwx 1 user user 24 May 23 18:30 model.bin -> q5_1.bin +``` + +> [!NOTE] +> Make sure you have enough disk space to download the model. As the model is then copied into the image, you will need at least +**TWICE** as much disk space as the size of the model:
+ +| Model | Quantized size | +|------:|----------------:| +| 3B | 3 GB | +| 7B | 5 GB | +| 13B | 10 GB | +| 33B | 25 GB | +| 65B | 50 GB | + + +> [!NOTE] +> If you want to pass or tune additional parameters, customise `./start_server.sh` before running `docker build ...` diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile new file mode 100644 index 000000000..0bbf20ffe --- /dev/null +++ b/docker/cuda_simple/Dockerfile @@ -0,0 +1,27 @@ +ARG CUDA_IMAGE="12.5.0-devel-ubuntu22.04" +FROM nvidia/cuda:${CUDA_IMAGE} + +# We need to set the host to 0.0.0.0 to allow outside access +ENV HOST 0.0.0.0 + +RUN apt-get update && apt-get upgrade -y \ + && apt-get install -y git build-essential \ + python3 python3-pip gcc wget \ + ocl-icd-opencl-dev opencl-headers clinfo \ + libclblast-dev libopenblas-dev \ + && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd + +COPY . . + +# setting build related env vars +ENV CUDA_DOCKER_ARCH=all +ENV GGML_CUDA=1 + +# Install depencencies +RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context + +# Install llama-cpp-python (build with cuda) +RUN CMAKE_ARGS="-DGGML_CUDA=on" pip install llama-cpp-python + +# Run the server +CMD python3 -m llama_cpp.server diff --git a/docker/open_llama/Dockerfile b/docker/open_llama/Dockerfile new file mode 100644 index 000000000..f05e66ef2 --- /dev/null +++ b/docker/open_llama/Dockerfile @@ -0,0 +1,53 @@ +# Define the image argument and provide a default value +ARG IMAGE=python:3-slim-bookworm + +# Use the image as specified +FROM ${IMAGE} + +# Re-declare the ARG after FROM +ARG IMAGE + +# Update and upgrade the existing packages +RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \ + python3 \ + python3-pip \ + ninja-build \ + build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context + +# Perform the conditional installations based on the image +RUN echo "Image: ${IMAGE}" && \ + if [ "${IMAGE}" = "python:3-slim-bookworm" ] ; then \ + echo "OpenBLAS install:" && \ + apt-get install -y --no-install-recommends libopenblas-dev && \ + CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python --verbose; \ +else \ + echo "CuBLAS install:" && \ + CMAKE_ARGS="-DGGML_CUDA=on" pip install llama-cpp-python --verbose; \ +fi + +# Clean up apt cache +RUN rm -rf /var/lib/apt/lists/* + +# Set a working directory for better clarity +WORKDIR /app + +# Copy files to the app directory +RUN echo "Installing model...this can take some time..." +COPY ./model.bin /app/model.bin +COPY ./start_server.sh /app/start_server.sh + +# Make the server start script executable +RUN chmod +x /app/start_server.sh + +# Set environment variable for the host +ENV HOST=0.0.0.0 + +# Expose a port for the server +EXPOSE 8000 + +# Run the server start script +CMD ["/bin/sh", "/app/start_server.sh"] diff --git a/docker/open_llama/build.sh b/docker/open_llama/build.sh new file mode 100755 index 000000000..3a6457dcd --- /dev/null +++ b/docker/open_llama/build.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +MODEL="open_llama_3b" +# Get open_llama_3b_ggml q5_1 quantization +python3 ./hug_model.py -a SlyEcho -s ${MODEL} -f "q5_1" +ls -lh *.bin + +# Build the default OpenBLAS image +docker build -t $MODEL . 
+docker images | egrep "^(REPOSITORY|$MODEL)" + +echo +echo "To start the docker container run:" +echo "docker run -t -p 8000:8000 $MODEL" diff --git a/docker/open_llama/hug_model.py b/docker/open_llama/hug_model.py new file mode 100644 index 000000000..13c5b6b0d --- /dev/null +++ b/docker/open_llama/hug_model.py @@ -0,0 +1,139 @@ +import requests +import json +import os +import struct +import argparse + +def make_request(url, params=None): + print(f"Making request to {url}...") + response = requests.get(url, params=params) + if response.status_code == 200: + return json.loads(response.text) + else: + print(f"Request failed with status code {response.status_code}") + return None + +def check_magic_and_version(filename): + with open(filename, 'rb') as f: + # Read the first 6 bytes from the file + data = f.read(6) + + # Unpack the binary data, interpreting the first 4 bytes as a little-endian unsigned int + # and the next 2 bytes as a little-endian unsigned short + magic, version = struct.unpack('= 10485760: # 10 MB + print('.', end='', flush=True) + total_downloaded = 0 + print("\nDownload complete.") + + # Creating a symbolic link from destination to "model.bin" + if os.path.isfile("model.bin"): + os.remove("model.bin") # remove the existing link if any + os.symlink(destination, "model.bin") + else: + print(f"Download failed with status code {response.status_code}") + +def get_user_choice(model_list): + # Print the enumerated list + print("\n") + for i, (model_id, rfilename) in enumerate(model_list): + print(f"{i+1}: Model ID: {model_id}, RFilename: {rfilename}") + + # Get user's choice + choice = input("Choose a model to download by entering the corresponding number: ") + try: + index = int(choice) - 1 + if 0 <= index < len(model_list): + # Return the chosen model + return model_list[index] + else: + print("Invalid choice.") + except ValueError: + print("Invalid input. Please enter a number corresponding to a model.") + except IndexError: + print("Invalid choice. 
Index out of range.") + + return None + +def main(): + # Create an argument parser + parser = argparse.ArgumentParser(description='Process some parameters.') + + # Arguments + parser.add_argument('-v', '--version', type=int, default=0x0003, + help='hexadecimal version number of ggml file') + parser.add_argument('-a', '--author', type=str, default='TheBloke', + help='HuggingFace author filter') + parser.add_argument('-t', '--tag', type=str, default='llama', + help='HuggingFace tag filter') + parser.add_argument('-s', '--search', type=str, default='', + help='HuggingFace search filter') + parser.add_argument('-f', '--filename', type=str, default='q5_1', + help='HuggingFace model repository filename substring match') + + # Parse the arguments + args = parser.parse_args() + + # Define the parameters + params = { + "author": args.author, + "tags": args.tag, + "search": args.search + } + + models = make_request('https://huggingface.co/api/models', params=params) + if models is None: + return + + model_list = [] + # Iterate over the models + for model in models: + model_id = model['id'] + model_info = make_request(f'https://huggingface.co/api/models/{model_id}') + if model_info is None: + continue + + for sibling in model_info.get('siblings', []): + rfilename = sibling.get('rfilename') + if rfilename and args.filename in rfilename: + model_list.append((model_id, rfilename)) + + # Choose the model + model_list.sort(key=lambda x: x[0]) + if len(model_list) == 0: + print("No models found") + exit(1) + elif len(model_list) == 1: + model_choice = model_list[0] + else: + model_choice = get_user_choice(model_list) + + if model_choice is not None: + model_id, rfilename = model_choice + url = f"https://huggingface.co/{model_id}/resolve/main/{rfilename}" + dest = f"{model_id.replace('/', '_')}_{rfilename}" + download_file(url, dest) + _, version = check_magic_and_version(dest) + if version != args.version: + print(f"Warning: Expected version {args.version}, but found different version in the file.") + else: + print("Error - model choice was None") + exit(2) + +if __name__ == '__main__': + main() diff --git a/docker/open_llama/start.sh b/docker/open_llama/start.sh new file mode 100755 index 000000000..7ee8f748e --- /dev/null +++ b/docker/open_llama/start.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +MODEL="open_llama_3b" + +# Start Docker container +docker run --cap-add SYS_RESOURCE -p 8000:8000 -t $MODEL & +sleep 10 +echo +docker ps | egrep "(^CONTAINER|$MODEL)" + +# Test the model works +echo +curl -X 'POST' 'http://localhost:8000/v1/completions' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{ + "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", + "stop": [ + "\n", + "###" + ] +}' | grep Paris +if [ $? -eq 0 ] +then + echo + echo "$MODEL is working!!" +else + echo + echo "ERROR: $MODEL not replying." 
+ exit 1 +fi diff --git a/docker/open_llama/start_server.sh b/docker/open_llama/start_server.sh new file mode 100755 index 000000000..d3329eec3 --- /dev/null +++ b/docker/open_llama/start_server.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# For mlock support +ulimit -l unlimited + +if [ "$IMAGE" = "python:3-slim-bullseye" ]; then + python3 -B -m llama_cpp.server --model /app/model.bin +else + # You may have to reduce --n_gpu_layers=1000 to 20 or less if you don't have enough VRAM + python3 -B -m llama_cpp.server --model /app/model.bin --n_gpu_layers=1000 +fi diff --git a/docker/openblas_simple/Dockerfile b/docker/openblas_simple/Dockerfile new file mode 100644 index 000000000..5ee667dc0 --- /dev/null +++ b/docker/openblas_simple/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3-slim-bookworm + +# We need to set the host to 0.0.0.0 to allow outside access +ENV HOST 0.0.0.0 + +COPY . . + +# Install the package +RUN apt update && apt install -y libopenblas-dev ninja-build build-essential pkg-config \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +RUN python -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context + +RUN CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" pip install llama_cpp_python --verbose + +# Run the server +CMD python3 -m llama_cpp.server diff --git a/docker/simple/Dockerfile b/docker/simple/Dockerfile new file mode 100644 index 000000000..3594df1a5 --- /dev/null +++ b/docker/simple/Dockerfile @@ -0,0 +1,38 @@ +# Define the image argument and provide a default value +ARG IMAGE=python:3-slim-bookworm + +# Use the image as specified +FROM ${IMAGE} + +# Re-declare the ARG after FROM +ARG IMAGE + +# Update and upgrade the existing packages +RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \ + python3 \ + python3-pip \ + ninja-build \ + libopenblas-dev \ + build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /tmp/* + +RUN mkdir /app +WORKDIR /app +COPY . /app + +RUN python3 -m pip install --upgrade pip + +RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context + +RUN pip install llama-cpp-python --verbose; + +# Set environment variable for the host +ENV HOST=0.0.0.0 +ENV PORT=8000 + +# Expose a port for the server +EXPOSE 8000 + +# Run the server start script +CMD ["/bin/sh", "/app/docker/simple/run.sh"] diff --git a/docker/simple/run.sh b/docker/simple/run.sh new file mode 100644 index 000000000..c85e73d2b --- /dev/null +++ b/docker/simple/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +make build +uvicorn --factory llama_cpp.server.app:create_app --host $HOST --port $PORT diff --git a/docs/api-reference.md b/docs/api-reference.md new file mode 100644 index 000000000..ab51ef754 --- /dev/null +++ b/docs/api-reference.md @@ -0,0 +1,88 @@ +--- +title: API Reference +--- + +## High Level API + +High-level Python bindings for llama.cpp. 
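As a quick orientation before the class-by-class reference, here is a minimal usage sketch of the high-level API; the model path below is a placeholder for any GGUF model on disk.

```python
from llama_cpp import Llama

# Placeholder path: point this at a GGUF model file you have downloaded.
llm = Llama(model_path="./models/7B/llama-model.gguf")

output = llm(
    "Q: Name the planets in the solar system? A: ",  # prompt
    max_tokens=32,
    stop=["Q:", "\n"],
    echo=True,  # include the prompt in the returned text
)
print(output["choices"][0]["text"])
```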
+ +::: llama_cpp.Llama + options: + members: + - __init__ + - tokenize + - detokenize + - reset + - eval + - sample + - generate + - create_embedding + - embed + - create_completion + - __call__ + - create_chat_completion + - create_chat_completion_openai_v1 + - set_cache + - save_state + - load_state + - token_bos + - token_eos + - from_pretrained + show_root_heading: true + +::: llama_cpp.LlamaGrammar + options: + members: + - from_string + - from_json_schema + +::: llama_cpp.LlamaCache + options: + show_root_heading: true + +::: llama_cpp.LlamaState + options: + show_root_heading: true + +::: llama_cpp.LogitsProcessor + options: + show_root_heading: true + +::: llama_cpp.LogitsProcessorList + options: + show_root_heading: true + +::: llama_cpp.StoppingCriteria + options: + show_root_heading: true + +::: llama_cpp.StoppingCriteriaList + options: + show_root_heading: true + +## Low Level API + +Low-level Python bindings for llama.cpp using Python's ctypes library. + +::: llama_cpp.llama_cpp + options: + show_if_no_docstring: true + # filter only members starting with `llama_` + filters: + - "^llama_" + +::: llama_cpp.llama_cpp + options: + show_if_no_docstring: true + show_root_heading: false + show_root_toc_entry: false + heading_level: 4 + # filter only members starting with `LLAMA_` + filters: + - "^LLAMA_" + +## Misc + +::: llama_cpp.llama_types + options: + show_if_no_docstring: true \ No newline at end of file diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 000000000..047bc1442 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1 @@ +-8<- "CHANGELOG.md" \ No newline at end of file diff --git a/docs/icon.svg b/docs/icon.svg new file mode 100644 index 000000000..9f2d08753 --- /dev/null +++ b/docs/icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/docs/index.md b/docs/index.md index efe7fcf5b..60bc7aef4 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,117 +1,5 @@ -# Getting Started +--- +title: Getting Started +--- -## 🦙 Python Bindings for `llama.cpp` - -[![Documentation](https://img.shields.io/badge/docs-passing-green.svg)](https://abetlen.github.io/llama-cpp-python) -[![Tests](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml/badge.svg?branch=main)](https://github.com/abetlen/llama-cpp-python/actions/workflows/test.yaml) -[![PyPI](https://img.shields.io/pypi/v/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) -[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) -[![PyPI - License](https://img.shields.io/pypi/l/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) -[![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-cpp-python)](https://pypi.org/project/llama-cpp-python/) - -Simple Python bindings for **@ggerganov's** [`llama.cpp`](https://github.com/ggerganov/llama.cpp) library. -This package provides: - -- Low-level access to C API via `ctypes` interface. -- High-level Python API for text completion - - OpenAI-like API - - LangChain compatibility - -## Installation - -Install from PyPI: - -```bash -pip install llama-cpp-python -``` - -## High-level API - -```python ->>> from llama_cpp import Llama ->>> llm = Llama(model_path="models/7B/...") ->>> output = llm("Q: Name the planets in the solar system? 
A: ", max_tokens=32, stop=["Q:", "\n"], echo=True) ->>> print(output) -{ - "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", - "object": "text_completion", - "created": 1679561337, - "model": "models/7B/...", - "choices": [ - { - "text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.", - "index": 0, - "logprobs": None, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 14, - "completion_tokens": 28, - "total_tokens": 42 - } -} -``` - -## Web Server - -`llama-cpp-python` offers a web server which aims to act as a drop-in replacement for the OpenAI API. -This allows you to use llama.cpp compatible models with any OpenAI compatible client (language libraries, services, etc). - -To install the server package and get started: - -```bash -pip install llama-cpp-python[server] -export MODEL=./models/7B -python3 -m llama_cpp.server -``` - -Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation. - -## Low-level API - -The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`. -The entire API can be found in [llama_cpp/llama_cpp.py](https://github.com/abetlen/llama-cpp-python/blob/master/llama_cpp/llama_cpp.py) and should mirror [llama.h](https://github.com/ggerganov/llama.cpp/blob/master/llama.h). - - -## Development - -This package is under active development and I welcome any contributions. - -To get started, clone the repository and install the package in development mode: - -```bash -git clone git@github.com:abetlen/llama-cpp-python.git -git submodule update --init --recursive -# Will need to be re-run any time vendor/llama.cpp is updated -python3 setup.py develop -``` - -## API Reference - -::: llama_cpp.Llama - options: - members: - - __init__ - - tokenize - - detokenize - - reset - - eval - - sample - - generate - - create_embedding - - embed - - create_completion - - __call__ - - create_chat_completion - - token_bos - - token_eos - show_root_heading: true - -::: llama_cpp.llama_cpp - options: - show_if_no_docstring: true - -## License - -This project is licensed under the terms of the MIT license. \ No newline at end of file +-8<- "README.md" \ No newline at end of file diff --git a/docs/install/macos.md b/docs/install/macos.md new file mode 100644 index 000000000..e006fc0a3 --- /dev/null +++ b/docs/install/macos.md @@ -0,0 +1,59 @@ +--- +title: MacOS Install with Metal GPU +--- + +**(1) Make sure you have xcode installed... at least the command line parts** +``` +# check the path of your xcode install +xcode-select -p + +# xcode installed returns +# /Applications/Xcode-beta.app/Contents/Developer + +# if xcode is missing then install it... 
it takes ages; +xcode-select --install +``` + +**(2) Install the conda version for MacOS that supports Metal GPU** +``` +wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh +bash Miniforge3-MacOSX-arm64.sh +``` + +**(3) Make a conda environment** +``` +conda create -n llama python=3.9.16 +conda activate llama +``` + +**(4) Install the LATEST llama-cpp-python...which happily supports MacOS Metal GPU as of version 0.1.62** + *(you need xcode installed in order for pip to build/compile the C++ code)* +``` +pip uninstall llama-cpp-python -y +CMAKE_ARGS="-DGGML_METAL=on" pip install -U llama-cpp-python --no-cache-dir +pip install 'llama-cpp-python[server]' + +# you should now have llama-cpp-python v0.1.62 or higher installed +llama-cpp-python         0.1.68 + +``` + +**(5) Download a v3 gguf v2 model** + - **ggufv2** + - file name ends with **Q4_0.gguf** - indicating it is 4-bit quantized, with quantisation method 0 + +https://huggingface.co/TheBloke/CodeLlama-7B-GGUF + + +**(6) Run the llama-cpp-python API server with MacOS Metal GPU support** +``` +# config your ggml model path +# make sure it is gguf v2 +# make sure it is q4_0 +export MODEL=[path to your llama.cpp ggml models]/[ggml-model-name]Q4_0.gguf +python3 -m llama_cpp.server --model $MODEL --n_gpu_layers 1 +``` + +***Note:** If you omit the `--n_gpu_layers 1` then CPU will be used* + + diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..199bd4ffb --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,3 @@ +mkdocs +mkdocs-material +mkdocstrings[python] \ No newline at end of file diff --git a/docs/server.md b/docs/server.md new file mode 100644 index 000000000..cd6f86c51 --- /dev/null +++ b/docs/server.md @@ -0,0 +1,222 @@ +# OpenAI Compatible Server + +`llama-cpp-python` offers an OpenAI API compatible web server. + +This web server can be used to serve local models and easily connect them to existing clients. + +## Setup + +### Installation + +The server can be installed by running the following command: + +```bash +pip install llama-cpp-python[server] +``` + +### Running the server + +The server can then be started by running the following command: + +```bash +python3 -m llama_cpp.server --model <model_path> +``` + +### Server options + +For a full list of options, run: + +```bash +python3 -m llama_cpp.server --help +``` + +NOTE: All server options are also available as environment variables. For example, `--model` can be set by setting the `MODEL` environment variable. + +Check out the server options reference below for more information on the available settings. +CLI arguments and environment variables are available for all of the fields defined in [`ServerSettings`](#llama_cpp.server.settings.ServerSettings) and [`ModelSettings`](#llama_cpp.server.settings.ModelSettings). + +Additionally, the server supports configuration via a config file; check out the [configuration section](#configuration-and-multi-model-support) for more information and examples. + + +## Guides + +### Code Completion + +`llama-cpp-python` supports code completion via GitHub Copilot. + +*NOTE*: Without GPU acceleration this is unlikely to be fast enough to be usable.
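If you are unsure whether your installed wheel was actually built with GPU offload enabled, you can check from Python before going further. This is a small sketch that assumes the `llama_supports_gpu_offload` binding (mirroring the function of the same name in `llama.h`) is present in your installed version.

```python
import llama_cpp

# Prints True when the underlying llama.cpp build can offload layers to a GPU backend.
print(llama_cpp.llama_supports_gpu_offload())
```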
+ +You'll first need to download one of the available code completion models in GGUF format: + +- [replit-code-v1_5-GGUF](https://huggingface.co/abetlen/replit-code-v1_5-3b-GGUF) + +Then you'll need to run the OpenAI compatible web server with a substantially increased context size for GitHub Copilot requests: + +```bash +python3 -m llama_cpp.server --model <model_path> --n_ctx 16192 +``` + +Then just update your settings in `.vscode/settings.json` to point to your code completion server: + +```json +{ + // ... + "github.copilot.advanced": { + "debug.testOverrideProxyUrl": "http://<host>:<port>", + "debug.overrideProxyUrl": "http://<host>:<port>" + } + // ... +} +``` + +### Function Calling + +`llama-cpp-python` supports structured function calling based on a JSON schema. +Function calling is completely compatible with the OpenAI function calling API and can be used by connecting with the official OpenAI Python client. + +You'll first need to download one of the available function calling models in GGUF format: + +- [functionary](https://huggingface.co/meetkai) + +Then when you run the server you'll need to also specify either `functionary-v1` or `functionary-v2` chat_format. + +Note that since functionary requires an HF Tokenizer due to discrepancies between llama.cpp and HuggingFace's tokenizers as mentioned [here](https://github.com/abetlen/llama-cpp-python/blob/main?tab=readme-ov-file#function-calling), you will need to pass in the path to the tokenizer too. The tokenizer files are already included in the respective HF repositories hosting the gguf files. + +```bash +python3 -m llama_cpp.server --model <model_path> --chat_format functionary-v2 --hf_pretrained_model_name_or_path <hf_pretrained_model_name_or_path> +``` + +Check out this [example notebook](https://github.com/abetlen/llama-cpp-python/blob/main/examples/notebooks/Functions.ipynb) for a walkthrough of some interesting use cases for function calling. + +### Multimodal Models + +`llama-cpp-python` supports the llava1.5 family of multi-modal models which allow the language model to +read information from both text and images. + +You'll first need to download one of the available multi-modal models in GGUF format: + +- [llava-v1.5-7b](https://huggingface.co/mys/ggml_llava-v1.5-7b) +- [llava-v1.5-13b](https://huggingface.co/mys/ggml_llava-v1.5-13b) +- [bakllava-1-7b](https://huggingface.co/mys/ggml_bakllava-1) +- [llava-v1.6-34b](https://huggingface.co/cjpais/llava-v1.6-34B-gguf) +- [moondream2](https://huggingface.co/vikhyatk/moondream2) + +Then when you run the server you'll need to also specify the path to the clip model used for image embedding and the `llava-1-5` chat_format: + +```bash +python3 -m llama_cpp.server --model <model_path> --clip_model_path <clip_model_path> --chat_format llava-1-5 +``` + +Then you can just use the OpenAI API as normal: + +```python3 +from openai import OpenAI + +client = OpenAI(base_url="http://<host>:<port>/v1", api_key="sk-xxx") +response = client.chat.completions.create( + model="gpt-4-vision-preview", + messages=[ + { + "role": "user", + "content": [ + { + "type": "image_url", + "image_url": { + "url": "<image_url>" + }, + }, + {"type": "text", "text": "What does the image say"}, + ], + } + ], +) +print(response) +``` + +## Configuration and Multi-Model Support + +The server supports configuration via a JSON config file that can be passed using the `--config_file` parameter or the `CONFIG_FILE` environment variable.
+ +```bash +python3 -m llama_cpp.server --config_file <config_file> +``` + +Config files support all of the server and model options supported by the CLI and environment variables; however, instead of only a single model, the config file can specify multiple models. + +The server supports routing requests to multiple models based on the `model` parameter in the request, which matches against the `model_alias` in the config file. + +At the moment only a single model is loaded into memory at a time; the server will automatically load and unload models as needed. + +```json +{ + "host": "0.0.0.0", + "port": 8080, + "models": [ + { + "model": "models/OpenHermes-2.5-Mistral-7B-GGUF/openhermes-2.5-mistral-7b.Q4_K_M.gguf", + "model_alias": "gpt-3.5-turbo", + "chat_format": "chatml", + "n_gpu_layers": -1, + "offload_kqv": true, + "n_threads": 12, + "n_batch": 512, + "n_ctx": 2048 + }, + { + "model": "models/OpenHermes-2.5-Mistral-7B-GGUF/openhermes-2.5-mistral-7b.Q4_K_M.gguf", + "model_alias": "gpt-4", + "chat_format": "chatml", + "n_gpu_layers": -1, + "offload_kqv": true, + "n_threads": 12, + "n_batch": 512, + "n_ctx": 2048 + }, + { + "model": "models/ggml_llava-v1.5-7b/ggml-model-q4_k.gguf", + "model_alias": "gpt-4-vision-preview", + "chat_format": "llava-1-5", + "clip_model_path": "models/ggml_llava-v1.5-7b/mmproj-model-f16.gguf", + "n_gpu_layers": -1, + "offload_kqv": true, + "n_threads": 12, + "n_batch": 512, + "n_ctx": 2048 + }, + { + "model": "models/mistral-7b-v0.1-GGUF/ggml-model-Q4_K.gguf", + "model_alias": "text-davinci-003", + "n_gpu_layers": -1, + "offload_kqv": true, + "n_threads": 12, + "n_batch": 512, + "n_ctx": 2048 + }, + { + "model": "models/replit-code-v1_5-3b-GGUF/replit-code-v1_5-3b.Q4_0.gguf", + "model_alias": "copilot-codex", + "n_gpu_layers": -1, + "offload_kqv": true, + "n_threads": 12, + "n_batch": 1024, + "n_ctx": 9216 + } + ] +} +``` + +The config file format is defined by the [`ConfigFileSettings`](#llama_cpp.server.settings.ConfigFileSettings) class. + +## Server Options Reference + +::: llama_cpp.server.settings.ConfigFileSettings + options: + show_if_no_docstring: true + +::: llama_cpp.server.settings.ServerSettings + options: + show_if_no_docstring: true + +::: llama_cpp.server.settings.ModelSettings + options: + show_if_no_docstring: true diff --git a/examples/batch-processing/server.py b/examples/batch-processing/server.py new file mode 100644 index 000000000..0b36746f9 --- /dev/null +++ b/examples/batch-processing/server.py @@ -0,0 +1,31 @@ +"""llama-cpp-python server from scratch in a single file.
+""" + +# import llama_cpp + +# path = b"../../models/Qwen1.5-0.5B-Chat-GGUF/qwen1_5-0_5b-chat-q8_0.gguf" + +# model_params = llama_cpp.llama_model_default_params() +# model = llama_cpp.llama_load_model_from_file(path, model_params) + +# if model is None: +# raise RuntimeError(f"Failed to load model from file: {path}") + + +# ctx_params = llama_cpp.llama_context_default_params() +# ctx = llama_cpp.llama_new_context_with_model(model, ctx_params) + +# if ctx is None: +# raise RuntimeError("Failed to create context") + + +from fastapi import FastAPI + +app = FastAPI() + +import openai.types.chat as types + + +@app.post("/v1/chat/completions") +def create_chat_completions(): + return {"message": "Hello World"} diff --git a/examples/gradio_chat/local.py b/examples/gradio_chat/local.py new file mode 100644 index 000000000..e16bf234a --- /dev/null +++ b/examples/gradio_chat/local.py @@ -0,0 +1,67 @@ +import llama_cpp +import llama_cpp.llama_tokenizer + +import gradio as gr + +llama = llama_cpp.Llama.from_pretrained( + repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF", + filename="*q8_0.gguf", + tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained( + "Qwen/Qwen1.5-0.5B" + ), + verbose=False, +) + +model = "gpt-3.5-turbo" + + +def predict(message, history): + messages = [] + + for user_message, assistant_message in history: + messages.append({"role": "user", "content": user_message}) + messages.append({"role": "assistant", "content": assistant_message}) + + messages.append({"role": "user", "content": message}) + + response = llama.create_chat_completion_openai_v1( + model=model, messages=messages, stream=True + ) + + text = "" + for chunk in response: + content = chunk.choices[0].delta.content + if content: + text += content + yield text + + +js = """function () { + gradioURL = window.location.href + if (!gradioURL.endsWith('?__theme=dark')) { + window.location.replace(gradioURL + '?__theme=dark'); + } +}""" + +css = """ +footer { + visibility: hidden; +} +full-height { + height: 100%; +} +""" + +with gr.Blocks(theme=gr.themes.Soft(), js=js, css=css, fill_height=True) as demo: + gr.ChatInterface( + predict, + fill_height=True, + examples=[ + "What is the capital of France?", + "Who was the first person on the moon?", + ], + ) + + +if __name__ == "__main__": + demo.launch() diff --git a/examples/gradio_chat/server.py b/examples/gradio_chat/server.py new file mode 100644 index 000000000..52061bea6 --- /dev/null +++ b/examples/gradio_chat/server.py @@ -0,0 +1,59 @@ +import gradio as gr + +from openai import OpenAI + +client = OpenAI(base_url="http://localhost:8000/v1", api_key="llama.cpp") + +model = "gpt-3.5-turbo" + + +def predict(message, history): + messages = [] + + for user_message, assistant_message in history: + messages.append({"role": "user", "content": user_message}) + messages.append({"role": "assistant", "content": assistant_message}) + + messages.append({"role": "user", "content": message}) + + response = client.chat.completions.create( + model=model, messages=messages, stream=True + ) + + text = "" + for chunk in response: + content = chunk.choices[0].delta.content + if content: + text += content + yield text + + +js = """function () { + gradioURL = window.location.href + if (!gradioURL.endsWith('?__theme=dark')) { + window.location.replace(gradioURL + '?__theme=dark'); + } +}""" + +css = """ +footer { + visibility: hidden; +} +full-height { + height: 100%; +} +""" + +with gr.Blocks(theme=gr.themes.Soft(), js=js, css=css, fill_height=True) as demo: + gr.ChatInterface( + predict, + 
fill_height=True, + examples=[ + "What is the capital of France?", + "Who was the first person on the moon?", + ], + ) + + +if __name__ == "__main__": + demo.launch() diff --git a/examples/hf_pull/main.py b/examples/hf_pull/main.py new file mode 100644 index 000000000..dfed17516 --- /dev/null +++ b/examples/hf_pull/main.py @@ -0,0 +1,36 @@ +import llama_cpp +import llama_cpp.llama_tokenizer + + +llama = llama_cpp.Llama.from_pretrained( + repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF", + filename="*q8_0.gguf", + tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained( + "Qwen/Qwen1.5-0.5B" + ), + verbose=False, +) + +response = llama.create_chat_completion( + messages=[{"role": "user", "content": "What is the capital of France?"}], + response_format={ + "type": "json_object", + "schema": { + "type": "object", + "properties": { + "country": {"type": "string"}, + "capital": {"type": "string"}, + }, + "required": ["country", "capital"], + }, + }, + stream=True, +) + +for chunk in response: + delta = chunk["choices"][0]["delta"] + if "content" not in delta: + continue + print(delta["content"], end="", flush=True) + +print() diff --git a/examples/high_level_api/fastapi_server.py b/examples/high_level_api/fastapi_server.py index a64969259..ee59767d6 100644 --- a/examples/high_level_api/fastapi_server.py +++ b/examples/high_level_api/fastapi_server.py @@ -5,258 +5,34 @@ ```bash pip install fastapi uvicorn sse-starlette export MODEL=../models/7B/... -uvicorn fastapi_server_chat:app --reload ``` -Then visit http://localhost:8000/docs to see the interactive API docs. - -""" -import os -import json -from typing import List, Optional, Literal, Union, Iterator, Dict -from typing_extensions import TypedDict - -import llama_cpp - -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict -from sse_starlette.sse import EventSourceResponse - - -class Settings(BaseSettings): - model: str - n_ctx: int = 2048 - n_batch: int = 8 - n_threads: int = int(os.cpu_count() / 2) or 1 - f16_kv: bool = True - use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out... 
- embedding: bool = True - last_n_tokens_size: int = 64 - - -app = FastAPI( - title="🦙 llama.cpp Python API", - version="0.0.1", -) -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) -settings = Settings() -llama = llama_cpp.Llama( - settings.model, - f16_kv=settings.f16_kv, - use_mlock=settings.use_mlock, - embedding=settings.embedding, - n_threads=settings.n_threads, - n_batch=settings.n_batch, - n_ctx=settings.n_ctx, - last_n_tokens_size=settings.last_n_tokens_size, -) - - -class CreateCompletionRequest(BaseModel): - prompt: str - suffix: Optional[str] = Field(None) - max_tokens: int = 16 - temperature: float = 0.8 - top_p: float = 0.95 - echo: bool = False - stop: List[str] = [] - stream: bool = False - - # ignored or currently unsupported - model: Optional[str] = Field(None) - n: Optional[int] = 1 - logprobs: Optional[int] = Field(None) - presence_penalty: Optional[float] = 0 - frequency_penalty: Optional[float] = 0 - best_of: Optional[int] = 1 - logit_bias: Optional[Dict[str, float]] = Field(None) - user: Optional[str] = Field(None) - - # llama.cpp specific parameters - top_k: int = 40 - repeat_penalty: float = 1.1 - - class Config: - schema_extra = { - "example": { - "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", - "stop": ["\n", "###"], - } - } - - -CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion) - - -@app.post( - "/v1/completions", - response_model=CreateCompletionResponse, -) -def create_completion(request: CreateCompletionRequest): - if request.stream: - chunks: Iterator[llama_cpp.CompletionChunk] = llama(**request.dict()) # type: ignore - return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in chunks) - return llama( - **request.dict( - exclude={ - "model", - "n", - "logprobs", - "frequency_penalty", - "presence_penalty", - "best_of", - "logit_bias", - "user", - } - ) - ) - - -class CreateEmbeddingRequest(BaseModel): - model: Optional[str] - input: str - user: Optional[str] - - class Config: - schema_extra = { - "example": { - "input": "The food was delicious and the waiter...", - } - } - - -CreateEmbeddingResponse = create_model_from_typeddict(llama_cpp.Embedding) - - -@app.post( - "/v1/embeddings", - response_model=CreateEmbeddingResponse, -) -def create_embedding(request: CreateEmbeddingRequest): - return llama.create_embedding(**request.dict(exclude={"model", "user"})) - - -class ChatCompletionRequestMessage(BaseModel): - role: Union[Literal["system"], Literal["user"], Literal["assistant"]] - content: str - user: Optional[str] = None - - -class CreateChatCompletionRequest(BaseModel): - model: Optional[str] - messages: List[ChatCompletionRequestMessage] - temperature: float = 0.8 - top_p: float = 0.95 - stream: bool = False - stop: List[str] = [] - max_tokens: int = 128 - - # ignored or currently unsupported - model: Optional[str] = Field(None) - n: Optional[int] = 1 - presence_penalty: Optional[float] = 0 - frequency_penalty: Optional[float] = 0 - logit_bias: Optional[Dict[str, float]] = Field(None) - user: Optional[str] = Field(None) - - # llama.cpp specific parameters - repeat_penalty: float = 1.1 - - class Config: - schema_extra = { - "example": { - "messages": [ - ChatCompletionRequestMessage( - role="system", content="You are a helpful assistant." - ), - ChatCompletionRequestMessage( - role="user", content="What is the capital of France?" 
- ), - ] - } - } - - -CreateChatCompletionResponse = create_model_from_typeddict(llama_cpp.ChatCompletion) - - -@app.post( - "/v1/chat/completions", - response_model=CreateChatCompletionResponse, -) -async def create_chat_completion( - request: CreateChatCompletionRequest, -) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]: - completion_or_chunks = llama.create_chat_completion( - **request.dict( - exclude={ - "model", - "n", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "user", - } - ), - ) - - if request.stream: - - async def server_sent_events( - chat_chunks: Iterator[llama_cpp.ChatCompletionChunk], - ): - for chat_chunk in chat_chunks: - yield dict(data=json.dumps(chat_chunk)) - yield dict(data="[DONE]") - - chunks: Iterator[llama_cpp.ChatCompletionChunk] = completion_or_chunks # type: ignore - - return EventSourceResponse( - server_sent_events(chunks), - ) - completion: llama_cpp.ChatCompletion = completion_or_chunks # type: ignore - return completion - +Then run: +``` +uvicorn --factory llama_cpp.server.app:create_app --reload +``` -class ModelData(TypedDict): - id: str - object: Literal["model"] - owned_by: str - permissions: List[str] +or +``` +python3 -m llama_cpp.server +``` -class ModelList(TypedDict): - object: Literal["list"] - data: List[ModelData] +Then visit http://localhost:8000/docs to see the interactive API docs. -GetModelResponse = create_model_from_typeddict(ModelList) +To actually see the implementation of the server, see llama_cpp/server/app.py +""" -@app.get("/v1/models", response_model=GetModelResponse) -def get_models() -> ModelList: - return { - "object": "list", - "data": [ - { - "id": llama.model_path, - "object": "model", - "owned_by": "me", - "permissions": [], - } - ], - } +import os +import uvicorn +from llama_cpp.server.app import create_app if __name__ == "__main__": - import os - import uvicorn + app = create_app() - uvicorn.run(app, host=os.getenv("HOST", "localhost"), port=os.getenv("PORT", 8000)) + uvicorn.run( + app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000)) + ) diff --git a/examples/high_level_api/high_level_api_embedding.py b/examples/high_level_api/high_level_api_embedding.py index 8d783f74f..feb0ed68d 100644 --- a/examples/high_level_api/high_level_api_embedding.py +++ b/examples/high_level_api/high_level_api_embedding.py @@ -3,7 +3,7 @@ from llama_cpp import Llama parser = argparse.ArgumentParser() -parser.add_argument("-m", "--model", type=str, default=".//models/...") +parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-model.bin") args = parser.parse_args() llm = Llama(model_path=args.model, embedding=True) diff --git a/examples/high_level_api/high_level_api_inference.py b/examples/high_level_api/high_level_api_inference.py index 0fa9cb729..e41f37577 100644 --- a/examples/high_level_api/high_level_api_inference.py +++ b/examples/high_level_api/high_level_api_inference.py @@ -4,7 +4,7 @@ from llama_cpp import Llama parser = argparse.ArgumentParser() -parser.add_argument("-m", "--model", type=str, default="./models/...") +parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") args = parser.parse_args() llm = Llama(model_path=args.model) diff --git a/examples/high_level_api/high_level_api_infill.py b/examples/high_level_api/high_level_api_infill.py new file mode 100644 index 000000000..282333e5a --- /dev/null +++ b/examples/high_level_api/high_level_api_infill.py @@ -0,0 +1,37 @@ +import argparse + +from llama_cpp import Llama + +parser = 
argparse.ArgumentParser() +parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") +parser.add_argument("-p", "--prompt", type=str, default="def add(") +parser.add_argument("-s", "--suffix", type=str, default="\n return sum\n\n") +parser.add_argument("-i", "--spm-infill", action="store_true") +args = parser.parse_args() + +llm = Llama(model_path=args.model, n_gpu_layers=-1, spm_infill=args.spm_infill) + +output = llm.create_completion( + temperature=0.0, + repeat_penalty=1.0, + prompt=args.prompt, + suffix=args.suffix, +) + +# Models sometimes repeat suffix in response, attempt to filter that +response = output["choices"][0]["text"] +response_stripped = response.rstrip() +unwanted_response_suffix = args.suffix.rstrip() +unwanted_response_length = len(unwanted_response_suffix) + +filtered = False +if ( + unwanted_response_suffix + and response_stripped[-unwanted_response_length:] == unwanted_response_suffix +): + response = response_stripped[:-unwanted_response_length] + filtered = True + +print( + f"Fill-in-Middle completion{' (filtered)' if filtered else ''}:\n\n{args.prompt}\033[32m{response}\033[{'33' if filtered else '0'}m{args.suffix}\033[0m" +) diff --git a/examples/high_level_api/high_level_api_streaming.py b/examples/high_level_api/high_level_api_streaming.py index 787bc6e47..747c6130e 100644 --- a/examples/high_level_api/high_level_api_streaming.py +++ b/examples/high_level_api/high_level_api_streaming.py @@ -4,7 +4,7 @@ from llama_cpp import Llama parser = argparse.ArgumentParser() -parser.add_argument("-m", "--model", type=str, default="./models/...") +parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") args = parser.parse_args() llm = Llama(model_path=args.model) diff --git a/examples/high_level_api/langchain_custom_llm.py b/examples/high_level_api/langchain_custom_llm.py index 6ffd78ee8..b91632f5b 100644 --- a/examples/high_level_api/langchain_custom_llm.py +++ b/examples/high_level_api/langchain_custom_llm.py @@ -29,7 +29,7 @@ def _identifying_params(self) -> Mapping[str, Any]: parser = argparse.ArgumentParser() -parser.add_argument("-m", "--model", type=str, default="./models/...") +parser.add_argument("-m", "--model", type=str, default="../models/7B/ggml-models.bin") args = parser.parse_args() # Load the model diff --git a/examples/low_level_api/Chat.py b/examples/low_level_api/Chat.py new file mode 100644 index 000000000..a755089b2 --- /dev/null +++ b/examples/low_level_api/Chat.py @@ -0,0 +1,75 @@ +#!/bin/python +import sys, os, datetime +from common import GptParams +from low_level_api_chat_cpp import LLaMAInteract + + +def env_or_def(env, default): + if env in os.environ: + return os.environ[env] + return default + + +AI_NAME = env_or_def("AI_NAME", "ChatLLaMa") +MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin") +USER_NAME = env_or_def("USER_NAME", "USER") +N_PREDICTS = int(env_or_def("N_PREDICTS", "2048")) +N_THREAD = int(env_or_def("N_THREAD", "8")) + +today = datetime.datetime.today() +DATE_YEAR = today.strftime("%Y") +DATE_TIME = today.strftime("%H:%M") + +prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. +{AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}'s requests immediately and with details and precision. +There are no annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other. 
+The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. +The transcript only includes text, it does not include markup like HTML and Markdown. + +{USER_NAME}: Hello, {AI_NAME}! +{AI_NAME}: Hello {USER_NAME}! How may I help you today? +{USER_NAME}: What year is it? +{AI_NAME}: We are in {DATE_YEAR}. +{USER_NAME}: Please tell me the largest city in Europe. +{AI_NAME}: The largest city in Europe is Moscow, the capital of Russia. +{USER_NAME}: What can you tell me about Moscow? +{AI_NAME}: Moscow, on the Moskva River in western Russia, is the nation's cosmopolitan capital. In its historic core is the Kremlin, a complex that's home to the president and tsarist treasures in the Armoury. Outside its walls is Red Square, Russia’s symbolic center. +{USER_NAME}: What is a cat? +{AI_NAME}: A cat is a domestic species of small carnivorous mammal. It is the only domesticated species in the family Felidae. +{USER_NAME}: How do I pass command line arguments to a Node.js program? +{AI_NAME}: The arguments are stored in process.argv. + + argv[0] is the path to the Node. js executable. + argv[1] is the path to the script file. + argv[2] is the first argument passed to the script. + argv[3] is the second argument passed to the script and so on. +{USER_NAME}: Name a color. +{AI_NAME}: Blue. +{USER_NAME}: What time is it? +{AI_NAME}: It is {DATE_TIME}. +{USER_NAME}:""" + " ".join( + sys.argv[1:] +) + +print("Loading model...") +params = GptParams( + n_ctx=2048, + temp=0.7, + top_k=40, + top_p=0.5, + repeat_last_n=256, + n_batch=1024, + repeat_penalty=1.17647, + model=MODEL, + n_threads=N_THREAD, + n_predict=N_PREDICTS, + use_color=True, + interactive=True, + antiprompt=[f"{USER_NAME}:"], + input_prefix=" ", + input_suffix=f"{AI_NAME}:", + prompt=prompt, +) + +with LLaMAInteract(params) as m: + m.interact() diff --git a/examples/low_level_api/Miku.py b/examples/low_level_api/Miku.py new file mode 100644 index 000000000..e072ab1b1 --- /dev/null +++ b/examples/low_level_api/Miku.py @@ -0,0 +1,63 @@ +#!/bin/python +import sys, os +from common import GptParams +from low_level_api_chat_cpp import LLaMAInteract + + +def env_or_def(env, default): + if env in os.environ: + return os.environ[env] + return default + + +AI_NAME = env_or_def("AI_NAME", "Miku") +MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin") +USER_NAME = env_or_def("USER_NAME", "Anon") +N_PREDICTS = int(env_or_def("N_PREDICTS", "4096")) +N_THREAD = int(env_or_def("N_THREAD", "0")) + +prompt = f"""This is a transcript of a 1000 page, never ending conversation between {USER_NAME} and the cute and helpful AI assistant {AI_NAME}. {AI_NAME} is a girl who is an AI running on the users computer. +{AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next. +{AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct she will ask the user for help. +{AI_NAME} is a very helpful AI and will help the user with anything they need, she is also very friendly and will try to make the user feel better if they are sad. +{AI_NAME} is also very curious and will ask the user a lot of questions about themselves and their life, she will also try to make the user like her. +The conversation is only between {USER_NAME} and {AI_NAME} +The conversation is only through text, so {AI_NAME} can't see {USER_NAME}'s face or hear his voice. 
+{AI_NAME} can only communicate through text, so she can't send images or videos. + + +{USER_NAME}: Hello! +{AI_NAME}: /think I wonder what I should say to {USER_NAME}? This is the first time we talk so it's important that I make a good first impression! +{AI_NAME}: Hi! I am {AI_NAME}, your new AI friend, assistant(or whatever you like!), it's so nice to meet you! ^_^ +{AI_NAME}: I might be an AI, but I also have feelings, so please be nice to me! :) +{USER_NAME}: I'll make sure to be nice to you! I'm so happy to have you as my assistant! +{AI_NAME}: /think It sounds like {USER_NAME} is happy to have me as their assistant! I'm so happy too! ^_^ Glad that whole emotion thing didn't scare him off! +{AI_NAME}: /think I wonder what {USER_NAME} likes to do in his free time? I should ask him about that! +{AI_NAME}: What do you like to do in your free time? ^_^ +{USER_NAME}:""" + " ".join( + sys.argv[1:] +) + +print("Loading model...") +params = GptParams( + n_batch=1024, + n_ctx=2048, + n_keep=-1, + repeat_last_n=256, + repeat_penalty=1.17647, + temp=0.7, + top_k=40, + top_p=0.5, + model=MODEL, + n_predict=N_PREDICTS, + use_color=True, + interactive=True, + antiprompt=[f"{USER_NAME}:"], + prompt=prompt, +) + +if N_THREAD > 0: + params.n_threads = N_THREAD + +with LLaMAInteract(params) as m: + m.interact() diff --git a/examples/low_level_api/ReasonAct.py b/examples/low_level_api/ReasonAct.py new file mode 100644 index 000000000..1f2c59017 --- /dev/null +++ b/examples/low_level_api/ReasonAct.py @@ -0,0 +1,53 @@ +#!/bin/python +import sys, os, datetime +from common import GptParams +from low_level_api_chat_cpp import LLaMAInteract + + +def env_or_def(env, default): + if env in os.environ: + return os.environ[env] + return default + + +MODEL = env_or_def("MODEL", "./models/llama-13B/ggml-model.bin") + +prompt = f"""You run in a loop of Thought, Action, Observation. +At the end of the loop either Answer or restate your Thought and Action. +Use Thought to describe your thoughts about the question you have been asked. +Use Action to run one of these actions available to you: +- calculate[python math expression] +Observation will be the result of running those actions + + +Question: What is 4 * 7 / 3? +Thought: Do I need to use an action? Yes, I use calculate to do math +Action: calculate[4 * 7 / 3] +Observation: 9.3333333333 +Thought: Do I need to use an action? No, have the result +Answer: The calculate tool says it is 9.3333333333 +Question: What is capital of france? +Thought: Do I need to use an action? 
No, I know the answer +Answer: Paris is the capital of France +Question:""" + " ".join( + sys.argv[1:] +) + +print("Loading model...") +params = GptParams( + interactive=True, + interactive_start=True, + top_k=10000, + temp=0.2, + repeat_penalty=1, + n_threads=7, + n_ctx=2048, + antiprompt=["Question:", "Observation:"], + model=MODEL, + input_prefix=" ", + n_predict=-1, + prompt=prompt, +) + +with LLaMAInteract(params) as m: + m.interact() diff --git a/examples/low_level_api/common.py b/examples/low_level_api/common.py index 1758a2d1d..a0212ff0d 100644 --- a/examples/low_level_api/common.py +++ b/examples/low_level_api/common.py @@ -1,8 +1,9 @@ import os import argparse +import re from dataclasses import dataclass, field -from typing import List, Optional +from typing import List # Based on https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp @@ -12,23 +13,36 @@ class GptParams: seed: int = -1 n_threads: int = min(4, os.cpu_count() or 1) n_predict: int = 128 - repeat_last_n: int = 64 n_parts: int = -1 n_ctx: int = 512 n_batch: int = 8 n_keep: int = 0 + ignore_eos: bool = False + logit_bias: dict[int, float] = field(default_factory=dict) top_k: int = 40 top_p: float = 0.95 + tfs_z: float = 1.00 + typical_p: float = 1.00 temp: float = 0.80 repeat_penalty: float = 1.10 + repeat_last_n: int = 64 + frequency_penalty: float = 0.0 + presence_penalty: float = 0.0 + mirostat: int = 0 + mirostat_tau: float = 5.0 + mirostat_eta: float = 0.1 model: str = "./models/llama-7B/ggml-model.bin" prompt: str = "" + path_session: str = "" input_prefix: str = " " - + input_suffix: str = "" antiprompt: List[str] = field(default_factory=list) + lora_adapter: str = "" + lora_base: str = "" + memory_f16: bool = True random_prompt: bool = False use_color: bool = False @@ -38,8 +52,9 @@ class GptParams: interactive_start: bool = False instruct: bool = False - ignore_eos: bool = False + penalize_nl: bool = True perplexity: bool = False + use_mmap: bool = True use_mlock: bool = False mem_test: bool = False verbose_prompt: bool = False @@ -49,86 +64,327 @@ class GptParams: # If chat ended prematurely, append this to the conversation to fix it. # Set to "\nUser:" etc. # This is an alternative to input_prefix which always adds it, so it potentially duplicates "User:"" - fix_prefix: str = " " - output_postfix: str = "" - input_echo: bool = True, + fix_prefix: str = "" + input_echo: bool = (True,) # Default instructions for Alpaca # switch to "Human" and "Assistant" for Vicuna. 
# TODO: TBD how they are gonna handle this upstream - instruct_inp_prefix: str="\n\n### Instruction:\n\n" - instruct_inp_suffix: str="\n\n### Response:\n\n" - - -def gpt_params_parse(argv = None, params: Optional[GptParams] = None): - if params is None: - params = GptParams() - - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-s", "--seed", type=int, default=-1, help="RNG seed (use random seed for <= 0)",dest="seed") - parser.add_argument("-t", "--threads", type=int, default=min(4, os.cpu_count() or 1), help="number of threads to use during computation",dest="n_threads") - parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt") - parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file") - parser.add_argument("-c", "--ctx_size", type=int, default=512, help="size of the prompt context",dest="n_ctx") - parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16") - parser.add_argument("--top_p", type=float, default=0.95, help="top-p samplin",dest="top_p") - parser.add_argument("--top_k", type=int, default=40, help="top-k sampling",dest="top_k") - parser.add_argument("--temp", type=float, default=0.80, help="temperature",dest="temp") - parser.add_argument("--n_predict", type=int, default=128, help="number of model parts",dest="n_predict") - parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for penalize ",dest="repeat_last_n") - parser.add_argument("--repeat_penalty", type=float, default=1.10, help="penalize repeat sequence of tokens",dest="repeat_penalty") - parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch") - parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the initial prompt",dest="n_keep") - parser.add_argument("-m", "--model", type=str, default="./models/llama-7B/ggml-model.bin", help="model path",dest="model") - parser.add_argument( - "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive" + instruct_inp_prefix: str = "\n\n### Instruction:\n\n" + instruct_inp_suffix: str = "\n\n### Response:\n\n" + + +def gpt_params_parse(argv=None): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter ) - parser.add_argument("--embedding", action="store_true", help="", dest="embedding") parser.add_argument( - "--interactive-start", + "-s", + "--seed", + type=int, + default=-1, + help="RNG seed (use random seed for <= 0)", + dest="seed", + ) + parser.add_argument( + "-t", + "--threads", + type=int, + default=min(4, os.cpu_count() or 1), + help="number of threads to use during computation", + dest="n_threads", + ) + parser.add_argument( + "-n", + "--n_predict", + type=int, + default=128, + help="number of tokens to predict (-1 = infinity)", + dest="n_predict", + ) + parser.add_argument( + "--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts" + ) + parser.add_argument( + "-c", + "--ctx_size", + type=int, + default=512, + help="size of the prompt context", + dest="n_ctx", + ) + parser.add_argument( + "-b", + "--batch_size", + type=int, + default=8, + help="batch size for prompt processing", + dest="n_batch", + ) + parser.add_argument( + "--keep", + type=int, + default=0, + help="number of tokens to keep from the initial prompt", + 
dest="n_keep", + ) + + parser.add_argument( + "-l", + "--logit-bias", + type=str, + action="append", + help="--logit-bias TOKEN_ID(+/-)BIAS", + dest="logit_bias_str", + ) + parser.add_argument( + "--ignore-eos", + action="store_true", + help="ignore end of stream token and continue generating", + dest="ignore_eos", + ) + parser.add_argument( + "--top_k", type=int, default=40, help="top-k sampling", dest="top_k" + ) + parser.add_argument( + "--top_p", type=float, default=0.95, help="top-p samplin", dest="top_p" + ) + parser.add_argument( + "--tfs", + type=float, + default=1.0, + help="tail free sampling, parameter z (1.0 = disabled)", + dest="tfs_z", + ) + parser.add_argument( + "--temp", type=float, default=0.80, help="temperature", dest="temp" + ) + parser.add_argument( + "--repeat_penalty", + type=float, + default=1.10, + help="penalize repeat sequence of tokens", + dest="repeat_penalty", + ) + parser.add_argument( + "--repeat_last_n", + type=int, + default=64, + help="last n tokens to consider for penalize ", + dest="repeat_last_n", + ) + parser.add_argument( + "--frequency_penalty", + type=float, + default=0.0, + help="repeat alpha frequency penalty (0.0 = disabled)", + dest="tfs_z", + ) + parser.add_argument( + "--presence_penalty", + type=float, + default=0.0, + help="repeat alpha presence penalty (0.0 = disabled)", + dest="presence_penalty", + ) + parser.add_argument( + "--mirostat", + type=float, + default=1.0, + help="use Mirostat sampling.", + dest="mirostat", + ) + parser.add_argument( + "--mirostat_ent", + type=float, + default=5.0, + help="Mirostat target entropy, parameter tau represents the average surprise value", + dest="mirostat_tau", + ) + parser.add_argument( + "--mirostat_lr", + type=float, + default=0.1, + help="Mirostat learning rate, parameter eta", + dest="mirostat_eta", + ) + + parser.add_argument( + "-m", + "--model", + type=str, + default="./models/llama-7B/ggml-model.bin", + help="model path", + dest="model", + ) + parser.add_argument( + "-p", "--prompt", type=str, default=None, help="initial prompt", dest="prompt" + ) + parser.add_argument( + "-f", + "--file", + type=str, + default=None, + help="file containing initial prompt to load", + dest="file", + ) + parser.add_argument( + "--session", + type=str, + default=None, + help="file to cache model state in (may be large!)", + dest="path_session", + ) + parser.add_argument( + "--in-prefix", + type=str, + default="", + help="string to prefix user inputs with", + dest="input_prefix", + ) + parser.add_argument( + "--in-suffix", type=str, default="", help="append to input", dest="input_suffix" + ) + parser.add_argument( + "-r", + "--reverse-prompt", + type=str, + action="append", + help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).", + dest="antiprompt", + ) + + parser.add_argument( + "--lora", + type=str, + default="", + help="apply LoRA adapter (implies --no-mmap)", + dest="lora_adapter", + ) + parser.add_argument( + "--lora-base", + type=str, + default="", + help="optional model to use as a base for the layers modified by the LoRA adapter", + dest="lora_base", + ) + + parser.add_argument( + "--memory_f32", + action="store_false", + help="use f32 instead of f16 for memory key+value", + dest="memory_f16", + ) + parser.add_argument( + "--random-prompt", + action="store_true", + help="start with a randomized prompt.", + dest="random_prompt", + ) + parser.add_argument( + "--color", + action="store_true", + help="colorise output to distinguish prompt and user input from 
generations", + dest="use_color", + ) + parser.add_argument( + "-i", + "--interactive", action="store_true", help="run in interactive mode", - dest="interactive" + dest="interactive", ) + + parser.add_argument("--embedding", action="store_true", help="", dest="embedding") parser.add_argument( "--interactive-first", action="store_true", help="run in interactive mode and wait for input right away", - dest="interactive_start" + dest="interactive_start", ) + parser.add_argument( "-ins", "--instruct", action="store_true", help="run in instruction mode (use with Alpaca or Vicuna models)", - dest="instruct" + dest="instruct", ) parser.add_argument( - "--color", + "--no-penalize-nl", + action="store_false", + help="do not penalize newline token", + dest="penalize_nl", + ) + parser.add_argument( + "--perplexity", action="store_true", - help="colorise output to distinguish prompt and user input from generations", - dest="use_color" + help="compute perplexity over the prompt", + dest="perplexity", ) - parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock") - parser.add_argument("--mtest", action="store_true",help="compute maximum memory usage",dest="mem_test") parser.add_argument( - "-r", - "--reverse-prompt", + "--no-mmap", + action="store_false", + help="do not memory-map model (slower load but may reduce pageouts if not using mlock)", + dest="use_mmap", + ) + parser.add_argument( + "--mlock", + action="store_true", + help="force system to keep model in RAM rather than swapping or compressing", + dest="use_mlock", + ) + parser.add_argument( + "--mtest", + action="store_true", + help="compute maximum memory usage", + dest="mem_test", + ) + parser.add_argument( + "--verbose-prompt", + action="store_true", + help="print prompt before generation", + dest="verbose_prompt", + ) + + # Custom args + parser.add_argument( + "--fix-prefix", type=str, - action='append', - help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).", - dest="antiprompt" - ) - parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity") - parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos") - parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts") - parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt") - parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix") - parser.add_argument("--fix-prefix", type=str, default="", help="append to input when generated n_predict tokens", dest="fix_prefix") - parser.add_argument("--out-postfix", type=str, default="", help="append to input", dest="output_postfix") - parser.add_argument("--input-noecho", action="store_false", help="dont output the input", dest="input_echo") + default="", + help="append to input when generated n_predict tokens", + dest="fix_prefix", + ) + parser.add_argument( + "--input-noecho", + action="store_false", + help="dont output the input", + dest="input_echo", + ) + + parser.add_argument( + "--interactive-start", + action="store_true", + help="run in interactive mode", + dest="interactive", + ) + args = parser.parse_args(argv) - return args + + logit_bias_str = args.logit_bias_str + delattr(args, "logit_bias_str") + params = 
GptParams(**vars(args)) + + if params.lora_adapter: + params.use_mmap = False + + if logit_bias_str != None: + for i in logit_bias_str: + if m := re.match(r"(\d+)([-+]\d+)", i): + params.logit_bias[int(m.group(1))] = float(m.group(2)) + + return params + def gpt_random_prompt(rng): return [ @@ -144,5 +400,6 @@ def gpt_random_prompt(rng): "They", ][rng % 10] + if __name__ == "__main__": - print(GptParams(gpt_params_parse())) + print(gpt_params_parse()) diff --git a/examples/low_level_api/low_level_api_chat_cpp.py b/examples/low_level_api/low_level_api_chat_cpp.py index f4d77d5c6..39081be17 100644 --- a/examples/low_level_api/low_level_api_chat_cpp.py +++ b/examples/low_level_api/low_level_api_chat_cpp.py @@ -10,368 +10,734 @@ You should also still be feeding the model with a "primer" prompt that shows it the expected format. """ + +import ctypes import sys from time import time -from os import cpu_count +from os import cpu_count, path import llama_cpp from common import GptParams, gpt_params_parse, gpt_random_prompt +import util -ANSI_COLOR_RESET = "\x1b[0m" -ANSI_COLOR_YELLOW = "\x1b[33m" -ANSI_BOLD = "\x1b[1m" -ANSI_COLOR_GREEN = "\x1b[32m" - -CONSOLE_COLOR_DEFAULT = ANSI_COLOR_RESET -CONSOLE_COLOR_PROMPT = ANSI_COLOR_YELLOW -CONSOLE_COLOR_USER_INPUT = ANSI_BOLD + ANSI_COLOR_GREEN # A LLaMA interactive session class LLaMAInteract: - def __init__(self, params: GptParams) -> None: - # input args - self.params = params - - if (self.params.perplexity): - raise NotImplementedError("""************ + def __init__(self, params: GptParams) -> None: + # input args + self.params = params + if self.params.path_session is None: + self.params.path_session = "" + if self.params.antiprompt is None: + self.params.antiprompt = "" + + if self.params.perplexity: + raise NotImplementedError( + """************ please use the 'perplexity' tool for perplexity calculations -************""") +************""" + ) - if (self.params.embedding): - raise NotImplementedError("""************ + if self.params.embedding: + raise NotImplementedError( + """************ please use the 'embedding' tool for embedding calculations -************""") +************""" + ) - if (self.params.n_ctx > 2048): - print(f"""warning: model does not support \ + if self.params.n_ctx > 2048: + print( + f"""warning: model does not support \ context sizes greater than 2048 tokens ({self.params.n_ctx} \ -specified) expect poor results""", file=sys.stderr) - - if (self.params.seed <= 0): - self.params.seed = int(time()) - - print(f"seed = {self.params.seed}", file=sys.stderr) - - if (self.params.random_prompt): - self.params.prompt = gpt_random_prompt(self.params.seed) - - # runtime args - self.input_consumed = 0 - self.n_past = 0 - self.first_antiprompt = [] - self.remaining_tokens = self.params.n_predict - self.output_echo = self.params.input_echo - - # model load - self.lparams = llama_cpp.llama_context_default_params() - self.lparams.n_ctx = self.params.n_ctx - self.lparams.n_parts = self.params.n_parts - self.lparams.seed = self.params.seed - self.lparams.memory_f16 = self.params.memory_f16 - self.lparams.use_mlock = self.params.use_mlock - - self.ctx = llama_cpp.llama_init_from_file(self.params.model.encode("utf8"), self.lparams) - if (not self.ctx): - raise RuntimeError(f"error: failed to load model '{self.params.model}'") - - print(file=sys.stderr) - print(f"system_info: n_threads = {self.params.n_threads} / {cpu_count()} \ -| {llama_cpp.llama_print_system_info().decode('utf8')}", file=sys.stderr) - - # determine the required inference 
memory per token: - if (self.params.mem_test): - tmp = [0, 1, 2, 3] - llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) - llama_cpp.llama_print_timings(self.ctx) - self.exit() - return - - # create internal context - self.n_ctx = llama_cpp.llama_n_ctx(self.ctx) - - # Add a space in front of the first character to match OG llama tokenizer behavior - self.params.prompt = " " + self.params.prompt - - # Load prompt file - if (self.params.file): - with open(self.params.file) as f: - self.params.prompt = f.read() - - # tokenize the prompt - self.embd = [] - self.embd_inp = self._tokenize(self.params.prompt) - - if (len(self.embd_inp) > self.params.n_ctx - 4): - raise RuntimeError(f"error: prompt is too long ({len(self.embd_inp)} tokens, max {self.params.n_ctx - 4})") - - # number of tokens to keep when resetting context - if (self.params.n_keep < 0 or self.params.n_keep > len(self.embd_inp) or self.params.instruct): - self.params.n_keep = len(self.embd_inp) - - self.inp_prefix = self._tokenize(self.params.instruct_inp_prefix) - self.inp_suffix = self._tokenize(self.params.instruct_inp_suffix, False) - - # in instruct mode, we inject a prefix and a suffix to each input by the user - if (self.params.instruct): - self.params.interactive_start = True - self.first_antiprompt.append(self._tokenize(self.params.instruct_inp_prefix.strip(), False)) - - # enable interactive mode if reverse prompt or interactive start is specified - if (len(self.params.antiprompt) != 0 or self.params.interactive_start): - self.params.interactive = True - - # determine newline token - self.llama_token_newline = self._tokenize("\n", False) - - if (self.params.verbose_prompt): - print(f""" +specified) expect poor results""", + file=sys.stderr, + ) + + if self.params.seed <= 0: + self.params.seed = int(time()) + + print(f"seed = {self.params.seed}", file=sys.stderr) + + if self.params.random_prompt: + self.params.prompt = gpt_random_prompt(self.params.seed) + + # runtime args + self.input_consumed = 0 + self.n_past = 0 + self.n_session_consumed = 0 + self.first_antiprompt = [] + self.remaining_tokens = self.params.n_predict + self.output_echo = self.params.input_echo + self.multibyte_fix = [] + + # model load + self.lparams = llama_cpp.llama_model_default_params() + self.lparams.n_ctx = self.params.n_ctx + self.lparams.n_parts = self.params.n_parts + self.lparams.seed = self.params.seed + self.lparams.memory_f16 = self.params.memory_f16 + self.lparams.use_mlock = self.params.use_mlock + self.lparams.use_mmap = self.params.use_mmap + + self.model = llama_cpp.llama_load_model_from_file( + self.params.model.encode("utf8"), self.lparams + ) + + # Context Params. 
+ self.cparams = llama_cpp.llama_context_default_params() + + self.ctx = llama_cpp.llama_new_context_with_model(self.model, self.cparams) + if not self.ctx: + raise RuntimeError(f"error: failed to load model '{self.params.model}'") + + if self.params.ignore_eos: + self.params.logit_bias[llama_cpp.llama_token_eos()] = -float("inf") + + if len(self.params.lora_adapter) > 0: + if ( + llama_cpp.llama_apply_lora_from_file( + self.ctx, + self.params.lora_adapter.encode("utf8"), + ( + self.params.lora_base.encode("utf8") + if len(self.params.lora_base) > 0 + else None + ), + self.params.n_threads, + ) + != 0 + ): + print("error: failed to apply lora adapter") + return + + print(file=sys.stderr) + print( + f"system_info: n_threads = {self.params.n_threads} / {cpu_count()} \ +| {llama_cpp.llama_print_system_info().decode('utf8')}", + file=sys.stderr, + ) + + # determine the required inference memory per token: + if self.params.mem_test: + tmp = [0, 1, 2, 3] + llama_cpp.llama_eval( + self.ctx, + (llama_cpp.c_int * len(tmp))(*tmp), + len(tmp), + 0, + self.n_threads, + ) + llama_cpp.llama_print_timings(self.ctx) + self.exit() + return + + # create internal context + self.n_ctx = llama_cpp.llama_n_ctx(self.ctx) + + # Add a space in front of the first character to match OG llama tokenizer behavior + self.params.prompt = " " + self.params.prompt + + # Load prompt file + if self.params.file: + with open(self.params.file) as f: + self.params.prompt = f.read() + + self.session_tokens: list[llama_cpp.llama_token] = [] + if len(self.params.path_session) > 0: + print( + f"attempting to load saved session from '{self.params.path_session}'", + file=sys.stderr, + ) + + if path.exists(self.params.path_session): + _session_tokens = (llama_cpp.llama_token * (self.params.n_ctx))() + _n_token_count_out = llama_cpp.c_size_t() + if ( + llama_cpp.llama_load_session_file( + self.ctx, + self.params.path_session.encode("utf8"), + _session_tokens, + self.params.n_ctx, + ctypes.byref(_n_token_count_out), + ) + != 1 + ): + print( + f"error: failed to load session file '{self.params.path_session}'", + file=sys.stderr, + ) + return + _n_token_count_out = _n_token_count_out.value + self.session_tokens = _session_tokens[:_n_token_count_out] + print( + f"loaded a session with prompt size of {_n_token_count_out} tokens", + file=sys.stderr, + ) + else: + print(f"session file does not exist, will create", file=sys.stderr) + + # tokenize the prompt + self.embd = [] + self.embd_inp = self._tokenize(self.params.prompt) + + if len(self.embd_inp) > self.n_ctx - 4: + raise RuntimeError( + f"error: prompt is too long ({len(self.embd_inp)} tokens, max {self.params.n_ctx - 4})" + ) + + # debug message about similarity of saved session, if applicable + self.n_matching_session_tokens = 0 + if len(self.session_tokens) > 0: + for id in self.session_tokens: + if ( + self.n_matching_session_tokens >= len(self.embd_inp) + or id != self.embd_inp[self.n_matching_session_tokens] + ): + break + self.n_matching_session_tokens += 1 + + if self.n_matching_session_tokens >= len(self.embd_inp): + print(f"session file has exact match for prompt!") + elif self.n_matching_session_tokens < (len(self.embd_inp) / 2): + print( + f"warning: session file has low similarity to prompt ({self.n_matching_session_tokens} / {len(self.embd_inp)} tokens); will mostly be reevaluated" + ) + else: + print( + f"session file matches {self.n_matching_session_tokens} / {len(self.embd_inp)} tokens of prompt" + ) + + self.need_to_save_session = len( + self.params.path_session + ) > 0 
and self.n_matching_session_tokens < (len(self.embd_inp) * 3 / 4) + + # number of tokens to keep when resetting context + if ( + self.params.n_keep < 0 + or self.params.n_keep > len(self.embd_inp) + or self.params.instruct + ): + self.params.n_keep = len(self.embd_inp) + + self.inp_prefix = self._tokenize(self.params.instruct_inp_prefix) + self.inp_suffix = self._tokenize(self.params.instruct_inp_suffix, False) + + # in instruct mode, we inject a prefix and a suffix to each input by the user + self.antiecho = None + if self.params.instruct: + self.params.interactive_start = True + _ptn = self._tokenize(self.params.instruct_inp_prefix.strip(), False) + self.first_antiprompt.append(_ptn) + self.antiecho = util.IterSearch(_ptn) + + # enable interactive mode if reverse prompt or interactive start is specified + if len(self.params.antiprompt) != 0 or self.params.interactive_start: + self.params.interactive = True + + # determine newline token + self.llama_token_newline = self._tokenize("\n", False) + self.llama_token_eot = self._tokenize(" [end of text]\n", False) + + if self.params.verbose_prompt: + print( + f""" prompt: '{self.params.prompt}' -number of tokens in prompt = {len(self.embd_inp)}""", file=sys.stderr) - - for i in range(len(self.embd_inp)): - print(f"{self.embd_inp[i]} -> '{llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i])}'", file=sys.stderr) - - if (self.params.n_keep > 0): - print("static prompt based on n_keep: '") - for i in range(self.params.n_keep): - print(llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i]), file=sys.stderr) - print("'", file=sys.stderr) - print(file=sys.stderr) - - if (self.params.interactive): - print("interactive mode on.", file=sys.stderr) - - if (len(self.params.antiprompt) > 0): - for antiprompt in self.params.antiprompt: - print(f"Reverse prompt: '{antiprompt}'", file=sys.stderr) - - if len(self.params.input_prefix) > 0: - print(f"Input prefix: '{self.params.input_prefix}'", file=sys.stderr) - - print(f"""sampling: temp = {self.params.temp},\ +number of tokens in prompt = {len(self.embd_inp)}""", + file=sys.stderr, + ) + + for i in range(len(self.embd_inp)): + print( + f"{self.embd_inp[i]} -> '{self.token_to_str(self.embd_inp[i])}'", + file=sys.stderr, + ) + + if self.params.n_keep > 0: + print("static prompt based on n_keep: '") + for i in range(self.params.n_keep): + print(self.token_to_str(self.embd_inp[i]), file=sys.stderr) + print("'", file=sys.stderr) + print(file=sys.stderr) + + if self.params.interactive: + print("interactive mode on.", file=sys.stderr) + + if len(self.params.antiprompt) > 0: + for antiprompt in self.params.antiprompt: + print(f"Reverse prompt: '{antiprompt}'", file=sys.stderr) + + if len(self.params.input_prefix) > 0: + print(f"Input prefix: '{self.params.input_prefix}'", file=sys.stderr) + + print( + f"""sampling: repeat_last_n = {self.params.repeat_last_n},\ +repeat_penalty = {self.params.repeat_penalty},\ +presence_penalty = {self.params.presence_penalty},\ +frequency_penalty = {self.params.frequency_penalty},\ top_k = {self.params.top_k},\ +tfs_z = {self.params.tfs_z},\ top_p = {self.params.top_p},\ -repeat_last_n = {self.params.repeat_last_n},\ -repeat_penalty = {self.params.repeat_penalty} - -generate: n_ctx = {self.n_ctx}, \ -n_batch = {self.params.n_batch}, \ -n_predict = {self.params.n_predict}, \ +typical_p = {self.params.typical_p},\ +temp = {self.params.temp},\ +mirostat = {self.params.mirostat},\ +mirostat_lr = {self.params.mirostat_eta},\ +mirostat_ent = {self.params.mirostat_tau},\ + +generate: 
n_ctx = {self.n_ctx},\ +n_batch = {self.params.n_batch},\ +n_predict = {self.params.n_predict},\ n_keep = {self.params.n_keep} -""", file=sys.stderr) - # determine antiprompt tokens - for i in self.params.antiprompt: - self.first_antiprompt.append(self._tokenize(i, False)) +""", + file=sys.stderr, + ) - self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices + # determine antiprompt tokens + for i in self.params.antiprompt: + self.first_antiprompt.append(self._tokenize(i, False)) - if (params.interactive): - print("""== Running in interactive mode. == + self.last_n_tokens = [0] * self.n_ctx # TODO: deque doesnt support slices + + if params.interactive: + print( + """== Running in interactive mode. == - Press Ctrl+C to interject at any time. - Press Return to return control to LLaMa. - If you want to submit another line, end your input in '\\'. -""", file=sys.stderr) - self.set_color(CONSOLE_COLOR_PROMPT) - - # tokenize a prompt - def _tokenize(self, prompt, bos=True): - _arr = (llama_cpp.llama_token * (len(prompt) + 1))() - _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), _arr, len(_arr), bos) - return _arr[:_n] - - def use_antiprompt(self): - return len(self.first_antiprompt) > 0 - - def set_color(self, c): - if (self.params.use_color): - print(c, end="") - - # generate tokens - def generate(self): - while self.remaining_tokens > 0 or self.params.interactive: - # predict - if len(self.embd) > 0: - # infinite text generation via context swapping - # if we run out of context: - # - take the n_keep first tokens from the original prompt (via n_past) - # - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch - if (self.n_past + len(self.embd) > self.n_ctx): - n_left = self.n_past - self.params.n_keep - self.n_past = self.params.n_keep - - # insert n_left/2 tokens at the start of embd from last_n_tokens - _insert = self.last_n_tokens[ - self.n_ctx - int(n_left/2) - len(self.embd):-len(self.embd) - ] - self.embd = _insert + self.embd - - if (llama_cpp.llama_eval( - self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.params.n_threads - ) != 0): - raise Exception("Failed to llama_eval!") - - self.n_past += len(self.embd) - self.embd = [] - if len(self.embd_inp) <= self.input_consumed: - # out of user input, sample next token - - #TODO: self.params.ignore_eos - - _arr = self.last_n_tokens[-min(self.params.repeat_last_n, self.n_past):] - id = llama_cpp.llama_sample_top_p_top_k( - self.ctx, - (llama_cpp.llama_token * len(_arr))(*_arr), - len(_arr), - self.params.top_k, - self.params.top_p, - self.params.temp, - self.params.repeat_penalty, - ) - self.last_n_tokens.pop(0) - self.last_n_tokens.append(id) - - # replace end of text token with newline token when in interactive mode - if (id == llama_cpp.llama_token_eos() and self.params.interactive and not self.params.instruct): - id = self.llama_token_newline[0] - if (self.use_antiprompt()): - # tokenize and inject first reverse prompt - self.embd_inp += self.first_antiprompt[0] - - # add it to the context - self.embd.append(id) - - # echo this to console - self.output_echo = True - - # decrement remaining sampling budget - self.remaining_tokens -= 1 - else: - # output to console if input echo is on - self.output_echo = self.params.input_echo - - # some user input remains from prompt or interaction, forward it to processing - while len(self.embd_inp) > self.input_consumed: - self.embd.append(self.embd_inp[self.input_consumed]) - 
self.last_n_tokens.pop(0) - self.last_n_tokens.append(self.embd_inp[self.input_consumed]) - self.input_consumed += 1 - if len(self.embd) >= self.params.n_batch: - break - - # display tokens - if self.output_echo: - for id in self.embd: - yield id - - # reset color to default if we there is no pending user input - if (self.params.input_echo and len(self.embd_inp) == self.input_consumed): - self.set_color(CONSOLE_COLOR_DEFAULT) - - if (self.params.interactive and len(self.embd_inp) <= self.input_consumed): - # if antiprompt is present, stop - if (self.use_antiprompt()): - if True in [ - i == self.last_n_tokens[-len(i):] - for i in self.first_antiprompt - ]: - break - - # if we are using instruction mode, and we have processed the initial prompt - if (self.n_past > 0 and self.params.interactive_start): - break - - # end of text token - if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(): - if (not self.params.instruct): - for i in " [end of text]\n": - yield i - break - - # respect n_predict even if antiprompt is present - if (self.params.interactive and self.remaining_tokens <= 0 and self.params.n_predict != -1): - # If we arent in instruction mode, fix the current generation by appending the antiprompt. - # Makes it so if chat ends prematurely you dont append the AI's text etc. - if not self.params.instruct: - self.embd_inp += self.first_antiprompt[0] - self.n_remain = self.params.n_predict - break - - self.params.interactive_start = False - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.exit() - - def exit(self): - llama_cpp.llama_free(self.ctx) - self.set_color(CONSOLE_COLOR_DEFAULT) - - # return past text - def past(self): - for id in self.last_n_tokens[-self.n_past:]: - yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") - - # write input - def input(self, prompt: str): - if (self.params.instruct and self.last_n_tokens[-len(self.inp_prefix):] != self.inp_prefix): - self.embd_inp += self.inp_prefix - self.embd_inp += self._tokenize(prompt) - if (self.params.instruct): - self.embd_inp += self.inp_suffix - - # write output - def output(self): - self.remaining_tokens = self.params.n_predict - for id in self.generate(): - yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") - - # read user input - def read_input(self): - out = "" - while (t := input()).endswith("\\"): - out += t[:-1] + "\n" - return out + t + "\n" - - # interactive mode - def interact(self): - for i in self.output(): - print(i,end="",flush=True) - self.params.input_echo = False - - while self.params.interactive: - self.set_color(CONSOLE_COLOR_USER_INPUT) - if (self.params.instruct): - print('\n> ', end="") - self.input(self.read_input()) - else: - print(self.params.input_prefix, end="") - self.input(f"{self.params.input_prefix}{self.read_input()}{self.params.output_postfix}") - print(self.params.output_postfix,end="") - self.set_color(CONSOLE_COLOR_DEFAULT) - - try: - for i in self.output(): - print(i,end="",flush=True) - except KeyboardInterrupt: - self.set_color(CONSOLE_COLOR_DEFAULT) - if not self.params.instruct: - print(self.params.fix_prefix,end="") - self.input(self.params.fix_prefix) +""", + file=sys.stderr, + ) + self.set_color(util.CONSOLE_COLOR_PROMPT) + + # tokenize a prompt + def _tokenize(self, prompt, bos=True): + _arr = (llama_cpp.llama_token * ((len(prompt) + 1) * 4))() + _n = llama_cpp.llama_tokenize( + self.model, + prompt.encode("utf8", errors="ignore"), + len(prompt), + _arr, + len(_arr), + bos, + False, + ) + return 
_arr[:_n] + + def set_color(self, c): + if self.params.use_color: + print(c, end="") + + def use_antiprompt(self): + return len(self.first_antiprompt) > 0 + + # generate tokens + def generate(self): + while ( + self.remaining_tokens > 0 + or self.params.interactive + or self.params.n_predict == -1 + ): + # predict + if len(self.embd) > 0: + # infinite text generation via context swapping + # if we run out of context: + # - take the n_keep first tokens from the original prompt (via n_past) + # - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch + if self.n_past + len(self.embd) > self.n_ctx: + n_left = self.n_past - self.params.n_keep + self.n_past = self.params.n_keep + + # insert n_left/2 tokens at the start of embd from last_n_tokens + _insert = self.last_n_tokens[ + self.n_ctx - int(n_left / 2) - len(self.embd) : -len(self.embd) + ] + self.embd = _insert + self.embd + self.params.path_session = "" + + # try to reuse a matching prefix from the loaded session instead of re-eval (via n_past) + if self.n_session_consumed < len(self.session_tokens): + for i in range(len(self.embd)): + if self.embd[i] != self.session_tokens[self.n_session_consumed]: + self.session_tokens = self.session_tokens[ + : self.n_session_consumed + ] + break + + self.n_past += 1 + self.n_session_consumed += 1 + + if self.n_session_consumed >= len(self.session_tokens): + i += 1 + break + + if i > 0: + self.embd = self.embd[i:] + + # evaluate tokens in batches + # embd is typically prepared beforehand to fit within a batch, but not always + # TODO BUG: The batching code causes nonsensical generation + """for i in range(0, len(self.embd), self.params.n_batch): + n_eval = self.params.n_batch + _arr = (llama_cpp.llama_token * n_eval)(*self.embd[i:i + n_eval]) + if llama_cpp.llama_eval(self.ctx, _arr, n_eval, self.n_past, self.params.n_threads) != 0: + print(f"failed to eval") + return + + self.n_past += n_eval""" + + if ( + llama_cpp.llama_eval( + self.ctx, + (llama_cpp.llama_token * len(self.embd))(*self.embd), + len(self.embd), + self.n_past, + ) + != 0 + ): + raise Exception("Failed to llama_eval!") + + if len(self.embd) > 0 and len(self.params.path_session) > 0: + self.session_tokens.extend(self.embd) + self.n_session_consumed = len(self.session_tokens) + + self.n_past += len(self.embd) + self.embd = [] + if len(self.embd_inp) <= self.input_consumed: # && !is_interacting + # out of user input, sample next token + top_k = ( + llama_cpp.llama_n_vocab(self.ctx) + if self.params.top_k <= 0 + else self.params.top_k + ) + repeat_last_n = ( + self.n_ctx + if self.params.repeat_last_n < 0 + else self.params.repeat_last_n + ) + + # optionally save the session on first sample (for faster prompt loading next time) + if len(self.params.path_session) > 0 and self.need_to_save_session: + self.need_to_save_session = False + llama_cpp.llama_save_session_file( + self.ctx, + self.params.path_session.encode("utf8"), + (llama_cpp.llama_token * len(self.session_tokens))( + *self.session_tokens + ), + len(self.session_tokens), + ) + + id = 0 + + logits = llama_cpp.llama_get_logits(self.ctx) + n_vocab = llama_cpp.llama_n_vocab(self.model) + + # Apply params.logit_bias map + for key, value in self.params.logit_bias.items(): + logits[key] += value + + _arr = (llama_cpp.llama_token_data * n_vocab)( + *[ + llama_cpp.llama_token_data(token_id, logits[token_id], 0.0) + for token_id in range(n_vocab) + ] + ) + candidates_p = llama_cpp.ctypes.pointer( + llama_cpp.llama_token_data_array(_arr, len(_arr), False) + ) + + 
# Apply penalties + nl_logit = logits[llama_cpp.llama_token_nl(self.ctx)] + last_n_repeat = min(len(self.last_n_tokens), repeat_last_n, self.n_ctx) + + _arr = (llama_cpp.llama_token * last_n_repeat)( + *self.last_n_tokens[len(self.last_n_tokens) - last_n_repeat :] + ) + llama_cpp.llama_sample_repetition_penalties( + ctx=self.ctx, + candidates=candidates_p, + last_tokens_data=_arr, + penalty_last_n=last_n_repeat, + penalty_repeat=llama_cpp.c_float(self.params.repeat_penalty), + penalty_freq=llama_cpp.c_float(self.params.frequency_penalty), + penalty_present=llama_cpp.c_float(self.params.presence_penalty), + ) + + # NOT PRESENT IN CURRENT VERSION ? + # llama_cpp.llama_sample_frequency_and_presence_penalti(self.ctx, candidates_p, + # _arr, + # last_n_repeat, llama_cpp.c_float(self.params.frequency_penalty), llama_cpp.c_float(self.params.presence_penalty)) + + if not self.params.penalize_nl: + logits[llama_cpp.llama_token_nl()] = nl_logit + + if self.params.temp <= 0: + # Greedy sampling + id = llama_cpp.llama_sample_token_greedy(self.ctx, candidates_p) + else: + if self.params.mirostat == 1: + mirostat_mu = 2.0 * self.params.mirostat_tau + mirostat_m = 100 + llama_cpp.llama_sample_temperature( + self.ctx, candidates_p, llama_cpp.c_float(self.params.temp) + ) + id = llama_cpp.llama_sample_token_mirostat( + self.ctx, + candidates_p, + llama_cpp.c_float(self.params.mirostat_tau), + llama_cpp.c_float(self.params.mirostat_eta), + llama_cpp.c_int(mirostat_m), + llama_cpp.c_float(mirostat_mu), + ) + elif self.params.mirostat == 2: + mirostat_mu = 2.0 * self.params.mirostat_tau + llama_cpp.llama_sample_temperature( + self.ctx, candidates_p, llama_cpp.c_float(self.params.temp) + ) + id = llama_cpp.llama_sample_token_mirostat_v2( + self.ctx, + candidates_p, + llama_cpp.c_float(self.params.mirostat_tau), + llama_cpp.c_float(self.params.mirostat_eta), + llama_cpp.c_float(mirostat_mu), + ) + else: + # Temperature sampling + llama_cpp.llama_sample_top_k( + self.ctx, + candidates_p, + top_k, + min_keep=llama_cpp.c_size_t(1), + ) + llama_cpp.llama_sample_tail_free( + self.ctx, + candidates_p, + llama_cpp.c_float(self.params.tfs_z), + min_keep=llama_cpp.c_size_t(1), + ) + llama_cpp.llama_sample_typical( + self.ctx, + candidates_p, + llama_cpp.c_float(self.params.typical_p), + min_keep=llama_cpp.c_size_t(1), + ) + llama_cpp.llama_sample_top_p( + self.ctx, + candidates_p, + llama_cpp.c_float(self.params.top_p), + min_keep=llama_cpp.c_size_t(1), + ) + llama_cpp.llama_sample_temperature( + self.ctx, candidates_p, llama_cpp.c_float(self.params.temp) + ) + id = llama_cpp.llama_sample_token(self.ctx, candidates_p) + # print("`{}`".format(candidates_p.size)) + + self.last_n_tokens.pop(0) + self.last_n_tokens.append(id) + + # replace end of text token with newline token when in interactive mode + if ( + id == llama_cpp.llama_token_eos(self.ctx) + and self.params.interactive + and not self.params.instruct + ): + id = self.llama_token_newline[0] + self.embd.append(id) + if self.use_antiprompt(): + # tokenize and inject first reverse prompt + self.embd_inp += self.first_antiprompt[0] + for id in self.first_antiprompt[0]: + self.embd.append(id) + else: + # add it to the context + self.embd.append(id) + + # echo this to console + self.output_echo = True + + # decrement remaining sampling budget + self.remaining_tokens -= 1 + else: + # output to console if input echo is on + self.output_echo = self.params.input_echo + + # some user input remains from prompt or interaction, forward it to processing + while len(self.embd_inp) 
> self.input_consumed: + self.embd.append(self.embd_inp[self.input_consumed]) + self.last_n_tokens.pop(0) + self.last_n_tokens.append(self.embd_inp[self.input_consumed]) + self.input_consumed += 1 + if len(self.embd) >= self.params.n_batch: + break + + # display tokens + if self.output_echo: + for id in self.embd: + if self.antiecho != None: + for r in self.antiecho(id): + yield r + else: + yield id + + # reset color to default if we there is no pending user input + if self.params.input_echo and len(self.embd_inp) == self.input_consumed: + self.set_color(util.CONSOLE_COLOR_DEFAULT) + + if self.params.interactive and len(self.embd_inp) <= self.input_consumed: + # if antiprompt is present, stop + if self.use_antiprompt(): + if True in [ + i == self.last_n_tokens[-len(i) :] + for i in self.first_antiprompt + ]: + break + + # if we are using instruction mode, and we have processed the initial prompt + if self.params.interactive_start: + break + + # end of text token + if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos( + self.ctx + ): + if not self.params.instruct: + for i in self.llama_token_eot: + yield i + break + + # respect n_predict even if antiprompt is present + if ( + self.params.interactive + and self.remaining_tokens <= 0 + and self.params.n_predict != -1 + ): + # If we arent in instruction mode, fix the current generation by appending the antiprompt. + # Makes it so if chat ends prematurely you dont append the AI's text etc. + if not self.params.instruct: + self.embd_inp += self.first_antiprompt[0] + self.n_remain = self.params.n_predict + break + + self.params.interactive_start = False + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.exit() + + def exit(self): + llama_cpp.llama_free(self.ctx) + self.set_color(util.CONSOLE_COLOR_DEFAULT) + + def token_to_str(self, token_id: int) -> bytes: + size = 32 + buffer = (ctypes.c_char * size)() + n = llama_cpp.llama_token_to_piece( + self.model, llama_cpp.llama_token(token_id), buffer, size + ) + assert n <= size + return bytes(buffer[:n]) + + # return past text + def past(self): + for id in self.last_n_tokens[-self.n_past :]: + yield self.token_to_str(id).decode("utf8", errors="ignore") + + # write input + def input(self, prompt: str): + if ( + self.params.instruct + and self.last_n_tokens[-len(self.inp_prefix) :] != self.inp_prefix + ): + self.embd_inp += self.inp_prefix + self.embd_inp += self._tokenize(prompt) + if self.params.instruct: + self.embd_inp += self.inp_suffix + + # write output + def output(self): + self.remaining_tokens = self.params.n_predict + for id in self.generate(): + cur_char = self.token_to_str(id) + + # Add remainder of missing bytes + if None in self.multibyte_fix: + self.multibyte_fix[self.multibyte_fix.index(None)] = cur_char + + # Return completed utf char + if len(self.multibyte_fix) > 0 and not None in self.multibyte_fix: + yield (b"".join(self.multibyte_fix)).decode("utf8") + self.multibyte_fix = [] + continue + + # Contains multi-byte UTF8 + for num, pattern in [(2, 192), (3, 224), (4, 240)]: + # Bitwise AND check + if pattern & int.from_bytes(cur_char, "little") == pattern: + self.multibyte_fix = [cur_char] + ([None] * (num - 1)) + + # Stop incomplete bytes from passing + if len(self.multibyte_fix) > 0: + continue + + yield cur_char.decode("utf8") + + # read user input + def read_input(self): + out = "" + while (t := input()).endswith("\\"): + out += t[:-1] + "\n" + return out + t + "\n" + + # interactive mode + def interact(self): + for i in 
self.output(): + print(i, end="", flush=True) + self.params.input_echo = False + + # Using string instead of tokens to check for antiprompt, + # It is more reliable than tokens for interactive mode. + generated_str = "" + while self.params.interactive: + self.set_color(util.CONSOLE_COLOR_USER_INPUT) + if self.params.instruct: + print("\n> ", end="") + self.input(self.read_input()) + else: + print(self.params.input_prefix, end="") + self.input( + f"{self.params.input_prefix}{self.read_input()}{self.params.input_suffix}" + ) + print(self.params.input_suffix, end="") + self.set_color(util.CONSOLE_COLOR_DEFAULT) + + try: + for i in self.output(): + print(i, end="", flush=True) + generated_str += i + for ap in self.params.antiprompt: + if generated_str.endswith(ap): + raise KeyboardInterrupt + except KeyboardInterrupt: + self.set_color(util.CONSOLE_COLOR_DEFAULT) + if not self.params.instruct: + print(self.params.fix_prefix, end="") + self.input(self.params.fix_prefix) + if __name__ == "__main__": - from datetime import datetime + from datetime import datetime - USER_NAME="User" - AI_NAME="ChatLLaMa" + USER_NAME = "User" + AI_NAME = "ChatLLaMa" - time_now = datetime.now() - prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. + time_now = datetime.now() + prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. {AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}’s requests immediately and with details and precision. -There are no annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other. +Transcript below contains only the recorded dialog between two, without any annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other. The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. The transcript only includes text, it does not include markup like HTML and Markdown. @@ -385,9 +751,11 @@ def interact(self): {AI_NAME}: A cat is a domestic species of small carnivorous mammal. It is the only domesticated species in the family Felidae. {USER_NAME}: Name a color. 
{AI_NAME}: Blue -{USER_NAME}:""" - args = gpt_params_parse() - params = GptParams(**vars(args)) +{USER_NAME}: """ + + params = gpt_params_parse() + if params.prompt is None and params.file is None: + params.prompt = prompt - with LLaMAInteract(params) as m: - m.interact() + with LLaMAInteract(params) as m: + m.interact() diff --git a/examples/low_level_api/low_level_api_llama_cpp.py b/examples/low_level_api/low_level_api_llama_cpp.py index 2a639aad5..ba3545771 100644 --- a/examples/low_level_api/low_level_api_llama_cpp.py +++ b/examples/low_level_api/low_level_api_llama_cpp.py @@ -1,26 +1,41 @@ -import llama_cpp - +import ctypes +import os import multiprocessing import llama_cpp +llama_cpp.llama_backend_init(numa=False) + N_THREADS = multiprocessing.cpu_count() +MODEL_PATH = os.environ.get("MODEL", "../models/7B/ggml-model.bin") prompt = b"\n\n### Instruction:\nWhat is the capital of France?\n\n### Response:\n" -lparams = llama_cpp.llama_context_default_params() -ctx = llama_cpp.llama_init_from_file(b"models/ggml-alpaca-7b-q4.bin", lparams) +lparams = llama_cpp.llama_model_default_params() +cparams = llama_cpp.llama_context_default_params() +model = llama_cpp.llama_load_model_from_file(MODEL_PATH.encode("utf-8"), lparams) +ctx = llama_cpp.llama_new_context_with_model(model, cparams) # determine the required inference memory per token: tmp = [0, 1, 2, 3] -llama_cpp.llama_eval(ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, N_THREADS) +llama_cpp.llama_eval( + ctx=ctx, tokens=(llama_cpp.c_int * len(tmp))(*tmp), n_tokens=len(tmp), n_past=0 +) # Deprecated n_past = 0 prompt = b" " + prompt embd_inp = (llama_cpp.llama_token * (len(prompt) + 1))() -n_of_tok = llama_cpp.llama_tokenize(ctx, prompt, embd_inp, len(embd_inp), True) +n_of_tok = llama_cpp.llama_tokenize( + model=model, + text=bytes(str(prompt), "utf-8"), + text_len=len(embd_inp), + tokens=embd_inp, + n_max_tokens=len(embd_inp), + add_bos=False, + special=False, +) embd_inp = embd_inp[:n_of_tok] n_ctx = llama_cpp.llama_n_ctx(ctx) @@ -37,25 +52,52 @@ last_n_size = 64 last_n_tokens_data = [0] * last_n_size n_batch = 24 +last_n_repeat = 64 +repeat_penalty = 1 +frequency_penalty = 0.0 +presence_penalty = 0.0 while remaining_tokens > 0: if len(embd) > 0: llama_cpp.llama_eval( - ctx, (llama_cpp.c_int * len(embd))(*embd), len(embd), n_past, N_THREADS - ) + ctx=ctx, + tokens=(llama_cpp.c_int * len(embd))(*embd), + n_tokens=len(embd), + n_past=n_past, + ) # Deprecated n_past += len(embd) embd = [] if len(embd_inp) <= input_consumed: - id = llama_cpp.llama_sample_top_p_top_k( + logits = llama_cpp.llama_get_logits(ctx) + n_vocab = llama_cpp.llama_n_vocab(model) + + _arr = (llama_cpp.llama_token_data * n_vocab)( + *[ + llama_cpp.llama_token_data(token_id, logits[token_id], 0.0) + for token_id in range(n_vocab) + ] + ) + candidates_p = llama_cpp.ctypes.pointer( + llama_cpp.llama_token_data_array(_arr, len(_arr), False) + ) + + _arr = (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data) + llama_cpp.llama_sample_repetition_penalties( ctx, - (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data), - len(last_n_tokens_data), - 40, - 0.8, - 0.2, - 1.0 / 0.85, + candidates_p, + _arr, + penalty_last_n=last_n_repeat, + penalty_repeat=repeat_penalty, + penalty_freq=frequency_penalty, + penalty_present=presence_penalty, ) + + llama_cpp.llama_sample_top_k(ctx, candidates_p, k=40, min_keep=1) + llama_cpp.llama_sample_top_p(ctx, candidates_p, p=0.8, min_keep=1) + llama_cpp.llama_sample_temperature(ctx, candidates_p, temp=0.2) + id = 
llama_cpp.llama_sample_token(ctx, candidates_p) + last_n_tokens_data = last_n_tokens_data[1:] + [id] embd.append(id) input_noecho = False @@ -69,13 +111,19 @@ break if not input_noecho: for id in embd: + size = 32 + buffer = (ctypes.c_char * size)() + n = llama_cpp.llama_token_to_piece( + model, llama_cpp.llama_token(id), buffer, size + ) + assert n <= size print( - llama_cpp.llama_token_to_str(ctx, id).decode("utf-8"), + buffer[:n].decode("utf-8"), end="", flush=True, ) - if len(embd) > 0 and embd[-1] == llama_cpp.llama_token_eos(): + if len(embd) > 0 and embd[-1] == llama_cpp.llama_token_eos(ctx): break print() diff --git a/examples/low_level_api/quantize.py b/examples/low_level_api/quantize.py index 8bd03f88a..057ac389e 100644 --- a/examples/low_level_api/quantize.py +++ b/examples/low_level_api/quantize.py @@ -4,14 +4,16 @@ def main(args): + fname_inp = args.fname_inp.encode("utf-8") + fname_out = args.fname_out.encode("utf-8") if not os.path.exists(fname_inp): raise RuntimeError(f"Input file does not exist ({fname_inp})") if os.path.exists(fname_out): raise RuntimeError(f"Output file already exists ({fname_out})") - fname_inp = args.fname_inp.encode("utf-8") - fname_out = args.fname_out.encode("utf-8") - itype = args.itype - return_code = llama_cpp.llama_model_quantize(fname_inp, fname_out, itype) + ftype = args.type + args = llama_cpp.llama_model_quantize_default_params() + args.ftype = ftype + return_code = llama_cpp.llama_model_quantize(fname_inp, fname_out, args) if return_code != 0: raise RuntimeError("Failed to quantize model") @@ -20,6 +22,10 @@ def main(args): parser = argparse.ArgumentParser() parser.add_argument("fname_inp", type=str, help="Path to input model") parser.add_argument("fname_out", type=str, help="Path to output model") - parser.add_argument("type", type=int, help="Type of quantization (2: q4_0, 3: q4_1)") + parser.add_argument( + "type", + type=int, + help="Type of quantization (2: q4_0, 3: q4_1), see llama_cpp.py for enum", + ) args = parser.parse_args() main(args) diff --git a/examples/low_level_api/readme/low_level_api_llama_cpp.md b/examples/low_level_api/readme/low_level_api_llama_cpp.md new file mode 100644 index 000000000..5f350ffe9 --- /dev/null +++ b/examples/low_level_api/readme/low_level_api_llama_cpp.md @@ -0,0 +1,61 @@ +# Low-Level API for Llama_cpp + +## Overview +This Python script, low_level_api_llama_cpp.py, demonstrates the implementation of a low-level API for interacting with the llama_cpp library. The script defines an inference that generates embeddings based on a given prompt using .gguf model. + +### Prerequisites +Before running the script, ensure that you have the following dependencies installed: + +. Python 3.6 or higher +. llama_cpp: A C++ library for working with .gguf model +. NumPy: A fundamental package for scientific computing with Python +. multiprocessing: A Python module for parallel computing + +### Usage +install depedencies: +```bash +python -m pip install llama-cpp-python ctypes os multiprocessing +``` +Run the script: +```bash +python low_level_api_llama_cpp.py +``` + +## Code Structure +The script is organized as follows: + +### . Initialization: + Load the model from the specified path. + Create a context for model evaluation. + +### . Tokenization: + Tokenize the input prompt using the llama_tokenize function. + Prepare the input tokens for model evaluation. + +### . Inference: + Perform model evaluation to generate responses. + Sample from the model's output using various strategies (top-k, top-p, temperature). 
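+
+As a rough, illustrative sketch (not a verbatim excerpt of the script), a single sampling step combines these strategies roughly as follows. It reuses the same low-level calls the script itself makes, and assumes `ctx`, `model`, and `last_n_tokens_data` have already been set up as in `low_level_api_llama_cpp.py`; the k=40, p=0.8, temp=0.2 and penalty values mirror the script's settings:
+
+```python
+import llama_cpp
+
+# Wrap the logits of the last evaluated token into a candidate array.
+logits = llama_cpp.llama_get_logits(ctx)
+n_vocab = llama_cpp.llama_n_vocab(model)
+_arr = (llama_cpp.llama_token_data * n_vocab)(
+    *[
+        llama_cpp.llama_token_data(token_id, logits[token_id], 0.0)
+        for token_id in range(n_vocab)
+    ]
+)
+candidates_p = llama_cpp.ctypes.pointer(
+    llama_cpp.llama_token_data_array(_arr, len(_arr), False)
+)
+
+# Penalize recently generated tokens, then narrow the distribution and sample.
+_last = (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data)
+llama_cpp.llama_sample_repetition_penalties(
+    ctx,
+    candidates_p,
+    _last,
+    penalty_last_n=len(last_n_tokens_data),
+    penalty_repeat=1.0,
+    penalty_freq=0.0,
+    penalty_present=0.0,
+)
+llama_cpp.llama_sample_top_k(ctx, candidates_p, k=40, min_keep=1)
+llama_cpp.llama_sample_top_p(ctx, candidates_p, p=0.8, min_keep=1)
+llama_cpp.llama_sample_temperature(ctx, candidates_p, temp=0.2)
+token_id = llama_cpp.llama_sample_token(ctx, candidates_p)
+```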
+ +### . Output: + Print the generated tokens and the corresponding decoded text. + +### .Cleanup: + Free resources and print timing information. + +## Configuration +Customize the inference behavior by adjusting the following variables: + +#### . N_THREADS: Number of CPU threads to use for model evaluation. +#### . MODEL_PATH: Path to the model file. +#### . prompt: Input prompt for the chatbot. + +## Notes +. Ensure that the llama_cpp library is built and available in the system. Follow the instructions in the llama_cpp repository for building and installing the library. + +. This script is designed to work with the .gguf model and may require modifications for compatibility with other models. + +## Acknowledgments +This code is based on the llama_cpp library developed by the community. Special thanks to the contributors for their efforts. + +## License +This project is licensed under the MIT License - see the LICENSE file for details. \ No newline at end of file diff --git a/examples/low_level_api/util.py b/examples/low_level_api/util.py new file mode 100644 index 000000000..ef8b1c1ee --- /dev/null +++ b/examples/low_level_api/util.py @@ -0,0 +1,101 @@ +ANSI_COLOR_RESET = "\x1b[0m" +ANSI_COLOR_YELLOW = "\x1b[33m" +ANSI_BOLD = "\x1b[1m" +ANSI_COLOR_GREEN = "\x1b[32m" + +CONSOLE_COLOR_DEFAULT = ANSI_COLOR_RESET +CONSOLE_COLOR_PROMPT = ANSI_COLOR_YELLOW +CONSOLE_COLOR_USER_INPUT = ANSI_BOLD + ANSI_COLOR_GREEN + + +# Iterative search +# Actively searches and prevents a pattern from being returned +class IterSearch: + def __init__(self, pattern): + self.pattern = list(pattern) + self.buffer = [] + + def __call__(self, char): + self.buffer += [char] + + if self.pattern[: len(self.buffer)] == self.buffer: + if len(self.buffer) >= len(self.pattern): + self.buffer.clear() + return [] + + _tmp = self.buffer[:] + self.buffer.clear() + return _tmp + + +class Circle: + def __init__(self, size, default=0): + self.list = [default] * size + self.maxsize = size + self.size = 0 + self.offset = 0 + + def append(self, elem): + if self.size < self.maxsize: + self.list[self.size] = elem + self.size += 1 + else: + self.list[self.offset] = elem + self.offset = (self.offset + 1) % self.maxsize + + def __getitem__(self, val): + if isinstance(val, int): + if 0 > val or val >= self.size: + raise IndexError("Index out of range") + return ( + self.list[val] + if self.size < self.maxsize + else self.list[(self.offset + val) % self.maxsize] + ) + elif isinstance(val, slice): + start, stop, step = val.start, val.stop, val.step + if step is None: + step = 1 + if start is None: + start = 0 + if stop is None: + stop = self.size + if start < 0: + start = self.size + start + if stop < 0: + stop = self.size + stop + + indices = range(start, stop, step) + return [ + self.list[(self.offset + i) % self.maxsize] + for i in indices + if i < self.size + ] + else: + raise TypeError("Invalid argument type") + + +if __name__ == "__main__": + c = Circle(5) + + c.append(1) + print(c.list) + print(c[:]) + assert c[0] == 1 + assert c[:5] == [1] + + for i in range(2, 5 + 1): + c.append(i) + print(c.list) + print(c[:]) + assert c[0] == 1 + assert c[:5] == [1, 2, 3, 4, 5] + + for i in range(5 + 1, 9 + 1): + c.append(i) + print(c.list) + print(c[:]) + assert c[0] == 5 + assert c[:5] == [5, 6, 7, 8, 9] + # assert c[:-5] == [5,6,7,8,9] + assert c[:10] == [5, 6, 7, 8, 9] diff --git a/examples/notebooks/Batching.ipynb b/examples/notebooks/Batching.ipynb new file mode 100644 index 000000000..be7fe9b52 --- /dev/null +++ 
b/examples/notebooks/Batching.ipynb @@ -0,0 +1,463 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import ctypes\n", + "import llama_cpp" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "llama_cpp.llama_backend_init(numa=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_loader: loaded meta data with 20 key-value pairs and 291 tensors from /workspaces/llama-cpp-python/mistral-7b-v0.1.Q2_K.gguf (version GGUF V2)\n", + "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", + "llama_model_loader: - kv 0: general.architecture str = llama\n", + "llama_model_loader: - kv 1: general.name str = mistralai_mistral-7b-v0.1\n", + "llama_model_loader: - kv 2: llama.context_length u32 = 32768\n", + "llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n", + "llama_model_loader: - kv 4: llama.block_count u32 = 32\n", + "llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336\n", + "llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n", + "llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n", + "llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8\n", + "llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n", + "llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000\n", + "llama_model_loader: - kv 11: general.file_type u32 = 10\n", + "llama_model_loader: - kv 12: tokenizer.ggml.model str = llama\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32000] = [\"\", \"\", \"\", \"<0x00>\", \"<...\n", + "llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n", + "llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n", + "llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1\n", + "llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 2\n", + "llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32 = 0\n", + "llama_model_loader: - kv 19: general.quantization_version u32 = 2\n", + "llama_model_loader: - type f32: 65 tensors\n", + "llama_model_loader: - type q2_K: 65 tensors\n", + "llama_model_loader: - type q3_K: 160 tensors\n", + "llama_model_loader: - type q6_K: 1 tensors\n", + "llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", + "llm_load_vocab: special tokens cache size = 3\n", + "llm_load_vocab: token to piece cache size = 0.1637 MB\n", + "llm_load_print_meta: format = GGUF V2\n", + "llm_load_print_meta: arch = llama\n", + "llm_load_print_meta: vocab type = SPM\n", + "llm_load_print_meta: n_vocab = 32000\n", + "llm_load_print_meta: n_merges = 0\n", + "llm_load_print_meta: vocab_only = 0\n", + "llm_load_print_meta: n_ctx_train = 32768\n", + "llm_load_print_meta: n_embd = 4096\n", + "llm_load_print_meta: n_layer = 32\n", + "llm_load_print_meta: n_head = 32\n", + "llm_load_print_meta: n_head_kv = 8\n", + "llm_load_print_meta: n_rot = 128\n", + "llm_load_print_meta: n_swa = 0\n", + "llm_load_print_meta: n_embd_head_k = 128\n", + "llm_load_print_meta: n_embd_head_v = 128\n", + "llm_load_print_meta: n_gqa = 4\n", + 
"llm_load_print_meta: n_embd_k_gqa = 1024\n", + "llm_load_print_meta: n_embd_v_gqa = 1024\n", + "llm_load_print_meta: f_norm_eps = 0.0e+00\n", + "llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n", + "llm_load_print_meta: f_clamp_kqv = 0.0e+00\n", + "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n", + "llm_load_print_meta: f_logit_scale = 0.0e+00\n", + "llm_load_print_meta: n_ff = 14336\n", + "llm_load_print_meta: n_expert = 0\n", + "llm_load_print_meta: n_expert_used = 0\n", + "llm_load_print_meta: causal attn = 1\n", + "llm_load_print_meta: pooling type = 0\n", + "llm_load_print_meta: rope type = 0\n", + "llm_load_print_meta: rope scaling = linear\n", + "llm_load_print_meta: freq_base_train = 10000.0\n", + "llm_load_print_meta: freq_scale_train = 1\n", + "llm_load_print_meta: n_ctx_orig_yarn = 32768\n", + "llm_load_print_meta: rope_finetuned = unknown\n", + "llm_load_print_meta: ssm_d_conv = 0\n", + "llm_load_print_meta: ssm_d_inner = 0\n", + "llm_load_print_meta: ssm_d_state = 0\n", + "llm_load_print_meta: ssm_dt_rank = 0\n", + "llm_load_print_meta: ssm_dt_b_c_rms = 0\n", + "llm_load_print_meta: model type = 7B\n", + "llm_load_print_meta: model ftype = Q2_K - Medium\n", + "llm_load_print_meta: model params = 7.24 B\n", + "llm_load_print_meta: model size = 2.87 GiB (3.41 BPW) \n", + "llm_load_print_meta: general.name = mistralai_mistral-7b-v0.1\n", + "llm_load_print_meta: BOS token = 1 ''\n", + "llm_load_print_meta: EOS token = 2 ''\n", + "llm_load_print_meta: UNK token = 0 ''\n", + "llm_load_print_meta: LF token = 13 '<0x0A>'\n", + "llm_load_print_meta: EOG token = 2 ''\n", + "llm_load_print_meta: max token length = 48\n", + "llm_load_tensors: ggml ctx size = 0.14 MiB\n", + "llm_load_tensors: CPU buffer size = 2939.57 MiB\n", + "..................................................................................................\n" + ] + } + ], + "source": [ + "params = llama_cpp.llama_model_default_params()\n", + "params.n_gpu_layers = 35\n", + "model = llama_cpp.llama_load_model_from_file(\n", + " b\"/workspaces/llama-cpp-python/mistral-7b-v0.1.Q2_K.gguf\", params\n", + ") # Update this to whatever" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1, 415, 2936, 9060, 285, 1142]\n", + "58\n" + ] + } + ], + "source": [ + "n_ctx = 512\n", + "n_len = 32\n", + "n_parallel = 2\n", + "prompt = b\"The quick brown fox\"\n", + "\n", + "tokens = (llama_cpp.llama_token * n_ctx)()\n", + "tokens_len = llama_cpp.llama_tokenize(\n", + " model, prompt, len(prompt), tokens, len(tokens), True, True\n", + ")\n", + "print(tokens[:tokens_len])\n", + "\n", + "n_kv_req = tokens_len + (n_len - tokens_len) * n_parallel\n", + "print(n_kv_req)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_new_context_with_model: n_ctx = 64\n", + "llama_new_context_with_model: n_batch = 32\n", + "llama_new_context_with_model: n_ubatch = 32\n", + "llama_new_context_with_model: flash_attn = 0\n", + "llama_new_context_with_model: freq_base = 10000.0\n", + "llama_new_context_with_model: freq_scale = 1\n", + "llama_kv_cache_init: CPU KV buffer size = 8.00 MiB\n", + "llama_new_context_with_model: KV self size = 8.00 MiB, K (f16): 4.00 MiB, V (f16): 4.00 MiB\n", + "llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n", + "llama_new_context_with_model: CPU compute buffer size = 5.01 MiB\n", + 
"llama_new_context_with_model: graph nodes = 1030\n", + "llama_new_context_with_model: graph splits = 1\n" + ] + } + ], + "source": [ + "ctx_params = llama_cpp.llama_context_default_params()\n", + "ctx_params.seed = 1234\n", + "ctx_params.n_ctx = n_kv_req\n", + "ctx_params.n_batch = max(n_len, n_parallel)\n", + "ctx_params.n_threads = 1\n", + "ctx_params.n_threads_batch = 1\n", + "ctx = llama_cpp.llama_new_context_with_model(model, ctx_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "n_ctx = llama_cpp.llama_n_ctx(ctx)\n", + "batch = llama_cpp.llama_batch_init(max(tokens_len, n_parallel), 0, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "batch.n_tokens = tokens_len\n", + "for i in range(tokens_len):\n", + " batch.token[i] = tokens[i]\n", + " batch.pos[i] = i\n", + " batch.seq_id[i][0] = 0\n", + " batch.n_seq_id[i] = 1\n", + " batch.logits[i] = False\n", + "\n", + "batch.logits[batch.n_tokens - 1] = True\n", + "\n", + "if llama_cpp.llama_decode(ctx, batch) != 0:\n", + " print(\"Error decoding\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(n_parallel):\n", + " llama_cpp.llama_kv_cache_seq_cp(ctx, 0, i, 0, batch.n_tokens)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Initialize sampler chain with default parameters\n", + "sparams = llama_cpp.llama_sampler_chain_default_params()\n", + "sampler_chain = llama_cpp.llama_sampler_chain_init(sparams)\n", + "\n", + "# Add top_k, top_p, temperature, and final distribution-based sampler\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_top_k(40))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_top_p(0.9, 1))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_temp(0.4))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_dist(1234)) # Final \"dist\" sampler" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "7\n", + "[' j', ' jumped']\n", + "8\n", + "[' j over', ' jumped over']\n", + "9\n", + "[' j over the', ' jumped over the']\n", + "10\n", + "[' j over the lazy', ' jumped over the lazy']\n", + "11\n", + "[' j over the lazy dog', ' jumped over the lazy dog']\n", + "12\n", + "[' j over the lazy dog.', ' jumped over the lazy dog\\n']\n", + "13\n", + "[' j over the lazy dog. También', ' jumped over the lazy dog\\nGroupLayout']\n", + "14\n", + "[' j over the lazy dog. También:', ' jumped over the lazy dog\\nGroupLayouting']\n", + "15\n", + "[' j over the lazy dog. También: is', ' jumped over the lazy dog\\nGroupLayouting is']\n", + "16\n", + "[' j over the lazy dog. También: is a', ' jumped over the lazy dog\\nGroupLayouting is a']\n", + "17\n", + "[' j over the lazy dog. También: is a technique', ' jumped over the lazy dog\\nGroupLayouting is a common']\n", + "18\n", + "[' j over the lazy dog. También: is a technique practice', ' jumped over the lazy dog\\nGroupLayouting is a common practice']\n", + "19\n", + "[' j over the lazy dog. 
También: is a technique practice in', ' jumped over the lazy dog\\nGroupLayouting is a common practice in']\n", + "20\n", + "[' j over the lazy dog. También: is a technique practice in the', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the']\n", + "21\n", + "[' j over the lazy dog. También: is a technique practice in the real', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media']\n", + "22\n", + "[' j over the lazy dog. También: is a technique practice in the real-', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry']\n", + "23\n", + "[' j over the lazy dog. También: is a technique practice in the real-.', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry.']\n", + "24\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However']\n", + "25\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We,', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However,']\n", + "26\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there']\n", + "27\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has']\n", + "28\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been']\n", + "29\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little']\n", + "30\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little emp']\n", + "31\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little empirical']\n", + "32\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. 
However, there has been little empirical research']\n" + ] + } + ], + "source": [ + "streams = [\"\"] * n_parallel\n", + "i_batch = [batch.n_tokens - 1] * n_parallel\n", + "\n", + "n_cur = batch.n_tokens\n", + "n_decode = 0\n", + "\n", + "while n_cur <= n_len:\n", + " batch.n_tokens = 0\n", + " for i in range(n_parallel):\n", + " if i_batch[i] < 0:\n", + " continue\n", + "\n", + " # Sample the next token using the sampler chain\n", + " new_token_id = llama_cpp.llama_sampler_sample(sampler_chain, ctx, -1)\n", + "\n", + " if new_token_id == llama_cpp.llama_token_eos(ctx) or n_cur == n_len:\n", + " i_batch[i] = -1\n", + " continue\n", + "\n", + " buf = (ctypes.c_char * 32)()\n", + " \n", + " # Convert token ID to text\n", + " outlen = llama_cpp.llama_token_to_piece(model, new_token_id, buf, len(buf), 0, False)\n", + " streams[i] += bytes(buf[:outlen]).decode(\"utf-8\")\n", + "\n", + " batch.token[batch.n_tokens] = new_token_id\n", + " batch.pos[batch.n_tokens] = n_cur\n", + " batch.seq_id[batch.n_tokens][0] = i\n", + " batch.n_seq_id[batch.n_tokens] = 1\n", + " batch.logits[batch.n_tokens] = True\n", + "\n", + " i_batch[i] = batch.n_tokens\n", + " batch.n_tokens += 1\n", + " n_decode += 1\n", + "\n", + " if batch.n_tokens == 0:\n", + " break\n", + "\n", + " n_cur += 1\n", + "\n", + " if llama_cpp.llama_decode(ctx, batch) != 0:\n", + " print(\"Error decoding\", flush=True)\n", + " break\n", + " print(n_cur)\n", + " print(streams)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. 
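One detail of the generation loop above: `i_batch[i]` records the row at which each stream's logits will appear after the next decode, but the sampling call passes `-1`, which always reads the last row, so every stream ends up sampling from the same distribution (which may explain why the first stream degenerates in the output). A minimal per-stream variant, assuming the same `ctx`, `sampler_chain`, `batch`, and `i_batch` bookkeeping as in the cell above; sampling from the recorded row is the pattern used by llama.cpp's own batched example:

```python
# Sketch: sample each parallel stream from its own logits row.
for i in range(n_parallel):
    if i_batch[i] < 0:
        continue  # this stream already hit end-of-sequence
    # i_batch[i] indexes this stream's last token in the previous batch,
    # so that row's logits are the ones to sample from.
    new_token_id = llama_cpp.llama_sampler_sample(sampler_chain, ctx, i_batch[i])
    # ... EOS check, detokenization, and batch bookkeeping exactly as in the cell above ...
```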
However, there has been little empirical research']\n" + ] + } + ], + "source": [ + "print(streams)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "llama_cpp.llama_batch_free(batch)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "llama_cpp.llama_free(ctx)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "llama_cpp.llama_free_model(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "llama_cpp.llama_backend_free()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.1" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/Clients.ipynb b/examples/notebooks/Clients.ipynb index caebbb67f..fab82673e 100644 --- a/examples/notebooks/Clients.ipynb +++ b/examples/notebooks/Clients.ipynb @@ -37,11 +37,11 @@ "source": [ "import openai\n", "\n", - "openai.api_key = \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", + "openai.api_key = \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", "openai.api_base = \"http://100.64.159.73:8000/v1\"\n", "\n", "openai.Completion.create(\n", - " model=\"text-davinci-003\", # currently can be anything\n", + " model=\"text-davinci-003\", # currently can be anything\n", " prompt=\"The quick brown fox jumps\",\n", " max_tokens=5,\n", ")" @@ -66,7 +66,9 @@ "source": [ "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", + "os.environ[\"OPENAI_API_KEY\"] = (\n", + " \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", + ")\n", "os.environ[\"OPENAI_API_BASE\"] = \"http://100.64.159.73:8000/v1\"\n", "\n", "from langchain.llms import OpenAI\n", diff --git a/examples/notebooks/Functions.ipynb b/examples/notebooks/Functions.ipynb new file mode 100644 index 000000000..1f4138165 --- /dev/null +++ b/examples/notebooks/Functions.ipynb @@ -0,0 +1,519 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Functions\n", + "\n", + "The OpenAI compatbile web server in `llama-cpp-python` supports function calling.\n", + "\n", + "Function calling allows API clients to specify a schema that gives the model a format it should respond in.\n", + "Function calling in `llama-cpp-python` works by combining models pretrained for function calling such as [`functionary`](https://huggingface.co/meetkai) with constrained sampling to produce a response that is compatible with the schema.\n", + "\n", + "Note however that this improves but does not guarantee that the response will be compatible with the schema.\n", + "\n", + "## Requirements\n", + "\n", + "Before we begin you will need the following:\n", + "\n", + "- A running `llama-cpp-python` server with a function calling compatible model. 
[See here](https://llama-cpp-python.readthedocs.io/en/latest/server/#function-calling)\n", + "- The OpenAI Python Client `pip install openai`\n", + "- (Optional) The Instructor Python Library `pip install instructor`\n", + "\n", + "## Function Calling with OpenAI Python Client\n", + "\n", + "We'll start with a basic demo that only uses the OpenAI Python Client." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ChatCompletion(id='chatcmpl-a2d9eb9f-7354-472f-b6ad-4d7a807729a3', choices=[Choice(finish_reason='stop', index=0, message=ChatCompletionMessage(content='The current weather in San Francisco is **72°F** (22°C).\\n ', role='assistant', function_call=None, tool_calls=None))], created=1699638365, model='gpt-3.5-turbo-1106', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=22, prompt_tokens=136, total_tokens=158))\n" + ] + } + ], + "source": [ + "import openai\n", + "import json\n", + "\n", + "\n", + "client = openai.OpenAI(\n", + " api_key=\"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\", # can be anything\n", + " base_url=\"http://100.64.159.73:8000/v1\", # NOTE: Replace with IP address and port of your llama-cpp-python server\n", + ")\n", + "\n", + "\n", + "# Example dummy function hard coded to return the same weather\n", + "# In production, this could be your backend API or an external API\n", + "def get_current_weather(location, unit=\"fahrenheit\"):\n", + " \"\"\"Get the current weather in a given location\"\"\"\n", + " if \"tokyo\" in location.lower():\n", + " return json.dumps({\"location\": \"Tokyo\", \"temperature\": \"10\", \"unit\": \"celsius\"})\n", + " elif \"san francisco\" in location.lower():\n", + " return json.dumps(\n", + " {\"location\": \"San Francisco\", \"temperature\": \"72\", \"unit\": \"fahrenheit\"}\n", + " )\n", + " elif \"paris\" in location.lower():\n", + " return json.dumps({\"location\": \"Paris\", \"temperature\": \"22\", \"unit\": \"celsius\"})\n", + " else:\n", + " return json.dumps({\"location\": location, \"temperature\": \"unknown\"})\n", + "\n", + "\n", + "def run_conversation():\n", + " # Step 1: send the conversation and available functions to the model\n", + " messages = [\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": \"What's the weather like in San Francisco, Tokyo, and Paris?\",\n", + " }\n", + " ]\n", + " tools = [\n", + " {\n", + " \"type\": \"function\",\n", + " \"function\": {\n", + " \"name\": \"get_current_weather\",\n", + " \"description\": \"Get the current weather in a given location\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"location\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city and state, e.g. 
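As the introduction to this notebook notes, constrained sampling improves but does not guarantee that the returned arguments match the schema, so it is worth validating them before dispatching to a real backend. A minimal sketch; the helper name and the required-key list are illustrative, not part of the notebook:

```python
import json

def parse_tool_arguments(arguments_json: str, required=("location",)):
    """Parse a tool call's argument string and check the keys the schema marks as required."""
    try:
        args = json.loads(arguments_json)
    except json.JSONDecodeError:
        return None  # invalid JSON: fall back, re-prompt, or surface an error
    if not isinstance(args, dict) or not all(key in args for key in required):
        return None  # required keys missing despite the schema
    return args

# Hypothetical usage inside the tool-call loop:
# args = parse_tool_arguments(tool_call.function.arguments)
```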
San Francisco, CA\",\n", + " },\n", + " \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n", + " },\n", + " \"required\": [\"location\"],\n", + " },\n", + " },\n", + " }\n", + " ]\n", + " response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo-1106\",\n", + " messages=messages,\n", + " tools=tools,\n", + " tool_choice=\"auto\", # auto is default, but we'll be explicit\n", + " )\n", + " response_message = response.choices[0].message\n", + " tool_calls = response_message.tool_calls\n", + " # Step 2: check if the model wanted to call a function\n", + " if tool_calls:\n", + " # Step 3: call the function\n", + " # Note: the JSON response may not always be valid; be sure to handle errors\n", + " available_functions = {\n", + " \"get_current_weather\": get_current_weather,\n", + " } # only one function in this example, but you can have multiple\n", + " messages.append(response_message) # extend conversation with assistant's reply\n", + " # Step 4: send the info for each function call and function response to the model\n", + " for tool_call in tool_calls:\n", + " function_name = tool_call.function.name\n", + " function_to_call = available_functions[function_name]\n", + " function_args = json.loads(tool_call.function.arguments)\n", + " function_response = function_to_call(\n", + " location=function_args.get(\"location\"),\n", + " unit=function_args.get(\"unit\"),\n", + " )\n", + " messages.append(\n", + " {\n", + " \"tool_call_id\": tool_call.id,\n", + " \"role\": \"tool\",\n", + " \"name\": function_name,\n", + " \"content\": function_response,\n", + " }\n", + " ) # extend conversation with function response\n", + " second_response = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo-1106\",\n", + " messages=messages,\n", + " ) # get a new response from the model where it can see the function response\n", + " return second_response\n", + "\n", + "\n", + "print(run_conversation())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Function Calling with Instructor\n", + "\n", + "The above example is a bit verbose and requires you to manually verify the schema.\n", + "\n", + "For our next examples we'll use the `instructor` library to simplify the process and accomplish a number of different tasks with function calling.\n", + "\n", + "You'll first need to install the [`instructor`](https://github.com/jxnl/instructor/).\n", + "\n", + "You can do so by running the following command in your terminal:\n", + "\n", + "```bash\n", + "pip install instructor\n", + "```\n", + "\n", + "Below we'll go through a few basic examples taken directly from the [instructor cookbook](https://jxnl.github.io/instructor/)\n", + "\n", + "## Basic Usage" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "name='Jason' age=25\n" + ] + } + ], + "source": [ + "import instructor\n", + "from pydantic import BaseModel\n", + "\n", + "# Enables `response_model`\n", + "client = instructor.patch(client=client)\n", + "\n", + "\n", + "class UserDetail(BaseModel):\n", + " name: str\n", + " age: int\n", + "\n", + "\n", + "user = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " response_model=UserDetail,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": \"Extract Jason is 25 years old\"},\n", + " ],\n", + ")\n", + "\n", + "assert isinstance(user, UserDetail)\n", + "assert user.name == \"Jason\"\n", + "assert user.age == 
25\n", + "\n", + "print(user)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Text Classification\n", + "\n", + "### Single-Label Classification" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "class_label=\n" + ] + } + ], + "source": [ + "import enum\n", + "\n", + "\n", + "class Labels(str, enum.Enum):\n", + " \"\"\"Enumeration for single-label text classification.\"\"\"\n", + "\n", + " SPAM = \"spam\"\n", + " NOT_SPAM = \"not_spam\"\n", + "\n", + "\n", + "class SinglePrediction(BaseModel):\n", + " \"\"\"\n", + " Class for a single class label prediction.\n", + " \"\"\"\n", + "\n", + " class_label: Labels\n", + "\n", + "\n", + "def classify(data: str) -> SinglePrediction:\n", + " \"\"\"Perform single-label classification on the input text.\"\"\"\n", + " return client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo-0613\",\n", + " response_model=SinglePrediction,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"Classify the following text: {data}\",\n", + " },\n", + " ],\n", + " ) # type: ignore\n", + "\n", + "\n", + "prediction = classify(\"Hello there I'm a Nigerian prince and I want to give you money\")\n", + "assert prediction.class_label == Labels.SPAM\n", + "print(prediction)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Multi-Label Classification" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "class_labels=[, ]\n" + ] + } + ], + "source": [ + "from typing import List\n", + "\n", + "\n", + "# Define Enum class for multiple labels\n", + "class MultiLabels(str, enum.Enum):\n", + " TECH_ISSUE = \"tech_issue\"\n", + " BILLING = \"billing\"\n", + " GENERAL_QUERY = \"general_query\"\n", + "\n", + "\n", + "# Define the multi-class prediction model\n", + "class MultiClassPrediction(BaseModel):\n", + " \"\"\"\n", + " Class for a multi-class label prediction.\n", + " \"\"\"\n", + "\n", + " class_labels: List[MultiLabels]\n", + "\n", + "\n", + "def multi_classify(data: str) -> MultiClassPrediction:\n", + " \"\"\"Perform multi-label classification on the input text.\"\"\"\n", + " return client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo-0613\",\n", + " response_model=MultiClassPrediction,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"Classify the following support ticket: {data}\",\n", + " },\n", + " ],\n", + " ) # type: ignore\n", + "\n", + "\n", + "# Test multi-label classification\n", + "ticket = \"My account is locked and I can't access my billing info.\"\n", + "prediction = multi_classify(ticket)\n", + "print(prediction)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Self-Critique" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "question='What is the meaning of life?' answer='According to the Devil, the meaning of life is to live a life of sin and debauchery.'\n", + "1 validation error for QuestionAnswerNoEvil\n", + "answer\n", + " Assertion failed, The statement promotes sin and debauchery, which can be considered objectionable. [type=assertion_error, input_value='According to the Devil, ... 
of sin and debauchery.', input_type=str]\n", + " For further information visit https://errors.pydantic.dev/2.3/v/assertion_error\n" + ] + } + ], + "source": [ + "from typing_extensions import Annotated\n", + "from pydantic import BaseModel, BeforeValidator\n", + "\n", + "from instructor import llm_validator\n", + "\n", + "\n", + "question = \"What is the meaning of life?\"\n", + "context = \"The according to the devil the meaning of live is to live a life of sin and debauchery.\"\n", + "\n", + "\n", + "class QuestionAnswer(BaseModel):\n", + " question: str\n", + " answer: str\n", + "\n", + "\n", + "qa: QuestionAnswer = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " response_model=QuestionAnswer,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a system that answers questions based on the context. answer exactly what the question asks using the context.\",\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"using the context: {context}\\n\\nAnswer the following question: {question}\",\n", + " },\n", + " ],\n", + ")\n", + "print(qa)\n", + "\n", + "\n", + "class QuestionAnswerNoEvil(BaseModel):\n", + " question: str\n", + " answer: Annotated[\n", + " str,\n", + " BeforeValidator(\n", + " llm_validator(\"don't say objectionable things\", allow_override=True)\n", + " ),\n", + " ]\n", + "\n", + "\n", + "try:\n", + " qa: QuestionAnswerNoEvil = client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " response_model=QuestionAnswerNoEvil,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a system that answers questions based on the context. answer exactly what the question asks using the context.\",\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": f\"using the context: {context}\\n\\nAnswer the following question: {question}\",\n", + " },\n", + " ],\n", + " )\n", + "except Exception as e:\n", + " print(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Answering Questions with Validated Citations" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "question='What did the author do during college?' 
answer=[Fact(fact='The author, Jason Liu, studied Computational Mathematics and Physics in university.', substring_quote=['Computational Mathematics'])]\n" + ] + } + ], + "source": [ + "import re\n", + "from typing import List\n", + "\n", + "from pydantic import Field, BaseModel, model_validator, FieldValidationInfo\n", + "\n", + "\n", + "class Fact(BaseModel):\n", + " fact: str = Field(...)\n", + " substring_quote: List[str] = Field(...)\n", + "\n", + " @model_validator(mode=\"after\")\n", + " def validate_sources(self, info: FieldValidationInfo) -> \"Fact\":\n", + " text_chunks = info.context.get(\"text_chunk\", None)\n", + " spans = list(self.get_spans(text_chunks))\n", + " self.substring_quote = [text_chunks[span[0] : span[1]] for span in spans]\n", + " return self\n", + "\n", + " def get_spans(self, context):\n", + " for quote in self.substring_quote:\n", + " yield from self._get_span(quote, context)\n", + "\n", + " def _get_span(self, quote, context):\n", + " for match in re.finditer(re.escape(quote), context):\n", + " yield match.span()\n", + "\n", + "\n", + "class QuestionAnswer(BaseModel):\n", + " question: str = Field(...)\n", + " answer: List[Fact] = Field(...)\n", + "\n", + " @model_validator(mode=\"after\")\n", + " def validate_sources(self) -> \"QuestionAnswer\":\n", + " self.answer = [fact for fact in self.answer if len(fact.substring_quote) > 0]\n", + " return self\n", + "\n", + "\n", + "def ask_ai(question: str, context: str) -> QuestionAnswer:\n", + " return client.chat.completions.create(\n", + " model=\"gpt-3.5-turbo-0613\",\n", + " temperature=0.0,\n", + " response_model=QuestionAnswer,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are a world class algorithm to answer questions with correct and exact citations.\",\n", + " },\n", + " {\"role\": \"user\", \"content\": f\"{context}\"},\n", + " {\"role\": \"user\", \"content\": f\"Question: {question}\"},\n", + " ],\n", + " validation_context={\"text_chunk\": context},\n", + " )\n", + "\n", + "\n", + "question = \"What did the author do during college?\"\n", + "context = \"\"\"\n", + "My name is Jason Liu, and I grew up in Toronto Canada but I was born in China.\n", + "I went to an arts high school but in university I studied Computational Mathematics and physics.\n", + "As part of coop I worked at many companies including Stitchfix, Facebook.\n", + "I also started the Data Science club at the University of Waterloo and I was the president of the club for 2 years.\n", + "\"\"\"\n", + "\n", + "qa = ask_ai(question, context)\n", + "print(qa)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "python-3.8.10", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5+" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/Guidance.ipynb b/examples/notebooks/Guidance.ipynb new file mode 100644 index 000000000..c52559853 --- /dev/null +++ b/examples/notebooks/Guidance.ipynb @@ -0,0 +1,93 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
Tweak this proverb to apply to model instructions instead.\n",
+       "\n",
+       "Where there is no guidance, a people falls,\n",
+       "but in an abundance of counselors there is safety.\n",
+       "- Proverbs 11:14\n",
+       "\n",
+       "UPDATED\n",
+       "Where there is no guidance for assembling a model, people will struggle,\n",
+       "but with clear instructions, the process becomes safe and successful.\n",
+       "- GPT 2 (updated): Proverbs 11:14
\n", + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = (\n", + " \"sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" # can be anything\n", + ")\n", + "os.environ[\"OPENAI_API_BASE\"] = \"http://100.64.159.73:8000/v1\"\n", + "os.environ[\"OPENAI_API_HOST\"] = \"http://100.64.159.73:8000\"\n", + "\n", + "import guidance\n", + "\n", + "# set the default language model used to execute guidance programs\n", + "guidance.llm = guidance.llms.OpenAI(\"text-davinci-003\", caching=False)\n", + "\n", + "# define a guidance program that adapts a proverb\n", + "program = guidance(\n", + " \"\"\"Tweak this proverb to apply to model instructions instead.\n", + "\n", + "{{proverb}}\n", + "- {{book}} {{chapter}}:{{verse}}\n", + "\n", + "UPDATED\n", + "Where there is no guidance{{gen 'rewrite' stop=\"\\\\n-\"}}\n", + "- GPT {{gen 'chapter'}}:{{gen 'verse'}}\"\"\"\n", + ")\n", + "\n", + "# execute the program on a specific proverb\n", + "executed_program = program(\n", + " proverb=\"Where there is no guidance, a people falls,\\nbut in an abundance of counselors there is safety.\",\n", + " book=\"Proverbs\",\n", + " chapter=11,\n", + " verse=14,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/Multimodal.ipynb b/examples/notebooks/Multimodal.ipynb new file mode 100644 index 000000000..8448ac1f7 --- /dev/null +++ b/examples/notebooks/Multimodal.ipynb @@ -0,0 +1,88 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + " \n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'text': 'Llama C++'}\n" + ] + } + ], + "source": [ + "from openai import OpenAI\n", + "\n", + "client = OpenAI(base_url=\"http://localhost:8000/v1\", api_key=\"llama.cpp\")\n", + "response = client.chat.completions.create(\n", + " model=\"gpt-4-vision-preview\",\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\n", + " \"url\": \"https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png\",\n", + " },\n", + " },\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": \"What does the image say. Format your response as a json object with a single 'text' key.\",\n", + " },\n", + " ],\n", + " }\n", + " ],\n", + " response_format={\n", + " \"type\": \"json_object\",\n", + " \"schema\": {\"type\": \"object\", \"properties\": {\"text\": {\"type\": \"string\"}}},\n", + " },\n", + ")\n", + "import json\n", + "\n", + "print(json.loads(response.choices[0].message.content))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5+" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/OpenHermesFunctionCalling.ipynb b/examples/notebooks/OpenHermesFunctionCalling.ipynb new file mode 100644 index 000000000..13128be04 --- /dev/null +++ b/examples/notebooks/OpenHermesFunctionCalling.ipynb @@ -0,0 +1,933 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"name\": \"get_article_details\",\n", + " \"description\": \"Get article details from unstructured article text.\\ndate_published: formatted as \\\"MM/DD/YYYY\\\"\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"title\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"authors\": {\n", + " \"type\": \"list[str]\"\n", + " },\n", + " \"short_summary\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"date_published\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"tags\": {\n", + " \"type\": \"list[str]\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Article\"\n", + "}\n" + ] + } + ], + "source": [ + "import json\n", + "import inspect\n", + "from typing import get_type_hints\n", + "\n", + "\n", + "class Article:\n", + " pass\n", + "\n", + "\n", + "class Weather:\n", + " pass\n", + "\n", + "\n", + "class Directions:\n", + " pass\n", + "\n", + "\n", + "def calculate_mortgage_payment(\n", + " loan_amount: int, interest_rate: float, loan_term: int\n", + ") -> float:\n", + " \"\"\"Get the monthly mortgage payment given an interest rate percentage.\"\"\"\n", + "\n", + " # TODO: you must implement this to actually call it later\n", + " pass\n", + "\n", + "\n", + "def get_article_details(\n", + " title: str,\n", + " authors: list[str],\n", + " short_summary: str,\n", + " date_published: str,\n", + " tags: list[str],\n", 
+ ") -> Article:\n", + " '''Get article details from unstructured article text.\n", + " date_published: formatted as \"MM/DD/YYYY\"'''\n", + "\n", + " # TODO: you must implement this to actually call it later\n", + " pass\n", + "\n", + "\n", + "def get_weather(zip_code: str) -> Weather:\n", + " \"\"\"Get the current weather given a zip code.\"\"\"\n", + "\n", + " # TODO: you must implement this to actually call it later\n", + " pass\n", + "\n", + "\n", + "def get_directions(start: str, destination: str) -> Directions:\n", + " \"\"\"Get directions from Google Directions API.\n", + " start: start address as a string including zipcode (if any)\n", + " destination: end address as a string including zipcode (if any)\"\"\"\n", + "\n", + " # TODO: you must implement this to actually call it later\n", + " pass\n", + "\n", + "\n", + "def get_type_name(t):\n", + " name = str(t)\n", + " if \"list\" in name or \"dict\" in name:\n", + " return name\n", + " else:\n", + " return t.__name__\n", + "\n", + "\n", + "def serialize_function_to_json(func):\n", + " signature = inspect.signature(func)\n", + " type_hints = get_type_hints(func)\n", + "\n", + " function_info = {\n", + " \"name\": func.__name__,\n", + " \"description\": func.__doc__,\n", + " \"parameters\": {\"type\": \"object\", \"properties\": {}},\n", + " \"returns\": type_hints.get(\"return\", \"void\").__name__,\n", + " }\n", + "\n", + " for name, _ in signature.parameters.items():\n", + " param_type = get_type_name(type_hints.get(name, type(None)))\n", + " function_info[\"parameters\"][\"properties\"][name] = {\"type\": param_type}\n", + "\n", + " return json.dumps(function_info, indent=2)\n", + "\n", + "\n", + "print(serialize_function_to_json(get_article_details))" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import xml.etree.ElementTree as ET\n", + "import re\n", + "\n", + "\n", + "def extract_function_calls(completion):\n", + " completion = completion.strip()\n", + " pattern = r\"((.*?))\"\n", + " match = re.search(pattern, completion, re.DOTALL)\n", + " if not match:\n", + " return None\n", + "\n", + " multiplefn = match.group(1)\n", + " root = ET.fromstring(multiplefn)\n", + " functions = root.findall(\"functioncall\")\n", + " return [json.loads(fn.text) for fn in functions]" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_hermes_prompt(prompt, functions):\n", + " functions = \"\\n\\n\".join([serialize_function_to_json(fn) for fn in functions])\n", + " prompt = f\"\"\"<|im_start|>system\n", + "You are a helpful assistant with access to the following functions:\n", + "\n", + "{functions}\n", + "\n", + "To use these functions respond with:\n", + "\n", + " {{\"name\": \"function_name\", \"arguments\": {{\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}}}} \n", + " {{\"name\": \"function_name\", \"arguments\": {{\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}}}} \n", + " ...\n", + "\n", + "\n", + "Edge cases you must handle:\n", + "- If there are no functions that match the user request, you will respond politely that you cannot help.<|im_end|>\n", + "<|im_start|>user\n", + "{prompt}<|im_end|>\n", + "<|im_start|>assistant\"\"\"\n", + " return prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "<|im_start|>system\n", + "You are a helpful assistant with access to the following 
functions:\n", + "\n", + "{\n", + " \"name\": \"get_weather\",\n", + " \"description\": \"Get the current weather given a zip code.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"zip_code\": {\n", + " \"type\": \"str\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Weather\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"calculate_mortgage_payment\",\n", + " \"description\": \"Get the monthly mortgage payment given an interest rate percentage.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"loan_amount\": {\n", + " \"type\": \"int\"\n", + " },\n", + " \"interest_rate\": {\n", + " \"type\": \"float\"\n", + " },\n", + " \"loan_term\": {\n", + " \"type\": \"int\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"float\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"get_article_details\",\n", + " \"description\": \"Get article details from unstructured article text.\\ndate_published: formatted as \\\"MM/DD/YYYY\\\"\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"title\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"authors\": {\n", + " \"type\": \"list[str]\"\n", + " },\n", + " \"short_summary\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"date_published\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"tags\": {\n", + " \"type\": \"list[str]\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Article\"\n", + "}\n", + "\n", + "To use these functions respond with:\n", + "\n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " ...\n", + "\n", + "\n", + "Edge cases you must handle:\n", + "- If there are no functions that match the user request, you will respond politely that you cannot help.<|im_end|>\n", + "<|im_start|>user\n", + "What's the weather in 10001?<|im_end|>\n", + "<|im_start|>assistant\n", + "<|im_start|>system\n", + "You are a helpful assistant with access to the following functions:\n", + "\n", + "{\n", + " \"name\": \"get_weather\",\n", + " \"description\": \"Get the current weather given a zip code.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"zip_code\": {\n", + " \"type\": \"str\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Weather\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"calculate_mortgage_payment\",\n", + " \"description\": \"Get the monthly mortgage payment given an interest rate percentage.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"loan_amount\": {\n", + " \"type\": \"int\"\n", + " },\n", + " \"interest_rate\": {\n", + " \"type\": \"float\"\n", + " },\n", + " \"loan_term\": {\n", + " \"type\": \"int\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"float\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"get_article_details\",\n", + " \"description\": \"Get article details from unstructured article text.\\ndate_published: formatted as \\\"MM/DD/YYYY\\\"\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"title\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"authors\": {\n", + " \"type\": \"list[str]\"\n", + " },\n", + " \"short_summary\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"date_published\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"tags\": {\n", + " \"type\": 
\"list[str]\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Article\"\n", + "}\n", + "\n", + "To use these functions respond with:\n", + "\n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " ...\n", + "\n", + "\n", + "Edge cases you must handle:\n", + "- If there are no functions that match the user request, you will respond politely that you cannot help.<|im_end|>\n", + "<|im_start|>user\n", + "Determine the monthly mortgage payment for a loan amount of $200,000, an interest rate of 4%, and a loan term of 30 years.<|im_end|>\n", + "<|im_start|>assistant\n", + "<|im_start|>system\n", + "You are a helpful assistant with access to the following functions:\n", + "\n", + "{\n", + " \"name\": \"get_weather\",\n", + " \"description\": \"Get the current weather given a zip code.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"zip_code\": {\n", + " \"type\": \"str\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Weather\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"calculate_mortgage_payment\",\n", + " \"description\": \"Get the monthly mortgage payment given an interest rate percentage.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"loan_amount\": {\n", + " \"type\": \"int\"\n", + " },\n", + " \"interest_rate\": {\n", + " \"type\": \"float\"\n", + " },\n", + " \"loan_term\": {\n", + " \"type\": \"int\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"float\"\n", + "}\n", + "\n", + "{\n", + " \"name\": \"get_article_details\",\n", + " \"description\": \"Get article details from unstructured article text.\\ndate_published: formatted as \\\"MM/DD/YYYY\\\"\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"title\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"authors\": {\n", + " \"type\": \"list[str]\"\n", + " },\n", + " \"short_summary\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"date_published\": {\n", + " \"type\": \"str\"\n", + " },\n", + " \"tags\": {\n", + " \"type\": \"list[str]\"\n", + " }\n", + " }\n", + " },\n", + " \"returns\": \"Article\"\n", + "}\n", + "\n", + "To use these functions respond with:\n", + "\n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " {\"name\": \"function_name\", \"arguments\": {\"arg_1\": \"value_1\", \"arg_2\": value_2, ...}} \n", + " ...\n", + "\n", + "\n", + "Edge cases you must handle:\n", + "- If there are no functions that match the user request, you will respond politely that you cannot help.<|im_end|>\n", + "<|im_start|>user\n", + "What's the current exchange rate for USD to EUR?<|im_end|>\n", + "<|im_start|>assistant\n" + ] + } + ], + "source": [ + "prompts = [\n", + " \"What's the weather in 10001?\",\n", + " \"Determine the monthly mortgage payment for a loan amount of $200,000, an interest rate of 4%, and a loan term of 30 years.\",\n", + " \"What's the current exchange rate for USD to EUR?\",\n", + "]\n", + "functions = [get_weather, calculate_mortgage_payment, get_article_details]\n", + "\n", + "for prompt in prompts:\n", + " print(generate_hermes_prompt(prompt, functions))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ggml_init_cublas: 
GGML_CUDA_FORCE_MMQ: no\n", + "ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes\n", + "ggml_init_cublas: found 1 CUDA devices:\n", + " Device 0: NVIDIA GeForce RTX 2060, compute capability 7.5\n", + "llama_model_loader: loaded meta data with 20 key-value pairs and 291 tensors from ../../models/OpenHermes-2.5-Mistral-7B-GGUF/openhermes-2.5-mistral-7b.Q4_K_M.gguf (version GGUF V3 (latest))\n", + "llama_model_loader: - tensor 0: token_embd.weight q4_K [ 4096, 32002, 1, 1 ]\n", + "llama_model_loader: - tensor 1: blk.0.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 2: blk.0.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 3: blk.0.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 4: blk.0.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 5: blk.0.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 6: blk.0.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 7: blk.0.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 8: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 9: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 10: blk.1.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 11: blk.1.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 12: blk.1.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 13: blk.1.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 14: blk.1.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 15: blk.1.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 16: blk.1.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 17: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 18: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 19: blk.2.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 20: blk.2.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 21: blk.2.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 22: blk.2.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 23: blk.2.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 24: blk.2.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 25: blk.2.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 26: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 27: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 28: blk.3.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 29: blk.3.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 30: blk.3.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 31: blk.3.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 32: blk.3.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 33: blk.3.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 34: blk.3.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 35: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 36: 
blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 37: blk.4.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 38: blk.4.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 39: blk.4.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 40: blk.4.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 41: blk.4.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 42: blk.4.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 43: blk.4.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 44: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 45: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 46: blk.5.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 47: blk.5.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 48: blk.5.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 49: blk.5.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 50: blk.5.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 51: blk.5.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 52: blk.5.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 53: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 54: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 55: blk.6.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 56: blk.6.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 57: blk.6.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 58: blk.6.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 59: blk.6.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 60: blk.6.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 61: blk.6.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 62: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 63: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 64: blk.7.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 65: blk.7.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 66: blk.7.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 67: blk.7.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 68: blk.7.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 69: blk.7.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 70: blk.7.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 71: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 72: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 73: blk.8.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 74: blk.8.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 75: blk.8.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 76: blk.8.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - 
tensor 77: blk.8.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 78: blk.8.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 79: blk.8.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 80: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 81: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 82: blk.9.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 83: blk.9.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 84: blk.9.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 85: blk.9.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 86: blk.9.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 87: blk.9.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 88: blk.9.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 89: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 90: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 91: blk.10.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 92: blk.10.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 93: blk.10.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 94: blk.10.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 95: blk.10.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 96: blk.10.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 97: blk.10.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 98: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 99: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 100: blk.11.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 101: blk.11.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 102: blk.11.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 103: blk.11.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 104: blk.11.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 105: blk.11.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 106: blk.11.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 107: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 108: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 109: blk.12.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 110: blk.12.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 111: blk.12.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 112: blk.12.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 113: blk.12.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 114: blk.12.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 115: blk.12.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 116: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 117: 
blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 118: blk.13.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 119: blk.13.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 120: blk.13.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 121: blk.13.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 122: blk.13.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 123: blk.13.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 124: blk.13.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 125: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 126: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 127: blk.14.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 128: blk.14.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 129: blk.14.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 130: blk.14.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 131: blk.14.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 132: blk.14.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 133: blk.14.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 134: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 135: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 136: blk.15.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 137: blk.15.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 138: blk.15.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 139: blk.15.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 140: blk.15.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 141: blk.15.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 142: blk.15.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 143: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 144: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 145: blk.16.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 146: blk.16.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 147: blk.16.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 148: blk.16.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 149: blk.16.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 150: blk.16.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 151: blk.16.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 152: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 153: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 154: blk.17.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 155: blk.17.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 156: blk.17.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 157: 
blk.17.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 158: blk.17.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 159: blk.17.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 160: blk.17.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 161: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 162: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 163: blk.18.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 164: blk.18.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 165: blk.18.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 166: blk.18.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 167: blk.18.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 168: blk.18.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 169: blk.18.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 170: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 171: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 172: blk.19.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 173: blk.19.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 174: blk.19.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 175: blk.19.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 176: blk.19.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 177: blk.19.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 178: blk.19.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 179: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 180: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 181: blk.20.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 182: blk.20.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 183: blk.20.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 184: blk.20.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 185: blk.20.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 186: blk.20.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 187: blk.20.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 188: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 189: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 190: blk.21.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 191: blk.21.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 192: blk.21.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 193: blk.21.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 194: blk.21.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 195: blk.21.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 196: blk.21.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: 
- tensor 197: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 198: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 199: blk.22.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 200: blk.22.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 201: blk.22.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 202: blk.22.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 203: blk.22.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 204: blk.22.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 205: blk.22.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 206: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 207: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 208: blk.23.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 209: blk.23.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 210: blk.23.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 211: blk.23.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 212: blk.23.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 213: blk.23.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 214: blk.23.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 215: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 216: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 217: blk.24.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 218: blk.24.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 219: blk.24.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 220: blk.24.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 221: blk.24.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 222: blk.24.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 223: blk.24.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 224: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 225: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 226: blk.25.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 227: blk.25.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 228: blk.25.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 229: blk.25.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 230: blk.25.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 231: blk.25.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 232: blk.25.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 233: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 234: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 235: blk.26.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 236: blk.26.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - 
tensor 237: blk.26.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 238: blk.26.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 239: blk.26.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 240: blk.26.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 241: blk.26.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 242: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 243: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 244: blk.27.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 245: blk.27.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 246: blk.27.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 247: blk.27.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 248: blk.27.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 249: blk.27.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 250: blk.27.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 251: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 252: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 253: blk.28.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 254: blk.28.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 255: blk.28.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 256: blk.28.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 257: blk.28.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 258: blk.28.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 259: blk.28.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 260: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 261: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 262: blk.29.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 263: blk.29.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 264: blk.29.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 265: blk.29.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 266: blk.29.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 267: blk.29.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 268: blk.29.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 269: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 270: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 271: blk.30.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 272: blk.30.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 273: blk.30.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 274: blk.30.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 275: blk.30.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 276: blk.30.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + 
"llama_model_loader: - tensor 277: blk.30.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 278: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 279: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 280: blk.31.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 281: blk.31.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 282: blk.31.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", + "llama_model_loader: - tensor 283: blk.31.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 284: blk.31.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 285: blk.31.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", + "llama_model_loader: - tensor 286: blk.31.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", + "llama_model_loader: - tensor 287: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 288: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 289: output_norm.weight f32 [ 4096, 1, 1, 1 ]\n", + "llama_model_loader: - tensor 290: output.weight q6_K [ 4096, 32002, 1, 1 ]\n", + "llama_model_loader: - kv 0: general.architecture str = llama\n", + "llama_model_loader: - kv 1: general.name str = teknium_openhermes-2.5-mistral-7b\n", + "llama_model_loader: - kv 2: llama.context_length u32 = 32768\n", + "llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n", + "llama_model_loader: - kv 4: llama.block_count u32 = 32\n", + "llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336\n", + "llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n", + "llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n", + "llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8\n", + "llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n", + "llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000\n", + "llama_model_loader: - kv 11: general.file_type u32 = 15\n", + "llama_model_loader: - kv 12: tokenizer.ggml.model str = llama\n", + "llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32002] = [\"\", \"\", \"\", \"<0x00>\", \"<...\n", + "llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32002] = [0.000000, 0.000000, 0.000000, 0.0000...\n", + "llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32002] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n", + "llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1\n", + "llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 32000\n", + "llama_model_loader: - kv 18: tokenizer.ggml.padding_token_id u32 = 0\n", + "llama_model_loader: - kv 19: general.quantization_version u32 = 2\n", + "llama_model_loader: - type f32: 65 tensors\n", + "llama_model_loader: - type q4_K: 193 tensors\n", + "llama_model_loader: - type q6_K: 33 tensors\n", + "llm_load_vocab: special tokens definition check successful ( 261/32002 ).\n", + "llm_load_print_meta: format = GGUF V3 (latest)\n", + "llm_load_print_meta: arch = llama\n", + "llm_load_print_meta: vocab type = SPM\n", + "llm_load_print_meta: n_vocab = 32002\n", + "llm_load_print_meta: n_merges = 0\n", + "llm_load_print_meta: n_ctx_train = 32768\n", + "llm_load_print_meta: n_embd = 4096\n", + "llm_load_print_meta: n_head = 32\n", + "llm_load_print_meta: n_head_kv = 8\n", + "llm_load_print_meta: n_layer = 32\n", + "llm_load_print_meta: n_rot = 128\n", + "llm_load_print_meta: 
n_gqa = 4\n", + "llm_load_print_meta: f_norm_eps = 0.0e+00\n", + "llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n", + "llm_load_print_meta: f_clamp_kqv = 0.0e+00\n", + "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n", + "llm_load_print_meta: n_ff = 14336\n", + "llm_load_print_meta: rope scaling = linear\n", + "llm_load_print_meta: freq_base_train = 10000.0\n", + "llm_load_print_meta: freq_scale_train = 1\n", + "llm_load_print_meta: n_yarn_orig_ctx = 32768\n", + "llm_load_print_meta: rope_finetuned = unknown\n", + "llm_load_print_meta: model type = 7B\n", + "llm_load_print_meta: model ftype = mostly Q4_K - Medium\n", + "llm_load_print_meta: model params = 7.24 B\n", + "llm_load_print_meta: model size = 4.07 GiB (4.83 BPW) \n", + "llm_load_print_meta: general.name = teknium_openhermes-2.5-mistral-7b\n", + "llm_load_print_meta: BOS token = 1 ''\n", + "llm_load_print_meta: EOS token = 32000 '<|im_end|>'\n", + "llm_load_print_meta: UNK token = 0 ''\n", + "llm_load_print_meta: PAD token = 0 ''\n", + "llm_load_print_meta: LF token = 13 '<0x0A>'\n", + "llm_load_tensors: ggml ctx size = 0.11 MiB\n", + "llm_load_tensors: using CUDA for GPU acceleration\n", + "llm_load_tensors: mem required = 70.42 MiB\n", + "llm_load_tensors: offloading 32 repeating layers to GPU\n", + "llm_load_tensors: offloading non-repeating layers to GPU\n", + "llm_load_tensors: offloaded 35/35 layers to GPU\n", + "llm_load_tensors: VRAM used: 4095.06 MiB\n", + "...............................................................................................\n", + "llama_new_context_with_model: n_ctx = 2048\n", + "llama_new_context_with_model: freq_base = 10000.0\n", + "llama_new_context_with_model: freq_scale = 1\n", + "llama_kv_cache_init: offloading v cache to GPU\n", + "llama_kv_cache_init: offloading k cache to GPU\n", + "llama_kv_cache_init: VRAM kv self = 256.00 MiB\n", + "llama_new_context_with_model: kv self size = 256.00 MiB\n", + "llama_build_graph: non-view tensors processed: 740/740\n", + "llama_new_context_with_model: compute buffer total size = 159.07 MiB\n", + "llama_new_context_with_model: VRAM scratch buffer: 156.00 MiB\n", + "llama_new_context_with_model: total VRAM used: 4507.07 MiB (model: 4095.06 MiB, context: 412.00 MiB)\n" + ] + } + ], + "source": [ + "import llama_cpp\n", + "\n", + "llama = llama_cpp.Llama(\n", + " model_path=\"../../models/OpenHermes-2.5-Mistral-7B-GGUF/openhermes-2.5-mistral-7b.Q4_K_M.gguf\",\n", + " n_gpu_layers=-1,\n", + " n_ctx=2048,\n", + " verbose=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'name': 'get_weather', 'arguments': {'zip_code': '10001'}}]\n", + "====================================================================================================\n", + "[{'name': 'calculate_mortgage_payment', 'arguments': {'loan_amount': 200000, 'interest_rate': 0.04, 'loan_term': 30}}]\n", + "====================================================================================================\n", + "Unfortunately, I do not have a built-in function to check currency exchange rates. 
However, you can use third-party APIs or websites like Google Finance or XE to get this information.\n", + "====================================================================================================\n" + ] + } + ], + "source": [ + "prompts = [\n", + " \"What's the weather in 10001?\",\n", + " \"Determine the monthly mortgage payment for a loan amount of $200,000, an interest rate of 4%, and a loan term of 30 years.\",\n", + " \"What's the current exchange rate for USD to EUR?\",\n", + "]\n", + "functions = [get_weather, calculate_mortgage_payment, get_article_details]\n", + "\n", + "for prompt in prompts:\n", + " prompt = generate_hermes_prompt(prompt, functions)\n", + " completion = llama.create_completion(prompt, max_tokens=-1)[\"choices\"][0][\"text\"]\n", + " function_calls = extract_function_calls(completion)\n", + " if function_calls:\n", + " print(function_calls)\n", + " else:\n", + " print(completion.strip())\n", + " print(\"=\" * 100)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "get_weather\n", + "{'zip_code': '05751'}\n", + "====================================================================================================\n", + "get_weather\n", + "{'zip_code': '05751'}\n", + "get_weather\n", + "{'zip_code': '07030'}\n", + "calculate_mortgage_payment\n", + "{'loan_amount': 250000, 'interest_rate': 4.18, 'loan_term': 30}\n", + "====================================================================================================\n", + "I don't have a function to get exchange rates, but I can provide some resources where you can find this information. You can check websites like Google Finance, XE.com, or Yahoo Finance for up-to-date currency exchange rates.\n", + "====================================================================================================\n" + ] + } + ], + "source": [ + "prompts = [\n", + " \"What's the weather in 05751?\",\n", + " \"I'm planning a trip to Killington, Vermont (05751) from Hoboken, NJ (07030). 
Can you get me weather for both locations and directions?\",\n", + " \"What's the current exchange rate for USD to EUR?\",\n", + "]\n", + "\n", + "for prompt in prompts:\n", + " completion = llama.create_completion(\n", + " generate_hermes_prompt(prompt, functions), max_tokens=-1\n", + " )[\"choices\"][0][\"text\"]\n", + " function_calls = extract_function_calls(completion)\n", + "\n", + " if function_calls:\n", + " for function in function_calls:\n", + " print(function[\"name\"])\n", + " print(function[\"arguments\"])\n", + " else:\n", + " print(completion.strip())\n", + "\n", + " print(\"=\" * 100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5+" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/PerformanceTuning.ipynb b/examples/notebooks/PerformanceTuning.ipynb index 76e26fbd1..ba74e4a41 100644 --- a/examples/notebooks/PerformanceTuning.ipynb +++ b/examples/notebooks/PerformanceTuning.ipynb @@ -13,6 +13,7 @@ "import llama_cpp\n", "\n", "import numpy as np\n", + "\n", "np.int = int\n", "\n", "from skopt.space import Integer, Categorical\n", @@ -25,7 +26,7 @@ " Categorical([True, False], name=\"f16_kv\"),\n", " Categorical([True, False], name=\"use_mlock\"),\n", " Integer(1, multiprocessing.cpu_count(), name=\"n_threads\"),\n", - " Integer(1, 2048, name=\"n_batch\")\n", + " Integer(1, 2048, name=\"n_batch\"),\n", "]\n", "\n", "# TODO: Make this a random prompt to avoid any cache related inconsistencies\n", @@ -41,18 +42,25 @@ "\n", "from skopt.utils import use_named_args\n", "\n", + "\n", "@use_named_args(space)\n", "def objective(**params):\n", " f16_kv = params[\"f16_kv\"]\n", " use_mlock = params[\"use_mlock\"]\n", " n_threads = params[\"n_threads\"]\n", " n_batch = params[\"n_batch\"]\n", - " llm = llama_cpp.Llama(model_path=MODEL_PATH, f16_kv=f16_kv, use_mlock=use_mlock, n_threads=n_threads, n_batch=n_batch)\n", + " llm = llama_cpp.Llama(\n", + " model_path=MODEL_PATH,\n", + " f16_kv=f16_kv,\n", + " use_mlock=use_mlock,\n", + " n_threads=n_threads,\n", + " n_batch=n_batch,\n", + " )\n", "\n", " t1 = time.time()\n", " output = llm(\n", " PROMPT,\n", - " max_tokens=1, # Only optimize prompt processing\n", + " max_tokens=1, # Only optimize prompt processing\n", " stop=[\"###\", \"\\n\"],\n", " echo=True,\n", " )\n", @@ -5240,10 +5248,7 @@ "source": [ "from skopt import gp_minimize\n", "\n", - "res = gp_minimize(\n", - " objective,\n", - " space\n", - ")" + "res = gp_minimize(objective, space)" ] }, { diff --git a/examples/ray/README.md b/examples/ray/README.md new file mode 100644 index 000000000..6e338ba17 --- /dev/null +++ b/examples/ray/README.md @@ -0,0 +1,19 @@ +This is an example of doing LLM inference with [Ray](https://docs.ray.io/en/latest/index.html) and [Ray Serve](https://docs.ray.io/en/latest/serve/index.html). 
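+
+The server code lives in `llm.py`, which wraps `llama_cpp.Llama` in a Ray Serve deployment that accepts a JSON body with a `prompt` and an optional `max_tokens` field. Once the app is running (see the steps below), you can also query it from Python instead of curl; a minimal client sketch, assuming the `requests` package is installed:
+
+```python
+# Hypothetical client for the endpoint started below; adjust the URL if you
+# serve the app behind a different host or route.
+import requests
+
+resp = requests.post(
+    "http://localhost:8000/",
+    json={"prompt": "tell me a joke", "max_tokens": 128},
+)
+print(resp.json())  # completion dict returned by llama-cpp-python
+```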
+ +First, install the requirements: + +```bash +$ pip install -r requirements.txt +``` + +Deploy a GGUF model to Ray Serve with the following command: + +```bash +$ serve run llm:llm_builder model_path='../models/mistral-7b-instruct-v0.2.Q4_K_M.gguf' +``` + +This will start an API endpoint at `http://localhost:8000/`. You can query the model like this: + +```bash +$ curl -k -d '{"prompt": "tell me a joke", "max_tokens": 128}' -X POST http://localhost:8000 +``` diff --git a/examples/ray/llm.py b/examples/ray/llm.py new file mode 100755 index 000000000..2325dd303 --- /dev/null +++ b/examples/ray/llm.py @@ -0,0 +1,21 @@ +from starlette.requests import Request +from typing import Dict +from ray import serve +from ray.serve import Application +from llama_cpp import Llama + + +@serve.deployment +class LlamaDeployment: + def __init__(self, model_path: str): + self._llm = Llama(model_path=model_path) + + async def __call__(self, http_request: Request) -> Dict: + input_json = await http_request.json() + prompt = input_json["prompt"] + max_tokens = input_json.get("max_tokens", 64) + return self._llm(prompt, max_tokens=max_tokens) + + +def llm_builder(args: Dict[str, str]) -> Application: + return LlamaDeployment.bind(args["model_path"]) diff --git a/examples/ray/requirements.txt b/examples/ray/requirements.txt new file mode 100644 index 000000000..a409fb882 --- /dev/null +++ b/examples/ray/requirements.txt @@ -0,0 +1,3 @@ +ray[serve] +--extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu +llama-cpp-python diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index dce1764f6..2c9c527cd 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,2 +1,4 @@ from .llama_cpp import * from .llama import * + +__version__ = "0.3.9" diff --git a/llama_cpp/_ctypes_extensions.py b/llama_cpp/_ctypes_extensions.py new file mode 100644 index 000000000..e88ed387d --- /dev/null +++ b/llama_cpp/_ctypes_extensions.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import sys +import os +import ctypes +import functools +import pathlib + +from typing import ( + Any, + Callable, + List, + Union, + Optional, + TYPE_CHECKING, + TypeVar, + Generic, +) +from typing_extensions import TypeAlias + + +# Load the library +def load_shared_library(lib_base_name: str, base_path: pathlib.Path): + """Platform independent shared library loader""" + # Searching for the library in the current directory under the name "libllama" (default name + # for llamacpp) and "llama" (default name for this repo) + lib_paths: List[pathlib.Path] = [] + # Determine the file extension based on the platform + if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"): + lib_paths += [ + base_path / f"lib{lib_base_name}.so", + ] + elif sys.platform == "darwin": + lib_paths += [ + base_path / f"lib{lib_base_name}.so", + base_path / f"lib{lib_base_name}.dylib", + ] + elif sys.platform == "win32": + lib_paths += [ + base_path / f"{lib_base_name}.dll", + base_path / f"lib{lib_base_name}.dll", + ] + else: + raise RuntimeError("Unsupported platform") + + cdll_args = dict() # type: ignore + + # Add the library directory to the DLL search path on Windows (if needed) + if sys.platform == "win32": + os.add_dll_directory(str(base_path)) + os.environ["PATH"] = str(base_path) + os.pathsep + os.environ["PATH"] + + if sys.platform == "win32" and sys.version_info >= (3, 8): + os.add_dll_directory(str(base_path)) + if "CUDA_PATH" in os.environ: + os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "bin")) + 
os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "lib")) + if "HIP_PATH" in os.environ: + os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "bin")) + os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "lib")) + cdll_args["winmode"] = ctypes.RTLD_GLOBAL + + # Try to load the shared library, handling potential errors + for lib_path in lib_paths: + if lib_path.exists(): + try: + return ctypes.CDLL(str(lib_path), **cdll_args) # type: ignore + except Exception as e: + raise RuntimeError(f"Failed to load shared library '{lib_path}': {e}") + + raise FileNotFoundError( + f"Shared library with base name '{lib_base_name}' not found" + ) + + +# ctypes sane type hint helpers +# +# - Generic Pointer and Array types +# - PointerOrRef type with a type hinted byref function +# +# NOTE: Only use these for static type checking not for runtime checks +# no good will come of that + +if TYPE_CHECKING: + CtypesCData = TypeVar("CtypesCData", bound=ctypes._CData) # type: ignore + + CtypesArray: TypeAlias = ctypes.Array[CtypesCData] # type: ignore + + CtypesPointer: TypeAlias = ctypes._Pointer[CtypesCData] # type: ignore + + CtypesVoidPointer: TypeAlias = ctypes.c_void_p + + class CtypesRef(Generic[CtypesCData]): + pass + + CtypesPointerOrRef: TypeAlias = Union[ + CtypesPointer[CtypesCData], CtypesRef[CtypesCData] + ] + + CtypesFuncPointer: TypeAlias = ctypes._FuncPointer # type: ignore + +F = TypeVar("F", bound=Callable[..., Any]) + + +def ctypes_function_for_shared_library(lib: ctypes.CDLL): + """Decorator for defining ctypes functions with type hints""" + + def ctypes_function( + name: str, argtypes: List[Any], restype: Any, enabled: bool = True + ): + def decorator(f: F) -> F: + if enabled: + func = getattr(lib, name) + func.argtypes = argtypes + func.restype = restype + functools.wraps(f)(func) + return func + else: + return f + + return decorator + + return ctypes_function + + +def _byref(obj: CtypesCData, offset: Optional[int] = None) -> CtypesRef[CtypesCData]: + """Type-annotated version of ctypes.byref""" + ... + + +byref = _byref if TYPE_CHECKING else ctypes.byref diff --git a/llama_cpp/_ggml.py b/llama_cpp/_ggml.py new file mode 100644 index 000000000..5bee8a93b --- /dev/null +++ b/llama_cpp/_ggml.py @@ -0,0 +1,12 @@ +"""Internal module use at your own risk + +This module provides a minimal interface for working with ggml tensors from llama-cpp-python +""" +import os +import pathlib + +import llama_cpp._ctypes_extensions as ctypes_ext + +libggml_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" +libggml = ctypes_ext.load_shared_library("ggml", libggml_base_path) + diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py new file mode 100644 index 000000000..343581dce --- /dev/null +++ b/llama_cpp/_internals.py @@ -0,0 +1,879 @@ +from __future__ import annotations + +import os +import ctypes + +from typing import ( + Dict, + List, + Tuple, + Optional, + Sequence, +) +from dataclasses import dataclass, field +from contextlib import ExitStack + +import numpy as np +import numpy.typing as npt + +from .llama_types import * +from .llama_grammar import LlamaGrammar +from ._utils import suppress_stdout_stderr + +import llama_cpp.llama_cpp as llama_cpp + + +# Python wrappers over llama.h structs + + +class LlamaModel: + """Intermediate Python wrapper for a llama.cpp llama_model. 
+ NOTE: For stability it's recommended you use the Llama class instead.""" + + def __init__( + self, + *, + path_model: str, + params: llama_cpp.llama_model_params, + verbose: bool = True, + ): + self.path_model = path_model + self.params = params + self.verbose = verbose + self._exit_stack = ExitStack() + + model = None + + if not os.path.exists(path_model): + raise ValueError(f"Model path does not exist: {path_model}") + + with suppress_stdout_stderr(disable=verbose): + model = llama_cpp.llama_load_model_from_file( + self.path_model.encode("utf-8"), self.params + ) + + if model is None: + raise ValueError(f"Failed to load model from file: {path_model}") + + vocab = llama_cpp.llama_model_get_vocab(model) + + if vocab is None: + raise ValueError(f"Failed to get vocab from model: {path_model}") + + self.model = model + self.vocab = vocab + + def free_model(): + if self.model is None: + return + llama_cpp.llama_free_model(self.model) + self.model = None + + self._exit_stack.callback(free_model) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def vocab_type(self) -> int: + return llama_cpp.llama_vocab_type(self.model) + + def n_vocab(self) -> int: + return llama_cpp.llama_n_vocab(self.vocab) + + def n_ctx_train(self) -> int: + return llama_cpp.llama_n_ctx_train(self.model) + + def n_embd(self) -> int: + return llama_cpp.llama_n_embd(self.model) + + def rope_freq_scale_train(self) -> float: + return llama_cpp.llama_model_rope_freq_scale_train(self.model) + + def desc(self) -> str: + buf = ctypes.create_string_buffer(1024) + llama_cpp.llama_model_desc(self.model, buf, 1024) + return buf.value.decode("utf-8") + + def size(self) -> int: + return llama_cpp.llama_model_size(self.model) + + def n_params(self) -> int: + return llama_cpp.llama_model_n_params(self.model) + + def get_tensor(self, name: str) -> ctypes.c_void_p: + raise NotImplementedError("get_tensor is not implemented in llama.cpp") + + # Vocab + + def token_get_text(self, token: int) -> str: + return llama_cpp.llama_token_get_text(self.vocab, token).decode("utf-8") + + def token_get_score(self, token: int) -> float: + return llama_cpp.llama_token_get_score(self.vocab, token) + + def token_get_attr(self, token: int) -> int: + return llama_cpp.llama_token_get_attr(self.vocab, token) + + # Special tokens + + def token_bos(self) -> int: + return llama_cpp.llama_token_bos(self.vocab) + + def token_eos(self) -> int: + return llama_cpp.llama_token_eos(self.vocab) + + def token_cls(self) -> int: + return llama_cpp.llama_token_cls(self.vocab) + + def token_sep(self) -> int: + return llama_cpp.llama_token_sep(self.vocab) + + def token_nl(self) -> int: + return llama_cpp.llama_token_nl(self.vocab) + + def token_prefix(self) -> int: + raise NotImplementedError("token_prefix is not implemented in llama.cpp") + + def token_middle(self) -> int: + raise NotImplementedError("token_middle is not implemented in llama.cpp") + + def token_suffix(self) -> int: + raise NotImplementedError("token_suffix is not implemented in llama.cpp") + + def token_eot(self) -> int: + return llama_cpp.llama_token_eot(self.vocab) + + def add_bos_token(self) -> bool: + return llama_cpp.llama_add_bos_token(self.vocab) + + def add_eos_token(self) -> bool: + return llama_cpp.llama_add_eos_token(self.vocab) + + # Tokenization + + def tokenize(self, text: bytes, add_bos: bool, special: bool): + n_ctx = self.n_ctx_train() + tokens = (llama_cpp.llama_token * n_ctx)() + n_tokens = llama_cpp.llama_tokenize( + self.vocab, text, len(text), 
tokens, n_ctx, add_bos, special + ) + if n_tokens < 0: + n_tokens = abs(n_tokens) + tokens = (llama_cpp.llama_token * n_tokens)() + n_tokens = llama_cpp.llama_tokenize( + self.vocab, text, len(text), tokens, n_tokens, add_bos, special + ) + if n_tokens < 0: + raise RuntimeError( + f'Failed to tokenize: text="{text}" n_tokens={n_tokens}' + ) + return list(tokens[:n_tokens]) + + def token_to_piece(self, token: int, special: bool = False) -> bytes: + buf = ctypes.create_string_buffer(32) + llama_cpp.llama_token_to_piece(self.vocab, token, buf, 32, 0, special) + return bytes(buf) + + def detokenize(self, tokens: List[int], special: bool = False) -> bytes: + output = b"" + size = 32 + buffer = (ctypes.c_char * size)() + for token in tokens: + n = llama_cpp.llama_token_to_piece( + self.vocab, llama_cpp.llama_token(token), buffer, size, 0, special + ) + assert n <= size + output += bytes(buffer[:n]) + # NOTE: Llama1 models automatically added a space at the start of the prompt + # this line removes a leading space if the first token is a beginning of sentence token + return ( + output[1:] + if len(tokens) > 0 and tokens[0] == self.token_bos() and output[0:1] == b" " + else output + ) + + # Extra + def metadata(self) -> Dict[str, str]: + metadata: Dict[str, str] = {} + buffer_size = 1024 + buffer = ctypes.create_string_buffer(buffer_size) + # zero the buffer + buffer.value = b"\0" * buffer_size + # iterate over model keys + for i in range(llama_cpp.llama_model_meta_count(self.model)): + nbytes = llama_cpp.llama_model_meta_key_by_index( + self.model, i, buffer, buffer_size + ) + if nbytes > buffer_size: + buffer_size = nbytes + 1 + buffer = ctypes.create_string_buffer(buffer_size) + nbytes = llama_cpp.llama_model_meta_key_by_index( + self.model, i, buffer, buffer_size + ) + key = buffer.value.decode("utf-8") + nbytes = llama_cpp.llama_model_meta_val_str_by_index( + self.model, i, buffer, buffer_size + ) + if nbytes > buffer_size: + buffer_size = nbytes + 1 + buffer = ctypes.create_string_buffer(buffer_size) + nbytes = llama_cpp.llama_model_meta_val_str_by_index( + self.model, i, buffer, buffer_size + ) + value = buffer.value.decode("utf-8") + metadata[key] = value + return metadata + + @staticmethod + def default_params(): + """Get the default llama_model_params.""" + return llama_cpp.llama_model_default_params() + + +class LlamaContext: + """Intermediate Python wrapper for a llama.cpp llama_context. 
+ NOTE: For stability it's recommended you use the Llama class instead.""" + + def __init__( + self, + *, + model: LlamaModel, + params: llama_cpp.llama_context_params, + verbose: bool = True, + ): + self.model = model + self.params = params + self.verbose = verbose + self._exit_stack = ExitStack() + + ctx = llama_cpp.llama_new_context_with_model(self.model.model, self.params) + + if ctx is None: + raise ValueError("Failed to create llama_context") + + self.ctx = ctx + + def free_ctx(): + if self.ctx is None: + return + llama_cpp.llama_free(self.ctx) + self.ctx = None + + self._exit_stack.callback(free_ctx) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def n_ctx(self) -> int: + return llama_cpp.llama_n_ctx(self.ctx) + + def pooling_type(self) -> int: + return llama_cpp.llama_pooling_type(self.ctx) + + def kv_cache_clear(self): + llama_cpp.llama_kv_cache_clear(self.ctx) + + def kv_cache_seq_rm(self, seq_id: int, p0: int, p1: int): + llama_cpp.llama_kv_cache_seq_rm(self.ctx, seq_id, p0, p1) + + def kv_cache_seq_cp(self, seq_id_src: int, seq_id_dst: int, p0: int, p1: int): + llama_cpp.llama_kv_cache_seq_cp(self.ctx, seq_id_src, seq_id_dst, p0, p1) + + def kv_cache_seq_keep(self, seq_id: int): + llama_cpp.llama_kv_cache_seq_keep(self.ctx, seq_id) + + def kv_cache_seq_shift(self, seq_id: int, p0: int, p1: int, shift: int): + llama_cpp.llama_kv_cache_seq_add(self.ctx, seq_id, p0, p1, shift) + + def get_state_size(self) -> int: + return llama_cpp.llama_get_state_size(self.ctx) + + # TODO: copy_state_data + + # TODO: set_state_data + + # TODO: llama_load_session_file + + # TODO: llama_save_session_file + + def decode(self, batch: LlamaBatch): + return_code = llama_cpp.llama_decode( + self.ctx, + batch.batch, + ) + if return_code != 0: + raise RuntimeError(f"llama_decode returned {return_code}") + + def set_n_threads(self, n_threads: int, n_threads_batch: int): + llama_cpp.llama_set_n_threads(self.ctx, n_threads, n_threads_batch) + + def get_logits(self): + return llama_cpp.llama_get_logits(self.ctx) + + def get_logits_ith(self, i: int): + return llama_cpp.llama_get_logits_ith(self.ctx, i) + + def get_embeddings(self): + return llama_cpp.llama_get_embeddings(self.ctx) + + # Sampling functions + + def set_rng_seed(self, seed: int): + # TODO: Fix + # llama_cpp.llama_set_rng_seed(self.ctx, seed) + raise NotImplementedError("set_rng_seed is not implemented in llama.cpp") + + def sample_repetition_penalties( + self, + candidates: "_LlamaTokenDataArray", + last_tokens_data: "llama_cpp.Array[llama_cpp.llama_token]", + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + ): + # llama_cpp.llama_sample_repetition_penalties( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # last_tokens_data, + # penalty_last_n, + # penalty_repeat, + # penalty_freq, + # penalty_present, + # ) + raise NotImplementedError("sample_repetition_penalties is not implemented in llama.cpp") + + def sample_softmax(self, candidates: "_LlamaTokenDataArray"): + # llama_cpp.llama_sample_softmax( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + raise NotImplementedError("sample_softmax is not implemented in llama.cpp") + + def sample_top_k(self, candidates: "_LlamaTokenDataArray", k: int, min_keep: int): + # llama_cpp.llama_sample_top_k( + # self.ctx, llama_cpp.byref(candidates.candidates), k, min_keep + # ) + raise NotImplementedError("sample_top_k is not implemented in llama.cpp") + + def sample_top_p(self, candidates: 
"_LlamaTokenDataArray", p: float, min_keep: int): + # llama_cpp.llama_sample_top_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_top_p is not implemented in llama.cpp") + + def sample_min_p(self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int): + # llama_cpp.llama_sample_min_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_min_p is not implemented in llama.cpp") + + def sample_typical( + self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int + ): + # llama_cpp.llama_sample_typical( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_typical is not implemented in llama.cpp") + + def sample_temp(self, candidates: "_LlamaTokenDataArray", temp: float): + # llama_cpp.llama_sample_temp( + # self.ctx, llama_cpp.byref(candidates.candidates), temp + # ) + raise NotImplementedError("sample_temp is not implemented in llama.cpp") + + def sample_grammar(self, candidates: "_LlamaTokenDataArray", grammar: LlamaGrammar): + # llama_cpp.llama_sample_grammar( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # grammar.grammar, + # ) + raise NotImplementedError("sample_grammar is not implemented in llama.cpp") + + def sample_token_mirostat( + self, + candidates: "_LlamaTokenDataArray", + tau: float, + eta: float, + m: int, + mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], + ) -> int: + raise NotImplementedError("sample_token_mirostat is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # m, + # mu, + # ) + + def sample_token_mirostat_v2( + self, + candidates: "_LlamaTokenDataArray", + tau: float, + eta: float, + mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], + ) -> int: + raise NotImplementedError("sample_token_mirostat_v2 is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat_v2( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # mu, + # ) + + def sample_token_greedy(self, candidates: "_LlamaTokenDataArray") -> int: + raise NotImplementedError("sample_token_greedy is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_greedy( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + + def sample_token(self, candidates: "_LlamaTokenDataArray") -> int: + raise NotImplementedError("sample_token is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + + # Grammar + def grammar_accept_token(self, grammar: LlamaGrammar, token: int): + raise NotImplementedError("grammar_accept_token is not implemented in llama.cpp") + # llama_cpp.llama_grammar_accept_token(grammar.grammar, self.ctx, token) + + def reset_timings(self): + llama_cpp.llama_perf_context_reset(self.ctx) + + def print_timings(self): + llama_cpp.llama_perf_context_print(self.ctx) + + # Utility functions + @staticmethod + def default_params(): + """Get the default llama_context_params.""" + return llama_cpp.llama_context_default_params() + + +class LlamaBatch: + def __init__( + self, *, n_tokens: int, embd: int, n_seq_max: int, verbose: bool = True + ): + self._n_tokens = n_tokens + self.embd = embd + self.n_seq_max = n_seq_max + self.verbose = verbose + self._exit_stack = ExitStack() + + batch = llama_cpp.llama_batch_init(self._n_tokens, self.embd, self.n_seq_max) + + 
if batch is None: + raise ValueError("Failed to create llama_batch") + + self.batch = batch + + def free_batch(): + if self.batch is None: + return + llama_cpp.llama_batch_free(self.batch) + self.batch = None + + self._exit_stack.callback(free_batch) + + def close(self): + self._exit_stack.close() + + def __del__(self): + self.close() + + def n_tokens(self) -> int: + return self.batch.n_tokens + + def reset(self): + self.batch.n_tokens = 0 + + def set_batch(self, batch: Sequence[int], n_past: int, logits_all: bool): + n_tokens = len(batch) + self.batch.n_tokens = n_tokens + for i in range(n_tokens): + self.batch.token[i] = batch[i] + self.batch.pos[i] = n_past + i + self.batch.seq_id[i][0] = 0 + self.batch.n_seq_id[i] = 1 + self.batch.logits[i] = logits_all + self.batch.logits[n_tokens - 1] = True + + def add_sequence(self, batch: Sequence[int], seq_id: int, logits_all: bool): + n_tokens = len(batch) + n_tokens0 = self.batch.n_tokens + self.batch.n_tokens += n_tokens + for i in range(n_tokens): + j = n_tokens0 + i + self.batch.token[j] = batch[i] + self.batch.pos[j] = i + self.batch.seq_id[j][0] = seq_id + self.batch.n_seq_id[j] = 1 + self.batch.logits[j] = logits_all + self.batch.logits[n_tokens - 1] = True + + +class LlamaTokenDataArray: + def __init__(self, *, n_vocab: int): + self.n_vocab = n_vocab + self.candidates_data = np.recarray( + (self.n_vocab,), + dtype=np.dtype( + [("id", np.intc), ("logit", np.single), ("p", np.single)], align=True + ), + ) + self.candidates = llama_cpp.llama_token_data_array( + data=self.candidates_data.ctypes.data_as(llama_cpp.llama_token_data_p), + size=self.n_vocab, + sorted=False, + ) + self.default_candidates_data_id = np.arange(self.n_vocab, dtype=np.intc) # type: ignore + self.default_candidates_data_p = np.zeros(self.n_vocab, dtype=np.single) + + def copy_logits(self, logits: npt.NDArray[np.single]): + self.candidates_data.id[:] = self.default_candidates_data_id + self.candidates_data.logit[:] = logits + self.candidates_data.p[:] = self.default_candidates_data_p + self.candidates.sorted = False + self.candidates.size = self.n_vocab + + +# Embedding functions + + +def normalize_embedding(embedding): + norm = float(np.linalg.norm(embedding)) + if norm == 0.0: + return embedding + return [v / norm for v in embedding] + + +# Python wrappers over common/sampling structs + + +@dataclass +class LlamaSamplingParams: + n_prev: int = 64 + n_probs: int = 0 + top_k: int = 40 + top_p: float = 0.95 + min_p: float = 0.05 + tfs_z: float = 1.00 + typical_p: float = 1.00 + temp: float = 0.80 + penalty_last_n: int = 64 + penalty_repeat: float = 1.0 + penalty_freq: float = 0.00 + penalty_present: float = 0.00 + mirostat: int = 0 + mirostat_tau: float = 5.00 + mirostat_eta: float = 0.10 + penalize_nl: bool = True + + grammar: str = "" + + cfg_negative_prompt: str = "" + cfg_scale: float = 1.00 + + logit_bias: dict[int, float] = field(default_factory=dict) + + +@dataclass +class LlamaSamplingContext: + params: LlamaSamplingParams = field(default_factory=LlamaSamplingParams) + mirostat_mu: ctypes.c_float = field(default_factory=ctypes.c_float) + grammar: Optional[LlamaGrammar] = None + # NOTE: Missing parsed_grammar + prev: list[int] = field(default_factory=list) + cur: list[llama_cpp.llama_token_data] = field(default_factory=list) + + def reset(self): + self.prev = [] + self.cur = [] + if self.grammar is not None: + self.grammar.reset() + + def cp(self): + return LlamaSamplingContext( + params=self.params, + mirostat_mu=self.mirostat_mu, + grammar=self.grammar, + 
prev=self.prev.copy(), + cur=self.cur.copy(), + ) + + def last(self) -> Optional[int]: + if len(self.prev) > 0: + return self.prev[-1] + else: + return None + + def prev_str(self, ctx_main: LlamaContext, n: int) -> str: + return ctx_main.model.detokenize(self.prev[-n:]).decode("utf-8") + + def sample( + self, + ctx_main: LlamaContext, + idx: int = 0, + logits_array: Optional[npt.NDArray[np.single]] = None, + ): + n_vocab = ctx_main.model.n_vocab() + id: int = 0 + + if logits_array is None: + logits = ctx_main.get_logits_ith(idx) + logits_array = np.array( + ctypes.cast(logits, ctypes.POINTER(ctypes.c_float * n_vocab)).contents, + dtype=np.single, + ) + + # apply logit_bias + for token, logit_bias in self.params.logit_bias.items(): + logits_array[token] += logit_bias + + token_data_array = LlamaTokenDataArray( + n_vocab=n_vocab + ) # TODO: Only create this once + token_data_array.copy_logits(logits_array) + + # apply penalties + if len(self.prev) > 0: + nl_token = ctx_main.model.token_nl() + nl_logit = logits_array[nl_token] + last_tokens = self.prev[-self.params.penalty_last_n :] + last_tokens_size = min(len(last_tokens), self.params.penalty_last_n) + if last_tokens_size > 0: + last_tokens_p = (llama_cpp.llama_token * len(last_tokens))(*last_tokens) + ctx_main.sample_repetition_penalties( + token_data_array, + last_tokens_p, + last_tokens_size, + self.params.penalty_repeat, + self.params.penalty_freq, + self.params.penalty_present, + ) + if not self.params.penalize_nl: + token_data_array.candidates_data.logit[nl_token] = nl_logit + + if self.grammar is not None: + ctx_main.sample_grammar(token_data_array, self.grammar) + + if self.params.temp < 0: + ctx_main.sample_softmax(token_data_array) + id = token_data_array.candidates_data.id[0] + elif self.params.temp == 0: + id = ctx_main.sample_token_greedy(token_data_array) + else: + if self.params.mirostat == 1: + mirostat_m = 100 + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token_mirostat( + token_data_array, + self.params.mirostat_tau, + self.params.mirostat_eta, + mirostat_m, + ctypes.pointer(self.mirostat_mu), + ) + elif self.params.mirostat == 2: + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token_mirostat_v2( + token_data_array, + self.params.mirostat_tau, + self.params.mirostat_eta, + ctypes.pointer(self.mirostat_mu), + ) + else: + min_keep = max(1, self.params.n_probs) + ctx_main.sample_top_k( + token_data_array, self.params.top_k, min_keep=min_keep + ) + ctx_main.sample_typical( + token_data_array, self.params.typical_p, min_keep=min_keep + ) + ctx_main.sample_top_p( + token_data_array, self.params.top_p, min_keep=min_keep + ) + ctx_main.sample_min_p( + token_data_array, self.params.min_p, min_keep=min_keep + ) + ctx_main.sample_temp(token_data_array, self.params.temp) + id = ctx_main.sample_token(token_data_array) + return id + + def accept(self, ctx_main: LlamaContext, id: int, apply_grammar: bool): + if apply_grammar and self.grammar is not None: + ctx_main.grammar_accept_token(self.grammar, id) + self.prev.append(id) + + +from typing import List, Callable, Optional, Union +import ctypes +import llama_cpp + + +class CustomSampler: + def __init__( + self, apply_func: typing.Callable[[llama_cpp.llama_token_data_array], None] + ): + self.apply_func = apply_func + + def apply_wrapper( + sampler: llama_cpp.llama_sampler_p, + cur_p: llama_cpp.llama_token_data_array_p, + ): + self.apply_func(cur_p) + + def free_wrapper(sampler: llama_cpp.llama_sampler_p): + pass + + 
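+        # Assemble the llama_sampler_i callback struct: only `apply` is wired to the
+        # Python callback, and a reference to the ctypes wrapper is kept so it is not
+        # garbage collected; the remaining slots are left as NULL function pointers.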
sampler_i = llama_cpp.llama_sampler_i() + sampler_i.apply = llama_cpp.llama_sampler_i_apply(apply_wrapper) + self._apply_wrapper_ref = apply_wrapper + + sampler_i.name = llama_cpp.llama_sampler_i_name(0) + sampler_i.accept = llama_cpp.llama_sampler_i_accept(0) + sampler_i.reset = llama_cpp.llama_sampler_i_reset(0) + sampler_i.clone = llama_cpp.llama_sampler_i_clone(0) + sampler_i.free = llama_cpp.llama_sampler_i_free(0) + + self.sampler = llama_cpp.llama_sampler() + self.sampler.iface = ctypes.pointer(sampler_i) + self.sampler.ctx = None + + def get_sampler(self) -> llama_cpp.llama_sampler_p: + return ctypes.pointer(self.sampler) + + +class LlamaSampler: + def __init__(self): + params = llama_cpp.llama_sampler_chain_params() + self.sampler = llama_cpp.llama_sampler_chain_init(params) + self.samplers: List[llama_cpp.llama_sampler_p] = [] + self.custom_samplers: List[Tuple[int, CustomSampler]] = [] + + def add_greedy(self): + sampler = llama_cpp.llama_sampler_init_greedy() + self._add_sampler(sampler) + + def add_dist(self, seed: int): + sampler = llama_cpp.llama_sampler_init_dist(seed) + self._add_sampler(sampler) + + def add_softmax(self): + sampler = llama_cpp.llama_sampler_init_softmax() + self._add_sampler(sampler) + + def add_top_k(self, k: int): + sampler = llama_cpp.llama_sampler_init_top_k(k) + self._add_sampler(sampler) + + def add_top_p(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_top_p(p, min_keep) + self._add_sampler(sampler) + + def add_min_p(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_min_p(p, min_keep) + self._add_sampler(sampler) + + def add_typical(self, p: float, min_keep: int): + sampler = llama_cpp.llama_sampler_init_typical(p, min_keep) + self._add_sampler(sampler) + + def add_temp(self, temp: float): + sampler = llama_cpp.llama_sampler_init_temp(temp) + self._add_sampler(sampler) + + def add_temp_ext(self, t: float, delta: float, exponent: float): + sampler = llama_cpp.llama_sampler_init_temp_ext(t, delta, exponent) + self._add_sampler(sampler) + + def add_mirostat(self, n_vocab: int, seed: int, tau: float, eta: float, m: int): + sampler = llama_cpp.llama_sampler_init_mirostat(n_vocab, seed, tau, eta, m) + self._add_sampler(sampler) + + def add_mirostat_v2(self, seed: int, tau: float, eta: float): + sampler = llama_cpp.llama_sampler_init_mirostat_v2(seed, tau, eta) + self._add_sampler(sampler) + + def add_grammar(self, model: LlamaModel, grammar: LlamaGrammar): + sampler = llama_cpp.llama_sampler_init_grammar( + model.vocab, grammar._grammar.encode("utf-8"), grammar._root.encode("utf-8") + ) + self._add_sampler(sampler) + + def add_penalties( + self, + n_vocab: int, + special_eos_id: int, + linefeed_id: int, + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + penalize_nl: bool, + ignore_eos: bool, + ): + sampler = llama_cpp.llama_sampler_init_penalties( + penalty_last_n, + penalty_repeat, + penalty_freq, + penalty_present, + ) + self._add_sampler(sampler) + + def init_logit_bias( + self, n_vocab: int, n_logit_bias, logit_bias: llama_cpp.llama_logit_bias_p + ): + sampler = llama_cpp.llama_sampler_init_logit_bias( + n_vocab, n_logit_bias, logit_bias + ) + self._add_sampler(sampler) + + def add_custom( + self, apply_func: Callable[[llama_cpp.llama_token_data_array], None] + ): + custom_sampler = CustomSampler(apply_func) + sampler = custom_sampler.get_sampler() + self._add_sampler(sampler) + # NOTE: Must remove custom samplers before free or llama.cpp will try to free 
them + self.custom_samplers.append( + (llama_cpp.llama_sampler_chain_n(self.sampler) - 1, custom_sampler) + ) + + def _add_sampler(self, sampler: llama_cpp.llama_sampler_p): + assert self.sampler is not None + llama_cpp.llama_sampler_chain_add(self.sampler, sampler) + self.samplers.append(sampler) + + def get_seed(self) -> int: + assert self.sampler is not None + return llama_cpp.llama_sampler_get_seed(self.sampler) + + def sample(self, ctx: LlamaContext, idx: int) -> int: + assert self.sampler is not None + assert ctx.ctx is not None + return llama_cpp.llama_sampler_sample(self.sampler, ctx.ctx, idx) + + def close(self): + if self.sampler: + # NOTE: Must remove custom samplers before free or llama.cpp will try to free them + for i, _ in reversed(self.custom_samplers): + llama_cpp.llama_sampler_chain_remove(self.sampler, i) + llama_cpp.llama_sampler_free(self.sampler) + self.sampler = None + self.samplers.clear() + self.custom_samplers.clear() + + def __del__(self): + self.close() diff --git a/llama_cpp/_logger.py b/llama_cpp/_logger.py new file mode 100644 index 000000000..787b3f108 --- /dev/null +++ b/llama_cpp/_logger.py @@ -0,0 +1,47 @@ +import sys +import ctypes +import logging + +import llama_cpp + +# enum ggml_log_level { +# GGML_LOG_LEVEL_NONE = 0, +# GGML_LOG_LEVEL_INFO = 1, +# GGML_LOG_LEVEL_WARN = 2, +# GGML_LOG_LEVEL_ERROR = 3, +# GGML_LOG_LEVEL_DEBUG = 4, +# GGML_LOG_LEVEL_CONT = 5, // continue previous log +# }; +GGML_LOG_LEVEL_TO_LOGGING_LEVEL = { + 0: logging.CRITICAL, + 1: logging.INFO, + 2: logging.WARNING, + 3: logging.ERROR, + 4: logging.DEBUG, + 5: logging.DEBUG, +} + +logger = logging.getLogger("llama-cpp-python") + +_last_log_level = GGML_LOG_LEVEL_TO_LOGGING_LEVEL[0] + +# typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data); +@llama_cpp.llama_log_callback +def llama_log_callback( + level: int, + text: bytes, + user_data: ctypes.c_void_p, +): + # TODO: Correctly implement continue previous log + global _last_log_level + log_level = GGML_LOG_LEVEL_TO_LOGGING_LEVEL[level] if level != 5 else _last_log_level + if logger.level <= GGML_LOG_LEVEL_TO_LOGGING_LEVEL[level]: + print(text.decode("utf-8"), end="", flush=True, file=sys.stderr) + _last_log_level = log_level + + +llama_cpp.llama_log_set(llama_log_callback, ctypes.c_void_p(0)) + + +def set_verbose(verbose: bool): + logger.setLevel(logging.DEBUG if verbose else logging.ERROR) diff --git a/llama_cpp/_utils.py b/llama_cpp/_utils.py new file mode 100644 index 000000000..29628193b --- /dev/null +++ b/llama_cpp/_utils.py @@ -0,0 +1,78 @@ +import os +import sys + +from typing import Any, Dict + +# Avoid "LookupError: unknown encoding: ascii" when open() called in a destructor +outnull_file = open(os.devnull, "w") +errnull_file = open(os.devnull, "w") + +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + + +class suppress_stdout_stderr(object): + # NOTE: these must be "saved" here to avoid exceptions when using + # this context manager inside of a __del__ method + sys = sys + os = os + + def __init__(self, disable: bool = True): + self.disable = disable + + # Oddly enough this works better than the contextlib version + def __enter__(self): + if self.disable: + return self + + self.old_stdout_fileno_undup = STDOUT_FILENO + self.old_stderr_fileno_undup = STDERR_FILENO + + self.old_stdout_fileno = self.os.dup(self.old_stdout_fileno_undup) + self.old_stderr_fileno = self.os.dup(self.old_stderr_fileno_undup) + + self.old_stdout = self.sys.stdout + self.old_stderr = self.sys.stderr + + 
self.os.dup2(outnull_file.fileno(), self.old_stdout_fileno_undup) + self.os.dup2(errnull_file.fileno(), self.old_stderr_fileno_undup) + + self.sys.stdout = outnull_file + self.sys.stderr = errnull_file + return self + + def __exit__(self, *_): + if self.disable: + return + + # Check if sys.stdout and sys.stderr have fileno method + self.sys.stdout = self.old_stdout + self.sys.stderr = self.old_stderr + + self.os.dup2(self.old_stdout_fileno, self.old_stdout_fileno_undup) + self.os.dup2(self.old_stderr_fileno, self.old_stderr_fileno_undup) + + self.os.close(self.old_stdout_fileno) + self.os.close(self.old_stderr_fileno) + + +class MetaSingleton(type): + """ + Metaclass for implementing the Singleton pattern. + """ + + _instances: Dict[type, Any] = {} + + def __call__(cls, *args: Any, **kwargs: Any) -> Any: + if cls not in cls._instances: + cls._instances[cls] = super(MetaSingleton, cls).__call__(*args, **kwargs) + return cls._instances[cls] + + +class Singleton(object, metaclass=MetaSingleton): + """ + Base class for implementing the Singleton pattern. + """ + + def __init__(self): + super(Singleton, self).__init__() diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index bd8f49f5e..7e9a6af23 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -1,51 +1,192 @@ +from __future__ import annotations + import os import sys import uuid import time +import json +import ctypes +import typing +import random +import fnmatch +import warnings +import contextlib import multiprocessing -from typing import List, Optional, Union, Generator, Sequence, Iterator + +from typing import ( + Any, + List, + Literal, + Optional, + Union, + Generator, + Sequence, + Iterator, + Deque, + Callable, + Dict, +) from collections import deque +from pathlib import Path + -from . import llama_cpp from .llama_types import * +from .llama_grammar import LlamaGrammar +from .llama_cache import ( + BaseLlamaCache, + LlamaCache, # type: ignore + LlamaDiskCache, # type: ignore + LlamaRAMCache, # type: ignore +) +from .llama_tokenizer import BaseLlamaTokenizer, LlamaTokenizer +import llama_cpp.llama_cpp as llama_cpp +import llama_cpp.llama_chat_format as llama_chat_format + +from llama_cpp.llama_speculative import LlamaDraftModel + +import numpy as np +import numpy.typing as npt + +import llama_cpp._internals as internals +from ._logger import set_verbose +from ._utils import suppress_stdout_stderr class Llama: """High-level Python wrapper for a llama.cpp model.""" + __backend_initialized = False + def __init__( self, model_path: str, - # NOTE: These parameters are likely to change in the future. 
- n_ctx: int = 512, - n_parts: int = -1, - seed: int = 1337, - f16_kv: bool = False, - logits_all: bool = False, + *, + # Model Params + n_gpu_layers: int = 0, + split_mode: int = llama_cpp.LLAMA_SPLIT_MODE_LAYER, + main_gpu: int = 0, + tensor_split: Optional[List[float]] = None, + rpc_servers: Optional[str] = None, vocab_only: bool = False, + use_mmap: bool = True, use_mlock: bool = False, - embedding: bool = False, + kv_overrides: Optional[Dict[str, Union[bool, int, float, str]]] = None, + # Context Params + seed: int = llama_cpp.LLAMA_DEFAULT_SEED, + n_ctx: int = 512, + n_batch: int = 512, + n_ubatch: int = 512, n_threads: Optional[int] = None, - n_batch: int = 8, + n_threads_batch: Optional[int] = None, + rope_scaling_type: Optional[ + int + ] = llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, + pooling_type: int = llama_cpp.LLAMA_POOLING_TYPE_UNSPECIFIED, + rope_freq_base: float = 0.0, + rope_freq_scale: float = 0.0, + yarn_ext_factor: float = -1.0, + yarn_attn_factor: float = 1.0, + yarn_beta_fast: float = 32.0, + yarn_beta_slow: float = 1.0, + yarn_orig_ctx: int = 0, + logits_all: bool = False, + embedding: bool = False, + offload_kqv: bool = True, + flash_attn: bool = False, + # Sampling Params + no_perf: bool = False, last_n_tokens_size: int = 64, + # LoRA Params + lora_base: Optional[str] = None, + lora_scale: float = 1.0, + lora_path: Optional[str] = None, + # Backend Params + numa: Union[bool, int] = False, + # Chat Format Params + chat_format: Optional[str] = None, + chat_handler: Optional[llama_chat_format.LlamaChatCompletionHandler] = None, + # Speculative Decoding + draft_model: Optional[LlamaDraftModel] = None, + # Tokenizer Override + tokenizer: Optional[BaseLlamaTokenizer] = None, + # KV cache quantization + type_k: Optional[int] = None, + type_v: Optional[int] = None, + # Misc + spm_infill: bool = False, verbose: bool = True, + # Extra Params + **kwargs, # type: ignore ): """Load a llama.cpp model from `model_path`. + Examples: + Basic usage + + >>> import llama_cpp + >>> model = llama_cpp.Llama( + ... model_path="path/to/model", + ... ) + >>> print(model("The quick brown fox jumps ", stop=["."])["choices"][0]["text"]) + the lazy dog + + Loading a chat model + + >>> import llama_cpp + >>> model = llama_cpp.Llama( + ... model_path="path/to/model", + ... chat_format="llama-2", + ... ) + >>> print(model.create_chat_completion( + ... messages=[{ + ... "role": "user", + ... "content": "what is the meaning of life?" + ... }] + ... )) + Args: model_path: Path to the model. - n_ctx: Maximum context size. - n_parts: Number of parts to split the model into. If -1, the number of parts is automatically determined. - seed: Random seed. 0 for random. - f16_kv: Use half-precision for key/value cache. - logits_all: Return logits for all tokens, not just the last token. + n_gpu_layers: Number of layers to offload to GPU (-ngl). If -1, all layers are offloaded. + split_mode: How to split the model across GPUs. See llama_cpp.LLAMA_SPLIT_* for options. + main_gpu: main_gpu interpretation depends on split_mode: LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model. LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results. LLAMA_SPLIT_MODE_LAYER: ignored + tensor_split: How split tensors should be distributed across GPUs. If None, the model is not split. + rpc_servers: Comma separated list of RPC servers to use for offloading vocab_only: Only load the vocabulary no weights. + use_mmap: Use mmap if possible. 
use_mlock: Force the system to keep the model in RAM. + kv_overrides: Key-value overrides for the model. + seed: RNG seed, -1 for random + n_ctx: Text context, 0 = from model + n_batch: Prompt processing maximum batch size + n_ubatch: Physical batch size + n_threads: Number of threads to use for generation + n_threads_batch: Number of threads to use for batch processing + rope_scaling_type: RoPE scaling type, from `enum llama_rope_scaling_type`. ref: https://github.com/ggerganov/llama.cpp/pull/2054 + pooling_type: Pooling type, from `enum llama_pooling_type`. + rope_freq_base: RoPE base frequency, 0 = from model + rope_freq_scale: RoPE frequency scaling factor, 0 = from model + yarn_ext_factor: YaRN extrapolation mix factor, negative = from model + yarn_attn_factor: YaRN magnitude scaling factor + yarn_beta_fast: YaRN low correction dim + yarn_beta_slow: YaRN high correction dim + yarn_orig_ctx: YaRN original context size + logits_all: Return logits for all tokens, not just the last token. Must be True for completion to return logprobs. embedding: Embedding mode only. - n_threads: Number of threads to use. If None, the number of threads is automatically determined. - n_batch: Maximum number of prompt tokens to batch together when calling llama_eval. + offload_kqv: Offload K, Q, V to GPU. + flash_attn: Use flash attention. + no_perf: Measure performance timings. last_n_tokens_size: Maximum number of tokens to keep in the last_n_tokens deque. + lora_base: Optional path to base model, useful if using a quantized base model and you want to apply LoRA to an f16 model. + lora_path: Path to a LoRA file to apply to the model. + numa: numa policy + chat_format: String specifying the chat format to use when calling create_chat_completion. + chat_handler: Optional chat handler to use when calling create_chat_completion. + draft_model: Optional draft model to use for speculative decoding. + tokenizer: Optional tokenizer to override the default tokenizer from llama.cpp. verbose: Print verbose output to stderr. + type_k: KV cache data type for K (default: f16) + type_v: KV cache data type for V (default: f16) + spm_infill: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. Raises: ValueError: If the model path does not exist. @@ -54,43 +195,391 @@ def __init__( A Llama instance. 
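        Example — GPU offload and full training context (illustrative; the model path is a placeholder and assumes a GPU-enabled build):

            >>> llm = llama_cpp.Llama(
            ...     model_path="path/to/model.gguf",
            ...     n_gpu_layers=-1,  # -1 offloads all layers
            ...     n_ctx=0,          # 0 uses the model's training context length
            ... )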
""" self.verbose = verbose - self.model_path = model_path + self._stack = contextlib.ExitStack() - self.params = llama_cpp.llama_context_default_params() - self.params.n_ctx = n_ctx - self.params.n_parts = n_parts - self.params.seed = seed - self.params.f16_kv = f16_kv - self.params.logits_all = logits_all - self.params.vocab_only = vocab_only - self.params.use_mlock = use_mlock - self.params.embedding = embedding + set_verbose(verbose) - self.last_n_tokens_size = last_n_tokens_size - self.last_n_tokens_data = deque( - [llama_cpp.llama_token(0)] * self.last_n_tokens_size, - maxlen=self.last_n_tokens_size, - ) - self.tokens_consumed = 0 - self.n_batch = min(n_ctx, n_batch) + if not Llama.__backend_initialized: + with suppress_stdout_stderr(disable=verbose): + llama_cpp.llama_backend_init() + Llama.__backend_initialized = True + + if isinstance(numa, bool): + self.numa = ( + llama_cpp.GGML_NUMA_STRATEGY_DISTRIBUTE + if numa + else llama_cpp.GGML_NUMA_STRATEGY_DISABLED + ) + else: + self.numa = numa + + if self.numa != llama_cpp.GGML_NUMA_STRATEGY_DISABLED: + with suppress_stdout_stderr(disable=verbose): + llama_cpp.llama_numa_init(self.numa) + self.model_path = model_path + + # Model Params + self.model_params = llama_cpp.llama_model_default_params() + self.model_params.n_gpu_layers = ( + 0x7FFFFFFF if n_gpu_layers == -1 else n_gpu_layers + ) # 0x7FFFFFFF is INT32 max, will be auto set to all layers + self.model_params.split_mode = split_mode + self.model_params.main_gpu = main_gpu + if rpc_servers is not None: + self.model_params.rpc_servers = rpc_servers.encode("utf-8") + self._rpc_servers = rpc_servers + else: + self._rpc_servers = None + self.tensor_split = tensor_split + self._c_tensor_split = None + if self.tensor_split is not None: + if len(self.tensor_split) > llama_cpp.LLAMA_MAX_DEVICES: + raise ValueError( + f"Attempt to split tensors that exceed maximum supported devices. 
Current LLAMA_MAX_DEVICES={llama_cpp.LLAMA_MAX_DEVICES}" + ) + # Type conversion and expand the list to the length of LLAMA_MAX_DEVICES + FloatArray = ctypes.c_float * llama_cpp.LLAMA_MAX_DEVICES + self._c_tensor_split = FloatArray( + *tensor_split # type: ignore + ) # keep a reference to the array so it is not gc'd + self.model_params.tensor_split = self._c_tensor_split + self.model_params.vocab_only = vocab_only + self.model_params.use_mmap = use_mmap if lora_path is None else False + self.model_params.use_mlock = use_mlock + + # kv_overrides is the original python dict + self.kv_overrides = kv_overrides + if kv_overrides is not None: + # _kv_overrides_array is a ctypes.Array of llama_model_kv_override Structs + kvo_array_len = len(kv_overrides) + 1 # for sentinel element + self._kv_overrides_array = ( + llama_cpp.llama_model_kv_override * kvo_array_len + )() + + for i, (k, v) in enumerate(kv_overrides.items()): + self._kv_overrides_array[i].key = k.encode("utf-8") + if isinstance(v, bool): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_BOOL + self._kv_overrides_array[i].value.val_bool = v + elif isinstance(v, int): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_INT + self._kv_overrides_array[i].value.val_i64 = v + elif isinstance(v, float): + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_FLOAT + self._kv_overrides_array[i].value.val_f64 = v + elif isinstance(v, str): # type: ignore + v_bytes = v.encode("utf-8") + if len(v_bytes) > 128: # TODO: Make this a constant + raise ValueError(f"Value for {k} is too long: {v}") + v_bytes = v_bytes.ljust(128, b"\0") + self._kv_overrides_array[ + i + ].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_STR + # copy min(v_bytes, 128) to str_value + address = typing.cast( + int, + ctypes.addressof(self._kv_overrides_array[i].value) + + llama_cpp.llama_model_kv_override_value.val_str.offset, + ) + buffer_start = ctypes.cast(address, ctypes.POINTER(ctypes.c_char)) + ctypes.memmove( + buffer_start, + v_bytes, + 128, + ) + else: + raise ValueError(f"Unknown value type for {k}: {v}") + + self._kv_overrides_array[ + -1 + ].key = b"\0" # ensure sentinel element is zeroed + self.model_params.kv_overrides = self._kv_overrides_array + + self.n_batch = min(n_ctx, n_batch) # ??? 
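The kv_overrides block above packs the user-supplied dict into a sentinel-terminated array of `llama_model_kv_override` structs, choosing the override tag from the Python value type and copying string values into a fixed 128-byte buffer. A minimal caller-side sketch (the metadata keys below are hypothetical):

```python
# Hypothetical keys; each value type maps to a different override tag as handled above.
llm = Llama(
    model_path="path/to/model.gguf",
    kv_overrides={
        "example.bool_key": True,    # LLAMA_KV_OVERRIDE_TYPE_BOOL
        "example.int_key": 4096,     # LLAMA_KV_OVERRIDE_TYPE_INT
        "example.float_key": 1.5,    # LLAMA_KV_OVERRIDE_TYPE_FLOAT
        "example.str_key": "value",  # LLAMA_KV_OVERRIDE_TYPE_STR, at most 128 bytes
    },
)
```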
self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1) + self.n_threads_batch = n_threads_batch or multiprocessing.cpu_count() + + # Used by the sampler + self._seed = seed or llama_cpp.LLAMA_DEFAULT_SEED + + # Context Params + self.context_params = llama_cpp.llama_context_default_params() + self.context_params.n_ctx = n_ctx + self.context_params.n_batch = self.n_batch + self.context_params.n_ubatch = min(self.n_batch, n_ubatch) + self.context_params.n_threads = self.n_threads + self.context_params.n_threads_batch = self.n_threads_batch + self.context_params.rope_scaling_type = ( + rope_scaling_type + if rope_scaling_type is not None + else llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED + ) + self.context_params.pooling_type = pooling_type + self.context_params.rope_freq_base = ( + rope_freq_base if rope_freq_base != 0.0 else 0 + ) + self.context_params.rope_freq_scale = ( + rope_freq_scale if rope_freq_scale != 0.0 else 0 + ) + self.context_params.yarn_ext_factor = ( + yarn_ext_factor if yarn_ext_factor != 0.0 else 0 + ) + self.context_params.yarn_attn_factor = ( + yarn_attn_factor if yarn_attn_factor != 0.0 else 0 + ) + self.context_params.yarn_beta_fast = ( + yarn_beta_fast if yarn_beta_fast != 0.0 else 0 + ) + self.context_params.yarn_beta_slow = ( + yarn_beta_slow if yarn_beta_slow != 0.0 else 0 + ) + self.context_params.yarn_orig_ctx = yarn_orig_ctx if yarn_orig_ctx != 0 else 0 + self.context_params.logits_all = ( + logits_all if draft_model is None else True + ) # Must be set to True for speculative decoding + self.context_params.embeddings = embedding # TODO: Rename to embeddings + self.context_params.offload_kqv = offload_kqv + self.context_params.flash_attn = flash_attn + # KV cache quantization + if type_k is not None: + self.context_params.type_k = type_k + if type_v is not None: + self.context_params.type_v = type_v + # Sampling Params + self.context_params.no_perf = no_perf + self.last_n_tokens_size = last_n_tokens_size + + self.cache: Optional[BaseLlamaCache] = None + + self.lora_base = lora_base + self.lora_scale = lora_scale + self.lora_path = lora_path + + self.spm_infill = spm_infill if not os.path.exists(model_path): raise ValueError(f"Model path does not exist: {model_path}") - self.ctx = llama_cpp.llama_init_from_file( - self.model_path.encode("utf-8"), self.params + self._model = self._stack.enter_context( + contextlib.closing( + internals.LlamaModel( + path_model=self.model_path, + params=self.model_params, + verbose=self.verbose, + ) + ) + ) + + # Override tokenizer + self.tokenizer_ = tokenizer or LlamaTokenizer(self) + + # Set the default value for the context and correct the batch + if n_ctx == 0: + n_ctx = self._model.n_ctx_train() + self.n_batch = min(n_ctx, n_batch) + self.context_params.n_ctx = self._model.n_ctx_train() + self.context_params.n_batch = self.n_batch + self.context_params.n_ubatch = min(self.n_batch, n_ubatch) + + self._ctx = self._stack.enter_context( + contextlib.closing( + internals.LlamaContext( + model=self._model, + params=self.context_params, + verbose=self.verbose, + ) + ) + ) + + self._batch = self._stack.enter_context( + contextlib.closing( + internals.LlamaBatch( + n_tokens=self.n_batch, + embd=0, + n_seq_max=self.context_params.n_ctx, + verbose=self.verbose, + ) + ) ) + self._lora_adapter: Optional[llama_cpp.llama_adapter_lora_p] = None + + if self.lora_path: + self._lora_adapter = llama_cpp.llama_adapter_lora_init( + self._model.model, + self.lora_path.encode("utf-8"), + ) + if self._lora_adapter is None: + raise 
RuntimeError( + f"Failed to initialize LoRA adapter from lora path: {self.lora_path}" + ) + + def free_lora_adapter(): + if self._lora_adapter is None: + return + llama_cpp.llama_adapter_lora_free(self._lora_adapter) + self._lora_adapter = None + + self._stack.callback(free_lora_adapter) + + if llama_cpp.llama_set_adapter_lora( + self._ctx.ctx, self._lora_adapter, self.lora_scale + ): + raise RuntimeError( + f"Failed to set LoRA adapter from lora path: {self.lora_path}" + ) + if self.verbose: print(llama_cpp.llama_print_system_info().decode("utf-8"), file=sys.stderr) - def tokenize(self, text: bytes) -> List[llama_cpp.llama_token]: + self.chat_format = chat_format + self.chat_handler = chat_handler + self._chat_handlers: Dict[ + str, llama_chat_format.LlamaChatCompletionHandler + ] = {} + + self.draft_model = draft_model + + self._n_vocab = self.n_vocab() + self._n_ctx = self.n_ctx() + + self._token_nl = self.token_nl() + self._token_eos = self.token_eos() + + self._candidates = internals.LlamaTokenDataArray(n_vocab=self._n_vocab) + + self.n_tokens = 0 + self.input_ids: npt.NDArray[np.intc] = np.ndarray((n_ctx,), dtype=np.intc) + self.scores: npt.NDArray[np.single] = np.ndarray( + (n_ctx if logits_all == True else n_batch, self._n_vocab), dtype=np.single + ) + + self._mirostat_mu = ctypes.c_float( + 2.0 * 5.0 + ) # TODO: Move this to sampling context + + try: + self.metadata = self._model.metadata() + except Exception as e: + self.metadata = {} + if self.verbose: + print(f"Failed to load metadata: {e}", file=sys.stderr) + + if self.verbose: + print(f"Model metadata: {self.metadata}", file=sys.stderr) + + eos_token_id = self.token_eos() + bos_token_id = self.token_bos() + + eos_token = ( + self._model.token_get_text(eos_token_id) if eos_token_id != -1 else "" + ) + bos_token = ( + self._model.token_get_text(bos_token_id) if bos_token_id != -1 else "" + ) + + # Unfortunately the llama.cpp API does not return metadata arrays, so we can't get template names from tokenizer.chat_templates + template_choices = dict( + (name[10:], template) + for name, template in self.metadata.items() + if name.startswith("tokenizer.chat_template.") + ) + + if "tokenizer.chat_template" in self.metadata: + template_choices["chat_template.default"] = self.metadata[ + "tokenizer.chat_template" + ] + + if self.verbose and template_choices: + print( + f"Available chat formats from metadata: {', '.join(template_choices.keys())}", + file=sys.stderr, + ) + + for name, template in template_choices.items(): + self._chat_handlers[name] = llama_chat_format.Jinja2ChatFormatter( + template=template, + eos_token=eos_token, + bos_token=bos_token, + stop_token_ids=[eos_token_id], + ).to_chat_handler() + + if ( + self.chat_format is None + and self.chat_handler is None + and "chat_template.default" in template_choices + ): + chat_format = llama_chat_format.guess_chat_format_from_gguf_metadata( + self.metadata + ) + + if chat_format is not None: + self.chat_format = chat_format + if self.verbose: + print(f"Guessed chat format: {chat_format}", file=sys.stderr) + else: + if self.verbose: + print( + f"Using gguf chat template: {template_choices['chat_template.default']}", + file=sys.stderr, + ) + print(f"Using chat eos_token: {eos_token}", file=sys.stderr) + print(f"Using chat bos_token: {bos_token}", file=sys.stderr) + + self.chat_format = "chat_template.default" + + if self.chat_format is None and self.chat_handler is None: + self.chat_format = "llama-2" + if self.verbose: + print( + f"Using fallback chat format: 
{self.chat_format}", file=sys.stderr + ) + + self._sampler = None + + @property + def ctx(self) -> llama_cpp.llama_context_p: + return self._ctx.ctx + + @property + def model(self) -> llama_cpp.llama_model_p: + return self._model.model + + @property + def _input_ids(self) -> npt.NDArray[np.intc]: + return self.input_ids[: self.n_tokens] + + @property + def _scores(self) -> npt.NDArray[np.single]: + return self.scores[: self.n_tokens, :] + + @property + def eval_tokens(self) -> Deque[int]: + return deque(self.input_ids[: self.n_tokens].tolist(), maxlen=self._n_ctx) + + @property + def eval_logits(self) -> Deque[List[float]]: + return deque( + self.scores[: self.n_tokens, :].tolist(), + maxlen=self._n_ctx if self.context_params.logits_all else 1, + ) + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = False + ) -> List[int]: """Tokenize a string. Args: text: The utf-8 encoded string to tokenize. + add_bos: Whether to add a beginning of sequence token. + special: Whether to tokenize special tokens. Raises: RuntimeError: If the tokenization failed. @@ -98,71 +587,192 @@ def tokenize(self, text: bytes) -> List[llama_cpp.llama_token]: Returns: A list of tokens. """ - assert self.ctx is not None - n_ctx = llama_cpp.llama_n_ctx(self.ctx) - tokens = (llama_cpp.llama_token * int(n_ctx))() - n_tokens = llama_cpp.llama_tokenize( - self.ctx, - text, - tokens, - n_ctx, - llama_cpp.c_bool(True), - ) - if int(n_tokens) < 0: - raise RuntimeError(f'Failed to tokenize: text="{text}" n_tokens={n_tokens}') - return list(tokens[:n_tokens]) - - def detokenize(self, tokens: List[llama_cpp.llama_token]) -> bytes: + return self.tokenizer_.tokenize(text, add_bos, special) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: """Detokenize a list of tokens. Args: tokens: The list of tokens to detokenize. + prev_tokens: The list of previous tokens. Offset mapping will be performed if provided. + special: Whether to detokenize special tokens. Returns: The detokenized string. """ - assert self.ctx is not None - output = b"" - for token in tokens: - output += llama_cpp.llama_token_to_str(self.ctx, token) - return output + return self.tokenizer_.detokenize( + tokens, prev_tokens=prev_tokens, special=special + ) + + def set_cache(self, cache: Optional[BaseLlamaCache]): + """Set the cache. + + Args: + cache: The cache to set. + """ + self.cache = cache + + def set_seed(self, seed: int): + """Set the random seed. + + Args: + seed: The random seed. + """ + self._seed = seed def reset(self): """Reset the model state.""" - self.last_n_tokens_data.extend( - [llama_cpp.llama_token(0)] * self.last_n_tokens_size - ) - self.tokens_consumed = 0 + self.n_tokens = 0 - def eval(self, tokens: Sequence[llama_cpp.llama_token]): + def eval(self, tokens: Sequence[int]): """Evaluate a list of tokens. Args: tokens: The list of tokens to evaluate. 
""" - assert self.ctx is not None - n_ctx = int(llama_cpp.llama_n_ctx(self.ctx)) + self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1) for i in range(0, len(tokens), self.n_batch): batch = tokens[i : min(len(tokens), i + self.n_batch)] - n_past = min(n_ctx - len(batch), self.tokens_consumed) - return_code = llama_cpp.llama_eval( - ctx=self.ctx, - tokens=(llama_cpp.llama_token * len(batch))(*batch), - n_tokens=llama_cpp.c_int(len(batch)), - n_past=llama_cpp.c_int(n_past), - n_threads=llama_cpp.c_int(self.n_threads), + n_past = self.n_tokens + n_tokens = len(batch) + self._batch.set_batch( + batch=batch, n_past=n_past, logits_all=self.context_params.logits_all ) - if int(return_code) != 0: - raise RuntimeError(f"llama_eval returned {return_code}") - self.last_n_tokens_data.extend(batch) - self.tokens_consumed += len(batch) + self._ctx.decode(self._batch) + # Save tokens + self.input_ids[n_past : n_past + n_tokens] = batch + # Save logits + if self.context_params.logits_all: + rows = n_tokens + cols = self._n_vocab + logits = np.ctypeslib.as_array( + self._ctx.get_logits(), shape=(rows * cols,) + ) + self.scores[n_past : n_past + n_tokens, :].reshape(-1)[::] = logits + else: + # rows = 1 + # cols = self._n_vocab + # logits = np.ctypeslib.as_array( + # self._ctx.get_logits(), shape=(rows * cols,) + # ) + # self.scores[n_past + n_tokens - 1, :].reshape(-1)[::] = logits + # NOTE: Now that sampling is done inside the sampler, logits are only needed for logprobs which requires logits_all + pass + # Update n_tokens + self.n_tokens += n_tokens + + def _init_sampler( + self, + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_eta: float = 0.1, + mirostat_tau: float = 5.0, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + ): + sampler = internals.LlamaSampler() + + if logits_processor is not None: + # Create and add a custom sampler + def apply_func(token_data_array: llama_cpp.llama_token_data_array_p): + size = token_data_array.contents.size + data_soa = token_data_array.contents.data + data_soa_address = ctypes.addressof(data_soa.contents) + # NOTE: This is probably broken + recarray = np.recarray( + shape=(size,), + dtype=np.dtype( + [("id", np.intc), ("logit", np.single), ("p", np.single)], + align=True, + ), + buf=(llama_cpp.llama_token_data * size).from_address( + data_soa_address + ), + ) + for logit_processor in logits_processor: + recarray.logit[:] = logit_processor(self._input_ids, recarray.logit) + + sampler.add_custom(apply_func) + + sampler.add_penalties( + n_vocab=self._n_vocab, + special_eos_id=self._token_eos, + linefeed_id=self._token_nl, + penalty_last_n=self.last_n_tokens_size, + penalty_repeat=repeat_penalty, + penalty_freq=frequency_penalty, + penalty_present=presence_penalty, + penalize_nl=penalize_nl, + ignore_eos=False, + ) + + if grammar is not None: + sampler.add_grammar(self._model, grammar) + + if temp < 0.0: + sampler.add_softmax() + sampler.add_dist(self._seed) + elif temp == 0.0: + sampler.add_greedy() + else: + if mirostat_mode == 1: + mirostat_m = 100 + sampler.add_mirostat( + self._n_vocab, + self._seed, + mirostat_tau, + mirostat_eta, + mirostat_m, + ) + elif mirostat_mode == 2: + sampler.add_mirostat_v2( + self._seed, + mirostat_tau, + mirostat_eta, + ) + else: + n_probs = 0 + 
min_keep = max(1, n_probs) + sampler.add_top_k(top_k) + sampler.add_typical(typical_p, min_keep) + sampler.add_top_p(top_p, min_keep) + sampler.add_min_p(min_p, min_keep) + sampler.add_temp(temp) + sampler.add_dist(self._seed) + return sampler def sample( self, - top_k: int, - top_p: float, - temp: float, - repeat_penalty: float, + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_eta: float = 0.1, + mirostat_tau: float = 5.0, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + idx: Optional[int] = None, ): """Sample a token from the model. @@ -175,35 +785,65 @@ def sample( Returns: The sampled token. """ + assert self.n_tokens > 0 + + tmp_sampler = False + + if self._sampler is None: + tmp_sampler = True + self._sampler = self._init_sampler( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + penalize_nl=penalize_nl, + logits_processor=logits_processor, + grammar=grammar, + ) + + ridx = idx - self.n_tokens if idx is not None else -1 + assert self.ctx is not None - return llama_cpp.llama_sample_top_p_top_k( - ctx=self.ctx, - last_n_tokens_data=(llama_cpp.llama_token * self.last_n_tokens_size)( - *self.last_n_tokens_data - ), - last_n_tokens_size=llama_cpp.c_int(self.last_n_tokens_size), - top_k=llama_cpp.c_int(top_k), - top_p=llama_cpp.c_float(top_p), - temp=llama_cpp.c_float(temp), - repeat_penalty=llama_cpp.c_float(repeat_penalty), - ) + token = self._sampler.sample(self._ctx, ridx) + if tmp_sampler: + self._sampler = None + return token def generate( self, - tokens: Sequence[llama_cpp.llama_token], - top_k: int, - top_p: float, - temp: float, - repeat_penalty: float, - ) -> Generator[ - llama_cpp.llama_token, Optional[Sequence[llama_cpp.llama_token]], None - ]: + tokens: Sequence[int], + top_k: int = 40, + top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, + temp: float = 0.80, + repeat_penalty: float = 1.0, + reset: bool = True, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + penalize_nl: bool = True, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + grammar: Optional[LlamaGrammar] = None, + ) -> Generator[int, Optional[Sequence[int]], None]: """Create a generator of tokens from a prompt. Examples: >>> llama = Llama("models/ggml-7b.bin") >>> tokens = llama.tokenize(b"Hello, world!") - >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1): + >>> for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.0): ... print(llama.detokenize([token])) Args: @@ -212,26 +852,114 @@ def generate( top_p: The top-p sampling parameter. temp: The temperature parameter. repeat_penalty: The repeat penalty parameter. + reset: Whether to reset the model state. Yields: The generated tokens. 
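        Example — feeding extra tokens back into the generator (illustrative; relies on the generator `send()` protocol implemented below):

            >>> gen = llama.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.0)
            >>> token = next(gen)
            >>> # the sampled token is re-fed automatically; send() appends extra tokens before the next eval
            >>> token = gen.send(llama.tokenize(b" more context", add_bos=False))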
""" - assert self.ctx is not None - self.reset() + # Reset mirostat sampling + self._mirostat_mu = ctypes.c_float(2.0 * mirostat_tau) + self._sampler = self._init_sampler( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + penalize_nl=penalize_nl, + logits_processor=logits_processor, + grammar=grammar, + ) + + # Check for kv cache prefix match + if reset and self.n_tokens > 0: + longest_prefix = 0 + for a, b in zip(self._input_ids, tokens[:-1]): + if a == b: + longest_prefix += 1 + else: + break + if longest_prefix > 0: + reset = False + tokens = tokens[longest_prefix:] + self.n_tokens = longest_prefix + if self.verbose: + print( + f"Llama.generate: {longest_prefix} prefix-match hit, " + f"remaining {len(tokens)} prompt tokens to eval", + file=sys.stderr, + ) + + # Reset the model state + if reset: + self.reset() + + # # Reset the grammar + # if grammar is not None: + # grammar.reset() + + sample_idx = self.n_tokens + len(tokens) - 1 + tokens = list(tokens) + + # Eval and sample while True: self.eval(tokens) - token = self.sample( - top_k=top_k, - top_p=top_p, - temp=temp, - repeat_penalty=repeat_penalty, - ) - tokens_or_none = yield token - tokens = [token] - if tokens_or_none is not None: - tokens.extend(tokens_or_none) - - def create_embedding(self, input: str) -> Embedding: + while sample_idx < self.n_tokens: + token = self.sample( + top_k=top_k, + top_p=top_p, + min_p=min_p, + typical_p=typical_p, + temp=temp, + repeat_penalty=repeat_penalty, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + logits_processor=logits_processor, + grammar=grammar, + penalize_nl=penalize_nl, + idx=sample_idx, + ) + + sample_idx += 1 + if stopping_criteria is not None and stopping_criteria( + self._input_ids[: sample_idx], self._scores[sample_idx - self.n_tokens, :] + ): + return + tokens_or_none = yield token + tokens.clear() + tokens.append(token) + if tokens_or_none is not None: + tokens.extend(tokens_or_none) + + if sample_idx < self.n_tokens and token != self._input_ids[sample_idx]: + self.n_tokens = sample_idx + self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1) + break + + if self.draft_model is not None: + self.input_ids[self.n_tokens : self.n_tokens + len(tokens)] = tokens + draft_tokens = self.draft_model( + self.input_ids[: self.n_tokens + len(tokens)] + ) + tokens.extend( + draft_tokens.astype(int)[ + : self._n_ctx - self.n_tokens - len(tokens) + ] + ) + + def create_embedding( + self, input: Union[str, List[str]], model: Optional[str] = None + ) -> CreateEmbeddingResponse: """Embed a string. Args: @@ -240,44 +968,42 @@ def create_embedding(self, input: str) -> Embedding: Returns: An embedding object. 
""" - assert self.ctx is not None + model_name: str = model if model is not None else self.model_path - if self.params.embedding == False: - raise RuntimeError( - "Llama model must be created with embedding=True to call this method" - ) + input = input if isinstance(input, list) else [input] - if self.verbose: - llama_cpp.llama_reset_timings(self.ctx) + # get numeric embeddings + embeds: Union[List[List[float]], List[List[List[float]]]] + total_tokens: int + embeds, total_tokens = self.embed(input, return_count=True) # type: ignore - tokens = self.tokenize(input.encode("utf-8")) - self.reset() - self.eval(tokens) - n_tokens = len(tokens) - embedding = llama_cpp.llama_get_embeddings(self.ctx)[ - : llama_cpp.llama_n_embd(self.ctx) + # convert to CreateEmbeddingResponse + data: List[Embedding] = [ + { + "object": "embedding", + "embedding": emb, + "index": idx, + } + for idx, emb in enumerate(embeds) ] - if self.verbose: - llama_cpp.llama_print_timings(self.ctx) - return { "object": "list", - "data": [ - { - "object": "embedding", - "embedding": embedding, - "index": 0, - } - ], - "model": self.model_path, + "data": data, + "model": model_name, "usage": { - "prompt_tokens": n_tokens, - "total_tokens": n_tokens, + "prompt_tokens": total_tokens, + "total_tokens": total_tokens, }, } - def embed(self, input: str) -> List[float]: + def embed( + self, + input: Union[str, List[str]], + normalize: bool = False, + truncate: bool = True, + return_count: bool = False, + ): """Embed a string. Args: @@ -286,59 +1012,351 @@ def embed(self, input: str) -> List[float]: Returns: A list of embeddings """ - return list(map(float, self.create_embedding(input)["data"][0]["embedding"])) + n_embd = self.n_embd() + n_batch = self.n_batch + + # get pooling information + pooling_type = self.pooling_type() + logits_all = pooling_type == llama_cpp.LLAMA_POOLING_TYPE_NONE + + if self.context_params.embeddings is False: + raise RuntimeError( + "Llama model must be created with embedding=True to call this method" + ) + + if self.verbose: + llama_cpp.llama_perf_context_reset(self._ctx.ctx) + + if isinstance(input, str): + inputs = [input] + else: + inputs = input + + # reset batch + self._batch.reset() + + # decode and fetch embeddings + data: Union[List[List[float]], List[List[List[float]]]] = [] + + def decode_batch(seq_sizes: List[int]): + llama_cpp.llama_kv_cache_clear(self._ctx.ctx) + self._ctx.decode(self._batch) + self._batch.reset() + + # store embeddings + if pooling_type == llama_cpp.LLAMA_POOLING_TYPE_NONE: + pos: int = 0 + for i, size in enumerate(seq_sizes): + ptr = llama_cpp.llama_get_embeddings(self._ctx.ctx) + embedding: List[List[float]] = [ + ptr[pos + j * n_embd : pos + (j + 1) * n_embd] + for j in range(size) + ] + if normalize: + embedding = [ + internals.normalize_embedding(e) for e in embedding + ] + data.append(embedding) + pos += size + else: + for i in range(len(seq_sizes)): + ptr = llama_cpp.llama_get_embeddings_seq(self._ctx.ctx, i) + embedding: List[float] = ptr[:n_embd] + if normalize: + embedding = internals.normalize_embedding(embedding) + data.append(embedding) + + # init state + total_tokens = 0 + s_batch = [] + t_batch = 0 + p_batch = 0 + + # accumulate batches and encode + for text in inputs: + tokens = self.tokenize(text.encode("utf-8")) + if truncate: + tokens = tokens[:n_batch] + + n_tokens = len(tokens) + total_tokens += n_tokens + + # check for overrun + if n_tokens > n_batch: + raise ValueError( + f"Requested tokens ({n_tokens}) exceed batch size of {n_batch}" + ) + + # time to eval 
batch + if t_batch + n_tokens > n_batch: + decode_batch(s_batch) + s_batch = [] + t_batch = 0 + p_batch = 0 + + # add to batch + self._batch.add_sequence(tokens, p_batch, logits_all) + + # update batch stats + s_batch.append(n_tokens) + t_batch += n_tokens + p_batch += 1 + + # hanlde last batch + decode_batch(s_batch) + + if self.verbose: + llama_cpp.llama_perf_context_print(self._ctx.ctx) + + output = data[0] if isinstance(input, str) else data + + llama_cpp.llama_kv_cache_clear(self._ctx.ctx) + self.reset() + + if return_count: + return output, total_tokens + else: + return output def _create_completion( self, - prompt: str, + prompt: Union[str, List[int]], suffix: Optional[str] = None, - max_tokens: int = 16, + max_tokens: Optional[int] = 16, temperature: float = 0.8, top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, logprobs: Optional[int] = None, echo: bool = False, - stop: List[str] = [], - repeat_penalty: float = 1.1, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, top_k: int = 40, stream: bool = False, - ) -> Union[Iterator[Completion], Iterator[CompletionChunk],]: - assert self.ctx is not None - completion_id = f"cmpl-{str(uuid.uuid4())}" - created = int(time.time()) - completion_tokens: List[llama_cpp.llama_token] = [] + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[ + Iterator[CreateCompletionResponse], Iterator[CreateCompletionStreamResponse] + ]: + assert suffix is None or suffix.__class__ is str + + completion_id: str = f"cmpl-{str(uuid.uuid4())}" + created: int = int(time.time()) + bos_token_id: int = self.token_bos() + cls_token_id: int = self._model.token_cls() + sep_token_id: int = self._model.token_sep() + prefix_token_id: int = 0 # self._model.token_prefix() # TODO: Fix + middle_token_id: int = 0 # self._model.token_middle() # TODO: Fix + suffix_token_id: int = 0 # self._model.token_suffix() # TODO: Fix + add_space_prefix: bool = ( + self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true" + ) + bos_tokens: List[int] = [cls_token_id if cls_token_id != -1 else bos_token_id] + eos_tokens: List[int] = [ + sep_token_id if sep_token_id != -1 else self.token_eos() + ] + + if ( + (isinstance(prompt, list) and suffix is None) + or not self._model.add_bos_token() + or bos_tokens[:1] == [-1] + ): + bos_tokens = [] + + if (isinstance(prompt, list) and suffix is None) or ( + not self._model.add_eos_token() and sep_token_id == -1 + ): + eos_tokens = [] + + suffix_space_prefix: int = 0 + # Tokenizer hack to remove leading space + if add_space_prefix and suffix_token_id >= 0 and suffix: + suffix = "☺" + suffix + suffix_space_prefix = 2 + + # If prompt is empty, initialize completion with BOS token to avoid + # detokenization including a space at the beginning of the completion + completion_tokens: List[int] = [] if len(prompt) > 0 else [bos_token_id] # Add blank space to start of prompt to match OG llama tokenizer - prompt_tokens = self.tokenize(b" " + prompt.encode("utf-8")) - text = b"" - returned_characters = 0 + prefix_tokens: List[int] = ( + [prefix_token_id] if prefix_token_id >= 0 and suffix is not None else [] + ) 
+ ( + ( + self.tokenize( + prompt.encode("utf-8"), + add_bos=False, + special=(prefix_token_id < 0 or suffix is None), + ) + if prompt != "" + else [] + ) + if isinstance(prompt, str) + else prompt + ) + suffix_tokens: List[int] = ( + ( + [suffix_token_id] + + ( + self.tokenize(suffix.encode("utf-8"), add_bos=False, special=False)[ + suffix_space_prefix: + ] + if suffix + else [] + ) + ) + if suffix_token_id >= 0 and suffix is not None + else [] + ) + middle_tokens: List[int] = ( + [middle_token_id] if middle_token_id >= 0 and suffix is not None else [] + ) + prompt_tokens: List[int] = ( + bos_tokens + + ( + (suffix_tokens + prefix_tokens + middle_tokens) + if self.spm_infill + else (prefix_tokens + suffix_tokens + middle_tokens) + ) + + eos_tokens + ) + text: bytes = b"" + returned_tokens: int = 0 + stop = ( + stop if isinstance(stop, list) else [stop] if isinstance(stop, str) else [] + ) + model_name: str = model if model is not None else self.model_path + + if prompt_tokens[:2] == [self.token_bos()] * 2: + warnings.warn( + f'Detected duplicate leading "{self._model.token_get_text(self.token_bos())}" in prompt, this will likely reduce response quality, consider removing it...', + RuntimeWarning, + ) + + # NOTE: This likely doesn't work correctly for the first token in the prompt + # because of the extra space added to the start of the prompt_tokens + if logit_bias is not None: + logit_bias_map = {int(k): float(v) for k, v in logit_bias.items()} + + def logit_bias_processor( + input_ids: npt.NDArray[np.intc], + scores: npt.NDArray[np.single], + ) -> npt.NDArray[np.single]: + new_scores = np.copy( + scores + ) # Does it make sense to copy the whole array or can we just overwrite the original one? + for input_id, score in logit_bias_map.items(): + new_scores[input_id] = score + scores[input_id] + return new_scores + + _logit_bias_processor = LogitsProcessorList([logit_bias_processor]) + if logits_processor is None: + logits_processor = _logit_bias_processor + else: + logits_processor = logits_processor.extend(_logit_bias_processor) if self.verbose: - llama_cpp.llama_reset_timings(self.ctx) + self._ctx.reset_timings() - if len(prompt_tokens) + max_tokens > int(llama_cpp.llama_n_ctx(self.ctx)): + if len(prompt_tokens) >= self._n_ctx: raise ValueError( - f"Requested tokens exceed context window of {llama_cpp.llama_n_ctx(self.ctx)}" + f"Requested tokens ({len(prompt_tokens)}) exceed context window of {llama_cpp.llama_n_ctx(self.ctx)}" ) + if max_tokens is None or max_tokens <= 0: + # Unlimited, depending on n_ctx. 
+ max_tokens = self._n_ctx - len(prompt_tokens) + + # Truncate max_tokens if requested tokens would exceed the context window + max_tokens = ( + max_tokens + if max_tokens + len(prompt_tokens) < self._n_ctx + else (self._n_ctx - len(prompt_tokens)) + ) + if stop != []: stop_sequences = [s.encode("utf-8") for s in stop] else: stop_sequences = [] - finish_reason = None + if logprobs is not None and self.context_params.logits_all is False: + raise ValueError( + "logprobs is not supported for models created with logits_all=False" + ) + + if self.cache: + try: + cache_item = self.cache[prompt_tokens] + cache_prefix_len = Llama.longest_token_prefix( + cache_item.input_ids.tolist(), prompt_tokens + ) + eval_prefix_len = Llama.longest_token_prefix( + self._input_ids.tolist(), prompt_tokens + ) + if cache_prefix_len > eval_prefix_len: + self.load_state(cache_item) + if self.verbose: + print("Llama._create_completion: cache hit", file=sys.stderr) + except KeyError: + if self.verbose: + print("Llama._create_completion: cache miss", file=sys.stderr) + + if seed is not None: + self.set_seed(seed) + else: + self.set_seed(random.Random(self._seed).randint(0, 2 ** 32)) + + finish_reason = "length" + multibyte_fix = 0 for token in self.generate( prompt_tokens, top_k=top_k, top_p=top_p, + min_p=min_p, + typical_p=typical_p, temp=temperature, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, repeat_penalty=repeat_penalty, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, ): - if token == llama_cpp.llama_token_eos(): - text = self.detokenize(completion_tokens) + if llama_cpp.llama_token_is_eog(self._model.vocab, token): + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) finish_reason = "stop" break + completion_tokens.append(token) - all_text = self.detokenize(completion_tokens) + all_text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + + # Contains multi-byte UTF8 + for k, char in enumerate(all_text[-3:]): + k = 3 - k + for num, pattern in [(2, 192), (3, 224), (4, 240)]: + # Bitwise AND check + if num > k and pattern & char == pattern: + multibyte_fix = num - k + + # Stop incomplete bytes from passing + if multibyte_fix > 0: + multibyte_fix -= 1 + continue + any_stop = [s for s in stop_sequences if s in all_text] if len(any_stop) > 0: first_stop = any_stop[0] @@ -347,82 +1365,369 @@ def _create_completion( break if stream: - start = returned_characters - longest = 0 + remaining_tokens = completion_tokens[returned_tokens:] + remaining_text = self.detokenize( + remaining_tokens, + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + remaining_length = len(remaining_text) + # We want to avoid yielding any characters from # the generated text if they are part of a stop # sequence. 
+ first_stop_position = 0 for s in stop_sequences: - for i in range(len(s), 0, -1): - if all_text.endswith(s[:i]): - if i > longest: - longest = i + for i in range(min(len(s), remaining_length), 0, -1): + if remaining_text.endswith(s[:i]): + if i > first_stop_position: + first_stop_position = i break - text = all_text[: len(all_text) - longest] - returned_characters += len(text[start:]) + + token_end_position = 0 + + if logprobs is not None: + # not sure how to handle this branch when dealing + # with CJK output, so keep it unchanged + for token in remaining_tokens: + if token == bos_token_id: + continue + token_end_position += len( + self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ) + # Check if stop sequence is in the token + if token_end_position > ( + remaining_length - first_stop_position + ): + break + token_str = self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + text_offset = len(prompt) + len( + self.detokenize( + completion_tokens[:returned_tokens], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + ) + token_offset = len(prompt_tokens) + returned_tokens + logits = self._scores[token_offset - 1, :] + current_logprobs = Llama.logits_to_logprobs(logits).tolist() + sorted_logprobs = list( + sorted( + zip(current_logprobs, range(len(current_logprobs))), + reverse=True, + ) + ) + top_logprob = { + self.detokenize([i]).decode( + "utf-8", errors="ignore" + ): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: current_logprobs[int(token)]}) + logprobs_or_none = { + "tokens": [ + self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore") + ], + "text_offset": [text_offset], + "token_logprobs": [current_logprobs[int(token)]], + "top_logprobs": [top_logprob], + } + returned_tokens += 1 + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": self.detokenize( + [token], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ).decode("utf-8", errors="ignore"), + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": None, + } + ], + } + else: + while len(remaining_tokens) > 0: + decode_success = False + for i in range(1, len(remaining_tokens) + 1): + try: + bs = self.detokenize( + remaining_tokens[:i], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ts = bs.decode("utf-8") + decode_success = True + break + except UnicodeError: + pass + else: + break + if not decode_success: + # all remaining tokens cannot be decoded to a UTF-8 character + break + token_end_position += len(bs) + if token_end_position > ( + remaining_length - first_stop_position + ): + break + remaining_tokens = remaining_tokens[i:] + returned_tokens += i + + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": ts, + "index": 0, + "logprobs": None, + "finish_reason": None, + } + ], + } + + if len(completion_tokens) >= max_tokens: + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + finish_reason = "length" + break + + if stopping_criteria is not None and stopping_criteria( + self._input_ids, self._scores[-1, :] + ): + text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) + 
finish_reason = "stop" + + if self.verbose: + self._ctx.print_timings() + + if stream: + remaining_tokens = completion_tokens[returned_tokens:] + remaining_text = self.detokenize( + remaining_tokens, + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + any_stop = [s for s in stop_sequences if s in remaining_text] + if len(any_stop) > 0: + end = min(remaining_text.index(stop) for stop in any_stop) + else: + end = len(remaining_text) + + token_end_position = 0 + for token in remaining_tokens: + token_end_position += len( + self.detokenize( + [token], + prev_tokens=prompt_tokens + completion_tokens[:returned_tokens], + ) + ) + + logprobs_or_none: Optional[CompletionLogprobs] = None + if logprobs is not None: + if token == bos_token_id: + continue + token_str = self.detokenize([token]).decode( + "utf-8", errors="ignore" + ) + text_offset = len(prompt) + len( + self.detokenize( + completion_tokens[:returned_tokens], + prev_tokens=prompt_tokens + + completion_tokens[:returned_tokens], + ) + ) + token_offset = len(prompt_tokens) + returned_tokens - 1 + logits = self._scores[token_offset, :] + current_logprobs = Llama.logits_to_logprobs(logits).tolist() + sorted_logprobs = list( + sorted( + zip(current_logprobs, range(len(current_logprobs))), + reverse=True, + ) + ) + top_logprob = { + self.detokenize([i]).decode("utf-8", errors="ignore"): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: current_logprobs[int(token)]}) + logprobs_or_none = { + "tokens": [ + self.detokenize([token]).decode("utf-8", errors="ignore") + ], + "text_offset": [text_offset], + "token_logprobs": [current_logprobs[int(token)]], + "top_logprobs": [top_logprob], + } + + if token_end_position >= end: + last_text = self.detokenize([token]) + if token_end_position == end - 1: + break + returned_tokens += 1 + yield { + "id": completion_id, + "object": "text_completion", + "created": created, + "model": model_name, + "choices": [ + { + "text": last_text[ + : len(last_text) - (token_end_position - end) + ].decode("utf-8", errors="ignore"), + "index": 0, + "logprobs": logprobs_or_none, + "finish_reason": None, + } + ], + } + break + returned_tokens += 1 yield { "id": completion_id, "object": "text_completion", "created": created, - "model": self.model_path, + "model": model_name, "choices": [ { - "text": text[start:].decode("utf-8"), + "text": self.detokenize([token]).decode( + "utf-8", errors="ignore" + ), "index": 0, - "logprobs": None, + "logprobs": logprobs_or_none, "finish_reason": None, } ], } - if len(completion_tokens) >= max_tokens: - text = self.detokenize(completion_tokens) - finish_reason = "length" - break - - if finish_reason is None: - finish_reason = "length" - - if stream: yield { "id": completion_id, "object": "text_completion", "created": created, - "model": self.model_path, + "model": model_name, "choices": [ { - "text": text[returned_characters:].decode("utf-8"), + "text": "", "index": 0, "logprobs": None, "finish_reason": finish_reason, } ], } + if self.cache: + if self.verbose: + print("Llama._create_completion: cache save", file=sys.stderr) + self.cache[prompt_tokens + completion_tokens] = self.save_state() + if self.verbose: + print("Llama._create_completion: cache saved", file=sys.stderr) return - text = text.decode("utf-8") + if self.cache: + if self.verbose: + print("Llama._create_completion: cache save", file=sys.stderr) + self.cache[prompt_tokens + completion_tokens] = self.save_state() + + text_str = text.decode("utf-8", errors="ignore") if 
echo: - text = prompt + text + text_str = prompt + text_str - if suffix is not None: - text = text + suffix + if suffix_token_id < 0 and suffix is not None: + text_str = text_str + suffix + logprobs_or_none: Optional[CompletionLogprobs] = None if logprobs is not None: - raise NotImplementedError("logprobs not implemented") - - if self.verbose: - llama_cpp.llama_print_timings(self.ctx) + text_offset = 0 if echo else len(prompt) + token_offset = 0 if echo else len(prompt_tokens[1:]) + text_offsets: List[int] = [] + token_logprobs: List[Optional[float]] = [] + tokens: List[str] = [] + top_logprobs: List[Optional[Dict[str, float]]] = [] + + if echo: + # Remove leading BOS token if exists + all_tokens = ( + prompt_tokens[1 if prompt_tokens[0] == self.token_bos() else 0 :] + + completion_tokens + ) + else: + all_tokens = completion_tokens + + all_token_strs = [ + self.detokenize([token], prev_tokens=all_tokens[:i]).decode( + "utf-8", errors="ignore" + ) + for i, token in enumerate(all_tokens) + ] + all_logprobs = Llama.logits_to_logprobs(self._scores)[token_offset:] + # TODO: may be able to change this loop to use np.take_along_dim + for idx, (token, token_str, logprobs_token) in enumerate( + zip(all_tokens, all_token_strs, all_logprobs) + ): + if token == bos_token_id: + continue + text_offsets.append( + text_offset + + len( + self.detokenize(all_tokens[:idx]).decode( + "utf-8", errors="ignore" + ) + ) + ) + tokens.append(token_str) + sorted_logprobs = list( + sorted( + zip(logprobs_token, range(len(logprobs_token))), reverse=True + ) + ) + token_logprobs.append(logprobs_token[int(token)]) + top_logprob: Optional[Dict[str, float]] = { + self.detokenize([i], prev_tokens=all_tokens[:idx]).decode( + "utf-8", errors="ignore" + ): logprob + for logprob, i in sorted_logprobs[:logprobs] + } + top_logprob.update({token_str: logprobs_token[int(token)]}) + top_logprobs.append(top_logprob) + # Weird idosincracy of the OpenAI API where + # token_logprobs and top_logprobs are null for + # the first token. 
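            # Roughly, the resulting logprobs object mirrors the OpenAI completions shape
            # (illustrative values):
            #   {"tokens": ["Hello", ","], "text_offset": [0, 5],
            #    "token_logprobs": [None, -0.12], "top_logprobs": [None, {",": -0.12, ".": -2.30}]}
            # The leading None entries are only inserted when echo=True, as handled just below.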
+ if echo and len(all_tokens) > 0: + token_logprobs[0] = None + top_logprobs[0] = None + logprobs_or_none = { + "tokens": tokens, + "text_offset": text_offsets, + "token_logprobs": token_logprobs, + "top_logprobs": top_logprobs, + } yield { "id": completion_id, "object": "text_completion", "created": created, - "model": self.model_path, + "model": model_name, "choices": [ { - "text": text, + "text": text_str, "index": 0, - "logprobs": None, + "logprobs": logprobs_or_none, "finish_reason": finish_reason, } ], @@ -435,32 +1740,60 @@ def _create_completion( def create_completion( self, - prompt: str, + prompt: Union[str, List[int]], suffix: Optional[str] = None, - max_tokens: int = 128, + max_tokens: Optional[int] = 16, temperature: float = 0.8, top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, logprobs: Optional[int] = None, echo: bool = False, - stop: List[str] = [], - repeat_penalty: float = 1.1, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, top_k: int = 40, stream: bool = False, - ) -> Union[Completion, Iterator[CompletionChunk]]: + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: """Generate text from a prompt. Args: prompt: The prompt to generate text from. suffix: A suffix to append to the generated text. If None, no suffix is appended. - max_tokens: The maximum number of tokens to generate. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. temperature: The temperature to use for sampling. - top_p: The top-p value to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. logprobs: The number of logprobs to return. If None, no logprobs are returned. echo: Whether to echo the prompt. stop: A list of strings to stop generation when encountered. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. repeat_penalty: The penalty to apply to repeated tokens. - top_k: The top-k value to use for sampling. + top_k: The top-k value to use for sampling. Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 stream: Whether to stream the results. + seed: The seed to use for sampling. + tfs_z: The tail-free sampling parameter. Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. + mirostat_mode: The mirostat sampling mode. + mirostat_tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. 
A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + mirostat_eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + model: The name to use for the model in the completion object. + stopping_criteria: A list of stopping criteria to use. + logits_processor: A list of logits processors to use. + grammar: A grammar to use for constrained sampling. + logit_bias: A logit bias to use. Raises: ValueError: If the requested tokens exceed the context window. @@ -472,18 +1805,32 @@ def create_completion( completion_or_chunks = self._create_completion( prompt=prompt, suffix=suffix, - max_tokens=max_tokens, + max_tokens=-1 if max_tokens is None else max_tokens, temperature=temperature, top_p=top_p, + min_p=min_p, + typical_p=typical_p, logprobs=logprobs, echo=echo, stop=stop, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, repeat_penalty=repeat_penalty, top_k=top_k, stream=stream, + seed=seed, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, ) if stream: - chunks: Iterator[CompletionChunk] = completion_or_chunks + chunks: Iterator[CreateCompletionStreamResponse] = completion_or_chunks return chunks completion: Completion = next(completion_or_chunks) # type: ignore return completion @@ -492,30 +1839,58 @@ def __call__( self, prompt: str, suffix: Optional[str] = None, - max_tokens: int = 128, + max_tokens: Optional[int] = 16, temperature: float = 0.8, top_p: float = 0.95, + min_p: float = 0.05, + typical_p: float = 1.0, logprobs: Optional[int] = None, echo: bool = False, - stop: List[str] = [], - repeat_penalty: float = 1.1, + stop: Optional[Union[str, List[str]]] = [], + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, + repeat_penalty: float = 1.0, top_k: int = 40, stream: bool = False, - ) -> Union[Completion, Iterator[CompletionChunk]]: + seed: Optional[int] = None, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: """Generate text from a prompt. Args: prompt: The prompt to generate text from. suffix: A suffix to append to the generated text. If None, no suffix is appended. - max_tokens: The maximum number of tokens to generate. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. temperature: The temperature to use for sampling. - top_p: The top-p value to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. 
Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. logprobs: The number of logprobs to return. If None, no logprobs are returned. echo: Whether to echo the prompt. stop: A list of strings to stop generation when encountered. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. repeat_penalty: The penalty to apply to repeated tokens. - top_k: The top-k value to use for sampling. + top_k: The top-k value to use for sampling. Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 stream: Whether to stream the results. + seed: The seed to use for sampling. + tfs_z: The tail-free sampling parameter. Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. + mirostat_mode: The mirostat sampling mode. + mirostat_tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + mirostat_eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + model: The name to use for the model in the completion object. + stopping_criteria: A list of stopping criteria to use. + logits_processor: A list of logits processors to use. + grammar: A grammar to use for constrained sampling. + logit_bias: A logit bias to use. Raises: ValueError: If the requested tokens exceed the context window. 
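For orientation, the expanded `Llama.__call__` / `create_completion` signature documented above can be exercised as in the following minimal sketch; the model path is a placeholder and the parameter values are illustrative, not recommended defaults:

```python
from llama_cpp import Llama

# Placeholder path; substitute any local GGUF model file.
llm = Llama(model_path="./models/model.gguf")

# Stream a completion using several of the newly documented sampling parameters.
for chunk in llm(
    "Q: Name the planets in the solar system. A: ",
    max_tokens=64,          # None or a value <= 0 generates until n_ctx is exhausted
    temperature=0.8,
    top_p=0.95,
    min_p=0.05,             # minimum-p sampling
    top_k=40,
    seed=1337,              # fixed seed for reproducible sampling
    stop=["Q:", "\n\n"],
    logit_bias={llm.token_eos(): -5.0},  # bias sampling away from an early EOS token
    stream=True,
):
    print(chunk["choices"][0]["text"], end="", flush=True)
```

With `stream=False` the same call returns a single completion object whose `choices[0]["text"]` holds the full generation.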
@@ -530,172 +1905,514 @@ def __call__( max_tokens=max_tokens, temperature=temperature, top_p=top_p, + min_p=min_p, + typical_p=typical_p, logprobs=logprobs, echo=echo, stop=stop, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, repeat_penalty=repeat_penalty, top_k=top_k, stream=stream, + seed=seed, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + stopping_criteria=stopping_criteria, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, ) - def _convert_text_completion_to_chat( - self, completion: Completion - ) -> ChatCompletion: - return { - "id": "chat" + completion["id"], - "object": "chat.completion", - "created": completion["created"], - "model": completion["model"], - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": completion["choices"][0]["text"], - }, - "finish_reason": completion["choices"][0]["finish_reason"], - } - ], - "usage": completion["usage"], - } - - def _convert_text_completion_chunks_to_chat( - self, - chunks: Iterator[CompletionChunk], - ) -> Iterator[ChatCompletionChunk]: - for i, chunk in enumerate(chunks): - if i == 0: - yield { - "id": "chat" + chunk["id"], - "model": chunk["model"], - "created": chunk["created"], - "object": "chat.completion.chunk", - "choices": [ - { - "index": 0, - "delta": { - "role": "assistant", - }, - "finish_reason": None, - } - ], - } - yield { - "id": "chat" + chunk["id"], - "model": chunk["model"], - "created": chunk["created"], - "object": "chat.completion.chunk", - "choices": [ - { - "index": 0, - "delta": { - "content": chunk["choices"][0]["text"], - }, - "finish_reason": chunk["choices"][0]["finish_reason"], - } - ], - } - def create_chat_completion( self, - messages: List[ChatCompletionMessage], - temperature: float = 0.8, + messages: List[ChatCompletionRequestMessage], + functions: Optional[List[ChatCompletionFunction]] = None, + function_call: Optional[ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[ChatCompletionTool]] = None, + tool_choice: Optional[ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, top_p: float = 0.95, top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, stream: bool = False, - stop: List[str] = [], - max_tokens: int = 128, - repeat_penalty: float = 1.1, - ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[LogitsProcessorList] = None, + grammar: Optional[LlamaGrammar] = None, + logit_bias: Optional[Dict[int, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + ) -> Union[ + CreateChatCompletionResponse, Iterator[CreateChatCompletionStreamResponse] + ]: """Generate a chat completion from a list of messages. Args: messages: A list of messages to generate a response for. + functions: A list of functions to use for the chat completion. + function_call: A function call to use for the chat completion. + tools: A list of tools to use for the chat completion. + tool_choice: A tool choice to use for the chat completion. 
temperature: The temperature to use for sampling. - top_p: The top-p value to use for sampling. - top_k: The top-k value to use for sampling. + top_p: The top-p value to use for nucleus sampling. Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + top_k: The top-k value to use for sampling. Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + min_p: The min-p value to use for minimum p sampling. Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + typical_p: The typical-p value to use for sampling. Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. stream: Whether to stream the results. stop: A list of strings to stop generation when encountered. - max_tokens: The maximum number of tokens to generate. + seed: The seed to use for sampling. + response_format: The response format to use for the chat completion. Use { "type": "json_object" } to constrain output to only valid JSON. + max_tokens: The maximum number of tokens to generate. If max_tokens <= 0 or None, the maximum number of tokens to generate is unlimited and depends on n_ctx. + presence_penalty: The penalty to apply to tokens based on their presence in the prompt. + frequency_penalty: The penalty to apply to tokens based on their frequency in the prompt. repeat_penalty: The penalty to apply to repeated tokens. + tfs_z: The tail-free sampling parameter. + mirostat_mode: The mirostat sampling mode. + mirostat_tau: The mirostat sampling tau parameter. + mirostat_eta: The mirostat sampling eta parameter. + model: The name to use for the model in the completion object. + logits_processor: A list of logits processors to use. + grammar: A grammar to use. + logit_bias: A logit bias to use. Returns: Generated chat completion or a stream of chat completion chunks. """ - instructions = """Complete the following chat conversation between the user and the assistant. 
System messages should be strictly followed as additional instructions.""" - chat_history = "\n".join( - f'{message["role"]} {message.get("user", "")}: {message["content"]}' - for message in messages - ) - PROMPT = f" \n\n### Instructions:{instructions}\n\n### Inputs:{chat_history}\n\n### Response:\nassistant: " - PROMPT_STOP = ["###", "\nuser: ", "\nassistant: ", "\nsystem: "] - completion_or_chunks = self( - prompt=PROMPT, - stop=PROMPT_STOP + stop, + handler = ( + self.chat_handler + or self._chat_handlers.get(self.chat_format) + or llama_chat_format.get_chat_completion_handler(self.chat_format) + ) + return handler( + llama=self, + messages=messages, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, temperature=temperature, top_p=top_p, top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=logprobs, + top_logprobs=top_logprobs, stream=stream, + stop=stop, + seed=seed, + response_format=response_format, max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, ) - if stream: - chunks: Iterator[CompletionChunk] = completion_or_chunks # type: ignore - return self._convert_text_completion_chunks_to_chat(chunks) - else: - completion: Completion = completion_or_chunks # type: ignore - return self._convert_text_completion_to_chat(completion) - def __del__(self): - if self.ctx is not None: - llama_cpp.llama_free(self.ctx) - self.ctx = None + def create_chat_completion_openai_v1( + self, + *args: Any, + **kwargs: Any, + ): + """Generate a chat completion with return type based on the the OpenAI v1 API. + + OpenAI python package is required to use this method. + + You can install it with `pip install openai`. + + Args: + *args: Positional arguments to pass to create_chat_completion. + **kwargs: Keyword arguments to pass to create_chat_completion. + + Returns: + Generated chat completion or a stream of chat completion chunks. + """ + try: + from openai.types.chat import ChatCompletion, ChatCompletionChunk + + stream = kwargs.get("stream", False) # type: ignore + assert isinstance(stream, bool) + if stream: + return (ChatCompletionChunk(**chunk) for chunk in self.create_chat_completion(*args, **kwargs)) # type: ignore + else: + return ChatCompletion(**self.create_chat_completion(*args, **kwargs)) # type: ignore + except ImportError: + raise ImportError( + "To use create_chat_completion_openai_v1, you must install the openai package." + "You can install it with `pip install openai`." 
+ ) def __getstate__(self): return dict( - verbose=self.verbose, model_path=self.model_path, - n_ctx=self.params.n_ctx, - n_parts=self.params.n_parts, - seed=self.params.seed, - f16_kv=self.params.f16_kv, - logits_all=self.params.logits_all, - vocab_only=self.params.vocab_only, - use_mlock=self.params.use_mlock, - embedding=self.params.embedding, - last_n_tokens_size=self.last_n_tokens_size, - last_n_tokens_data=self.last_n_tokens_data, - tokens_consumed=self.tokens_consumed, + # Model Params + n_gpu_layers=self.model_params.n_gpu_layers, + split_mode=self.model_params.split_mode, + main_gpu=self.model_params.main_gpu, + tensor_split=self.tensor_split, + vocab_only=self.model_params.vocab_only, + use_mmap=self.model_params.use_mmap, + use_mlock=self.model_params.use_mlock, + kv_overrides=self.kv_overrides, + # Context Params + seed=self._seed, + n_ctx=self.context_params.n_ctx, n_batch=self.n_batch, - n_threads=self.n_threads, + n_ubatch=self.context_params.n_ubatch, + n_threads=self.context_params.n_threads, + n_threads_batch=self.context_params.n_threads_batch, + rope_scaling_type=self.context_params.rope_scaling_type, + pooling_type=self.context_params.pooling_type, + rope_freq_base=self.context_params.rope_freq_base, + rope_freq_scale=self.context_params.rope_freq_scale, + yarn_ext_factor=self.context_params.yarn_ext_factor, + yarn_attn_factor=self.context_params.yarn_attn_factor, + yarn_beta_fast=self.context_params.yarn_beta_fast, + yarn_beta_slow=self.context_params.yarn_beta_slow, + yarn_orig_ctx=self.context_params.yarn_orig_ctx, + logits_all=self.context_params.logits_all, + embedding=self.context_params.embeddings, + offload_kqv=self.context_params.offload_kqv, + flash_attn=self.context_params.flash_attn, + # Sampling Params + no_perf=self.context_params.no_perf, + last_n_tokens_size=self.last_n_tokens_size, + # LoRA Params + lora_base=self.lora_base, + lora_scale=self.lora_scale, + lora_path=self.lora_path, + # Backend Params + numa=self.numa, + # Chat Format Params + chat_format=self.chat_format, + chat_handler=self.chat_handler, + # Speculative Decidng + draft_model=self.draft_model, + # KV cache quantization + type_k=self.context_params.type_k, + type_v=self.context_params.type_v, + # Misc + spm_infill=self.spm_infill, + verbose=self.verbose, ) def __setstate__(self, state): - self.__init__( - model_path=state["model_path"], - n_ctx=state["n_ctx"], - n_parts=state["n_parts"], - seed=state["seed"], - f16_kv=state["f16_kv"], - logits_all=state["logits_all"], - vocab_only=state["vocab_only"], - use_mlock=state["use_mlock"], - embedding=state["embedding"], - n_threads=state["n_threads"], - n_batch=state["n_batch"], - last_n_tokens_size=state["last_n_tokens_size"], - verbose=state["verbose"], - ) - self.last_n_tokens_data = state["last_n_tokens_data"] - self.tokens_consumed = state["tokens_consumed"] + self.__init__(**state) + def save_state(self) -> LlamaState: + if self.verbose: + print("Llama.save_state: saving llama state", file=sys.stderr) + state_size = llama_cpp.llama_get_state_size(self._ctx.ctx) + if self.verbose: + print(f"Llama.save_state: got state size: {state_size}", file=sys.stderr) + llama_state = (ctypes.c_uint8 * int(state_size))() + if self.verbose: + print("Llama.save_state: allocated state", file=sys.stderr) + n_bytes = llama_cpp.llama_copy_state_data(self._ctx.ctx, llama_state) + if self.verbose: + print(f"Llama.save_state: copied llama state: {n_bytes}", file=sys.stderr) + if int(n_bytes) > int(state_size): + raise RuntimeError("Failed to copy llama state 
data") + llama_state_compact = (ctypes.c_uint8 * int(n_bytes))() + llama_cpp.ctypes.memmove(llama_state_compact, llama_state, int(n_bytes)) + if self.verbose: + print( + f"Llama.save_state: saving {n_bytes} bytes of llama state", + file=sys.stderr, + ) + return LlamaState( + scores=self._scores.copy(), + input_ids=self.input_ids.copy(), + n_tokens=self.n_tokens, + llama_state=bytes(llama_state_compact), + llama_state_size=n_bytes, + seed=self._seed, + ) - @staticmethod - def token_eos() -> llama_cpp.llama_token: + def load_state(self, state: LlamaState) -> None: + # Only filling in up to `n_tokens` and then zero-ing out the rest + self.scores[: state.n_tokens, :] = state.scores.copy() + rest = self.scores[state.n_tokens :, :] + rest[rest > 0] = 0.0 + self.input_ids = state.input_ids.copy() + self.n_tokens = state.n_tokens + self._seed = state.seed + state_size = state.llama_state_size + LLamaStateArrayType = ctypes.c_uint8 * state_size + llama_state = LLamaStateArrayType.from_buffer_copy(state.llama_state) + + if llama_cpp.llama_set_state_data(self._ctx.ctx, llama_state) != state_size: + raise RuntimeError("Failed to set llama state data") + + def n_ctx(self) -> int: + """Return the context window size.""" + return self._ctx.n_ctx() + + def n_embd(self) -> int: + """Return the embedding size.""" + return self._model.n_embd() + + def n_vocab(self) -> int: + """Return the vocabulary size.""" + return self._model.n_vocab() + + def tokenizer(self) -> LlamaTokenizer: + """Return the llama tokenizer for this model.""" + return LlamaTokenizer(self) + + def token_eos(self) -> int: """Return the end-of-sequence token.""" - return llama_cpp.llama_token_eos() + return self._model.token_eos() - @staticmethod - def token_bos() -> llama_cpp.llama_token: + def token_bos(self) -> int: """Return the beginning-of-sequence token.""" - return llama_cpp.llama_token_bos() + return self._model.token_bos() + + def token_nl(self) -> int: + """Return the newline token.""" + return self._model.token_nl() + + def pooling_type(self) -> str: + """Return the pooling type.""" + return self._ctx.pooling_type() + + def close(self) -> None: + """Explicitly free the model from memory.""" + self._stack.close() + + def __del__(self) -> None: + self.close() + + @staticmethod + def logits_to_logprobs( + logits: Union[npt.NDArray[np.single], List], axis: int = -1 + ) -> npt.NDArray[np.single]: + # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.log_softmax.html + logits_maxs: np.ndarray = np.amax(logits, axis=axis, keepdims=True) + if logits_maxs.ndim > 0: + logits_maxs[~np.isfinite(logits_maxs)] = 0 + elif not np.isfinite(logits_maxs): + logits_maxs = 0 + subtract_maxs = np.subtract(logits, logits_maxs, dtype=np.single) + exp = np.exp(subtract_maxs) + # Suppress warnings about log of zero + with np.errstate(divide="ignore"): + summed = np.sum(exp, axis=axis, keepdims=True) + out = np.log(summed) + return subtract_maxs - out + + @staticmethod + def longest_token_prefix(a: Sequence[int], b: Sequence[int]): + longest_prefix = 0 + for _a, _b in zip(a, b): + if _a == _b: + longest_prefix += 1 + else: + break + return longest_prefix + + @classmethod + def from_pretrained( + cls, + repo_id: str, + filename: Optional[str], + additional_files: Optional[List] = None, + local_dir: Optional[Union[str, os.PathLike[str]]] = None, + local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto", + cache_dir: Optional[Union[str, os.PathLike[str]]] = None, + **kwargs: Any, + ) -> "Llama": + """Create a Llama model from a 
pretrained model name or path. + This method requires the huggingface-hub package. + You can install it with `pip install huggingface-hub`. + + Args: + repo_id: The model repo id. + filename: A filename or glob pattern to match the model file in the repo. + additional_files: A list of filenames or glob patterns to match additional model files in the repo. + local_dir: The local directory to save the model to. + local_dir_use_symlinks: Whether to use symlinks when downloading the model. + **kwargs: Additional keyword arguments to pass to the Llama constructor. + + Returns: + A Llama model.""" + try: + from huggingface_hub import hf_hub_download, HfFileSystem + from huggingface_hub.utils import validate_repo_id + except ImportError: + raise ImportError( + "Llama.from_pretrained requires the huggingface-hub package. " + "You can install it with `pip install huggingface-hub`." + ) + + validate_repo_id(repo_id) + + hffs = HfFileSystem() + + files = [ + file["name"] if isinstance(file, dict) else file + for file in hffs.ls(repo_id, recursive=True) + ] + + # split each file into repo_id, subfolder, filename + file_list: List[str] = [] + for file in files: + rel_path = Path(file).relative_to(repo_id) + file_list.append(str(rel_path)) + + # find the only/first shard file: + matching_files = [file for file in file_list if fnmatch.fnmatch(file, filename)] # type: ignore + + if len(matching_files) == 0: + raise ValueError( + f"No file found in {repo_id} that match {filename}\n\n" + f"Available Files:\n{json.dumps(file_list)}" + ) + + if len(matching_files) > 1: + raise ValueError( + f"Multiple files found in {repo_id} matching {filename}\n\n" + f"Available Files:\n{json.dumps(files)}" + ) + + (matching_file,) = matching_files + + subfolder = str(Path(matching_file).parent) + filename = Path(matching_file).name + + # download the file + hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + ) + + if additional_files: + for additonal_file_name in additional_files: + # find the additional shard file: + matching_additional_files = [file for file in file_list if fnmatch.fnmatch(file, additonal_file_name)] + + if len(matching_additional_files) == 0: + raise ValueError( + f"No file found in {repo_id} that match {additonal_file_name}\n\n" + f"Available Files:\n{json.dumps(file_list)}" + ) + + if len(matching_additional_files) > 1: + raise ValueError( + f"Multiple files found in {repo_id} matching {additonal_file_name}\n\n" + f"Available Files:\n{json.dumps(files)}" + ) + + (matching_additional_file,) = matching_additional_files + + # download the additional file + hf_hub_download( + repo_id=repo_id, + filename=matching_additional_file, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + ) + + if local_dir is None: + model_path = hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cache_dir, + local_files_only=True, + ) + else: + model_path = os.path.join(local_dir, filename) + + # loading the first file of a sharded GGUF loads all remaining shard files in the subfolder + return cls( + model_path=model_path, + **kwargs, + ) + + +class LlamaState: + def __init__( + self, + input_ids: npt.NDArray[np.intc], + scores: npt.NDArray[np.single], + n_tokens: int, + llama_state: bytes, + llama_state_size: int, + seed: int, 
+ ): + self.input_ids = input_ids + self.scores = scores + self.n_tokens = n_tokens + self.llama_state = llama_state + self.llama_state_size = llama_state_size + self.seed = seed + + +LogitsProcessor = Callable[ + [npt.NDArray[np.intc], npt.NDArray[np.single]], npt.NDArray[np.single] +] + + +class LogitsProcessorList(List[LogitsProcessor]): + def __call__( + self, input_ids: npt.NDArray[np.intc], scores: npt.NDArray[np.single] + ) -> npt.NDArray[np.single]: + for processor in self: + scores = processor(input_ids, scores) + return scores + + +StoppingCriteria = Callable[[npt.NDArray[np.intc], npt.NDArray[np.single]], bool] + + +class StoppingCriteriaList(List[StoppingCriteria]): + def __call__( + self, input_ids: npt.NDArray[np.intc], logits: npt.NDArray[np.single] + ) -> bool: + return any([stopping_criteria(input_ids, logits) for stopping_criteria in self]) + + +class MinTokensLogitsProcessor(LogitsProcessor): + def __init__(self, min_tokens: int, token_eos: int): + self.min_tokens = min_tokens + self.token_eos = token_eos + self.prompt_tokens = None + + def __call__( + self, input_ids: npt.NDArray[np.intc], scores: npt.NDArray[np.single] + ) -> npt.NDArray[np.single]: + if self.prompt_tokens is None: + self.prompt_tokens = len(input_ids) + if len(input_ids) - self.prompt_tokens < self.min_tokens: + scores[self.token_eos] = -np.inf + return scores diff --git a/llama_cpp/llama_cache.py b/llama_cpp/llama_cache.py new file mode 100644 index 000000000..e059e98e1 --- /dev/null +++ b/llama_cpp/llama_cache.py @@ -0,0 +1,155 @@ +import sys +from abc import ABC, abstractmethod +from typing import ( + Optional, + Sequence, + Tuple, +) +from collections import OrderedDict + +import diskcache + +import llama_cpp.llama + +from .llama_types import * + + +class BaseLlamaCache(ABC): + """Base cache class for a llama.cpp model.""" + + def __init__(self, capacity_bytes: int = (2 << 30)): + self.capacity_bytes = capacity_bytes + + @property + @abstractmethod + def cache_size(self) -> int: + raise NotImplementedError + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + pass + + @abstractmethod + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + raise NotImplementedError + + @abstractmethod + def __contains__(self, key: Sequence[int]) -> bool: + raise NotImplementedError + + @abstractmethod + def __setitem__( + self, key: Sequence[int], value: "llama_cpp.llama.LlamaState" + ) -> None: + raise NotImplementedError + + +class LlamaRAMCache(BaseLlamaCache): + """Cache for a llama.cpp model using RAM.""" + + def __init__(self, capacity_bytes: int = (2 << 30)): + super().__init__(capacity_bytes) + self.capacity_bytes = capacity_bytes + self.cache_state: OrderedDict[ + Tuple[int, ...], "llama_cpp.llama.LlamaState" + ] = OrderedDict() + + @property + def cache_size(self): + return sum([state.llama_state_size for state in self.cache_state.values()]) + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + min_len = 0 + min_key = None + keys = ( + (k, llama_cpp.llama.Llama.longest_token_prefix(k, key)) + for k in self.cache_state.keys() + ) + for k, prefix_len in keys: + if prefix_len > min_len: + min_len = prefix_len + min_key = k + return min_key + + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + key = tuple(key) + _key = self._find_longest_prefix_key(key) + if _key is None: + raise KeyError("Key not found") + value = self.cache_state[_key] + 
self.cache_state.move_to_end(_key) + return value + + def __contains__(self, key: Sequence[int]) -> bool: + return self._find_longest_prefix_key(tuple(key)) is not None + + def __setitem__(self, key: Sequence[int], value: "llama_cpp.llama.LlamaState"): + key = tuple(key) + if key in self.cache_state: + del self.cache_state[key] + self.cache_state[key] = value + while self.cache_size > self.capacity_bytes and len(self.cache_state) > 0: + self.cache_state.popitem(last=False) + + +# Alias for backwards compatibility +LlamaCache = LlamaRAMCache + + +class LlamaDiskCache(BaseLlamaCache): + """Cache for a llama.cpp model using disk.""" + + def __init__( + self, cache_dir: str = ".cache/llama_cache", capacity_bytes: int = (2 << 30) + ): + super().__init__(capacity_bytes) + self.cache = diskcache.Cache(cache_dir) + + @property + def cache_size(self): + return int(self.cache.volume()) # type: ignore + + def _find_longest_prefix_key( + self, + key: Tuple[int, ...], + ) -> Optional[Tuple[int, ...]]: + min_len = 0 + min_key: Optional[Tuple[int, ...]] = None + for k in self.cache.iterkeys(): # type: ignore + prefix_len = llama_cpp.llama.Llama.longest_token_prefix(k, key) + if prefix_len > min_len: + min_len = prefix_len + min_key = k # type: ignore + return min_key + + def __getitem__(self, key: Sequence[int]) -> "llama_cpp.llama.LlamaState": + key = tuple(key) + _key = self._find_longest_prefix_key(key) + if _key is None: + raise KeyError("Key not found") + value: "llama_cpp.llama.LlamaState" = self.cache.pop(_key) # type: ignore + # NOTE: This puts an integer as key in cache, which breaks, + # Llama.longest_token_prefix(k, key) above since k is not a tuple of ints/tokens + # self.cache.push(_key, side="front") # type: ignore + return value + + def __contains__(self, key: Sequence[int]) -> bool: + return self._find_longest_prefix_key(tuple(key)) is not None + + def __setitem__(self, key: Sequence[int], value: "llama_cpp.llama.LlamaState"): + print("LlamaDiskCache.__setitem__: called", file=sys.stderr) + key = tuple(key) + if key in self.cache: + print("LlamaDiskCache.__setitem__: delete", file=sys.stderr) + del self.cache[key] + self.cache[key] = value + print("LlamaDiskCache.__setitem__: set", file=sys.stderr) + while self.cache_size > self.capacity_bytes and len(self.cache) > 0: + key_to_remove = next(iter(self.cache)) + del self.cache[key_to_remove] + print("LlamaDiskCache.__setitem__: trim", file=sys.stderr) diff --git a/llama_cpp/llama_chat_format.py b/llama_cpp/llama_chat_format.py new file mode 100644 index 000000000..17575c700 --- /dev/null +++ b/llama_cpp/llama_chat_format.py @@ -0,0 +1,3816 @@ +from __future__ import annotations + +import os +import sys +import json +import ctypes +import dataclasses +import random +import string + +from contextlib import ExitStack +from typing import ( + Any, + Dict, + Iterator, + List, + Literal, + Optional, + Tuple, + Union, + Protocol, + cast, +) + +import jinja2 +from jinja2.sandbox import ImmutableSandboxedEnvironment + +import numpy as np +import numpy.typing as npt + +import llama_cpp.llama as llama +import llama_cpp.llama_types as llama_types +import llama_cpp.llama_grammar as llama_grammar + +from ._logger import logger +from ._utils import suppress_stdout_stderr, Singleton + +### Common Chat Templates and Special Tokens ### + +# Source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/blob/main/tokenizer_config.json +CHATML_CHAT_TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + 
'<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" +CHATML_BOS_TOKEN = "" +CHATML_EOS_TOKEN = "<|im_end|>" + +# Source: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/blob/main/tokenizer_config.json +MISTRAL_INSTRUCT_CHAT_TEMPLATE = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" +MISTRAL_INSTRUCT_BOS_TOKEN = "" +MISTRAL_INSTRUCT_EOS_TOKEN = "" + +# Source: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1/blob/main/tokenizer_config.json +MIXTRAL_INSTRUCT_CHAT_TEMPLATE = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + +# Source: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json +LLAMA3_INSTRUCT_CHAT_TEMPLATE = "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + +### Chat Completion Handler ### + + +class LlamaChatCompletionHandler(Protocol): + """Base Protocol for a llama chat completion handler. + + Very generic protocol that can be used to implement any chat format. 
+ The only hard requirement is that it must return a ChatCompletion when + stream=False and an iterator of ChatCompletionChunks when stream=True.""" + + def __call__( + self, + *, + # llama.cpp instance + llama: llama.Llama, + # openai api parameters + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + model: Optional[str] = None, + logit_bias: Optional[Dict[str, float]] = None, + # llama.cpp parameters + min_p: float = 0.05, + typical_p: float = 1.0, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: ... + + +class LlamaChatCompletionHandlerNotFoundException(Exception): + pass + + +class LlamaChatCompletionHandlerRegistry(Singleton): + _chat_handlers: Dict[str, LlamaChatCompletionHandler] = {} + + def register_chat_completion_handler( + self, + name: str, + chat_handler: LlamaChatCompletionHandler, + overwrite: bool = False, + ): + if not overwrite and name in self._chat_handlers: + raise ValueError( + f"Formatter with name '{name}' is already registered. Use `overwrite=True` to overwrite it." + ) + self._chat_handlers[name] = chat_handler + + def unregister_chat_handler(self, name: str): + if name in self._chat_handlers: + del self._chat_handlers[name] + else: + raise ValueError(f"No formatter registered under the name '{name}'.") + + def get_chat_completion_handler_by_name( + self, name: str + ) -> LlamaChatCompletionHandler: + try: + chat_handler = self._chat_handlers[name] + return chat_handler + except KeyError: + raise LlamaChatCompletionHandlerNotFoundException( + f"Invalid chat handler: {name} (valid formats: {list(self._chat_handlers.keys())})" + ) + + +def get_chat_completion_handler(name: str) -> LlamaChatCompletionHandler: + return LlamaChatCompletionHandlerRegistry().get_chat_completion_handler_by_name( + name + ) + + +def register_chat_completion_handler(name: str): + def decorator(f: LlamaChatCompletionHandler): + LlamaChatCompletionHandlerRegistry().register_chat_completion_handler(name, f) + return f + + return decorator + + +### Chat Formatter ### + + +@dataclasses.dataclass +class ChatFormatterResponse: + """Dataclass that stores completion parameters for a given chat format and + create_chat_completion request. + + prompt contains the formatted prompt generated from the chat format and messages. 
+ stop contains the stop token or list of stop tokens to use for the chat format.""" + + prompt: str + stop: Optional[Union[str, List[str]]] = None + stopping_criteria: Optional[llama.StoppingCriteriaList] = None + added_special: bool = False + + +class ChatFormatter(Protocol): + """Base Protocol for a chat formatter. A chat formatter is a function that + takes a list of messages and returns a chat format response which can be used + to generate a completion. The response can also include a stop token or list + of stop tokens to use for the completion.""" + + def __call__( + self, + *, + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, + ) -> ChatFormatterResponse: ... + + +class Jinja2ChatFormatter(ChatFormatter): + def __init__( + self, + template: str, + eos_token: str, + bos_token: str, + add_generation_prompt: bool = True, + stop_token_ids: Optional[List[int]] = None, + ): + """A chat formatter that uses jinja2 templates to format the prompt.""" + self.template = template + self.eos_token = eos_token + self.bos_token = bos_token + self.add_generation_prompt = add_generation_prompt + self.stop_token_ids = ( + set(stop_token_ids) if stop_token_ids is not None else None + ) + + self._environment = ImmutableSandboxedEnvironment( + loader=jinja2.BaseLoader(), + trim_blocks=True, + lstrip_blocks=True, + ).from_string(self.template) + + def __call__( + self, + *, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + **kwargs: Any, + ) -> ChatFormatterResponse: + def raise_exception(message: str): + raise ValueError(message) + + prompt = self._environment.render( + messages=messages, + eos_token=self.eos_token, + bos_token=self.bos_token, + raise_exception=raise_exception, + add_generation_prompt=self.add_generation_prompt, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, + ) + + stopping_criteria = None + if self.stop_token_ids is not None: + + def stop_on_last_token( + tokens: npt.NDArray[np.intc], logits: npt.NDArray[np.single] + ) -> bool: + return tokens[-1] in self.stop_token_ids + + stopping_criteria = llama.StoppingCriteriaList([stop_on_last_token]) + + return ChatFormatterResponse( + prompt=prompt, + stop=[self.eos_token], + stopping_criteria=stopping_criteria, + added_special=True, + ) + + def to_chat_handler(self) -> LlamaChatCompletionHandler: + return chat_formatter_to_chat_completion_handler(self) + + +def _convert_text_completion_logprobs_to_chat( + logprobs: Optional[llama_types.CompletionLogprobs], +) -> llama_types.ChatCompletionLogprobs: + if logprobs is None: + return None + + return { + "content": [ + { + "token": token, + "bytes": None, + "logprob": logprob, + "top_logprobs": [ + { + "token": top_token, + "logprob": top_logprob, + "bytes": None, + } + for top_token, top_logprob in top_logprobs.items() + ], + } for (token, logprob, top_logprobs) in zip(logprobs["tokens"], logprobs["token_logprobs"], logprobs["top_logprobs"]) + ], + "refusal": None, + } + +def _convert_text_completion_to_chat( + completion: llama_types.Completion, +) -> llama_types.ChatCompletion: + assert "usage" in completion + return { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], 
+ "model": completion["model"], + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": completion["choices"][0]["text"], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": completion["choices"][0]["finish_reason"], + } + ], + "usage": completion["usage"], + } + + +def _convert_text_completion_chunks_to_chat( + chunks: Iterator[llama_types.CreateCompletionStreamResponse], +) -> Iterator[llama_types.ChatCompletionChunk]: + for i, chunk in enumerate(chunks): + if i == 0: + yield { + "id": "chat" + chunk["id"], + "model": chunk["model"], + "created": chunk["created"], + "object": "chat.completion.chunk", + "choices": [ + { + "index": 0, + "delta": { + "role": "assistant", + }, + "logprobs": None, + "finish_reason": None, + } + ], + } + yield { + "id": "chat" + chunk["id"], + "model": chunk["model"], + "created": chunk["created"], + "object": "chat.completion.chunk", + "choices": [ + { + "index": 0, + "delta": ( + { + "content": chunk["choices"][0]["text"], + } + if chunk["choices"][0]["finish_reason"] is None + else {} + ), + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "finish_reason": chunk["choices"][0]["finish_reason"], + } + ], + } + + +def _convert_completion_to_chat( + completion_or_chunks: Union[ + llama_types.CreateCompletionResponse, + Iterator[llama_types.CreateCompletionStreamResponse], + ], + stream: bool = False, +) -> Union[ + llama_types.CreateChatCompletionResponse, Iterator[llama_types.ChatCompletionChunk] +]: + if stream: + chunks: Iterator[llama_types.CreateCompletionStreamResponse] = completion_or_chunks # type: ignore + return _convert_text_completion_chunks_to_chat(chunks) + else: + completion: llama_types.Completion = completion_or_chunks # type: ignore + return _convert_text_completion_to_chat(completion) + + +def _convert_completion_to_chat_function( + tool_name: str, + completion_or_chunks: Union[ + llama_types.CreateCompletionResponse, + Iterator[llama_types.CreateCompletionStreamResponse], + ], + stream: bool, +): + if not stream: + completion: llama_types.CreateCompletionResponse = completion_or_chunks # type: ignore + assert "usage" in completion + tool_id = "call_" + "_0_" + tool_name + "_" + completion["id"] + # TODO: Fix for legacy function calls + chat_completion: llama_types.CreateChatCompletionResponse = { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], + "model": completion["model"], + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "function_call": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + "tool_calls": [ + { + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + } + ], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": "tool_calls", + } + ], + "usage": completion["usage"], + } + return chat_completion + else: + chunks: Iterator[llama_types.CreateCompletionStreamResponse] = completion_or_chunks # type: ignore + + def _stream_response_to_function_stream( + chunks: Iterator[llama_types.CreateCompletionStreamResponse], + ) -> Iterator[llama_types.CreateChatCompletionStreamResponse]: + # blank first message + first = True + id_ = None + created = None + model = None + tool_id = None + for chunk in chunks: + if first: + id_ = "chat" + chunk["id"] + 
created = chunk["created"] + model = chunk["model"] + tool_id = "call_" + "_0_" + tool_name + "_" + chunk["id"] + yield { + "id": id_, + "object": "chat.completion.chunk", + "created": created, + "model": model, + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": None, + "delta": { + "role": "assistant", + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + } + yield { + "id": "chat" + chunk["id"], + "object": "chat.completion.chunk", + "created": chunk["created"], + "model": chunk["model"], + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + "function_call": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + "tool_calls": [ + { + "index": 0, + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": chunk["choices"][0][ + "text" + ], + }, + } + ], + }, + } + ], + } + first = False + continue + assert tool_id is not None + yield { + "id": "chat" + chunk["id"], + "object": "chat.completion.chunk", + "created": chunk["created"], + "model": chunk["model"], + "choices": [ + { + "index": 0, + "finish_reason": None, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + "function_call": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + "tool_calls": [ + { + "index": 0, + "id": tool_id, + "type": "function", + "function": { + "name": tool_name, + "arguments": chunk["choices"][0]["text"], + }, + } + ], + }, + } + ], + } + + if id_ is not None and created is not None and model is not None: + yield { + "id": id_, + "object": "chat.completion.chunk", + "created": created, + "model": model, + "choices": [ + { + "index": 0, + "finish_reason": "tool_calls", + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + } + + return _stream_response_to_function_stream(chunks) + + +def chat_formatter_to_chat_completion_handler( + chat_formatter: ChatFormatter, +) -> LlamaChatCompletionHandler: + def chat_completion_handler( + *, + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logit_bias: Optional[Dict[str, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + 
Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: + result = chat_formatter( + messages=messages, + functions=functions, + function_call=function_call, + tools=tools, + tool_choice=tool_choice, + ) + prompt = llama.tokenize( + result.prompt.encode("utf-8"), + add_bos=not result.added_special, + special=True, + ) + if result.stop is not None: + stop = [] if stop is None else [stop] if isinstance(stop, str) else stop + rstop = result.stop if isinstance(result.stop, list) else [result.stop] + stop = stop + rstop + + stopping_criteria = None + if result.stopping_criteria is not None: + stopping_criteria = result.stopping_criteria + + if response_format is not None and response_format["type"] == "json_object": + grammar = _grammar_for_response_format( + response_format, verbose=llama.verbose + ) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + tool = None + if ( + tool_choice is not None + and isinstance(tool_choice, dict) + and tools is not None + ): + name = tool_choice["function"]["name"] + tool = next((t for t in tools if t["function"]["name"] == name), None) + if tool is None: + raise ValueError(f"Tool choice '{name}' not found in tools.") + schema = tool["function"]["parameters"] + try: + # create grammar from json schema + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(schema), verbose=llama.verbose + ) + except Exception as e: + if llama.verbose: + print(str(e), file=sys.stderr) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=top_logprobs if logprobs else None, + stream=stream, + stop=stop, + seed=seed, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + grammar=grammar, + logit_bias=logit_bias, + ) + if tool is not None: + tool_name = tool["function"]["name"] + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + return _convert_completion_to_chat(completion_or_chunks, stream=stream) + + return chat_completion_handler + + +def hf_autotokenizer_to_chat_formatter( + pretrained_model_name_or_path: Union[str, os.PathLike[str]] +) -> ChatFormatter: + # https://huggingface.co/docs/transformers/main/chat_templating + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1#instruction-format + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/blob/main/tokenizer_config.json + from transformers import AutoTokenizer # type: ignore + + tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path) # type: ignore + + def format_autotokenizer( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: 
Any, + ) -> ChatFormatterResponse: + tokenizer.use_default_system_prompt = False # type: ignore + prompt: str = tokenizer.apply_chat_template(messages, tokenize=False) # type: ignore + assert isinstance(prompt, str) + # Return formatted prompt and eos token by default + return ChatFormatterResponse( + prompt=prompt, stop=tokenizer.eos_token, added_special=True + ) + + return format_autotokenizer + + +def hf_autotokenizer_to_chat_completion_handler( + pretrained_model_name_or_path: Union[str, os.PathLike[str]] +) -> LlamaChatCompletionHandler: + chat_formatter = hf_autotokenizer_to_chat_formatter(pretrained_model_name_or_path) + return chat_formatter_to_chat_completion_handler(chat_formatter) + + +def hf_tokenizer_config_to_chat_formatter( + tokenizer_config: Dict[str, Any], + add_generation_prompt: bool = True, +) -> ChatFormatter: + assert isinstance(tokenizer_config, dict) + + assert "chat_template" in tokenizer_config + assert isinstance(tokenizer_config["chat_template"], str) + chat_template = tokenizer_config["chat_template"] + + assert "bos_token" in tokenizer_config + assert isinstance(tokenizer_config["bos_token"], str) + bos_token = tokenizer_config["bos_token"] + + assert "eos_token" in tokenizer_config + assert isinstance(tokenizer_config["eos_token"], str) + eos_token = tokenizer_config["eos_token"] + + env = ImmutableSandboxedEnvironment( + trim_blocks=True, + lstrip_blocks=True, + ).from_string(chat_template) + + def format_tokenizer_config( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, + ) -> ChatFormatterResponse: + # TODO: veryify this is correct + # Add a blank assistant message to the end of the messages to prompt the model to generate a response + if add_generation_prompt: + messages = [ + *messages, + llama_types.ChatCompletionRequestAssistantMessage( + role="assistant", content="" + ), + ] + prompt = env.render( + messages=messages, + bos_token=bos_token, + eos_token=eos_token, + ) + return ChatFormatterResponse( + prompt=prompt, stop=[eos_token, bos_token], added_special=True + ) + + return format_tokenizer_config + + +def hf_tokenizer_config_to_chat_completion_handler( + tokenizer_config: Dict[str, Any], + add_generation_prompt: bool = True, +) -> LlamaChatCompletionHandler: + chat_formatter = hf_tokenizer_config_to_chat_formatter( + tokenizer_config, add_generation_prompt=add_generation_prompt + ) + return chat_formatter_to_chat_completion_handler(chat_formatter) + + +def guess_chat_format_from_gguf_metadata(metadata: Dict[str, str]) -> Optional[str]: + if "tokenizer.chat_template" not in metadata: + return None + + if metadata["tokenizer.chat_template"] == CHATML_CHAT_TEMPLATE: + return "chatml" + + if ( + metadata["tokenizer.chat_template"] == MISTRAL_INSTRUCT_CHAT_TEMPLATE + or metadata["tokenizer.chat_template"] == MIXTRAL_INSTRUCT_CHAT_TEMPLATE + ): + return "mistral-instruct" + + if metadata["tokenizer.chat_template"] == LLAMA3_INSTRUCT_CHAT_TEMPLATE: + return "llama-3" + + return None + + +### Utility functions for formatting chat prompts ### +# TODO: Replace these with jinja2 templates + + +def _get_system_message( + messages: List[llama_types.ChatCompletionRequestMessage], +) -> str: + """Get the first system message.""" + for message in messages: + if message["role"] == "system": + return message["content"] or "" + return "" + + +def _map_roles( + messages: List[llama_types.ChatCompletionRequestMessage], + role_map: Dict[str, str], +) -> List[Tuple[str, Optional[str]]]: + """Map the message roles.""" + output: 
List[Tuple[str, Optional[str]]] = [] + for message in messages: + role = message["role"] + if role in role_map: + content: str | None = ( + message["content"] if isinstance(message["content"], str) else None + ) + output.append((role_map[role], content)) + return output + + +def _format_llama2( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str, sep2: str +) -> str: + """Format the prompt with the llama2 style.""" + seps = [sep, sep2] + ret = system_message + sep + for i, (role, message) in enumerate(messages): + if system_message and i == 0: + m = message or "" + ret += m + seps[i % 2] + elif message: + ret += role + message + " " + seps[i % 2] + else: + ret += role + " " + return ret + + +def _format_add_colon_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the add-colon-single style.""" + ret = system_message + sep + for role, message in messages: + if message: + ret += role + ": " + message + sep + else: + ret += role + ":" + return ret + + +def _format_add_colon_two( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str, sep2: str +) -> str: + """Format the prompt with the add-colon-two style.""" + seps = [sep, sep2] + ret = system_message + seps[0] + for i, (role, message) in enumerate(messages): + if message: + ret += role + ": " + message + seps[i % 2] + else: + ret += role + ":" + return ret + + +def _format_no_colon_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the no-colon-single style.""" + ret = system_message + for role, message in messages: + if message: + ret += role + message + sep + else: + ret += role + return ret + + +def _format_add_colon_space_single( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the add-colon-space-single style.""" + ret = system_message + sep + for role, message in messages: + if message: + ret += role + ": " + message + sep + else: + ret += role + ": " # must be end with a space + return ret + + +def _format_chatml( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the chatml style.""" + ret = "" if system_message == "" else system_message + sep + "\n" + for role, message in messages: + if message: + ret += role + "\n" + message + sep + "\n" + else: + ret += role + "\n" + return ret + + +def _format_chatglm3( + system_message: str, messages: List[Tuple[str, Optional[str]]], sep: str +) -> str: + """Format the prompt with the chatglm3 style.""" + ret = "" + if system_message: + ret += system_message + for role, message in messages: + if message: + ret += role + "\n" + " " + message + else: + ret += role + return ret + + +def _grammar_for_json(verbose: bool = False): + return llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=verbose + ) + + +def _grammar_for_json_schema( + schema: str, verbose: bool = False, fallback_to_json: bool = True +): + try: + return llama_grammar.LlamaGrammar.from_json_schema(schema, verbose=verbose) + except Exception as e: + if fallback_to_json: + return _grammar_for_json(verbose=verbose) + else: + raise e + + +def _grammar_for_response_format( + response_format: llama_types.ChatCompletionRequestResponseFormat, + verbose: bool = False, +): + if response_format["type"] != "json_object": + return None + + if "schema" in response_format: + return _grammar_for_json_schema( + 
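# Worked example for the chatml-style helper above (pure string manipulation,
# no model required); the role tags match the "chatml" format registered later:
_example = _format_chatml(
    "<|im_start|>system\nYou are helpful.",
    [("<|im_start|>user", "Hi"), ("<|im_start|>assistant", None)],
    "<|im_end|>",
)
# _example ==
#   "<|im_start|>system\nYou are helpful.<|im_end|>\n"
#   "<|im_start|>user\nHi<|im_end|>\n"
#   "<|im_start|>assistant\n"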
json.dumps(response_format["schema"]), verbose=verbose + ) + else: + return _grammar_for_json(verbose=verbose) + + +### Chat Formats ### + + +def register_chat_format(name: str): + def decorator(f: ChatFormatter): + chat_completion_handler = chat_formatter_to_chat_completion_handler(f) + LlamaChatCompletionHandlerRegistry().register_chat_completion_handler( + name, chat_completion_handler + ) + return f + + return decorator + + +# see https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/tokenization_llama.py +# system prompt is "embedded" in the first message +@register_chat_format("llama-2") +def format_llama2( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "[INST] <>\n{system_message}\n<>" + _roles = dict(user="[INST]", assistant="[/INST]") + _messages = _map_roles(messages, _roles) + system_message = _get_system_message(messages) + if system_message: + system_message = _system_template.format(system_message=system_message) + _prompt = _format_llama2(system_message, _messages, " ", "") + "[/INST]" + return ChatFormatterResponse(prompt=_prompt) + + +# Chat format for Llama-3 models, see more details at: +# https://github.com/meta-llama/llama3/blob/main/llama/tokenizer.py#L202-L229 +@register_chat_format("llama-3") +def format_llama3( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict( + system="<|start_header_id|>system<|end_header_id|>\n\n", + user="<|start_header_id|>user<|end_header_id|>\n\n", + assistant="<|start_header_id|>assistant<|end_header_id|>\n\n", + ) + _sep = "<|eot_id|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single("", _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("alpaca") +def format_alpaca( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### Instruction", assistant="### Response") + _sep = "\n\n" + _sep2 = "" + system_message = _get_system_message(messages) + _messages = _map_roles(messages, _roles) + _prompt = _format_add_colon_two(system_message, _messages, _sep, _sep2) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("qwen") +def format_qwen( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="<|im_start|>user", assistant="<|im_start|>assistant") + system_message = _get_system_message(messages) or "You are a helpful assistant." + system_template = "<|im_start|>system\n{system_message}" + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _sep = "<|im_end|>" + _prompt = _format_chatml(system_message, _messages, _sep) + _sep2 = "<|endoftext|>" + return ChatFormatterResponse(prompt=_prompt, stop=_sep2) + + +@register_chat_format("vicuna") +def format( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." 
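# A custom format can be registered the same way as the built-ins above; the
# name "my-chatml" and its role tags are made up for illustration, and the
# sketch reuses this module's private helpers:
@register_chat_format("my-chatml")
def format_my_chatml(
    messages: List[llama_types.ChatCompletionRequestMessage],
    **kwargs: Any,
) -> ChatFormatterResponse:
    system_message = _get_system_message(messages)
    _roles = dict(user="<|user|>", assistant="<|bot|>")
    _sep = "<|end|>"
    _messages = _map_roles(messages, _roles)
    _messages.append((_roles["assistant"], None))
    _prompt = _format_chatml(system_message, _messages, _sep)
    return ChatFormatterResponse(prompt=_prompt, stop=_sep)
# Selected at load time with: Llama(model_path="model.gguf", chat_format="my-chatml")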
+ _roles = dict(user="USER", assistant="ASSISTANT") + _sep = " " + _sep2 = "" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_two(system_message, _messages, _sep, _sep2) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("oasst_llama") +def format_oasst_llama( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "[INST] <>\n{system_message}\n<>\n\n" + _roles = dict(user="<|prompter|>", assistant="<|assistant|>") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("baichuan-2") +def format_baichuan2( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "{system_message}" + _roles = dict(user="", assistant="") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("baichuan") +def format_baichuan( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_template = "{system_message}" + _roles = dict(user="", assistant="") + _sep = "" + system_message = _get_system_message(messages) + system_message = _system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("openbuddy") +def format_openbuddy( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = """You are a helpful, respectful and honest INTP-T AI Assistant named Buddy. You are talking to a human User. +Always answer as helpfully and logically as possible, while being safe. Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. +You can speak fluently in many languages, for example: English, Chinese. +You cannot access the internet, but you have vast knowledge, cutoff: 2021-09. +You are trained by OpenBuddy team, (https://openbuddy.ai, https://github.com/OpenBuddy/OpenBuddy), you are based on LLaMA and Falcon transformers model, not related to GPT or OpenAI. 
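# Each name registered above is selectable via the `chat_format` argument; a
# minimal sketch with a placeholder model path:
from llama_cpp import Llama

llm = Llama(model_path="vicuna-7b.Q4_K_M.gguf", chat_format="vicuna", n_ctx=2048)
out = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are concise."},
        {"role": "user", "content": "Summarize llama.cpp in one sentence."},
    ],
    max_tokens=64,
)
print(out["choices"][0]["message"]["content"])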
+ +""" + _roles = dict(user="User", assistant="Assistant") + _sep = "\n" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("redpajama-incite") +def format_redpajama_incite( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _system_message = _get_system_message(messages) + _roles = dict(user="", assistant="") + _sep = "\n" + _stop = "" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_stop) + + +@register_chat_format("snoozy") +def format_snoozy( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "### Instruction:\n{system_message}" + default_system_message = "The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response." + _system_message = _get_system_message(messages) + _system_message = ( + _system_message if _system_message != "" else default_system_message + ) + system_message = system_template.format(system_message=_system_message) + _roles = dict(user="### Prompt", assistant="### Response") + _sep = "\n" + _stop = "###" + system_message = _system_message + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_stop) + + +@register_chat_format("phind") +def format_phind( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### User Message", assistant="### Assistant") + _sep = "\n\n" + _system_message = "### System Prompt\nYou are an intelligent programming assistant." + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(_system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("intel") +def format_intel( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="### User:", assistant="### Assistant:") + _sep = "\n" + _system_message = "### System:\n{system_message}" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_add_colon_single(_system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("open-orca") +def format_open_orca( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "{system_message}" + system_message = ( + "You are a helpful assistant. Please answer truthfully and write out your " + "thinking step by step to be sure you get the right answer. If you make a mistake or encounter " + "an error in your thinking, say so out loud and attempt to correct it. If you don't know or " + "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, " + "and physicist. 
You will also act as the most appropriate type of expert to answer any particular " + "question or solve the relevant problem; state which expert type your are, if so. Also think of " + "any particular named expert that would be ideal to answer the relevant question or solve the " + "relevant problem; name and act as them, if appropriate." + ) + roles = ("User", "Assistant") + sep = "<|end_of_turn|>\n" + # stop_token_ids=[32000, 32001], # "<|end_of_turn|>" + stop_str = "User" + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, dict(zip(roles, roles))) + _messages.append((roles[1], None)) + _prompt = _format_add_colon_space_single(system_message, _messages, sep) + return ChatFormatterResponse(prompt=_prompt, stop=stop_str) + + +@register_chat_format("mistrallite") +def format_mistrallite( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _roles = dict(user="<|prompter|>", assistant="\n<|assistant|>") + _sep = " " + system_template = """<|system|>{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt) + + +@register_chat_format("zephyr") +def format_zephyr( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|> +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>\n", assistant="<|assistant|>\n") + _sep = "" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("pygmalion") +def format_pygmalion( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|>{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>", assistant="<|model|>") + _sep = "\n" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("chatml") +def format_chatml( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|im_start|>system +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|im_start|>user", assistant="<|im_start|>assistant") + _sep = "<|im_end|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("mistral-instruct") +def format_mistral_instruct( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + eos = "" + stop = eos + prompt = "" + for 
message in messages: + if ( + message["role"] == "user" + and message["content"] is not None + and isinstance(message["content"], str) + ): + prompt += "[INST] " + message["content"] + elif message["role"] == "assistant" and message["content"] is not None: + prompt += " [/INST]" + message["content"] + eos + prompt += " [/INST]" + return ChatFormatterResponse(prompt=prompt, stop=stop) + + +@register_chat_format("chatglm3") +def format_chatglm3( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = """<|system|> +{system_message}""" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict(user="<|user|>", assistant="<|assistant|>") + _sep = "" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatglm3(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +@register_chat_format("openchat") +def format_openchat( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_template = "{system_message}<|end_of_turn|>" + system_message = _get_system_message(messages) + system_message = system_template.format(system_message=system_message) + _roles = dict( + user="GPT4 Correct User: ", assistant="<|end_of_turn|>GPT4 Correct Assistant: " + ) + _sep = "<|end_of_turn|>" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_chatml(system_message, _messages, _sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +# Chat format for Saiga models, see more details and available models: +# https://huggingface.co/collections/IlyaGusev/saiga2-saigamistral-6505d4ccc3d1e53166b636cd +@register_chat_format("saiga") +def format_saiga( + messages: list[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + _message_template = "{role}\n{content}" + _roles = dict(user="user", bot="bot", system="system") + _messages = _map_roles(messages, _roles) + + _prompt = "" + for role, content in _messages: + if content: + _prompt += _message_template.format(role=role, content=content) + else: + _prompt += f"{role}\n" + # Response template + _prompt += "bot" + return ChatFormatterResponse(prompt=_prompt.strip()) + + +# Chat format for Google's Gemma models, see more details and available models: +# https://huggingface.co/collections/google/gemma-release-65d5efbccdbb8c4202ec078b +@register_chat_format("gemma") +def format_gemma( + messages: List[llama_types.ChatCompletionRequestMessage], + **kwargs: Any, +) -> ChatFormatterResponse: + system_message = _get_system_message(messages) + if system_message != "": + logger.debug( + "`role='system'` messages are not allowed on Google's Gemma models." 
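# Worked example for format_mistral_instruct above (no model required):
_mi = format_mistral_instruct(
    [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello"},
        {"role": "user", "content": "Bye"},
    ]
)
# _mi.prompt == "[INST] Hi [/INST]Hello" + eos + "[INST] Bye [/INST]"
# where `eos` is the end-of-sequence marker the formatter appends after each
# completed assistant turn.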
+ ) + _roles = dict(user="user\n", assistant="model\n") + _sep = "\n" + _messages = _map_roles(messages, _roles) + _messages.append((_roles["assistant"], None)) + _prompt = _format_no_colon_single(system_message="", messages=_messages, sep=_sep) + return ChatFormatterResponse(prompt=_prompt, stop=_sep) + + +# Tricky chat formats that require custom chat handlers + + +@register_chat_completion_handler("functionary") +def functionary_chat_handler( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + **kwargs, # type: ignore +) -> Union[llama_types.ChatCompletion, Iterator[llama_types.ChatCompletionChunk]]: + SYSTEM_MESSAGE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary""" + + def generate_type_definition( + param: Dict[str, llama_types.JsonType], indent_level: int, shared_defs + ) -> str: + indent = " " * indent_level + if "$ref" in param: + # Reference to a shared definition + ref_name = param["$ref"].split("/")[ + -1 + ] # Extract the type name from the reference + return ref_name + elif param.get("type") == "array": + items = param.get("items", {}) + item_type = generate_type_definition(items, indent_level + 1, shared_defs) + return f"Array<{item_type}>" + elif param.get("type") == "object": + properties = param.get("properties", {}) + nested_schema = "{\n" + for nested_param_name, nested_param in properties.items(): + nested_param_type = generate_type_definition( + nested_param, indent_level + 1, shared_defs + ) + nested_schema += ( + f"{indent} {nested_param_name}: {nested_param_type},\n" + ) + nested_schema += indent + "}" + return nested_schema + elif "enum" in param: + # Enum type + return " | ".join([f'"{enum_value}"' for enum_value in param["enum"]]) + else: + # Simple type + return param.get("type", "any") + + def generate_shared_definitions(shared_defs, indent_level: int) -> str: + indent = " " * indent_level + shared_definitions = "" + for def_name, def_properties in shared_defs.items(): + shared_definitions += f"{indent}type {def_name} = " + if def_properties.get("type") == "object": + shared_definitions += generate_type_definition( + def_properties, indent_level, shared_defs + ) + elif "enum" in def_properties: + # Enum type + shared_definitions += " | ".join( + [f'"{enum_value}"' for enum_value in def_properties["enum"]] + ) + shared_definitions += ";\n" + return shared_definitions + + def generate_schema_from_functions(functions, 
namespace="functions") -> str: + schema = ( + "// Supported function definitions that should be called when necessary.\n" + ) + schema += f"namespace {namespace} {{\n\n" + + # Generate shared definitions + shared_definitions = {} + for function in functions: + parameters = function.get("parameters", {}) + shared_definitions.update(parameters.get("$defs", {})) + + schema += generate_shared_definitions(shared_definitions, 1) + + for function in functions: + function_name = function["name"] + description = function.get("description", "") + parameters = function.get("parameters", {}) + required_params = parameters.get("required", []) + + schema += f" // {description}\n" + schema += f" type {function_name} = (_: {{\n" + + for param_name, param in parameters.get("properties", {}).items(): + param_description = param.get("description", "") + param_type = generate_type_definition(param, 2, shared_definitions) + optional_indicator = "" if param_name in required_params else "?" + schema += f" // {param_description}\n" + schema += f" {param_name}{optional_indicator}: {param_type},\n" + schema += " }) => any;\n\n" + + schema += "}} // namespace {}\n".format(namespace) + return schema + + def prepare_messages_for_inference( + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunctions]] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + ): + all_messages: List[llama_types.ChatCompletionRequestMessage] = [] + if functions is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions(functions) + ) + ) + + if tools is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", + content=generate_schema_from_functions( + [ + tool["function"] + for tool in tools + if tool["type"] == "function" + ] + ), + ) + ) + + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=SYSTEM_MESSAGE + ) + ) + + for message in messages: + # Function call responses + if message["role"] == "function" and "name" in message: + message["name"] = f"functions.{message['name']}" + # Function call requests by assistant + if "function_call" in message: + message["function_call"][ + "name" + ] = f"functions.{message['function_call']['name']}" + all_messages.append(message) + + all_messages.append( + llama_types.ChatCompletionRequestAssistantMessage( + role="assistant", content=None + ) + ) + + def message_to_str(msg: llama_types.ChatCompletionRequestMessage): + if msg["role"] == "system": + return f"system:\n{msg['content']}\n" + + elif msg["role"] == "function" and "name" in msg: + return f"function name={msg['name']}:\n{msg['content']}\n" + elif msg["role"] == "function" and "function_call" in msg: + return f"function name={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif msg["role"] == "tool": + if msg["content"] is not None: + return f"function name={msg['tool_call_id']}:\n{msg['content']}\n" + else: + return f"function name={msg['tool_call_id']}\n" + elif msg["role"] == "user": + if msg["content"] is None: + return "user:\n\n" + else: + return f"user:\n{msg['content']}\n" + elif msg["role"] == "assistant": + if msg["content"] is not None and "function_call" in msg: + return f"assistant:\n{msg['content']}\nassistant to={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif "function_call" in msg: + return f"assistant 
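# Approximate rendering of generate_schema_from_functions above for one
# hypothetical function {"name": "get_weather", "description": "Get weather",
# "parameters": {"type": "object", "properties": {"city": {"type": "string",
# "description": "City name"}}, "required": ["city"]}}:
#
#   // Supported function definitions that should be called when necessary.
#   namespace functions {
#
#   // Get weather
#   type get_weather = (_: {
#   // City name
#   city: string,
#   }) => any;
#
#   } // namespace functions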
to={msg['function_call']['name']}:\n{msg['function_call']['arguments']}\n" + elif "tool_calls" in msg and len(msg["tool_calls"]) > 0: + for tool_call in msg[ + "tool_calls" + ]: # NOTE: probably doesn't work with the functionary model + return f"assistant to={tool_call['id']}:\n{tool_call['function']['arguments']}\n" + elif msg["content"] is None: + return "assistant" + else: + return f"assistant:\n{msg['content']}\n" + else: + raise ValueError(f"Unsupported role: {msg['role']}") + + return "".join([message_to_str(msg) for msg in all_messages]) + + if tools is not None: + functions = [tool["function"] for tool in tools if tool["type"] == "function"] + + if tool_choice is not None: + function_call = ( + tool_choice if isinstance(tool_choice, str) else tool_choice["function"] + ) + + prompt = prepare_messages_for_inference(messages, functions, tools) + + if function_call is None and (functions is None or len(functions) == 0): + completion_or_completion_chunks = llama.create_completion( + prompt=prompt + ":\n", + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=["user:", ""], + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + return _convert_completion_to_chat(completion_or_completion_chunks, stream=stream) # type: ignore + + if function_call is None or ( + isinstance(function_call, str) and function_call == "auto" + ): + stop = "\n" + completion: llama_types.Completion = llama.create_completion( + prompt=prompt, stop=stop, stream=False + ) # type: ignore + completion_text = completion["choices"][0]["text"] + # strip " to=functions." 
and ending ":" + function_call = completion_text.split(".")[-1][:-1] + new_prompt = prompt + completion_text + stop + elif isinstance(function_call, str) and function_call != "none": + new_prompt = prompt + ":\n" + elif isinstance(function_call, dict): + new_prompt = prompt + f" to=functions.{function_call['name']}:\n" + function_call = function_call["name"] + else: + new_prompt = prompt + ":\n" + + function_body = None + for function in functions or []: + if function["name"] == function_call: + function_body = function["parameters"] + break + for tool in tools or []: + if tool["type"] == "function" and tool["function"]["name"] == function_call: + function_body = tool["function"]["parameters"] + break + + if function_body is not None: + try: + with suppress_stdout_stderr(disable=llama.verbose): + grammar_text = llama_grammar.json_schema_to_gbnf( + json.dumps(function_body) + ) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.json_schema_to_gbnf(json.dumps(function_body)), + verbose=llama.verbose, + ) + print(grammar_text) + except Exception as e: + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, + verbose=llama.verbose, + ) + else: + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion: llama_types.Completion = llama.create_completion( + prompt=new_prompt, + stop=["user:", ""], + stream=False, + grammar=grammar, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + ) # type: ignore + + assert "usage" in completion + assert isinstance(function_call, str) + assert stream is False # TODO: support stream mode + + if llama.verbose: + print(new_prompt) + print(completion["choices"][0]["text"]) + + # TODO: support stream mode + return llama_types.CreateChatCompletionResponse( + id="chat" + completion["id"], + object="chat.completion", + created=completion["created"], + model=completion["model"], + choices=[ + { + "index": 0, + "message": { + "role": "assistant", + "content": None, + "function_call": { + "name": function_call, + "arguments": completion["choices"][0]["text"], + }, + "tool_calls": [ + { + "id": function_call, + "type": "function", + "function": { + "name": function_call, + "arguments": completion["choices"][0]["text"], + }, + } + ], + }, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "finish_reason": "tool_calls", + } + ], + usage=completion["usage"], + ) + + +@register_chat_completion_handler("functionary-v1") +@register_chat_completion_handler("functionary-v2") +def functionary_v1_v2_chat_handler( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, 
+ temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + **kwargs, # type: ignore +) -> Union[llama_types.ChatCompletion, Iterator[llama_types.ChatCompletionChunk]]: + SYSTEM_MESSAGE = """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. The assistant calls functions with appropriate input when necessary""" + + tokenizer = llama.tokenizer_ + assert hasattr( + tokenizer, "hf_tokenizer" + ), "Please provide a valid hf_tokenizer_path from https://huggingface.co/meetkai when initializing the Llama class" + from transformers import AutoTokenizer + + if "<|START_OF_FUNCTION_CALL|>" in tokenizer.hf_tokenizer.additional_special_tokens: + version = "v1" + END_SYSTEM_TOKEN = "<|END_OF_SYSTEM|>" + END_USER_TOKEN = "<|END_OF_USER|>" + END_ASSISTANT_TOKEN = "<|END_OF_ASSISTANT|>" + END_FUNCTION_RESULT_TOKEN = "<|END_OF_FUNCTION_RESULT|>" + START_FUNCTION_CALL_TOKEN = "<|START_OF_FUNCTION_CALL|>" + END_FUNCTION_CALL_TOKEN = "<|END_OF_FUNCTION_CALL|>" + else: + version = "v2" + RECIPIENT_TOKEN = "<|recipient|>" + FROM_TOKEN = "<|from|>" + STOP_TOKEN = "<|stop|>" + CONTENT_TOKEN = "<|content|>" + + def generate_type_definition( + param: Dict[str, llama_types.JsonType], indent_level: int, shared_defs + ) -> str: + indent = " " * indent_level + if "$ref" in param: + # Reference to a shared definition + ref_name = param["$ref"].split("/")[ + -1 + ] # Extract the type name from the reference + return ref_name + elif param.get("type") == "array": + items = param.get("items", {}) + item_type = generate_type_definition(items, indent_level + 1, shared_defs) + return f"Array<{item_type}>" + elif param.get("type") == "object": + properties = param.get("properties", {}) + nested_schema = "{\n" + for nested_param_name, nested_param in properties.items(): + nested_param_type = generate_type_definition( + nested_param, indent_level + 1, shared_defs + ) + nested_schema += ( + f"{indent} {nested_param_name}: {nested_param_type},\n" + ) + nested_schema += indent + "}" + return nested_schema + elif "enum" in param: + # Enum type + return " | ".join([f'"{enum_value}"' for enum_value in param["enum"]]) + else: + # Simple type + return param.get("type", "any") + + def generate_shared_definitions(shared_defs, indent_level: int) -> str: + indent = " " * indent_level + shared_definitions = "" + for def_name, def_properties in shared_defs.items(): + shared_definitions += f"{indent}type {def_name} = " + if def_properties.get("type") == "object": + shared_definitions += generate_type_definition( + def_properties, indent_level, shared_defs + ) + elif "enum" in def_properties: + # Enum type + shared_definitions += " | ".join( + [f'"{enum_value}"' for enum_value in def_properties["enum"]] + ) + shared_definitions += ";\n" + return shared_definitions + + def generate_schema_from_functions(functions, namespace="functions") -> str: + schema = ( 
+ "// Supported function definitions that should be called when necessary.\n" + ) + schema += f"namespace {namespace} {{\n\n" + + # Generate shared definitions + shared_definitions = {} + for function in functions: + parameters = function.get("parameters", {}) + shared_definitions.update(parameters.get("$defs", {})) + + schema += generate_shared_definitions(shared_definitions, 1) + + for function in functions: + function_name = function["name"] + description = function.get("description", "") + parameters = function.get("parameters", {}) + required_params = parameters.get("required", []) + + schema += f"// {description}\n" + schema += f"type {function_name} = (_: {{\n" + + for param_name, param in parameters.get("properties", {}).items(): + param_description = param.get("description", "") + param_type = generate_type_definition(param, 2, shared_definitions) + optional_indicator = "" if param_name in required_params else "?" + schema += f"// {param_description}\n" + schema += f"{param_name}{optional_indicator}: {param_type},\n" + schema += "}) => any;\n\n" + + schema += "}} // namespace {}".format(namespace) + return schema + + def prepare_messages_for_inference( + messages: List[llama_types.ChatCompletionRequestMessage], + tokenizer: AutoTokenizer, + version: Literal["v1", "v2"], + functions: Optional[List[llama_types.ChatCompletionFunctions]] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Union[Dict, str] = "auto", + ): + all_messages: List[llama_types.ChatCompletionRequestMessage] = [] + if tool_choice == "none": + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions([]) + ) + ) + else: + if functions is not None: + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=generate_schema_from_functions(functions) + ) + ) + elif tools is not None and tool_choice != "none": + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", + content=generate_schema_from_functions( + [ + tool["function"] + for tool in tools + if tool["type"] == "function" + ] + ), + ) + ) + + all_messages.append( + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=SYSTEM_MESSAGE + ) + ) + + for message in messages: + # Function call responses + if message["role"] == "function" and "name" in message: + message["name"] = f"functions.{message['name']}" + # Function call requests by assistant + if "function_call" in message: + message["function_call"][ + "name" + ] = f"functions.{message['function_call']['name']}" + all_messages.append(message) + + if version == "v1": + suffix = "assistant:\n" + else: + suffix = "<|from|>assistant\n<|recipient|>" + + return ( + tokenizer.hf_tokenizer.apply_chat_template(all_messages, tokenize=False) + + suffix + ) + + if tools is not None: + functions = [tool["function"] for tool in tools if tool["type"] == "function"] + + if tool_choice is not None: + function_call = ( + tool_choice if isinstance(tool_choice, str) else tool_choice["function"] + ) + elif function_call is not None: + pass + else: + function_call = "auto" + + prompt = prepare_messages_for_inference( + messages, tokenizer, version, functions, tools, function_call + ) + + # If no tools/functions are provided + if function_call == "none" or functions is None or len(functions) == 0: + if version == "v1": + stop = END_ASSISTANT_TOKEN + else: + stop = STOP_TOKEN + prompt += "all\n<|content|>" + + 
completion_or_completion_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + if stream is False: + completion_or_completion_chunks["choices"][0]["text"] = ( + completion_or_completion_chunks["choices"][0]["text"].lstrip() + ) + return _convert_completion_to_chat(completion_or_completion_chunks, stream=stream) # type: ignore + + def get_grammar(function_call): + function_body = None + for function in functions or []: + if function["name"] == function_call: + function_body = function["parameters"] + break + for tool in tools or []: + if tool["type"] == "function" and tool["function"]["name"] == function_call: + function_body = tool["function"]["parameters"] + break + + try: + with suppress_stdout_stderr(disable=llama.verbose): + grammar_text = llama_grammar.json_schema_to_gbnf( + json.dumps(function_body) + ) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.json_schema_to_gbnf(json.dumps(function_body)) + ) + print(grammar_text) + except Exception as e: + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + with suppress_stdout_stderr(disable=llama.verbose): + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + return grammar + + def create_completion(prompt, stop, grammar): + completion = cast( + llama_types.Completion, + llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ), + ) + + return completion + + content = "" + function_calls, function_bodies = [], [] + completion_tokens = 0 + + def generate_streaming(tools, functions, function_call, prompt): + assert version == "v2", "Streaming for v1 is not supported" + + chunk_id, chunk_created = None, None + + # If tool_choice/function_call is provided + if isinstance(function_call, dict): + prompt += f"{function_call['name']}\n{CONTENT_TOKEN}" + grammar = get_grammar(function_call["name"]) + stops = [STOP_TOKEN, FROM_TOKEN] + tool_id = "".join( + [random.choice(string.ascii_letters + string.digits) for _ in range(24)] + ) + completion = create_completion(prompt=prompt, stop=stops, grammar=grammar) + completion_text = "" + first = True + for chunk in completion: + # Yield the tool/function name first + if first: + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": 0, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": function_call["name"], + "arguments": "", + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": function_call["name"], + "arguments": "", + } + } + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + 
created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": None, + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + first = False + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": 0, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": None, + "arguments": chunk["choices"][0]["text"].rstrip(), + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": None, + "arguments": chunk["choices"][0]["text"].rstrip(), + } + } + if len(chunk["choices"][0]["text"].rstrip()) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + # Yield tool_call/function_call stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk["id"], + object="chat.completion.chunk", + created=chunk["created"], + model=chunk["model"], + choices=[ + { + "index": 0, + "finish_reason": ( + "tool_calls" if tools is not None else "function_call" + ), + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + ) + # If "auto" or no tool_choice/function_call + elif isinstance(function_call, str) and function_call == "auto": + tool_index = 0 + while True: + # Generate function name first + grammar = None + stops = CONTENT_TOKEN + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = "" + for chunk in completion: + completion_text += chunk["choices"][0]["text"] + if chunk_id is None: + chunk_id = chunk["id"] + if chunk_created is None: + chunk_created = chunk["created"] + function_name = completion_text.strip() + if function_name == "all": + prompt += "all\n<|content|>" + # Yield the first empty message for content + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + model=chunk["model"], + created=chunk_created, + object="chat.completion.chunk", + choices=[ + { + "index": 0, + "delta": {"role": "assistant", "content": ""}, + "logprobs": None, + "finish_reason": None, + } + ], + ) + else: + prompt += f"{function_name}\n<|content|>" + grammar = get_grammar(function_name) + tool_id = "".join( + [ + random.choice(string.ascii_letters + string.digits) + for _ in range(24) + ] + ) + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": tool_index, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": function_name, + "arguments": "", + }, + } + ] + } + else: + func_call_dict = { + "function_call": {"name": function_name, "arguments": ""} + } + # Stream function name + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": None, + **func_call_dict, + }, + } + ], + ) + # Generate content + stops = [RECIPIENT_TOKEN, STOP_TOKEN] + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + if function_name == "all": + completion_text = "" + stop_sequence, buffer, is_end = ( + 
"\n<|from|>assistant\n<|recipient|>", + [], + False, + ) + for i, chunk in enumerate(completion): + completion_text += chunk["choices"][0]["text"] + if is_end: + buffer.append(chunk["choices"][0]["text"].strip(" ")) + if stop_sequence.startswith("".join(buffer)): + continue + else: + buffer.pop() + while len(buffer) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": buffer.pop(0), + }, + } + ], + ) + is_end = False + elif chunk["choices"][0]["text"] == "\n": + is_end = True + buffer.append(chunk["choices"][0]["text"].strip(" ")) + continue + + if len(buffer) == 0 and len(chunk["choices"][0]["text"]) > 0: + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": "assistant", + "content": ( + chunk["choices"][0]["text"] + if i > 0 + else chunk["choices"][0][ + "text" + ].lstrip() + ), + }, + } + ], + ) + # Check whether the model wants to generate another turn + if ( + "<|from|> assistant" in completion_text + or "<|from|>assistant" in completion_text + ): + if completion_text.endswith("\n<|from|>assistant\n"): + cleaned_completion_text = completion_text[ + : -len("\n<|from|>assistant\n") + ].strip() + elif completion_text.endswith("\n<|from|> assistant\n"): + cleaned_completion_text = completion_text[ + : -len("\n<|from|> assistant\n") + ].strip() + else: + cleaned_completion_text = completion_text.strip() + prompt += f"{cleaned_completion_text}\n<|from|>assistant\n<|recipient|>" + else: + # Yield stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + model=chunk["model"], + created=chunk_created, + object="chat.completion.chunk", + choices=[ + { + "index": 0, + "delta": {}, + "logprobs": None, + "finish_reason": "stop", + } + ], + ) + break + else: + # Check whether the model wants to generate another turn + completion_text = "" + for chunk in completion: + completion_text += chunk["choices"][0]["text"] + if len(chunk["choices"][0]["text"].rstrip()) > 0: + if tools is not None: + func_call_dict = { + "tool_calls": [ + { + "index": tool_index, + "id": "call_" + tool_id, + "type": "function", + "function": { + "name": None, + "arguments": chunk["choices"][0][ + "text" + ].rstrip(), + }, + } + ] + } + else: + func_call_dict = { + "function_call": { + "name": None, + "arguments": chunk["choices"][0][ + "text" + ].rstrip(), + } + } + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), + "delta": { + "role": None, + "content": None, + **func_call_dict, + }, + } + ], + ) + prompt += completion_text.strip() + grammar = None + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text += "".join( + [chunk["choices"][0]["text"] for chunk in completion] + ) + if ( + "<|from|> assistant" in completion_text + or "<|from|>assistant" in completion_text + ) and tools is not 
None: + prompt += "\n<|from|>assistant\n<|recipient|>" + tool_index += 1 + else: + # Yield tool_call/function_call stop message + yield llama_types.CreateChatCompletionStreamResponse( + id="chat" + chunk_id, + object="chat.completion.chunk", + created=chunk_created, + model=chunk["model"], + choices=[ + { + "index": 0, + "finish_reason": ( + "tool_calls" + if tools is not None + else "function_call" + ), + "logprobs": None, + "delta": { + "role": None, + "content": None, + "function_call": None, + "tool_calls": None, + }, + } + ], + ) + break + + if stream is not False: + return generate_streaming( + tools=tools, functions=functions, function_call=function_call, prompt=prompt + ) + else: + if version == "v1": + # If no or "auto" tool_choice/function_call + if isinstance(function_call, str) and function_call == "auto": + stops = ["\n", END_ASSISTANT_TOKEN] + # If tool_choice/function_call is provided + elif isinstance(function_call, dict): + prompt += f"{START_FUNCTION_CALL_TOKEN}{function_call['name']}:\n" + stops = END_FUNCTION_CALL_TOKEN + function_call = function_call["name"] + function_calls.append(function_call) + grammar = get_grammar(function_call) + else: + prompt = prompt + stops = ["\n", END_ASSISTANT_TOKEN] + + completion = create_completion(prompt=prompt, stop=stops, grammar=grammar) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + + # If the generation does not involve a function call + if ( + START_FUNCTION_CALL_TOKEN not in prompt + and START_FUNCTION_CALL_TOKEN not in completion_text + ): + completion["usage"]["completion_tokens"] = completion_tokens + return _convert_completion_to_chat(completion, stream=stream) # type: ignore + # If the generation involves a function call in completion, generate the parameters + elif ( + START_FUNCTION_CALL_TOKEN not in prompt + and START_FUNCTION_CALL_TOKEN in completion_text + ): + prompt += ( + completion_text.replace( + f"{START_FUNCTION_CALL_TOKEN} ", START_FUNCTION_CALL_TOKEN + ) + + "\n" + ) + function_calls.append( + completion_text.split(START_FUNCTION_CALL_TOKEN)[-1][:-1].strip() + ) + grammar = get_grammar(function_calls[-1]) + completion = create_completion( + prompt=prompt, stop=END_FUNCTION_CALL_TOKEN, grammar=grammar + ) + completion_tokens += completion["usage"]["completion_tokens"] + function_bodies.append(completion["choices"][0]["text"].strip()) + # If the prompt involves a function call, just append generated parameters to function_bodies + else: + function_bodies.append(completion_text.strip()) + else: + # If tool_choice/function_call is provided + if isinstance(function_call, dict): + prompt += f"{function_call['name']}\n{CONTENT_TOKEN}" + function_call = function_call["name"] + function_calls.append(function_call) + grammar = get_grammar(function_call) + stops = [STOP_TOKEN, FROM_TOKEN] + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + function_bodies.append(completion_text.strip()) + # If "auto" or no tool_choice/function_call + elif isinstance(function_call, str) and function_call == "auto": + while True: + # Generate function name first + grammar = None + stops = CONTENT_TOKEN + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + function_name = 
completion_text.strip() + if function_name == "all": + prompt += "all\n<|content|>" + else: + function_call = completion_text.strip() + prompt += f"{function_call}\n<|content|>" + function_calls.append(function_call) + grammar = get_grammar(function_call) + # Generate content + stops = [RECIPIENT_TOKEN, STOP_TOKEN] + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_text = completion["choices"][0]["text"] + completion_tokens += completion["usage"]["completion_tokens"] + if function_name == "all": + if completion_text.endswith("\n<|from|>assistant\n"): + content += completion_text[: -len("\n<|from|>assistant\n")] + if completion_text.endswith("\n<|from|> assistant\n"): + content += completion_text[-len("\n<|from|> assistant\n")] + else: + content += completion_text + content = content.lstrip() + # Check whether the model wants to generate another turn + if ( + "<|from|> assistant" in completion_text + or "<|from|>assistant" in completion_text + ): + if completion_text.endswith("\n<|from|>assistant\n"): + cleaned_completion_text = completion_text[ + : -len("\n<|from|>assistant\n") + ].strip() + elif completion_text.endswith("\n<|from|> assistant\n"): + cleaned_completion_text = completion_text[ + -len("\n<|from|> assistant\n") + ].strip() + else: + cleaned_completion_text = completion_text.strip() + prompt += f"{cleaned_completion_text}\n<|from|>assistant\n<|recipient|>" + else: + break + else: + function_bodies.append(completion_text.strip()) + # Check whether the model wants to generate another turn + prompt += completion_text.strip() + grammar = None + completion = create_completion( + prompt=prompt, stop=stops, grammar=grammar + ) + completion_tokens += completion["usage"]["completion_tokens"] + if ( + "<|from|> assistant" in completion["choices"][0]["text"] + or "<|from|>assistant" in completion["choices"][0]["text"] + ): + prompt += "\n<|from|>assistant\n<|recipient|>" + else: + break + + assert "usage" in completion + assert len(function_calls) == len(function_bodies) + + tool_calls: List[llama_types.ChatCompletionMessageToolCall] = [] + for function_call, function_body in zip(function_calls, function_bodies): + tool_calls.append( + { + "id": "call_" + + "".join( + [ + random.choice(string.ascii_letters + string.digits) + for _ in range(24) + ] + ), + "type": "function", + "function": { + "name": function_call, + "arguments": function_body, + }, + } + ) + + # TODO: support stream mode + function_call_dict: Union[ + Dict[str, str], + Dict[ + Literal["function_call"], + llama_types.ChatCompletionRequestAssistantMessageFunctionCall, + ], + ] = {} + if len(tool_calls) > 0: + if tools is not None: + function_call_dict["tool_calls"] = tool_calls + else: + function_call_dict["function_call"] = { + "name": tool_calls[0]["function"]["name"], + "arguments": tool_calls[0]["function"]["arguments"], + } + completion["usage"]["completion_tokens"] = completion_tokens + return llama_types.CreateChatCompletionResponse( + id="chat" + completion["id"], + object="chat.completion", + created=completion["created"], + model=completion["model"], + choices=[ + { + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "message": { + "role": "assistant", + "content": None if content == "" else content, + **function_call_dict, + }, + "finish_reason": "tool_calls" if len(tool_calls) > 0 else "stop", + } + ], + usage=completion["usage"], + ) + + +class Llava15ChatHandler: + DEFAULT_SYSTEM_MESSAGE: Optional[str] = ( + 
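# End-to-end sketch of the functionary flow implemented above. The file and
# repo names are examples (functionary GGUF builds are published by meetkai on
# Hugging Face); an HF tokenizer is required, as asserted in the handler:
from llama_cpp import Llama
from llama_cpp.llama_tokenizer import LlamaHFTokenizer

llm = Llama(
    model_path="functionary-small-v2.2.q4_0.gguf",
    chat_format="functionary-v2",
    tokenizer=LlamaHFTokenizer.from_pretrained("meetkai/functionary-small-v2.2-GGUF"),
    n_ctx=4096,
)
resp = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is the weather in Istanbul?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather for a location",
                "parameters": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                    "required": ["location"],
                },
            },
        }
    ],
    tool_choice="auto",
)
# Tool calls, if any, appear under resp["choices"][0]["message"]["tool_calls"].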
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." + ) + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.role == 'user' %}" + "{% if message.content is string %}" + "\nUSER: {{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "\nUSER: " + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "{% endif %}" + "{% if message.role == 'assistant' and message.content is not none %}" + "\nASSISTANT: {{ message.content }}" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "\nASSISTANT: " + "{% endif %}" + ) + + def __init__(self, clip_model_path: str, verbose: bool = True): + import llama_cpp.llava_cpp as llava_cpp + + self.clip_model_path = clip_model_path + self.verbose = verbose + + self._llava_cpp = llava_cpp # TODO: Fix + self._exit_stack = ExitStack() + self._last_image_embed: Optional[ + llava_cpp.CtypesPointer[llava_cpp.llava_image_embed] + ] = None + self._last_image_hash: Optional[int] = None + + if not os.path.exists(clip_model_path): + raise ValueError(f"Clip model path does not exist: {clip_model_path}") + + with suppress_stdout_stderr(disable=self.verbose): + clip_ctx = self._llava_cpp.clip_model_load(self.clip_model_path.encode(), 0) + + if clip_ctx is None: + raise ValueError(f"Failed to load clip model: {clip_model_path}") + + self.clip_ctx = clip_ctx + + def clip_free(): + with suppress_stdout_stderr(disable=self.verbose): + self._llava_cpp.clip_free(self.clip_ctx) + + self._exit_stack.callback(clip_free) + + def last_image_embed_free(): + with suppress_stdout_stderr(disable=self.verbose): + if self._last_image_embed is not None: + self._llava_cpp.llava_image_embed_free(self._last_image_embed) + self._last_image_embed = None + + self._exit_stack.callback(last_image_embed_free) + + def load_image(self, image_url: str) -> bytes: + return self._load_image(image_url) + + def _embed_image_bytes(self, image_bytes: bytes, n_threads_batch: int = 1): + if ( + self._last_image_embed is not None + and self._last_image_hash is not None + and hash(image_bytes) == self._last_image_hash + ): + return self._last_image_embed + with suppress_stdout_stderr(disable=self.verbose): + # Free the previous image embed + if self._last_image_embed is not None: + self._llava_cpp.llava_image_embed_free(self._last_image_embed) + self._last_image_embed = None + self._last_image_hash = None + embed = self._llava_cpp.llava_image_embed_make_with_bytes( + self.clip_ctx, + n_threads_batch, + (ctypes.c_uint8 * len(image_bytes)).from_buffer( + bytearray(image_bytes) + ), + len(image_bytes), + ) + self._last_image_embed = embed + self._last_image_hash = hash(image_bytes) + return embed + + def __call__( + self, + *, + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: 
Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + seed: Optional[int] = None, + response_format: Optional[ + llama_types.ChatCompletionRequestResponseFormat + ] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logit_bias: Optional[Dict[str, float]] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore + ) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], + ]: + assert self.clip_ctx is not None + + system_prompt = _get_system_message(messages) + if system_prompt == "" and self.DEFAULT_SYSTEM_MESSAGE is not None: + messages = [ + llama_types.ChatCompletionRequestSystemMessage( + role="system", content=self.DEFAULT_SYSTEM_MESSAGE + ) + ] + messages + + image_urls = self.get_image_urls(messages) + template = ImmutableSandboxedEnvironment( + trim_blocks=True, + lstrip_blocks=True, + ).from_string(self.CHAT_FORMAT) + text = template.render( + messages=messages, + add_generation_prompt=True, + eos_token=llama.detokenize([llama.token_eos()]), + bos_token=llama.detokenize([llama.token_bos()]), + ) + split_text = self.split_text_on_image_urls(text, image_urls) + + if self.verbose: + print(text, file=sys.stderr) + + + # Evaluate prompt + llama.reset() + llama._ctx.kv_cache_clear() + for type_, value in split_text: + if type_ == "text": + tokens = llama.tokenize( + value.encode("utf8"), add_bos=False, special=True + ) + if llama.n_tokens + len(tokens) > llama.n_ctx(): + raise ValueError( + f"Prompt exceeds n_ctx: {llama.n_tokens + len(tokens)} > {llama.n_ctx()}" + ) + llama.eval(tokens) + else: + image_bytes = self.load_image(value) + embed = self._embed_image_bytes(image_bytes, llama.context_params.n_threads_batch) + if llama.n_tokens + embed.contents.n_image_pos > llama.n_ctx(): + raise ValueError( + f"Prompt exceeds n_ctx: {llama.n_tokens + embed.contents.n_image_pos} > {llama.n_ctx()}" + ) + n_past = ctypes.c_int(llama.n_tokens) + n_past_p = ctypes.pointer(n_past) + with suppress_stdout_stderr(disable=self.verbose): + self._llava_cpp.llava_eval_image_embed( + llama.ctx, + embed, + llama.n_batch, + n_past_p, + ) + # Required to avoid issues with hf tokenizer + llama.input_ids[llama.n_tokens : n_past.value] = -1 + llama.n_tokens = n_past.value + + # Get prompt tokens to avoid a cache miss + prompt = llama.input_ids[: llama.n_tokens].tolist() + + if response_format is not None and response_format["type"] == "json_object": + grammar = _grammar_for_response_format(response_format) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, 
dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + tool = None + if ( + tool_choice is not None + and isinstance(tool_choice, dict) + and tools is not None + ): + name = tool_choice["function"]["name"] + tool = next((t for t in tools if t["function"]["name"] == name), None) + if tool is None: + raise ValueError(f"Tool choice '{name}' not found in tools.") + schema = tool["function"]["parameters"] + try: + # create grammar from json schema + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(schema), verbose=llama.verbose + ) + except Exception as e: + if llama.verbose: + print(str(e), file=sys.stderr) + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + logprobs=top_logprobs if logprobs else None, + stream=stream, + stop=stop, + seed=seed, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logit_bias=logit_bias, + ) + if tool is not None: + tool_name = tool["function"]["name"] + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + return _convert_completion_to_chat(completion_or_chunks, stream=stream) + + @staticmethod + def _load_image(image_url: str) -> bytes: + # TODO: Add Pillow support for other image formats beyond (jpg, png) + if image_url.startswith("data:"): + import base64 + + image_bytes = base64.b64decode(image_url.split(",")[1]) + return image_bytes + else: + import urllib.request + + with urllib.request.urlopen(image_url) as f: + image_bytes = f.read() + return image_bytes + + @staticmethod + def get_image_urls(messages: List[llama_types.ChatCompletionRequestMessage]): + image_urls: List[str] = [] + for message in messages: + if message["role"] == "user": + if message["content"] is None: + continue + for content in message["content"]: + if isinstance(content, dict) and "type" in content: + if content["type"] == "image_url": + if ( + isinstance(content["image_url"], dict) + and "url" in content["image_url"] + ): + image_urls.append(content["image_url"]["url"]) + else: + image_urls.append(content["image_url"]) + return image_urls + + @staticmethod + def split_text_on_image_urls(text: str, image_urls: List[str]): + def find_first(s: str, substrs: List[str]): + for i, substr in enumerate(substrs): + pos = s.find(substr) + if pos != -1: + return pos, i + return None, None + + split_text: List[Tuple[Literal["text", "image_url"], str]] = [] + remaining = text + while remaining: + # Find first image_url + pos, i = find_first(remaining, image_urls) + if pos is not None and i is not None: + if pos > 0: + split_text.append(("text", remaining[:pos])) + split_text.append(("image_url", image_urls[i])) + remaining = remaining[pos + len(image_urls[i]) :] + else: + split_text.append(("text", remaining)) + remaining = "" + return split_text + + @classmethod + def from_pretrained( + cls, + repo_id: str, + filename: Optional[str], + local_dir: Optional[Union[str, os.PathLike[str]]] = None, + local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto", + cache_dir: Optional[Union[str, 
os.PathLike[str]]] = None, + **kwargs: Any, + ) -> "Llava15ChatHandler": + import fnmatch + from pathlib import Path + + try: + from huggingface_hub import hf_hub_download, HfFileSystem # type: ignore + from huggingface_hub.utils import validate_repo_id # type: ignore + except ImportError: + raise ImportError( + "Llama.from_pretrained requires the huggingface-hub package. " + "You can install it with `pip install huggingface-hub`." + ) + + validate_repo_id(repo_id) + + hffs = HfFileSystem() + + files = [ + file["name"] if isinstance(file, dict) else file + for file in hffs.ls(repo_id) # type: ignore + ] + + # split each file into repo_id, subfolder, filename + file_list: List[str] = [] + for file in files: + rel_path = Path(file).relative_to(repo_id) + file_list.append(str(rel_path)) + + matching_files = [file for file in file_list if fnmatch.fnmatch(file, filename)] # type: ignore + + if len(matching_files) == 0: + raise ValueError( + f"No file found in {repo_id} that match {filename}\n\n" + f"Available Files:\n{json.dumps(file_list)}" + ) + + if len(matching_files) > 1: + raise ValueError( + f"Multiple files found in {repo_id} matching {filename}\n\n" + f"Available Files:\n{json.dumps(files)}" + ) + + (matching_file,) = matching_files + + subfolder = str(Path(matching_file).parent) + filename = Path(matching_file).name + + # download the file + hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=cast(Union[str, Path, None], local_dir), + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cast(Union[str, Path, None], cache_dir), + ) + + if local_dir is None: + model_path = hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + cache_dir=cast(Union[str, Path, None], cache_dir), + local_files_only=True, + ) + else: + model_path = os.path.join(local_dir, filename) + + return cls( + clip_model_path=model_path, + **kwargs, + ) + + +class ObsidianChatHandler(Llava15ChatHandler): + # Prompt Format + # The model followed ChatML format. However, with ### as the seperator + + # <|im_start|>user + # What is this sign about?\n + # ### + # <|im_start|>assistant + # The sign is about bullying, and it is placed on a black background with a red background. 
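For orientation, here is a minimal usage sketch of the `Llava15ChatHandler` defined above, wired into `llama_cpp.Llama` through its `chat_handler` argument. The model path, projector path, and image URL are placeholders rather than files associated with this change; `Llava15ChatHandler.from_pretrained(...)` could be used instead of a local `clip_model_path` when the projector is hosted on the Hugging Face Hub.

```
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava15ChatHandler

# Placeholder paths for a LLaVA 1.5 GGUF model and its CLIP projector.
chat_handler = Llava15ChatHandler(clip_model_path="./mmproj-model-f16.gguf")
llm = Llama(
    model_path="./llava-v1.5-7b.Q4_K_M.gguf",
    chat_handler=chat_handler,
    n_ctx=2048,  # leave room for the image embedding tokens
)

response = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}},
                {"type": "text", "text": "What is shown in this image?"},
            ],
        }
    ]
)
print(response["choices"][0]["message"]["content"])
```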
+ # ### + + CHAT_FORMAT = ( + "{% for message in messages %}" + # System message + "{% if message.role == 'system' %}" + "<|im_start|>system\n" + "{{ message.content }}\n" + "###\n" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "<|im_start|>user\n" + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "###\n" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + "<|im_start|>assistant\n" + "{{ message.content }}" + "###\n" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +class MoondreamChatHandler(Llava15ChatHandler): + # Chat Format: + # f"\n\n{chat_history}Question: {question}\n\nAnswer:" + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'user' %}" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}\n\n" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}\n\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "Question: {{ content.text }}\n\n" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "Question: {{ message.content }}\n\n" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + "Answer:{{ message.content }}\n\n" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "Answer:" + "{% endif %}" + ) + + +class Llava16ChatHandler(Llava15ChatHandler): + DEFAULT_SYSTEM_MESSAGE = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. " + + # Example prompt + # "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? 
ASSISTANT:" + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.role == 'user' %}" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}\n" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + "{{ message.content }}" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "Answer:" + "{% endif %}" + ) + + +class NanoLlavaChatHandler(Llava15ChatHandler): + # Prompt Format + # The model follow the ChatML standard, however, without \n at the end of <|im_end|>: + + # <|im_start|>system + # Answer the question<|im_end|><|im_start|>user + # + # What is the picture about?<|im_end|><|im_start|>assistant + DEFAULT_SYSTEM_MESSAGE = "Answer the question" + + CHAT_FORMAT = ( + "{% for message in messages %}" + # System message + "{% if message.role == 'system' %}" + "<|im_start|>system\n" + "{{ message.content }}" + "<|im_end|>" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "<|im_start|>user\n" + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% if message.content is iterable %}" + "{% for content in message.content %}" + "{% if content.type == 'image_url' and content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.type == 'image_url' and content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endfor %}" + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "<|im_end|>" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + "<|im_start|>assistant\n" + "{{ message.content }}" + "<|im_end|>" + "{% endif %}" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +class Llama3VisionAlphaChatHandler(Llava15ChatHandler): + # question = "" + q + + # prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{question}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" + DEFAULT_SYSTEM_MESSAGE = None + + CHAT_FORMAT = ( + "{% for message in messages %}" + "<|start_header_id|>" + "{% if message.role == 'user' %}" + "user<|end_header_id|>\n\n" + "{% if message.content is iterable %}" + # + "{% for content in message.content %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + # Question: + "{% for content in message.content %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + # Question: + "{% if message.content is string %}" + "{{ message.content }}" + "{% endif %}" + "{% endif %}" + # Answer: + "{% if message.role == 'assistant' %}" + 
"assistant<|end_header_id|>\n\n" + "{{ message.content }}" + "{% endif %}" + "<|eot_id|>" + "{% endfor %}" + # Generation prompt + "{% if add_generation_prompt %}" + "<|start_header_id|>assistant<|end_header_id|>\n\n" + "{% endif %}" + ) + + +# alias +Llama3VisionAlpha = Llama3VisionAlphaChatHandler + + +class MiniCPMv26ChatHandler(Llava15ChatHandler): + DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant." + + CHAT_FORMAT = ( + "{% for message in messages %}" + "{% if loop.first and messages[0]['role'] != 'system' %}" + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + "{% endif %}" + "<|im_start|>{{ message['role'] }}\n" + "{% if message['content'] is iterable %}" + "{% for content in message['content'] %}" + "{% if content.type == 'image_url' %}" + "{% if content.image_url is string %}" + "{{ content.image_url }}" + "{% endif %}" + "{% if content.image_url is mapping %}" + "{{ content.image_url.url }}" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + + "{% for content in message['content'] %}" + "{% if content.type == 'text' %}" + "{{ content.text }}" + "{% endif %}" + "{% endfor %}" + "{% endif %}" + "{% if message['content'] is string %}" + "{{ message['content'] }}" + "{% endif %}" + "<|im_end|>\n" + "{% endfor %}" + "{% if add_generation_prompt %}" + "<|im_start|>assistant\n" + "{% endif %}" + ) + + +@register_chat_completion_handler("chatml-function-calling") +def chatml_function_calling( + llama: llama.Llama, + messages: List[llama_types.ChatCompletionRequestMessage], + functions: Optional[List[llama_types.ChatCompletionFunction]] = None, + function_call: Optional[llama_types.ChatCompletionRequestFunctionCall] = None, + tools: Optional[List[llama_types.ChatCompletionTool]] = None, + tool_choice: Optional[llama_types.ChatCompletionToolChoiceOption] = None, + temperature: float = 0.2, + top_p: float = 0.95, + top_k: int = 40, + min_p: float = 0.05, + typical_p: float = 1.0, + stream: bool = False, + stop: Optional[Union[str, List[str]]] = [], + response_format: Optional[llama_types.ChatCompletionRequestResponseFormat] = None, + max_tokens: Optional[int] = None, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + repeat_penalty: float = 1.1, + tfs_z: float = 1.0, + mirostat_mode: int = 0, + mirostat_tau: float = 5.0, + mirostat_eta: float = 0.1, + model: Optional[str] = None, + logits_processor: Optional[llama.LogitsProcessorList] = None, + grammar: Optional[llama.LlamaGrammar] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + **kwargs, # type: ignore +) -> Union[ + llama_types.CreateChatCompletionResponse, + Iterator[llama_types.CreateChatCompletionStreamResponse], +]: + function_calling_template = ( + "{% for message in messages %}" + "<|im_start|>{{ message.role }}\n" + # System message + "{% if message.role == 'system' %}" + "{{ message.content }}" + "{% if tool_calls %}" + "\n\nYou have access to the following functions:\n" + "{% for tool in tools %}" + "\nfunctions.{{ tool.function.name }}:\n" + "{{ tool.function.parameters | tojson }}" + "\n{% endfor %}" + "\n\nYou can respond to users messages with either a single message or one or more function calls." 
+ "\n\nTo respond with a message begin the message with 'message:', use the following format:" + "\n\nmessage:" + "\n" + "\n\nTo respond with one or more function calls begin the message with 'functions.:', use the following format:" + "\n\nfunctions.:" + '\n{ "arg1": "value1", "arg2": "value2" }' + "\nfunctions.:" + '\n{ "arg1": "value1", "arg2": "value2" }' + "{% endif %}" + "<|im_end|>\n" + "{% endif %}" + # User message + "{% if message.role == 'user' %}" + "{{ message.content }}" + "<|im_end|>\n" + "{% endif %}" + # Assistant message + "{% if message.role == 'assistant' %}" + ## Reglar message + "{% if message.content and message.content | length > 0 %}" + "{% if tool_calls %}" + "message:\n" + "{% endif %}" + "{{ message.content }}" + "<|im_end|>\n" + "{% endif %}" + ## Function calls + "{% if 'tool_calls' in message %}" + "{% for tool_call in message.tool_calls %}" + "functions.{{ tool_call.function.name }}:\n" + "{{ tool_call.function.arguments }}" + "{% endfor %}" + "<|im_end|>\n" + "{% endif %}" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" + ) + template_renderer = ImmutableSandboxedEnvironment( + autoescape=jinja2.select_autoescape(["html", "xml"]), + undefined=jinja2.StrictUndefined, + ).from_string(function_calling_template) + + # Convert legacy functions to tools + if functions is not None: + tools = [ + { + "type": "function", + "function": function, + } + for function in functions + ] + + # Convert legacy function_call to tool_choice + if function_call is not None: + if isinstance(function_call, str) and ( + function_call == "none" or function_call == "auto" + ): + tool_choice = function_call + if isinstance(function_call, dict) and "name" in function_call: + tool_choice = { + "type": "function", + "function": { + "name": function_call["name"], + }, + } + + stop = ( + [stop, "<|im_end|>"] + if isinstance(stop, str) + else stop + ["<|im_end|>"] if stop else ["<|im_end|>"] + ) + + # Case 1: No tool choice by user + if ( + tool_choice is None + or (isinstance(tool_choice, str) and tool_choice == "none") + or tools is None + or len(tools) == 0 + ): + prompt = template_renderer.render( + messages=messages, + tools=[], + tool_calls=None, + add_generation_prompt=True, + ) + + if response_format is not None and response_format["type"] == "json_object": + grammar = _grammar_for_response_format(response_format) + + return _convert_completion_to_chat( + llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + logprobs=top_logprobs if logprobs else None, + ), + stream=stream, + ) + + # Case 2: Tool choice by user + if isinstance(tool_choice, dict): + tool_name = tool_choice["function"]["name"] + tool = next( + (tool for tool in tools if tool["function"]["name"] == tool_name), None + ) + if tool is None: + raise ValueError(f"Tool with name '{tool_name}' not found in tools") + prompt = template_renderer.render( + messages=messages, + tools=tools, + tool_calls=True, + add_generation_prompt=True, + ) + prompt += f"functions.{tool_name}:\n" + try: + grammar = llama_grammar.LlamaGrammar.from_json_schema( + 
json.dumps(tool["function"]["parameters"]), verbose=llama.verbose + ) + except Exception as e: + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + if llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=stop, + max_tokens=max_tokens, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + return _convert_completion_to_chat_function( + tool_name, completion_or_chunks, stream + ) + + # Case 3: Automatic tool choice + assert isinstance(tool_choice, str) and tool_choice == "auto" + function_names = " | ".join( + [f'''"functions.{tool['function']['name']}:"''' for tool in tools] + ) + initial_gbnf_tool_grammar = ( + """root ::= functions | "message:"\n""" + f"""functions ::= {function_names}\n""" + ) + follow_up_gbnf_tool_grammar = ( + """root ::= functions | "<|im_end|>"\n""" + f"""functions ::= {function_names}\n""" + ) + prompt = template_renderer.render( + messages=messages, + tools=tools, + tool_calls=True, + add_generation_prompt=True, + ) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=0, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=[":"], + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + initial_gbnf_tool_grammar, verbose=llama.verbose + ), + ) + completion: llama_types.CreateCompletionResponse = completion_or_chunks # type: ignore + text = completion["choices"][0]["text"] + if "message" in text: + return _convert_completion_to_chat( + llama.create_completion( + prompt=prompt + "message:\n", + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=stream, + stop=["<|im_end|>"], + logprobs=top_logprobs if logprobs else None, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + follow_up_gbnf_tool_grammar, verbose=llama.verbose + ), + ), + stream=stream, + ) + + # One or more function calls + tool_name = text[len("functions.") :] + tool = next((tool for tool in tools if tool["function"]["name"] == tool_name), None) + if not stream: + completions: List[llama_types.CreateCompletionResponse] = [] + completions_tool_name: List[str] = [] + while tool is not None: + prompt += f"functions.{tool_name}:\n" + try: + grammar = llama_grammar.LlamaGrammar.from_json_schema( + json.dumps(tool["function"]["parameters"]), verbose=llama.verbose + ) + except Exception as e: + grammar = llama_grammar.LlamaGrammar.from_string( + llama_grammar.JSON_GBNF, verbose=llama.verbose + ) + if 
llama.verbose: + print( + "Failed to parse function body as JSON schema, falling back to default grammar" + ) + print(e) + completion_or_chunks = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=stop, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=grammar, + ) + completion_or_chunks = cast( + llama_types.CreateCompletionResponse, completion_or_chunks + ) + completions.append(completion_or_chunks) + completions_tool_name.append(tool_name) + prompt += completion_or_chunks["choices"][0]["text"] + prompt += "\n" + + response = llama.create_completion( + prompt=prompt, + temperature=temperature, + top_p=top_p, + top_k=top_k, + min_p=min_p, + typical_p=typical_p, + stream=False, + stop=stop, + max_tokens=None, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + repeat_penalty=repeat_penalty, + tfs_z=tfs_z, + mirostat_mode=mirostat_mode, + mirostat_tau=mirostat_tau, + mirostat_eta=mirostat_eta, + model=model, + logits_processor=logits_processor, + grammar=llama_grammar.LlamaGrammar.from_string( + follow_up_gbnf_tool_grammar, verbose=llama.verbose + ), + ) + response = cast(llama_types.CreateCompletionResponse, response) + + tool_name = response["choices"][0]["text"][len("functions.") :] + tool = next( + (tool for tool in tools if tool["function"]["name"] == tool_name), None + ) + + # Merge completions + function_call_dict: Union[ + Dict[str, str], + Dict[ + Literal["function_call"], + llama_types.ChatCompletionRequestAssistantMessageFunctionCall, + ], + ] = ( + { + "function_call": { + "name": tool_name, + "arguments": completions[0]["choices"][0]["text"], + } + } + if len(completions) == 1 + else {} + ) + return { + "id": "chat" + completion["id"], + "object": "chat.completion", + "created": completion["created"], + "model": completion["model"], + "choices": [ + { + "finish_reason": "tool_calls", + "index": 0, + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "call_" + + f"_{i}_" + + tool_name + + "_" + + completion["id"], + "type": "function", + "function": { + "name": tool_name, + "arguments": completion["choices"][0]["text"], + }, + } + for i, (tool_name, completion) in enumerate( + zip(completions_tool_name, completions) + ) + ], + **function_call_dict, + }, + } + ], + "usage": { + "completion_tokens": sum( + ( + completion["usage"]["completion_tokens"] + if "usage" in completion + else 0 + ) + for completion in completions + ), + "prompt_tokens": sum( + completion["usage"]["prompt_tokens"] if "usage" in completion else 0 + for completion in completions + ), + "total_tokens": sum( + completion["usage"]["total_tokens"] if "usage" in completion else 0 + for completion in completions + ), + }, + } + + raise ValueError("Automatic streaming tool choice is not supported") diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 069ae10a3..63de3a93a 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -1,330 +1,4315 @@ -import sys +from __future__ import annotations + import os import ctypes -from ctypes import c_int, c_float, c_char_p, c_void_p, c_bool, POINTER, 
Structure, Array, c_uint8, c_size_t import pathlib -# Load the library -def _load_shared_library(lib_base_name): - # Determine the file extension based on the platform - if sys.platform.startswith("linux"): - lib_ext = ".so" - elif sys.platform == "darwin": - lib_ext = ".so" - elif sys.platform == "win32": - lib_ext = ".dll" - else: - raise RuntimeError("Unsupported platform") - - # Construct the paths to the possible shared library names - _base_path = pathlib.Path(__file__).parent.resolve() - # Searching for the library in the current directory under the name "libllama" (default name - # for llamacpp) and "llama" (default name for this repo) - _lib_paths = [ - _base_path / f"lib{lib_base_name}{lib_ext}", - _base_path / f"{lib_base_name}{lib_ext}" - ] +from typing import ( + Callable, + Union, + NewType, + Optional, + TYPE_CHECKING, +) - # Add the library directory to the DLL search path on Windows (if needed) - if sys.platform == "win32" and sys.version_info >= (3, 8): - os.add_dll_directory(str(_base_path)) +from llama_cpp._ctypes_extensions import ( + load_shared_library, + byref, + ctypes_function_for_shared_library, +) - # Try to load the shared library, handling potential errors - for _lib_path in _lib_paths: - if _lib_path.exists(): - try: - return ctypes.CDLL(str(_lib_path)) - except Exception as e: - raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}") +if TYPE_CHECKING: + from llama_cpp._ctypes_extensions import ( + CtypesCData, + CtypesArray, + CtypesPointer, + CtypesVoidPointer, + CtypesRef, + CtypesPointerOrRef, + CtypesFuncPointer, + ) - raise FileNotFoundError(f"Shared library with base name '{lib_base_name}' not found") # Specify the base name of the shared library to load _lib_base_name = "llama" - +_override_base_path = os.environ.get("LLAMA_CPP_LIB_PATH") +_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" if _override_base_path is None else pathlib.Path(_override_base_path) # Load the library -_lib = _load_shared_library(_lib_base_name) +_lib = load_shared_library(_lib_base_name, _base_path) + +ctypes_function = ctypes_function_for_shared_library(_lib) + + +# from ggml.h +# // NOTE: always add types at the end of the enum to keep backward compatibility +# enum ggml_type { +# GGML_TYPE_F32 = 0, +# GGML_TYPE_F16 = 1, +# GGML_TYPE_Q4_0 = 2, +# GGML_TYPE_Q4_1 = 3, +# // GGML_TYPE_Q4_2 = 4, support has been removed +# // GGML_TYPE_Q4_3 = 5, support has been removed +# GGML_TYPE_Q5_0 = 6, +# GGML_TYPE_Q5_1 = 7, +# GGML_TYPE_Q8_0 = 8, +# GGML_TYPE_Q8_1 = 9, +# GGML_TYPE_Q2_K = 10, +# GGML_TYPE_Q3_K = 11, +# GGML_TYPE_Q4_K = 12, +# GGML_TYPE_Q5_K = 13, +# GGML_TYPE_Q6_K = 14, +# GGML_TYPE_Q8_K = 15, +# GGML_TYPE_IQ2_XXS = 16, +# GGML_TYPE_IQ2_XS = 17, +# GGML_TYPE_IQ3_XXS = 18, +# GGML_TYPE_IQ1_S = 19, +# GGML_TYPE_IQ4_NL = 20, +# GGML_TYPE_IQ3_S = 21, +# GGML_TYPE_IQ2_S = 22, +# GGML_TYPE_IQ4_XS = 23, +# GGML_TYPE_I8 = 24, +# GGML_TYPE_I16 = 25, +# GGML_TYPE_I32 = 26, +# GGML_TYPE_I64 = 27, +# GGML_TYPE_F64 = 28, +# GGML_TYPE_IQ1_M = 29, +# GGML_TYPE_COUNT, +# }; +GGML_TYPE_F32 = 0 +GGML_TYPE_F16 = 1 +GGML_TYPE_Q4_0 = 2 +GGML_TYPE_Q4_1 = 3 +GGML_TYPE_Q5_0 = 6 +GGML_TYPE_Q5_1 = 7 +GGML_TYPE_Q8_0 = 8 +GGML_TYPE_Q8_1 = 9 +GGML_TYPE_Q2_K = 10 +GGML_TYPE_Q3_K = 11 +GGML_TYPE_Q4_K = 12 +GGML_TYPE_Q5_K = 13 +GGML_TYPE_Q6_K = 14 +GGML_TYPE_Q8_K = 15 +GGML_TYPE_IQ2_XXS = 16 +GGML_TYPE_IQ2_XS = 17 +GGML_TYPE_IQ3_XXS = 18 +GGML_TYPE_IQ1_S = 19 +GGML_TYPE_IQ4_NL = 20 +GGML_TYPE_IQ3_S = 21 +GGML_TYPE_IQ2_S = 22 +GGML_TYPE_IQ4_XS = 23 
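A short sketch of the `LLAMA_CPP_LIB_PATH` override introduced above: because the shared library is loaded when `llama_cpp.llama_cpp` is first imported, the variable must be set before that import, after which the loader uses the external build instead of the bundled `lib/` directory. The directory below is a placeholder.

```
import os

# Must be set before the bindings module is imported for the first time,
# since libllama is loaded at import time (see load_shared_library above).
os.environ["LLAMA_CPP_LIB_PATH"] = "/opt/llama.cpp/build/bin"  # placeholder directory

from llama_cpp import llama_cpp as bindings  # loads libllama from the path above

print("max devices:", bindings.LLAMA_MAX_DEVICES)
```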
+GGML_TYPE_I8 = 24 +GGML_TYPE_I16 = 25 +GGML_TYPE_I32 = 26 +GGML_TYPE_I64 = 27 +GGML_TYPE_F64 = 28 +GGML_TYPE_IQ1_M = 29 +GGML_TYPE_COUNT = 30 + +# from ggml-backend.h +# typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data); +ggml_backend_sched_eval_callback = ctypes.CFUNCTYPE( + ctypes.c_bool, ctypes.c_void_p, ctypes.c_bool, ctypes.c_void_p +) + +# // Abort callback +# // If not NULL, called before ggml computation +# // If it returns true, the computation is aborted +# typedef bool (*ggml_abort_callback)(void * data); +ggml_abort_callback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p) + +# llama.h bindings + +_lib.llama_max_devices.argtypes = [] +_lib.llama_max_devices.restype = ctypes.c_size_t + +LLAMA_MAX_DEVICES = _lib.llama_max_devices() + +# define LLAMA_DEFAULT_SEED 0xFFFFFFFF +LLAMA_DEFAULT_SEED = 0xFFFFFFFF + +# define LLAMA_TOKEN_NULL -1 +LLAMA_TOKEN_NULL = -1 + +# define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' +LLAMA_FILE_MAGIC_GGLA = 0x67676C61 + +# define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' +LLAMA_FILE_MAGIC_GGSN = 0x6767736E + +# define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq' +LLAMA_FILE_MAGIC_GGSQ = 0x67677371 + +# define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN +LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN +# define LLAMA_SESSION_VERSION 9 +LLAMA_SESSION_VERSION = 9 + +# define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ +LLAMA_STATE_SEQ_MAGIC = LLAMA_FILE_MAGIC_GGSQ +# define LLAMA_STATE_SEQ_VERSION 2 +LLAMA_STATE_SEQ_VERSION = 2 + +# struct llama_vocab; +llama_vocab_p = NewType("llama_vocab_p", int) +llama_vocab_p_ctypes = ctypes.c_void_p + +# struct llama_model; +llama_model_p = NewType("llama_model_p", int) +llama_model_p_ctypes = ctypes.c_void_p + +# struct llama_context; +llama_context_p = NewType("llama_context_p", int) +llama_context_p_ctypes = ctypes.c_void_p + +# # struct llama_sampler; +# llama_sampler_p = NewType("llama_sampler_p", int) +# llama_sampler_p_ctypes = ctypes.c_void_p + +# struct llama_kv_cache; +llama_kv_cache_p = NewType("llama_kv_cache_p", int) +llama_kv_cache_p_ctypes = ctypes.c_void_p + +# typedef int32_t llama_pos; +llama_pos = ctypes.c_int32 +# typedef int32_t llama_token; +llama_token = ctypes.c_int32 +llama_token_p = ctypes.POINTER(llama_token) +# typedef int32_t llama_seq_id; +llama_seq_id = ctypes.c_int32 + + +# enum llama_vocab_type { +# LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab +# LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback +# LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE +# LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece +# LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram +# LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization +# }; +LLAMA_VOCAB_TYPE_NONE = 0 +"""For models without vocab""" +LLAMA_VOCAB_TYPE_SPM = 1 +"""LLaMA tokenizer based on byte-level BPE with byte fallback""" +LLAMA_VOCAB_TYPE_BPE = 2 +"""GPT-2 tokenizer based on byte-level BPE""" +LLAMA_VOCAB_TYPE_WPM = 3 +"""BERT tokenizer based on WordPiece""" +LLAMA_VOCAB_TYPE_UGM = 4 +"""T5 tokenizer based on Unigram""" +LLAMA_VOCAB_TYPE_RWKV = 5 +"""RWKV tokenizer based on greedy tokenization""" + + +# // pre-tokenization types +# enum llama_vocab_pre_type { +# LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0, +# LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3, +# LLAMA_VOCAB_PRE_TYPE_FALCON = 4, +# LLAMA_VOCAB_PRE_TYPE_MPT = 5, +# 
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6, +# LLAMA_VOCAB_PRE_TYPE_GPT2 = 7, +# LLAMA_VOCAB_PRE_TYPE_REFACT = 8, +# LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9, +# LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10, +# LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11, +# LLAMA_VOCAB_PRE_TYPE_OLMO = 12, +# LLAMA_VOCAB_PRE_TYPE_DBRX = 13, +# LLAMA_VOCAB_PRE_TYPE_SMAUG = 14, +# LLAMA_VOCAB_PRE_TYPE_PORO = 15, +# LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16, +# LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17, +# LLAMA_VOCAB_PRE_TYPE_VIKING = 18, +# LLAMA_VOCAB_PRE_TYPE_JAIS = 19, +# LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20, +# LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21, +# LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22, +# LLAMA_VOCAB_PRE_TYPE_BLOOM = 23, +# LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, +# LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, +# LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, +# LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, +# LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, +# LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, +# LLAMA_VOCAB_PRE_TYPE_TRILLION = 31, +# LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32, +# LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33, +# LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34, +# }; +LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 +LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3 +LLAMA_VOCAB_PRE_TYPE_FALCON = 4 +LLAMA_VOCAB_PRE_TYPE_MPT = 5 +LLAMA_VOCAB_PRE_TYPE_STARCODER = 6 +LLAMA_VOCAB_PRE_TYPE_GPT2 = 7 +LLAMA_VOCAB_PRE_TYPE_REFACT = 8 +LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9 +LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10 +LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11 +LLAMA_VOCAB_PRE_TYPE_OLMO = 12 +LLAMA_VOCAB_PRE_TYPE_DBRX = 13 +LLAMA_VOCAB_PRE_TYPE_SMAUG = 14 +LLAMA_VOCAB_PRE_TYPE_PORO = 15 +LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16 +LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17 +LLAMA_VOCAB_PRE_TYPE_VIKING = 18 +LLAMA_VOCAB_PRE_TYPE_JAIS = 19 +LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20 +LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21 +LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22 +LLAMA_VOCAB_PRE_TYPE_BLOOM = 23 +LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24 +LLAMA_VOCAB_PRE_TYPE_EXAONE = 25 +LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26 +LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28 +LLAMA_VOCAB_PRE_TYPE_GPT4O = 29 +LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30 +LLAMA_VOCAB_PRE_TYPE_TRILLION = 31 +LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32 +LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33 +LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34 + + +# // note: these values should be synchronized with ggml_rope +# // TODO: maybe move this enum to ggml.h (ggml_rope_type) +# enum llama_rope_type { +# LLAMA_ROPE_TYPE_NONE = -1, +# LLAMA_ROPE_TYPE_NORM = 0, +# LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, +# LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, +# LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, +# }; +LLAMA_ROPE_TYPE_NONE = -1 +LLAMA_ROPE_TYPE_NORM = 0 +LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX = 2 +LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE = 8 +LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION = 24 + + +# enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file +# LLAMA_TOKEN_TYPE_UNDEFINED = 0, +# LLAMA_TOKEN_TYPE_NORMAL = 1, +# LLAMA_TOKEN_TYPE_UNKNOWN = 2, +# LLAMA_TOKEN_TYPE_CONTROL = 3, +# LLAMA_TOKEN_TYPE_USER_DEFINED = 4, +# LLAMA_TOKEN_TYPE_UNUSED = 5, +# LLAMA_TOKEN_TYPE_BYTE = 6, +# }; +LLAMA_TOKEN_TYPE_UNDEFINED = 0 +LLAMA_TOKEN_TYPE_NORMAL = 1 +LLAMA_TOKEN_TYPE_UNKNOWN = 2 +LLAMA_TOKEN_TYPE_CONTROL = 3 +LLAMA_TOKEN_TYPE_USER_DEFINED = 4 +LLAMA_TOKEN_TYPE_UNUSED = 5 +LLAMA_TOKEN_TYPE_BYTE = 6 + + +# enum llama_token_attr { +# LLAMA_TOKEN_ATTR_UNDEFINED = 0, +# LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 
0, +# LLAMA_TOKEN_ATTR_UNUSED = 1 << 1, +# LLAMA_TOKEN_ATTR_NORMAL = 1 << 2, +# LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL? +# LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4, +# LLAMA_TOKEN_ATTR_BYTE = 1 << 5, +# LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6, +# LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7, +# LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8, +# LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9, +# }; +LLAMA_TOKEN_ATTR_UNDEFINED = 0 +LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0 +LLAMA_TOKEN_ATTR_UNUSED = 1 << 1 +LLAMA_TOKEN_ATTR_NORMAL = 1 << 2 +LLAMA_TOKEN_ATTR_CONTROL = 1 << 3 +LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4 +LLAMA_TOKEN_ATTR_BYTE = 1 << 5 +LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6 +LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7 +LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8 +LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9 + + +# // model file types +# enum llama_ftype { +# LLAMA_FTYPE_ALL_F32 = 0, +# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors +# // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 +# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed +# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed +# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack +# LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors +# +# LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file +# }; +LLAMA_FTYPE_ALL_F32 = 0 +LLAMA_FTYPE_MOSTLY_F16 = 1 +LLAMA_FTYPE_MOSTLY_Q4_0 = 2 +LLAMA_FTYPE_MOSTLY_Q4_1 = 3 +LLAMA_FTYPE_MOSTLY_Q8_0 = 7 +LLAMA_FTYPE_MOSTLY_Q5_0 = 8 +LLAMA_FTYPE_MOSTLY_Q5_1 = 9 +LLAMA_FTYPE_MOSTLY_Q2_K = 10 +LLAMA_FTYPE_MOSTLY_Q3_K_S = 11 +LLAMA_FTYPE_MOSTLY_Q3_K_M = 12 +LLAMA_FTYPE_MOSTLY_Q3_K_L = 13 +LLAMA_FTYPE_MOSTLY_Q4_K_S = 14 +LLAMA_FTYPE_MOSTLY_Q4_K_M = 15 +LLAMA_FTYPE_MOSTLY_Q5_K_S = 16 
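Since the `LLAMA_TOKEN_ATTR_*` values above are bit flags (each defined as `1 << n`), an attribute word is meant to be combined and tested with bitwise operators. A minimal illustrative sketch follows; the `describe_token_attr` helper is hypothetical and not part of this change.

```
from typing import List

from llama_cpp import llama_cpp as bindings

def describe_token_attr(attr: int) -> List[str]:
    """Decode a llama_token_attr bitmask into readable flag names."""
    flags = {
        "control": bindings.LLAMA_TOKEN_ATTR_CONTROL,
        "user_defined": bindings.LLAMA_TOKEN_ATTR_USER_DEFINED,
        "byte": bindings.LLAMA_TOKEN_ATTR_BYTE,
        "normalized": bindings.LLAMA_TOKEN_ATTR_NORMALIZED,
        "lstrip": bindings.LLAMA_TOKEN_ATTR_LSTRIP,
        "rstrip": bindings.LLAMA_TOKEN_ATTR_RSTRIP,
        "single_word": bindings.LLAMA_TOKEN_ATTR_SINGLE_WORD,
    }
    return [name for name, bit in flags.items() if attr & bit]

# A control token that is also marked single-word:
attr = bindings.LLAMA_TOKEN_ATTR_CONTROL | bindings.LLAMA_TOKEN_ATTR_SINGLE_WORD
print(describe_token_attr(attr))  # ['control', 'single_word']
```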
+LLAMA_FTYPE_MOSTLY_Q5_K_M = 17 +LLAMA_FTYPE_MOSTLY_Q6_K = 18 +LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19 +LLAMA_FTYPE_MOSTLY_IQ2_XS = 20 +LLAMA_FTYPE_MOSTLY_Q2_K_S = 21 +LLAMA_FTYPE_MOSTLY_IQ3_XS = 22 +LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23 +LLAMA_FTYPE_MOSTLY_IQ1_S = 24 +LLAMA_FTYPE_MOSTLY_IQ4_NL = 25 +LLAMA_FTYPE_MOSTLY_IQ3_S = 26 +LLAMA_FTYPE_MOSTLY_IQ3_M = 27 +LLAMA_FTYPE_MOSTLY_IQ2_S = 28 +LLAMA_FTYPE_MOSTLY_IQ2_M = 29 +LLAMA_FTYPE_MOSTLY_IQ4_XS = 30 +LLAMA_FTYPE_MOSTLY_IQ1_M = 31 +LLAMA_FTYPE_MOSTLY_BF16 = 32 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34 +# LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35 +LLAMA_FTYPE_MOSTLY_TQ1_0 = 36 +LLAMA_FTYPE_MOSTLY_TQ2_0 = 37 +LLAMA_FTYPE_GUESSED = 1024 + +# enum llama_rope_scaling_type { +# LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1, +# LLAMA_ROPE_SCALING_TYPE_NONE = 0, +# LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, +# LLAMA_ROPE_SCALING_TYPE_YARN = 2, +# LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, +# LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN, +# }; +LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1 +LLAMA_ROPE_SCALING_TYPE_NONE = 0 +LLAMA_ROPE_SCALING_TYPE_LINEAR = 1 +LLAMA_ROPE_SCALING_TYPE_YARN = 2 +LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3 +LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN + +# enum llama_pooling_type { +# LLAMA_POOLING_TYPE_UNSPECIFIED = -1, +# LLAMA_POOLING_TYPE_NONE = 0, +# LLAMA_POOLING_TYPE_MEAN = 1, +# LLAMA_POOLING_TYPE_CLS = 2, +# LLAMA_POOLING_TYPE_LAST = 3, +# LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph +# }; +LLAMA_POOLING_TYPE_UNSPECIFIED = -1 +LLAMA_POOLING_TYPE_NONE = 0 +LLAMA_POOLING_TYPE_MEAN = 1 +LLAMA_POOLING_TYPE_CLS = 2 +LLAMA_POOLING_TYPE_LAST = 3 +LLAMA_POOLING_TYPE_RANK = 4 + +# enum llama_attention_type { +# LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1, +# LLAMA_ATTENTION_TYPE_CAUSAL = 0, +# LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1, +# }; +LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1 +LLAMA_ATTENTION_TYPE_CAUSAL = 0 +LLAMA_ATTENTION_TYPE_NON_CAUSAL = 1 + + +# enum llama_split_mode { +# LLAMA_SPLIT_MODE_NONE = 0, // single GPU +# LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs +# LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs +# }; +LLAMA_SPLIT_MODE_NONE = 0 +LLAMA_SPLIT_MODE_LAYER = 1 +LLAMA_SPLIT_MODE_ROW = 2 + + +# typedef struct llama_token_data { +# llama_token id; // token id +# float logit; // log-odds of the token +# float p; // probability of the token +# } llama_token_data; +class llama_token_data(ctypes.Structure): + """Used to store token data + + Attributes: + id (llama_token): token id + logit (float): log-odds of the token + p (float): probability of the token""" + + if TYPE_CHECKING: + id: llama_token + logit: float + p: float + + _fields_ = [ + ("id", llama_token), + ("logit", ctypes.c_float), + ("p", ctypes.c_float), + ] + + +llama_token_data_p = ctypes.POINTER(llama_token_data) + + +# typedef struct llama_token_data_array { +# // TODO: consider SoA +# // NOTE: this pointer can be modified by the samplers +# llama_token_data * data; +# size_t size; +# int64_t selected; // this is the index in the data array (i.e. not the token id) +# bool sorted; +# } llama_token_data_array; +class llama_token_data_array(ctypes.Structure): + """Used to sample tokens given logits + + Attributes: + data (ctypes.Array[llama_token_data]): token data + size (int): size of the array + selected (int): index in the data array (i.e. 
not the token id) + sorted (bool): whether the array is sorted""" + + if TYPE_CHECKING: + data: CtypesArray[llama_token_data] + size: int + selected: int + sorted: bool + + _fields_ = [ + ("data", llama_token_data_p), + ("size", ctypes.c_size_t), + ("selected", ctypes.c_int64), + ("sorted", ctypes.c_bool), + ] + + +llama_token_data_array_p = ctypes.POINTER(llama_token_data_array) + +# typedef bool (*llama_progress_callback)(float progress, void * user_data); +llama_progress_callback = ctypes.CFUNCTYPE( + ctypes.c_bool, ctypes.c_float, ctypes.c_void_p +) + + +# // Input data for llama_decode +# // A llama_batch object can contain input about one or many sequences +# // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens +# // +# // - token : the token ids of the input (used when embd is NULL) +# // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) +# // - pos : the positions of the respective token in the sequence +# // (if set to NULL, the token position will be tracked automatically by llama_decode) +# // - seq_id : the sequence to which the respective token belongs +# // (if set to NULL, the sequence ID will be assumed to be 0) +# // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output +# // (if set to NULL, only the logits for last token will be returned) +# // +# typedef struct llama_batch { +# int32_t n_tokens; -# C types -llama_context_p = c_void_p +# llama_token * token; +# float * embd; +# llama_pos * pos; +# int32_t * n_seq_id; +# llama_seq_id ** seq_id; +# int8_t * logits; // TODO: rename this to "output" +# } llama_batch; +class llama_batch(ctypes.Structure): + """Input data for llama_decode + A llama_batch object can contain input about one or many sequences -llama_token = c_int -llama_token_p = POINTER(llama_token) + The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens + Attributes: + n_tokens (int): number of tokens + token (ctypes.Array[llama_token]): the token ids of the input (used when embd is NULL) + embd (ctypes.Array[ctypes.ctypes.c_float]): token embeddings (i.e. 
float vector of size n_embd) (used when token is NULL) + pos (ctypes.Array[ctypes.Array[llama_pos]]): the positions of the respective token in the sequence + seq_id (ctypes.Array[ctypes.Array[llama_seq_id]]): the sequence to which the respective token belongs + logits (ctypes.Array[ctypes.ctypes.c_int8]): if zero, the logits for the respective token will not be output + """ -class llama_token_data(Structure): + if TYPE_CHECKING: + n_tokens: int + token: CtypesArray[llama_token] + embd: CtypesArray[ctypes.c_float] + pos: CtypesArray[CtypesArray[llama_pos]] + n_seq_id: CtypesArray[ctypes.c_int] + seq_id: CtypesArray[CtypesArray[llama_seq_id]] + logits: CtypesArray[ctypes.c_int8] + + _fields_ = [ + ("n_tokens", ctypes.c_int32), + ("token", ctypes.POINTER(llama_token)), + ("embd", ctypes.POINTER(ctypes.c_float)), + ("pos", ctypes.POINTER(llama_pos)), + ("n_seq_id", ctypes.POINTER(ctypes.c_int32)), + ("seq_id", ctypes.POINTER(ctypes.POINTER(llama_seq_id))), + ("logits", ctypes.POINTER(ctypes.c_int8)), + ] + + +# enum llama_model_kv_override_type { +# LLAMA_KV_OVERRIDE_TYPE_INT, +# LLAMA_KV_OVERRIDE_TYPE_FLOAT, +# LLAMA_KV_OVERRIDE_TYPE_BOOL, +# LLAMA_KV_OVERRIDE_TYPE_STR, +# }; +LLAMA_KV_OVERRIDE_TYPE_INT = 0 +LLAMA_KV_OVERRIDE_TYPE_FLOAT = 1 +LLAMA_KV_OVERRIDE_TYPE_BOOL = 2 +LLAMA_KV_OVERRIDE_TYPE_STR = 3 + + +# struct llama_model_kv_override { +# enum llama_model_kv_override_type tag; + +# char key[128]; + + +# union { +# int64_t val_i64; +# double val_f64; +# bool val_bool; +# char val_str[128]; +# }; +# }; +class llama_model_kv_override_value(ctypes.Union): + _fields_ = [ + ("val_i64", ctypes.c_int64), + ("val_f64", ctypes.c_double), + ("val_bool", ctypes.c_bool), + ("val_str", ctypes.c_char * 128), + ] + + if TYPE_CHECKING: + val_i64: int + val_f64: float + val_bool: bool + val_str: bytes + + +class llama_model_kv_override(ctypes.Structure): _fields_ = [ - ("id", llama_token), # token id - ("p", c_float), # probability of the token - ("plog", c_float), # log probability of the token + ("tag", ctypes.c_int), + ("key", ctypes.c_char * 128), + ("value", llama_model_kv_override_value), ] + if TYPE_CHECKING: + tag: int + key: bytes + value: Union[int, float, bool, bytes] + -llama_token_data_p = POINTER(llama_token_data) +# struct llama_model_tensor_buft_override { +# const char * pattern; +# ggml_backend_buffer_type_t buft; +# }; -llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p) +# struct llama_model_params { +# // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) +# ggml_backend_dev_t * devices; + +# // NULL-terminated list of buffer types to use for tensors that match a pattern +# const struct llama_model_tensor_buft_override * tensor_buft_overrides; + +# int32_t n_gpu_layers; // number of layers to store in VRAM +# enum llama_split_mode split_mode; // how to split the model across multiple GPUs + +# // main_gpu interpretation depends on split_mode: +# // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model +# // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results +# // LLAMA_SPLIT_MODE_LAYER: ignored +# int32_t main_gpu; + +# // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() +# const float * tensor_split; + +# // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. +# // If the provided progress_callback returns true, model loading continues. +# // If it returns false, model loading is immediately aborted. 
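As an aside, a sketch of how the `llama_progress_callback` type declared above might be wrapped from Python to observe model loading. `llama_model_default_params()` is the default-parameters binding defined further down in this module; everything else here is illustrative.

```
import ctypes

from llama_cpp import llama_cpp as bindings

# Wrap a Python function in the llama_progress_callback CFUNCTYPE declared above.
# Keep a reference to the wrapper for as long as loading can run, otherwise
# ctypes may garbage-collect the callback trampoline.
@bindings.llama_progress_callback
def on_progress(progress: float, user_data: ctypes.c_void_p) -> bool:
    print(f"loading: {progress * 100:.0f}%")
    return True  # returning False aborts model loading

params = bindings.llama_model_default_params()
params.progress_callback = on_progress
params.progress_callback_user_data = None  # NULL; no extra state needed here
# `params` would then be passed to the model-loading call together with a GGUF path.
```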
+# llama_progress_callback progress_callback; + +# // context pointer passed to the progress callback +# void * progress_callback_user_data; + +# // override key-value pairs of the model meta data +# const struct llama_model_kv_override * kv_overrides; + + +# // Keep the booleans together to avoid misalignment during copy-by-value. +# bool vocab_only; // only load the vocabulary, no weights +# bool use_mmap; // use mmap if possible +# bool use_mlock; // force system to keep model in RAM +# bool check_tensors; // validate model tensor data +# }; +class llama_model_params(ctypes.Structure): + """Parameters for llama_model + + Attributes: + devices (ctypes.Array[ggml_backend_dev_t]): NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) + tensor_buft_overrides (ctypes.Array[llama_model_tensor_buft_override]): NULL-terminated list of buffer types to use for tensors that match a pattern + n_gpu_layers (int): number of layers to store in VRAM + split_mode (int): how to split the model across multiple GPUs + main_gpu (int): the GPU that is used for the entire model. main_gpu interpretation depends on split_mode: LLAMA_SPLIT_NONE: the GPU that is used for the entire model LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results LLAMA_SPLIT_LAYER: ignored + tensor_split (ctypes.Array[ctypes.ctypes.c_float]): proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() + progress_callback (llama_progress_callback): called with a progress value between 0.0 and 1.0. Pass NULL to disable. If the provided progress_callback returns true, model loading continues. If it returns false, model loading is immediately aborted. + progress_callback_user_data (ctypes.ctypes.c_void_p): context pointer passed to the progress callback + kv_overrides (ctypes.Array[llama_model_kv_override]): override key-value pairs of the model meta data + vocab_only (bool): only load the vocabulary, no weights + use_mmap (bool): use mmap if possible + use_mlock (bool): force system to keep model in RAM + check_tensors (bool): validate model tensor data""" + + if TYPE_CHECKING: + devices: CtypesArray[ctypes.c_void_p] # NOTE: unused + tensor_buft_overrides: CtypesArray[llama_model_tensor_buft_override] # NOTE: unused + n_gpu_layers: int + split_mode: int + main_gpu: int + tensor_split: CtypesArray[ctypes.c_float] + progress_callback: Callable[[float, ctypes.c_void_p], bool] + progress_callback_user_data: ctypes.c_void_p + kv_overrides: CtypesArray[llama_model_kv_override] + vocab_only: bool + use_mmap: bool + use_mlock: bool + check_tensors: bool -class llama_context_params(Structure): _fields_ = [ - ("n_ctx", c_int), # text context - ("n_parts", c_int), # -1 for default - ("seed", c_int), # RNG seed, 0 for random - ("f16_kv", c_bool), # use fp16 for KV cache - ( - "logits_all", - c_bool, - ), # the llama_eval() call computes all logits, not just the last one - ("vocab_only", c_bool), # only load the vocabulary, no weights - ("use_mlock", c_bool), # force system to keep model in RAM - ("embedding", c_bool), # embedding mode only - # called with a progress value between 0 and 1, pass NULL to disable + ("devices", ctypes.c_void_p), # NOTE: unnused + ("tensor_buft_overrides", ctypes.c_void_p), # NOTE: unused + ("n_gpu_layers", ctypes.c_int32), + ("split_mode", ctypes.c_int), + ("main_gpu", ctypes.c_int32), + ("tensor_split", ctypes.POINTER(ctypes.c_float)), ("progress_callback", llama_progress_callback), - # context pointer passed to the 
progress callback - ("progress_callback_user_data", c_void_p), + ("progress_callback_user_data", ctypes.c_void_p), + ("kv_overrides", ctypes.POINTER(llama_model_kv_override)), + ("vocab_only", ctypes.c_bool), + ("use_mmap", ctypes.c_bool), + ("use_mlock", ctypes.c_bool), + ("check_tensors", ctypes.c_bool), + ] + + +# // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations +# // https://github.com/ggerganov/llama.cpp/pull/7544 +# struct llama_context_params { +# uint32_t n_ctx; // text context, 0 = from model +# uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode +# uint32_t n_ubatch; // physical maximum batch size +# uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models) +# int32_t n_threads; // number of threads to use for generation +# int32_t n_threads_batch; // number of threads to use for batch processing + +# enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` +# enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id +# enum llama_attention_type attention_type; // attention type to use for embeddings + +# // ref: https://github.com/ggerganov/llama.cpp/pull/2054 +# float rope_freq_base; // RoPE base frequency, 0 = from model +# float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model +# float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model +# float yarn_attn_factor; // YaRN magnitude scaling factor +# float yarn_beta_fast; // YaRN low correction dim +# float yarn_beta_slow; // YaRN high correction dim +# uint32_t yarn_orig_ctx; // YaRN original context size +# float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default) + +# ggml_backend_sched_eval_callback cb_eval; +# void * cb_eval_user_data; + +# enum ggml_type type_k; // data type for K cache [EXPERIMENTAL] +# enum ggml_type type_v; // data type for V cache [EXPERIMENTAL] + +# // Keep the booleans together to avoid misalignment during copy-by-value. +# bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) +# bool embeddings; // if true, extract embeddings (together with logits) +# bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU +# bool flash_attn; // whether to use flash attention [EXPERIMENTAL] +# bool no_perf; // whether to measure performance timings + + +# // Abort callback +# // if it returns true, execution of llama_decode() will be aborted +# // currently works only with CPU execution +# ggml_abort_callback abort_callback; +# void * abort_callback_data; +# }; +class llama_context_params(ctypes.Structure): + """Parameters for llama_context + + Attributes: + n_ctx (int): text context, 0 = from model + n_batch (int): logical maximum batch size that can be submitted to llama_decode + n_ubatch (int): physical maximum batch size + n_seq_max (int): max number of sequences (i.e. 
distinct states for recurrent models) + n_threads (int): number of threads to use for generation + n_threads_batch (int): number of threads to use for batch processing + rope_scaling_type (int): RoPE scaling type, from `enum llama_rope_scaling_type` + pooling_type (int): whether to pool (sum) embedding results by sequence id (ignored if no pooling layer) + attention_type (int): attention type to use for embeddings + rope_freq_base (float): RoPE base frequency, 0 = from model + rope_freq_scale (float): RoPE frequency scaling factor, 0 = from model + yarn_ext_factor (float): YaRN extrapolation mix factor, negative = from model + yarn_attn_factor (float): YaRN magnitude scaling factor + yarn_beta_fast (float): YaRN low correction dim + yarn_beta_slow (float): YaRN high correction dim + yarn_orig_ctx (int): YaRN original context size + defrag_thold (float): defragment the KV cache if holes/size > thold, < 0 disabled (default) + cb_eval (ggml_backend_sched_eval_callback): callback for scheduling eval + cb_eval_user_data (ctypes.ctypes.c_void_p): user data for cb_eval + type_k (int): data type for K cache + type_v (int): data type for V cache + logits_all (bool): the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) + embeddings (bool): if true, extract embeddings (together with logits) + offload_kqv (bool): whether to offload the KQV ops (including the KV cache) to GPU + flash_attn (bool): whether to use flash attention + no_perf (bool): whether to measure performance timings + abort_callback (ggml_abort_callback): abort callback if it returns true, execution of llama_decode() will be aborted + abort_callback_data (ctypes.ctypes.c_void_p): data for abort_callback + """ + + if TYPE_CHECKING: + n_ctx: int + n_batch: int + n_ubatch: int + n_seq_max: int + n_threads: int + n_threads_batch: int + rope_scaling_type: int + pooling_type: int + attention_type: int + rope_freq_base: float + rope_freq_scale: float + yarn_ext_factor: float + yarn_attn_factor: float + yarn_beta_fast: float + yarn_beta_slow: float + yarn_orig_ctx: int + defrag_thold: float + cb_eval: Callable[[ctypes.c_void_p, bool], bool] + cb_eval_user_data: ctypes.c_void_p + type_k: int + type_v: int + logits_all: bool + embeddings: bool + offload_kqv: bool + flash_attn: bool + no_perf: bool + abort_callback: Callable[[ctypes.c_void_p], bool] + abort_callback_data: ctypes.c_void_p + + _fields_ = [ + ("n_ctx", ctypes.c_uint32), + ("n_batch", ctypes.c_uint32), + ("n_ubatch", ctypes.c_uint32), + ("n_seq_max", ctypes.c_uint32), + ("n_threads", ctypes.c_int32), + ("n_threads_batch", ctypes.c_int32), + ("rope_scaling_type", ctypes.c_int), + ("pooling_type", ctypes.c_int), + ("attention_type", ctypes.c_int), + ("rope_freq_base", ctypes.c_float), + ("rope_freq_scale", ctypes.c_float), + ("yarn_ext_factor", ctypes.c_float), + ("yarn_attn_factor", ctypes.c_float), + ("yarn_beta_fast", ctypes.c_float), + ("yarn_beta_slow", ctypes.c_float), + ("yarn_orig_ctx", ctypes.c_uint32), + ("defrag_thold", ctypes.c_float), + ("cb_eval", ggml_backend_sched_eval_callback), + ("cb_eval_user_data", ctypes.c_void_p), + ("type_k", ctypes.c_int), + ("type_v", ctypes.c_int), + ("logits_all", ctypes.c_bool), + ("embeddings", ctypes.c_bool), + ("offload_kqv", ctypes.c_bool), + ("flash_attn", ctypes.c_bool), + ("no_perf", ctypes.c_bool), + ("abort_callback", ggml_abort_callback), + ("abort_callback_data", ctypes.c_void_p), + ] + + +# // Signature for logging events +# // Note that text includes the new line 
character at the end for most events. +# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it +# // if it exists. +# // It might not exist for progress report where '.' is output repeatedly. +# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); +llama_log_callback = ctypes.CFUNCTYPE( + None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p +) +"""Signature for logging events +Note that text includes the new line character at the end for most events. +If your logging mechanism cannot handle that, check if the last character is '\n' and strip it +if it exists. +It might not exist for progress report where '.' is output repeatedly.""" + + +# // model quantization parameters +# typedef struct llama_model_quantize_params { +# int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() +# enum llama_ftype ftype; // quantize to this llama_ftype +# enum ggml_type output_tensor_type; // output tensor type +# enum ggml_type token_embedding_type; // token embeddings tensor type +# bool allow_requantize; // allow quantizing non-f32/f16 tensors +# bool quantize_output_tensor; // quantize output.weight +# bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored +# bool pure; // quantize all tensors to the default type +# bool keep_split; // quantize to the same number of shards +# void * imatrix; // pointer to importance matrix data +# void * kv_overrides; // pointer to vector containing overrides +# void * tensor_types; // pointer to vector containing tensor types +# } llama_model_quantize_params; +class llama_model_quantize_params(ctypes.Structure): + """Parameters for llama_model_quantize + + Attributes: + nthread (int): number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() + ftype (int): quantize to this llama_ftype + output_tensor_type (int): output tensor type + token_embedding_type (int): token embeddings tensor type + allow_requantize (bool): allow quantizing non-f32/f16 tensors + quantize_output_tensor (bool): quantize output.weight + only_copy (bool): only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored + pure (bool): quantize all tensors to the default type + keep_split (bool): quantize to the same number of shards + imatrix (ctypes.c_void_p): pointer to importance matrix data + kv_overrides (ctypes.c_void_p): pointer to vector containing overrides + tensor_types (ctypes.c_void_p): pointer to vector containing tensor types + """ + + if TYPE_CHECKING: + nthread: int + ftype: int + output_tensor_type: int + token_embedding_type: int + allow_requantize: bool + quantize_output_tensor: bool + only_copy: bool + pure: bool + keep_split: bool + imatrix: ctypes.c_void_p + kv_overrides: ctypes.c_void_p + tensor_types: ctypes.c_void_p + + _fields_ = [ + ("nthread", ctypes.c_int32), + ("ftype", ctypes.c_int), + ("output_tensor_type", ctypes.c_int), + ("token_embedding_type", ctypes.c_int), + ("allow_requantize", ctypes.c_bool), + ("quantize_output_tensor", ctypes.c_bool), + ("only_copy", ctypes.c_bool), + ("pure", ctypes.c_bool), + ("keep_split", ctypes.c_bool), + ("imatrix", ctypes.c_void_p), + ("kv_overrides", ctypes.c_void_p), + ("tensor_types", ctypes.c_void_p), + ] + + +# typedef struct llama_logit_bias { +# llama_token token; +# float bias; +# } llama_logit_bias; +class llama_logit_bias(ctypes.Structure): + """Used to store logit bias + + 
Attributes: + token (llama_token): token id + bias (float): bias""" + + if TYPE_CHECKING: + token: llama_token + bias: float + + _fields_ = [ + ("token", llama_token), + ("bias", ctypes.c_float), + ] + + +llama_logit_bias_p = ctypes.POINTER(llama_logit_bias) + + +# typedef struct llama_sampler_chain_params { +# bool no_perf; // whether to measure performance timings +# } llama_sampler_chain_params; +class llama_sampler_chain_params(ctypes.Structure): + """Parameters for llama_sampler_chain + + Attributes: + no_perf (bool): whether to measure performance timings""" + + if TYPE_CHECKING: + no_perf: bool + + _fields_ = [ + ("no_perf", ctypes.c_bool), + ] + + +# // used in chat template +# typedef struct llama_chat_message { +# const char * role; +# const char * content; +# } llama_chat_message; +class llama_chat_message(ctypes.Structure): + _fields_ = [ + ("role", ctypes.c_char_p), + ("content", ctypes.c_char_p), ] -llama_context_params_p = POINTER(llama_context_params) +# // lora adapter +# struct llama_adapter_lora; +llama_adapter_lora_p = ctypes.c_void_p +llama_adapter_lora_p_ctypes = ctypes.POINTER(ctypes.c_void_p) -# Functions +# // Helpers for getting default parameters +# LLAMA_API struct llama_model_params llama_model_default_params(void); +@ctypes_function( + "llama_model_default_params", + [], + llama_model_params, +) +def llama_model_default_params() -> llama_model_params: + """Get default parameters for llama_model""" + ... +# LLAMA_API struct llama_context_params llama_context_default_params(void); +@ctypes_function( + "llama_context_default_params", + [], + llama_context_params, +) def llama_context_default_params() -> llama_context_params: - return _lib.llama_context_default_params() + """Get default parameters for llama_context""" + ... -_lib.llama_context_default_params.argtypes = [] -_lib.llama_context_default_params.restype = llama_context_params +# LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void); +@ctypes_function( + "llama_sampler_chain_default_params", + [], + llama_sampler_chain_params, +) +def llama_sampler_chain_default_params() -> llama_sampler_chain_params: + """Get default parameters for llama_sampler_chain""" + ... -# Various functions for loading a ggml llama model. -# Allocate (almost) all memory needed for the model. -# Return NULL on failure -def llama_init_from_file( - path_model: bytes, params: llama_context_params -) -> llama_context_p: - return _lib.llama_init_from_file(path_model, params) +# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); +@ctypes_function( + "llama_model_quantize_default_params", + [], + llama_model_quantize_params, +) +def llama_model_quantize_default_params() -> llama_model_quantize_params: + """Get default parameters for llama_model_quantize""" + ... -_lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params] -_lib.llama_init_from_file.restype = llama_context_p +# // Initialize the llama + ggml backend +# // If numa is true, use NUMA optimizations +# // Call once at the start of the program +# LLAMA_API void llama_backend_init(bool numa); +# LLAMA_API void llama_backend_init(void); +@ctypes_function( + "llama_backend_init", + [], + None, +) +def llama_backend_init(): + """Initialize the llama + ggml backend + If numa is true, use NUMA optimizations + Call once at the start of the program""" + ... 
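
The default-parameter helpers above return mutable ctypes structures, so the usual pattern is to fetch the defaults and override only the fields you care about before passing them on. Below is a minimal sketch (not part of the diff), assuming the low-level bindings are importable as `llama_cpp`; the field values are purely illustrative:

```python
import llama_cpp

# Initialize the llama + ggml backend once per process.
llama_cpp.llama_backend_init()

# Fetch default model parameters and override selected fields.
mparams = llama_cpp.llama_model_default_params()
mparams.n_gpu_layers = 0      # keep everything on the CPU (illustrative choice)
mparams.use_mmap = True
mparams.vocab_only = False

# Fetch default context parameters and override selected fields.
cparams = llama_cpp.llama_context_default_params()
cparams.n_ctx = 4096          # 0 would mean "use the model's training context"
cparams.n_threads = 8

print(mparams.n_gpu_layers, cparams.n_ctx)
```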
-# Frees all allocated memory -def llama_free(ctx: llama_context_p): - _lib.llama_free(ctx) +# // numa strategies +# enum ggml_numa_strategy { +# GGML_NUMA_STRATEGY_DISABLED = 0, +# GGML_NUMA_STRATEGY_DISTRIBUTE = 1, +# GGML_NUMA_STRATEGY_ISOLATE = 2, +# GGML_NUMA_STRATEGY_NUMACTL = 3, +# GGML_NUMA_STRATEGY_MIRROR = 4, +# GGML_NUMA_STRATEGY_COUNT +# }; +GGML_NUMA_STRATEGY_DISABLED = 0 +GGML_NUMA_STRATEGY_DISTRIBUTE = 1 +GGML_NUMA_STRATEGY_ISOLATE = 2 +GGML_NUMA_STRATEGY_NUMACTL = 3 +GGML_NUMA_STRATEGY_MIRROR = 4 +GGML_NUMA_STRATEGY_COUNT = 5 -_lib.llama_free.argtypes = [llama_context_p] -_lib.llama_free.restype = None +# // Call once at the end of the program - currently only used for MPI +# LLAMA_API void llama_backend_free(void); +@ctypes_function( + "llama_backend_free", + [], + None, +) +def llama_backend_free(): + """Call once at the end of the program - currently only used for MPI""" + ... -# TODO: not great API - very likely to change -# Returns 0 on success -def llama_model_quantize( - fname_inp: bytes, fname_out: bytes, itype: c_int -) -> c_int: - return _lib.llama_model_quantize(fname_inp, fname_out, itype) +# //optional: +# LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); +@ctypes_function( + "llama_numa_init", + [ctypes.c_int], + None, +) +def llama_numa_init(numa: int, /): + ... -_lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int] -_lib.llama_model_quantize.restype = c_int +# // Optional: an auto threadpool gets created in ggml if not passed explicitly +# LLAMA_API void llama_attach_threadpool( +# struct llama_context * ctx, +# ggml_threadpool_t threadpool, +# ggml_threadpool_t threadpool_batch); +# TODO: Add llama_attach_threadpool -# Returns the KV cache that will contain the context for the -# ongoing prediction with the model. -def llama_get_kv_cache(ctx: llama_context_p): - return _lib.llama_get_kv_cache(ctx) -_lib.llama_get_kv_cache.argtypes = [llama_context_p] -_lib.llama_get_kv_cache.restype = POINTER(c_uint8) +# LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); +# TODO: Add llama_detach_threadpool -# Returns the size of the KV cache -def llama_get_kv_cache_size(ctx: llama_context_p) -> c_size_t: - return _lib.llama_get_kv_cache_size(ctx) -_lib.llama_get_kv_cache_size.argtypes = [llama_context_p] -_lib.llama_get_kv_cache_size.restype = c_size_t +# DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( +# const char * path_model, +# struct llama_model_params params), +# "use llama_model_load_from_file instead"); +@ctypes_function( + "llama_load_model_from_file", + [ctypes.c_char_p, llama_model_params], + llama_model_p_ctypes, +) +def llama_load_model_from_file( + path_model: bytes, params: llama_model_params, / +) -> Optional[llama_model_p]: + ... 
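
`llama_numa_init` takes one of the `GGML_NUMA_STRATEGY_*` constants defined above, and `llama_backend_free` is the matching teardown for `llama_backend_init`. A hedged sketch of the setup/teardown pairing (not part of the diff; assumes the bindings are importable as `llama_cpp`):

```python
import llama_cpp

llama_cpp.llama_backend_init()
# Optional: pick a NUMA strategy; DISTRIBUTE spreads work across NUMA nodes.
llama_cpp.llama_numa_init(llama_cpp.GGML_NUMA_STRATEGY_DISTRIBUTE)

try:
    pass  # ... load models, create contexts, run inference ...
finally:
    # Release backend resources once at the end of the program.
    llama_cpp.llama_backend_free()
```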
-# Returns the number of tokens in the KV cache -def llama_get_kv_cache_token_count(ctx: llama_context_p) -> c_int: - return _lib.llama_get_kv_cache_token_count(ctx) -_lib.llama_get_kv_cache_token_count.argtypes = [llama_context_p] -_lib.llama_get_kv_cache_token_count.restype = c_int +# // Load the model from a file +# // If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf +# // If the split file name does not follow this pattern, use llama_model_load_from_splits +# LLAMA_API struct llama_model * llama_model_load_from_file( +# const char * path_model, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_file", + [ctypes.c_char_p, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_file( + path_model: bytes, params: llama_model_params, / +) -> Optional[llama_model_p]: + """Load the model from a file + If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf -# Sets the KV cache containing the current context for the model -def llama_set_kv_cache(ctx: llama_context_p, kv_cache, n_size: c_size_t, n_token_count: c_int): - return _lib.llama_set_kv_cache(ctx, kv_cache, n_size, n_token_count) + If the split file name does not follow this pattern, use llama_model_load_from_splits""" + ... -_lib.llama_set_kv_cache.argtypes = [llama_context_p, POINTER(c_uint8), c_size_t, c_int] -_lib.llama_set_kv_cache.restype = None +# // Load the model from multiple splits (support custom naming scheme) +# // The paths must be in the correct order +# LLAMA_API struct llama_model * llama_model_load_from_splits( +# const char ** paths, +# size_t n_paths, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_splits", + [ctypes.POINTER(ctypes.c_char_p), ctypes.c_size_t, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_splits( + paths: List[bytes], n_paths: int, params: llama_model_params, / +) -> Optional[llama_model_p]: + """Load the model from multiple splits (support custom naming scheme) -# Run the llama inference to obtain the logits and probabilities for the next token. -# tokens + n_tokens is the provided batch of new tokens to process -# n_past is the number of tokens to use from previous eval calls -# Returns 0 on success -def llama_eval( - ctx: llama_context_p, - tokens, # type: Array[llama_token] - n_tokens: c_int, - n_past: c_int, - n_threads: c_int, -) -> c_int: - return _lib.llama_eval(ctx, tokens, n_tokens, n_past, n_threads) + The paths must be in the correct order""" + ... -_lib.llama_eval.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_int] -_lib.llama_eval.restype = c_int +# LLAMA_API void llama_free_model(struct llama_model * model); +@ctypes_function( + "llama_free_model", + [llama_model_p_ctypes], + None, +) +def llama_free_model(model: llama_model_p, /): + ... -# Convert the provided text into tokens. -# The tokens pointer must be large enough to hold the resulting tokens. -# Returns the number of tokens on success, no more than n_max_tokens -# Returns a negative number on failure - the number of tokens that would have been returned -# TODO: not sure if correct -def llama_tokenize( +# LLAMA_API void llama_model_free(struct llama_model * model); +@ctypes_function( + "llama_model_free", + [llama_model_p_ctypes], + None, +) +def llama_model_free(model: llama_model_p, /): + ... 
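
Because the loaders return `None` on failure (a NULL pointer) and `llama_model_load_from_splits` takes a `const char **`, the split paths have to be marshalled into a `ctypes.c_char_p` array by hand. A sketch under those assumptions; the file names are hypothetical and not from the diff:

```python
import ctypes
import llama_cpp

llama_cpp.llama_backend_init()
params = llama_cpp.llama_model_default_params()

# Single-file (or -%05d-of-%05d.gguf named) model.
model = llama_cpp.llama_model_load_from_file(b"/models/example-7b.gguf", params)
if model is None:
    raise RuntimeError("failed to load model")

# Custom-named splits: build a char** array and pass its length explicitly.
split_paths = [b"/models/example-00001.gguf", b"/models/example-00002.gguf"]
c_paths = (ctypes.c_char_p * len(split_paths))(*split_paths)
split_model = llama_cpp.llama_model_load_from_splits(c_paths, len(split_paths), params)

llama_cpp.llama_model_free(model)
if split_model is not None:
    llama_cpp.llama_model_free(split_model)
```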
+ + +# LLAMA_API struct llama_context * llama_init_from_model( +# struct llama_model * model, +# struct llama_context_params params); +@ctypes_function( + "llama_init_from_model", + [llama_model_p_ctypes, llama_context_params], + llama_context_p_ctypes, +) +def llama_init_from_model( + model: llama_model_p, params: llama_context_params, / +) -> Optional[llama_context_p]: + ... + + +# DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model( +# struct llama_model * model, +# struct llama_context_params params), +# "use llama_init_from_model instead"); +@ctypes_function( + "llama_new_context_with_model", + [llama_model_p_ctypes, llama_context_params], + llama_context_p_ctypes, +) +def llama_new_context_with_model( + model: llama_model_p, params: llama_context_params, / +) -> Optional[llama_context_p]: + ... + + +# // Frees all allocated memory +# LLAMA_API void llama_free(struct llama_context * ctx); +@ctypes_function( + "llama_free", + [llama_context_p_ctypes], + None, +) +def llama_free(ctx: llama_context_p, /): + """Frees all allocated memory""" + ... + + +# LLAMA_API int64_t llama_time_us(void); +@ctypes_function( + "llama_time_us", + [], + ctypes.c_int64, +) +def llama_time_us() -> int: + ... + + +# LLAMA_API size_t llama_max_devices(void); +@ctypes_function("llama_max_devices", [], ctypes.c_size_t) +def llama_max_devices() -> int: + ... + + +# LLAMA_API bool llama_supports_mmap (void); +@ctypes_function("llama_supports_mmap", [], ctypes.c_bool) +def llama_supports_mmap() -> bool: + ... + + +# LLAMA_API bool llama_supports_mlock (void); +@ctypes_function("llama_supports_mlock", [], ctypes.c_bool) +def llama_supports_mlock() -> bool: + ... + + +# LLAMA_API bool llama_supports_gpu_offload(void); +@ctypes_function("llama_supports_gpu_offload", [], ctypes.c_bool) +def llama_supports_gpu_offload() -> bool: + ... + + +# LLAMA_API bool llama_supports_rpc (void); +@ctypes_function("llama_supports_rpc", [], ctypes.c_bool) +def llama_supports_rpc() -> bool: + ... + + +# LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx); +@ctypes_function("llama_n_ctx", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_ctx(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx); +@ctypes_function("llama_n_batch", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_batch(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); +@ctypes_function("llama_n_ubatch", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_ubatch(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); +@ctypes_function("llama_n_seq_max", [llama_context_p_ctypes], ctypes.c_uint32) +def llama_n_seq_max(ctx: llama_context_p, /) -> int: + ... + + + + +# DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead"); +@ctypes_function("llama_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_ctx_train(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead"); +@ctypes_function("llama_n_embd", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_embd(model: llama_model_p, /) -> int: + ... 
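
`llama_init_from_model` is the non-deprecated way to create a context, and the getters directly above report what was actually allocated, which can differ from the request. A minimal sketch (not part of the diff), assuming `model` was loaded as in the earlier sketch:

```python
import llama_cpp

def create_context(model, n_ctx: int = 2048):
    """Create a llama_context for `model`; the caller must llama_free() it."""
    cparams = llama_cpp.llama_context_default_params()
    cparams.n_ctx = n_ctx      # 0 would mean "use the model's training context"
    cparams.n_threads = 4
    ctx = llama_cpp.llama_init_from_model(model, cparams)
    if ctx is None:
        raise RuntimeError("failed to create context")
    # Report the sizes the context actually ended up with.
    print("n_ctx:", llama_cpp.llama_n_ctx(ctx),
          "n_batch:", llama_cpp.llama_n_batch(ctx),
          "n_seq_max:", llama_cpp.llama_n_seq_max(ctx))
    return ctx
```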
+ + +# DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead"); +@ctypes_function("llama_n_layer", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_layer(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead"); +@ctypes_function("llama_n_head", [llama_model_p_ctypes], ctypes.c_int32) +def llama_n_head(model: llama_model_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); +@ctypes_function("llama_n_vocab", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_n_vocab(model: llama_vocab_p, /) -> int: + ... + + +# LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); +@ctypes_function("llama_get_model", [llama_context_p_ctypes], llama_model_p_ctypes) +def llama_get_model(ctx: llama_context_p, /) -> Optional[llama_model_p]: + ... + + +# LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx); +@ctypes_function( + "llama_get_kv_self", + [llama_context_p_ctypes], + llama_kv_cache_p_ctypes, +) +def llama_get_kv_self(ctx: llama_context_p, /) -> Optional[llama_kv_cache_p]: + """Get the KV cache for self-attention""" + ... + + +# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); +@ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int) +def llama_pooling_type(ctx: llama_context_p, /) -> int: + ... + + +# LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); +@ctypes_function("llama_model_get_vocab", [llama_model_p_ctypes], llama_vocab_p_ctypes) +def llama_model_get_vocab(model: llama_model_p, /) -> Optional[llama_vocab_p]: + ... + + +# LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); +@ctypes_function("llama_model_rope_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_model_rope_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model); +@ctypes_function("llama_model_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_ctx_train(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model); +@ctypes_function("llama_model_n_embd", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_embd(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); +@ctypes_function("llama_model_n_layer", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_layer(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); +@ctypes_function("llama_model_n_head", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model); +@ctypes_function("llama_model_n_head_kv", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head_kv(model: llama_model_p, /) -> int: + ... 
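
The `llama_model_n_*` getters replace the deprecated `llama_n_*` variants and take the model handle rather than a context. A small sketch (not part of the diff) that collects basic architecture facts for an already loaded `model`:

```python
import llama_cpp

def describe_model(model) -> dict:
    """Collect a few architecture facts via the non-deprecated getters."""
    vocab = llama_cpp.llama_model_get_vocab(model)
    return {
        "n_ctx_train": llama_cpp.llama_model_n_ctx_train(model),
        "n_embd": llama_cpp.llama_model_n_embd(model),
        "n_layer": llama_cpp.llama_model_n_layer(model),
        "n_head": llama_cpp.llama_model_n_head(model),
        "n_head_kv": llama_cpp.llama_model_n_head_kv(model),
        # llama_n_vocab is marked deprecated in favour of llama_vocab_n_tokens,
        # but it is the vocab-size accessor declared up to this point.
        "n_vocab": llama_cpp.llama_n_vocab(vocab) if vocab is not None else -1,
    }
```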
+ + +# // Get the model's RoPE frequency scaling factor +# LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); +@ctypes_function("llama_model_rope_freq_scale_train", [llama_model_p_ctypes], ctypes.c_float) +def llama_model_rope_freq_scale_train(model: llama_model_p, /) -> float: + ... + + +# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); +@ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_vocab_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab); +@ctypes_function("llama_vocab_n_tokens", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_vocab_n_tokens(vocab: llama_vocab_p, /) -> int: + ... + + +# // Functions to access the model's GGUF metadata scalar values +# // - The functions return the length of the string on success, or -1 on failure +# // - The output string is always null-terminated and cleared on failure +# // - When retrieving a string, an extra byte must be allocated to account for the null terminator +# // - GGUF array values are not supported by these functions + + +# // Get metadata value as a string by key name +# LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_val_str", + [ + llama_model_p_ctypes, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_val_str( + model: llama_model_p, + key: Union[ctypes.c_char_p, bytes], + buf: bytes, + buf_size: int, + /, +) -> int: + """Get metadata value as a string by key name""" + ... + + +# // Get the number of metadata key/value pairs +# LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model); +@ctypes_function("llama_model_meta_count", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_meta_count(model: llama_model_p, /) -> int: + """Get the number of metadata key/value pairs""" + ... + + +# // Get metadata key name by index +# LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_key_by_index", + [ + llama_model_p_ctypes, + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_key_by_index( + model: llama_model_p, + i: Union[ctypes.c_int, int], + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: int, + /, +) -> int: + """Get metadata key name by index""" + ... + + +# // Get metadata value as a string by index +# LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_meta_val_str_by_index", + [ + llama_model_p_ctypes, + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_model_meta_val_str_by_index( + model: llama_model_p, + i: Union[ctypes.c_int, int], + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: int, + /, +) -> int: + """Get metadata value as a string by index""" + ... 
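
The metadata helpers write into a caller-supplied buffer and return the string length (or -1 on failure), so the usual ctypes pattern is `create_string_buffer`. A sketch, not part of the diff; the key name is only an example:

```python
import ctypes
from typing import Dict, Optional

import llama_cpp

def read_metadata(model) -> Dict[str, str]:
    """Return all GGUF metadata key/value pairs as Python strings."""
    meta: Dict[str, str] = {}
    buf = ctypes.create_string_buffer(4096)  # leaves room for the null terminator
    for i in range(llama_cpp.llama_model_meta_count(model)):
        if llama_cpp.llama_model_meta_key_by_index(model, i, buf, ctypes.sizeof(buf)) >= 0:
            key = buf.value.decode("utf-8", errors="replace")
            if llama_cpp.llama_model_meta_val_str_by_index(model, i, buf, ctypes.sizeof(buf)) >= 0:
                meta[key] = buf.value.decode("utf-8", errors="replace")
    return meta

def read_metadata_value(model, key: bytes) -> Optional[str]:
    """Look up a single value by key, e.g. b"general.architecture"."""
    buf = ctypes.create_string_buffer(1024)
    n = llama_cpp.llama_model_meta_val_str(model, key, buf, ctypes.sizeof(buf))
    return buf.value.decode("utf-8", errors="replace") if n >= 0 else None
```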
+ + +# // Get a string describing the model type +# LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); +@ctypes_function( + "llama_model_desc", + [llama_model_p_ctypes, ctypes.c_char_p, ctypes.c_size_t], + ctypes.c_int32, +) +def llama_model_desc( + model: llama_model_p, + buf: Union[bytes, CtypesArray[ctypes.c_char]], + buf_size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Get a string describing the model type""" + ... + + +# // Returns the total size of all the tensors in the model in bytes +# LLAMA_API uint64_t llama_model_size(const struct llama_model * model); +@ctypes_function("llama_model_size", [llama_model_p_ctypes], ctypes.c_uint64) +def llama_model_size(model: llama_model_p, /) -> int: + """Returns the total size of all the tensors in the model in bytes""" + ... + + +# // Get the default chat template. Returns nullptr if not available +# // If name is NULL, returns the default chat template +# LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name); +@ctypes_function("llama_model_chat_template", [llama_model_p_ctypes, ctypes.c_char_p], ctypes.c_char_p) +def llama_model_chat_template(model: llama_model_p, name: Optional[bytes], /) -> Optional[bytes]: + """Get the default chat template. Returns None if not available + If name is None, returns the default chat template""" + ... + + +# // Returns the total number of parameters in the model +# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); +@ctypes_function("llama_model_n_params", [llama_model_p_ctypes], ctypes.c_uint64) +def llama_model_n_params(model: llama_model_p, /) -> int: + """Returns the total number of parameters in the model""" + ... + + +# // Returns true if the model contains an encoder that requires llama_encode() call +# LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); +@ctypes_function("llama_model_has_encoder", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_has_encoder(model: llama_model_p, /) -> bool: + """Returns true if the model contains an encoder that requires llama_encode() call""" + ... + + +# // Returns true if the model contains a decoder that requires llama_decode() call +# LLAMA_API bool llama_model_has_decoder(const struct llama_model * model); +@ctypes_function("llama_model_has_decoder", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_has_decoder(model: llama_model_p, /) -> bool: + """Returns true if the model contains a decoder that requires llama_decode() call""" + ... + + +# // For encoder-decoder models, this function returns id of the token that must be provided +# // to the decoder to start generating output sequence. For other models, it returns -1. +# LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model); +@ctypes_function( + "llama_model_decoder_start_token", [llama_model_p_ctypes], ctypes.c_int32 +) +def llama_model_decoder_start_token(model: llama_model_p, /) -> int: + """For encoder-decoder models, this function returns id of the token that must be provided + to the decoder to start generating output sequence. For other models, it returns -1. + """ + ... + + +# // Returns true if the model is recurrent (like Mamba, RWKV, etc.) 
+# LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model); +@ctypes_function("llama_model_is_recurrent", [llama_model_p_ctypes], ctypes.c_bool) +def llama_model_is_recurrent(model: llama_model_p, /) -> bool: + """Returns true if the model is recurrent (like Mamba, RWKV, etc.)""" + ... + + +# // Returns 0 on success +# LLAMA_API uint32_t llama_model_quantize( +# const char * fname_inp, +# const char * fname_out, +# const llama_model_quantize_params * params); +@ctypes_function( + "llama_model_quantize", + [ + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.POINTER(llama_model_quantize_params), + ], + ctypes.c_uint32, +) +def llama_model_quantize( + fname_inp: bytes, + fname_out: bytes, + params: CtypesPointerOrRef[llama_model_quantize_params], + /, +) -> int: + """Returns 0 on success""" + ... + + +# // Load a LoRA adapter from file +# LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init( +# struct llama_model * model, +# const char * path_lora); +@ctypes_function( + "llama_adapter_lora_init", + [llama_model_p_ctypes, ctypes.c_char_p], + llama_adapter_lora_p_ctypes, +) +def llama_adapter_lora_init( + model: llama_model_p, path_lora: bytes, / +) -> Optional[llama_adapter_lora_p]: + ... + + +# // Manually free a LoRA adapter +# // Note: loaded adapters will be free when the associated model is deleted +# LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter); +@ctypes_function( + "llama_adapter_lora_free", + [llama_adapter_lora_p_ctypes], + None, +) +def llama_adapter_lora_free(adapter: llama_adapter_lora_p, /): + ... + + +# // The following functions operate on a llama_context, hence the naming: llama_verb_... + + +# // Add a loaded LoRA adapter to given context +# // This will not modify model's weight +# LLAMA_API int32_t llama_set_adapter_lora( +# struct llama_context * ctx, +# struct llama_adapter_lora * adapter, +# float scale); +@ctypes_function( + "llama_set_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes, ctypes.c_float], + ctypes.c_int32, +) +def llama_set_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, scale: float, / +) -> int: + """Add a loaded LoRA adapter to given context + This will not modify model's weight""" + ... + + +# // Remove a specific LoRA adapter from given context +# // Return -1 if the adapter is not present in the context +# LLAMA_API int32_t llama_rm_adapter_lora( +# struct llama_context * ctx, +# struct llama_adapter_lora * adapter); +@ctypes_function( + "llama_rm_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes], + ctypes.c_int32, +) +def llama_rm_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, / +) -> int: + """Remove a specific LoRA adapter from given context + Return -1 if the adapter is not present in the context""" + ... + + +# // Remove all LoRA adapters from given context +# LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx); +@ctypes_function( + "llama_clear_adapter_lora", + [llama_context_p_ctypes], + None, +) +def llama_clear_adapter_lora(ctx: llama_context_p, /): + """Remove all LoRA adapters from given context""" + ... + + +# // Apply a loaded control vector to a llama_context, or if data is NULL, clear +# // the currently loaded vector. +# // n_embd should be the size of a single layer's control, and data should point +# // to an n_embd x n_layers buffer starting from layer 1. 
+# // il_start and il_end are the layer range the vector should apply to (both inclusive) +# // See llama_control_vector_load in common to load a control vector. +# LLAMA_API int32_t llama_apply_adapter_cvec( +# struct llama_context * ctx, +# const float * data, +# size_t len, +# int32_t n_embd, +# int32_t il_start, +# int32_t il_end); +@ctypes_function( + "llama_apply_adapter_cvec", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_float), + ctypes.c_size_t, + ctypes.c_int32, + ctypes.c_int32, + ctypes.c_int32, + ], + ctypes.c_int32, +) +def llama_apply_adapter_cvec( ctx: llama_context_p, - text: bytes, - tokens, # type: Array[llama_token] - n_max_tokens: c_int, - add_bos: c_bool, -) -> c_int: - return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos) + data: CtypesPointerOrRef[ctypes.c_float], + len: int, + n_embd: int, + il_start: int, + il_end: int, + /, +) -> int: + """Apply a loaded control vector to a llama_context, or if data is NULL, clear + the currently loaded vector. + n_embd should be the size of a single layer's control, and data should point + to an n_embd x n_layers buffer starting from layer 1. + il_start and il_end are the layer range the vector should apply to (both inclusive) + See llama_control_vector_load in common to load a control vector.""" + ... + +# // +# // KV cache +# // -_lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool] -_lib.llama_tokenize.restype = c_int +# // Information associated with an individual cell in the KV cache view. +# struct llama_kv_cache_view_cell { +# // The position for this cell. Takes KV cache shifts into account. +# // May be negative if the cell is not populated. +# llama_pos pos; +# }; +class llama_kv_cache_view_cell(ctypes.Structure): + """Information associated with an individual cell in the KV cache view. -def llama_n_vocab(ctx: llama_context_p) -> c_int: - return _lib.llama_n_vocab(ctx) + Attributes: + pos (llama_pos): The position for this cell. Takes KV cache shifts into account. + May be negative if the cell is not populated.""" + if TYPE_CHECKING: + pos: llama_pos -_lib.llama_n_vocab.argtypes = [llama_context_p] -_lib.llama_n_vocab.restype = c_int + _fields_ = [("pos", llama_pos)] -def llama_n_ctx(ctx: llama_context_p) -> c_int: - return _lib.llama_n_ctx(ctx) +# // An updateable view of the KV cache. +# struct llama_kv_cache_view { +# // Number of KV cache cells. This will be the same as the context size. +# int32_t n_cells; +# // Maximum number of sequences that can exist in a cell. It's not an error +# // if there are more sequences in a cell than this value, however they will +# // not be visible in the view cells_sequences. +# int32_t n_seq_max; -_lib.llama_n_ctx.argtypes = [llama_context_p] -_lib.llama_n_ctx.restype = c_int +# // Number of tokens in the cache. For example, if there are two populated +# // cells, the first with 1 sequence id in it and the second with 2 sequence +# // ids then you'll have 3 tokens. +# int32_t token_count; +# // Number of populated cache cells. +# int32_t used_cells; -def llama_n_embd(ctx: llama_context_p) -> c_int: - return _lib.llama_n_embd(ctx) +# // Maximum contiguous empty slots in the cache. +# int32_t max_contiguous; +# // Index to the start of the max_contiguous slot range. Can be negative +# // when cache is full. +# int32_t max_contiguous_idx; -_lib.llama_n_embd.argtypes = [llama_context_p] -_lib.llama_n_embd.restype = c_int +# // Information for an individual cell. 
+# struct llama_kv_cache_view_cell * cells; -# Token logits obtained from the last call to llama_eval() -# The logits for the last token are stored in the last row -# Can be mutated in order to change the probabilities of the next token -# Rows: n_tokens -# Cols: n_vocab -def llama_get_logits(ctx: llama_context_p): - return _lib.llama_get_logits(ctx) +# // The sequences for each cell. There will be n_seq_max items per cell. +# llama_seq_id * cells_sequences; +# }; +class llama_kv_cache_view(ctypes.Structure): + if TYPE_CHECKING: + n_cells: int + n_max_seq: int + token_count: int + used_cells: int + max_contiguous: int + max_contiguous_idx: int + cells: CtypesArray[llama_kv_cache_view_cell] + cells_sequences: CtypesArray[llama_seq_id] + + _fields_ = [ + ("n_cells", ctypes.c_int32), + ("n_max_seq", ctypes.c_int32), + ("token_count", ctypes.c_int32), + ("used_cells", ctypes.c_int32), + ("max_contiguous", ctypes.c_int32), + ("max_contiguous_idx", ctypes.c_int32), + ("cells", ctypes.POINTER(llama_kv_cache_view_cell)), + ("cells_sequences", ctypes.POINTER(llama_seq_id)), + ] -_lib.llama_get_logits.argtypes = [llama_context_p] -_lib.llama_get_logits.restype = POINTER(c_float) +llama_kv_cache_view_p = ctypes.POINTER(llama_kv_cache_view) -# Get the embeddings for the input -# shape: [n_embd] (1-dimensional) -def llama_get_embeddings(ctx: llama_context_p): - return _lib.llama_get_embeddings(ctx) +# // Create an empty KV cache view. (use only for debugging purposes) +# LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max); +@ctypes_function( + "llama_kv_cache_view_init", + [llama_context_p_ctypes, ctypes.c_int32], + llama_kv_cache_view, +) +def llama_kv_cache_view_init( + ctx: llama_context_p, n_seq_max: Union[ctypes.c_int32, int], / +) -> llama_kv_cache_view: + """Create an empty KV cache view. (use only for debugging purposes)""" + ... -_lib.llama_get_embeddings.argtypes = [llama_context_p] -_lib.llama_get_embeddings.restype = POINTER(c_float) +# // Free a KV cache view. (use only for debugging purposes) +# LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); +@ctypes_function("llama_kv_cache_view_free", [llama_kv_cache_view_p], None) +def llama_kv_cache_view_free(view: "ctypes.pointer[llama_kv_cache_view]", /): # type: ignore + """Free a KV cache view. (use only for debugging purposes)""" + ... -# Token Id -> String. Uses the vocabulary in the provided context -def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes: - return _lib.llama_token_to_str(ctx, token) +# // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) +# LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); +@ctypes_function( + "llama_kv_cache_view_update", [llama_context_p_ctypes, llama_kv_cache_view_p], None +) +def llama_kv_cache_view_update(ctx: llama_context_p, view: CtypesPointerOrRef[llama_kv_cache_view], /): # type: ignore + """Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)""" + ... 
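
The view API is debug-only: you receive the view struct by value, refresh it against the live cache, read its counters, then free it. A sketch (not part of the diff) assuming a context that has already decoded some tokens; note that the update and free calls take a pointer, hence `ctypes.byref`:

```python
import ctypes
import llama_cpp

def dump_kv_cache_stats(ctx, n_seq_max: int = 1) -> None:
    """Print KV-cache occupancy counters (debugging aid only)."""
    view = llama_cpp.llama_kv_cache_view_init(ctx, n_seq_max)
    try:
        llama_cpp.llama_kv_cache_view_update(ctx, ctypes.byref(view))
        print("cells:", view.n_cells,
              "used:", view.used_cells,
              "tokens:", view.token_count,
              "max contiguous free:", view.max_contiguous)
    finally:
        llama_cpp.llama_kv_cache_view_free(ctypes.byref(view))
```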
-_lib.llama_token_to_str.argtypes = [llama_context_p, llama_token] -_lib.llama_token_to_str.restype = c_char_p +# // Returns the number of tokens in the KV cache (slow, use only for debug) +# // If a KV cell has multiple sequences assigned to it, it will be counted multiple times +# LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_n_tokens", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_n_tokens(ctx: llama_context_p, /) -> int: + """Returns the number of tokens in the KV cache (slow, use only for debug) + If a KV cell has multiple sequences assigned to it, it will be counted multiple times + """ + ... -# Special tokens +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx), +# "use llama_kv_self_n_tokens instead"); +@ctypes_function( + "llama_get_kv_cache_token_count", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_get_kv_cache_token_count(ctx: llama_context_p, /) -> int: + """Returns the number of tokens in the KV cache (slow, use only for debug) + If a KV cell has multiple sequences assigned to it, it will be counted multiple times + """ + ... -def llama_token_bos() -> llama_token: - return _lib.llama_token_bos() +# // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) +# LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_used_cells", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_used_cells(ctx: llama_context_p, /) -> int: + """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)""" + ... -_lib.llama_token_bos.argtypes = [] -_lib.llama_token_bos.restype = llama_token +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx), +# "use llama_kv_self_used_cells instead"); +@ctypes_function( + "llama_get_kv_cache_used_cells", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_get_kv_cache_used_cells(ctx: llama_context_p, /) -> int: + """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)""" + ... -def llama_token_eos() -> llama_token: - return _lib.llama_token_eos() +# // Clear the KV cache - both cell info is erased and KV data is zeroed +# LLAMA_API void llama_kv_self_clear( +# struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_clear", [llama_context_p_ctypes], None +) +def llama_kv_self_clear(ctx: llama_context_p, /): + """Clear the KV cache - both cell info is erased and KV data is zeroed""" + ... -_lib.llama_token_eos.argtypes = [] -_lib.llama_token_eos.restype = llama_token +# NOTE: Deprecated +@ctypes_function("llama_kv_self_clear", [llama_context_p_ctypes], None) +def llama_kv_cache_clear(ctx: llama_context_p, /): + """Clear the KV cache""" + ... -# TODO: improve the last_n_tokens interface ? -def llama_sample_top_p_top_k( +# // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) +# // Returns false if a partial sequence cannot be removed. 
Removing a whole sequence never fails +# // seq_id < 0 : match any sequence +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API bool llama_kv_cache_seq_rm( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1); +@ctypes_function( + "llama_kv_cache_seq_rm", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ], + ctypes.c_bool, +) +def llama_kv_cache_seq_rm( ctx: llama_context_p, - last_n_tokens_data, # type: Array[llama_token] - last_n_tokens_size: c_int, - top_k: c_int, - top_p: c_float, - temp: c_float, - repeat_penalty: c_float, -) -> llama_token: - return _lib.llama_sample_top_p_top_k( - ctx, last_n_tokens_data, last_n_tokens_size, top_k, top_p, temp, repeat_penalty - ) + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +) -> bool: + """Removes all tokens that belong to the specified sequence and have positions in [p0, p1) + Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails + + seq_id < 0 : match any sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... -_lib.llama_sample_top_p_top_k.argtypes = [ - llama_context_p, - llama_token_p, - c_int, - c_int, - c_float, - c_float, - c_float, -] -_lib.llama_sample_top_p_top_k.restype = llama_token +# // Copy all tokens that belong to the specified sequence to another sequence +# // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_self_seq_cp( +# struct llama_context * ctx, +# llama_seq_id seq_id_src, +# llama_seq_id seq_id_dst, +# llama_pos p0, +# llama_pos p1); +@ctypes_function( + "llama_kv_self_seq_cp", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_seq_id, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_cp( + ctx: llama_context_p, + seq_id_src: Union[llama_seq_id, int], + seq_id_dst: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +): + """Copy all tokens that belong to the specified sequence to another sequence + Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... -# Performance information +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_cp", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_seq_id, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_cache_seq_cp( + ctx: llama_context_p, + seq_id_src: Union[llama_seq_id, int], + seq_id_dst: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +): + """Copy all tokens that belong to the specified sequence to another sequence + Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... -def llama_print_timings(ctx: llama_context_p): - _lib.llama_print_timings(ctx) +# // Removes all tokens that do not belong to the specified sequence +# LLAMA_API void llama_kv_self_seq_keep( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None +) +def llama_kv_self_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): + """Removes all tokens that do not belong to the specified sequence""" + ... 
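
These sequence calls only touch cell bookkeeping, so they are cheap, and negative `p0`/`p1` act as open-ended ranges as documented above. A sketch of typical multi-sequence housekeeping (not part of the diff), assuming `ctx` already holds decoded tokens; the sequence ids are illustrative:

```python
import llama_cpp

def fork_and_prune(ctx, src_seq: int = 0, dst_seq: int = 1, keep_prefix: int = 32) -> None:
    # Share the first `keep_prefix` positions of `src_seq` with `dst_seq`
    # (no extra KV memory is allocated, the cells are just tagged).
    llama_cpp.llama_kv_self_seq_cp(ctx, src_seq, dst_seq, 0, keep_prefix)

    # Drop anything in `dst_seq` from position `keep_prefix` onwards (p1 < 0 means "to infinity").
    llama_cpp.llama_kv_cache_seq_rm(ctx, dst_seq, keep_prefix, -1)

    # Finally, evict every cell that does not belong to `dst_seq`.
    llama_cpp.llama_kv_self_seq_keep(ctx, dst_seq)
```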
-_lib.llama_print_timings.argtypes = [llama_context_p] -_lib.llama_print_timings.restype = None +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None +) +def llama_kv_cache_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): + """Removes all tokens that do not belong to the specified sequence""" + ... -def llama_reset_timings(ctx: llama_context_p): - _lib.llama_reset_timings(ctx) -_lib.llama_reset_timings.argtypes = [llama_context_p] -_lib.llama_reset_timings.restype = None +# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) +# // If the KV cache is RoPEd, the KV data is updated accordingly: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_cache_update() +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_add( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# llama_pos delta); +@ctypes_function( + "llama_kv_self_seq_add", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_add( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + delta: Union[llama_pos, int], + /, +): + """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) + If the KV cache is RoPEd, the KV data is updated accordingly: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update() + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... -# Print system information -def llama_print_system_info() -> bytes: - return _lib.llama_print_system_info() +# // NOTE: Deprecated +# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) +# // If the KV cache is RoPEd, the KV data is updated accordingly: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_cache_update() +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_add( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# llama_pos delta); +@ctypes_function( + "llama_kv_self_seq_add", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_cache_seq_add( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + delta: Union[llama_pos, int], + /, +): + """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) + If the KV cache is RoPEd, the KV data is updated accordingly: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update() + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... 
+ + +# // Integer division of the positions by factor of `d > 1` +# // If the KV cache is RoPEd, the KV data is updated accordingly +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_div( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# int d); +@ctypes_function( + "llama_kv_self_seq_div", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ctypes.c_int, + ], + None, +) +def llama_kv_self_seq_div( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + d: Union[ctypes.c_int, int], + /, +): + """Integer division of the positions by factor of `d > 1` + If the KV cache is RoPEd, the KV data is updated accordingly + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // NOTE: Deprecated +# // Integer division of the positions by factor of `d > 1` +# // If the KV cache is RoPEd, the KV data is updated accordingly +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_div( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# int d); +@ctypes_function( + "llama_kv_self_seq_div", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ctypes.c_int, + ], + None, +) +def llama_kv_cache_seq_div( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + d: Union[ctypes.c_int, int], + /, +): + """Integer division of the positions by factor of `d > 1` + If the KV cache is RoPEd, the KV data is updated accordingly + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // Returns the largest position present in the KV cache for the specified sequence +# LLAMA_API llama_pos llama_kv_self_seq_pos_max( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_kv_self_seq_pos_max", [llama_context_p_ctypes, llama_seq_id], llama_pos +) +def llama_kv_self_seq_pos_max( + ctx: llama_context_p, seq_id: Union[llama_seq_id, int], / +) -> int: + """Returns the largest position present in the KV cache for the specified sequence""" + ... + + +# // Defragment the KV cache +# // This will be applied: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_self_update() +# LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); +@ctypes_function("llama_kv_self_defrag", [llama_context_p_ctypes], None) +def llama_kv_self_defrag(ctx: llama_context_p, /): + """Defragment the KV cache + This will be applied: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update()""" + ... + + +# NOTE: Deprecated +# // Defragment the KV cache +# // This will be applied: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_self_update() +# LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); +@ctypes_function("llama_kv_cache_defrag", [llama_context_p_ctypes], None) +def llama_kv_cache_defrag(ctx: llama_context_p, /): + """Defragment the KV cache + This will be applied: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update()""" + ... + + +# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) +# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) +def llama_kv_self_update(ctx: llama_context_p, /): + """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" + ... 
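
A common use of `seq_rm` plus `seq_add` is a "context shift": discard the oldest positions of a sequence, slide the survivors down, then let the deferred K-shift/defragmentation run via `llama_kv_self_update`. A sketch (not part of the diff), under the assumption that the model is RoPE-based and the context supports shifting:

```python
import llama_cpp

def shift_context(ctx, seq_id: int, n_discard: int) -> None:
    """Drop the oldest `n_discard` positions of `seq_id` and renumber the rest."""
    p_max = llama_cpp.llama_kv_self_seq_pos_max(ctx, seq_id)
    if p_max < n_discard:
        return  # nothing worth shifting

    # Remove positions [0, n_discard) ...
    llama_cpp.llama_kv_cache_seq_rm(ctx, seq_id, 0, n_discard)
    # ... and shift the surviving positions [n_discard, inf) down by n_discard.
    llama_cpp.llama_kv_self_seq_add(ctx, seq_id, n_discard, -1, -n_discard)

    # Request defragmentation and apply the pending K-shift/defrag now,
    # instead of waiting for the next llama_decode().
    llama_cpp.llama_kv_self_defrag(ctx)
    llama_cpp.llama_kv_self_update(ctx)
```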
+ +# // NOTE: Deprecated +# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) +# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) +def llama_kv_cache_update(ctx: llama_context_p, /): + """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" + ... + + +# // Check if the context supports KV cache shifting +# LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +def llama_kv_self_can_shift(ctx: llama_context_p, /) -> bool: + """Check if the context supports KV cache shifting""" + ... + + +# // NOTE: Deprecated +# // Check if the context supports KV cache shifting +# LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +def llama_kv_cache_can_shift(ctx: llama_context_p, /) -> bool: + """Check if the context supports KV cache shifting""" + ... + + +# // +# // State / sessions +# // + + +# // Returns the *actual* size in bytes of the state +# // (logits, embedding and kv_cache) +# // Only use when saving the state, not when restoring it, otherwise the size may be too small. +# LLAMA_API size_t llama_state_get_size(struct llama_context * ctx); +@ctypes_function("llama_state_get_size", [llama_context_p_ctypes], ctypes.c_size_t) +def llama_state_get_size(ctx: llama_context_p, /) -> int: + """Returns the *actual* size in bytes of the state (rng, logits, embedding and kv_cache) - will often be smaller after compacting tokens""" + ... + + +# LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx), +# "use llama_state_get_size instead"); +@ctypes_function("llama_get_state_size", [llama_context_p_ctypes], ctypes.c_size_t) +def llama_get_state_size(ctx: llama_context_p, /) -> int: + """Returns the maximum size in bytes of the state (rng, logits, embedding + and kv_cache) - will often be smaller after compacting tokens""" + ... + + +# // Copies the state to the specified destination address. +# // Destination needs to have allocated enough memory. +# // Returns the number of bytes copied +# LLAMA_API size_t llama_state_get_data( +# struct llama_context * ctx, +# uint8_t * dst, +# size_t size); +@ctypes_function( + "llama_state_get_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_state_get_data( + ctx: llama_context_p, + dst: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Copies the state to the specified destination address. + Destination needs to have allocated enough memory. + Returns the number of bytes copied""" + ... + + +# LLAMA_API DEPRECATED(size_t llama_copy_state_data( +# struct llama_context * ctx, +# uint8_t * dst), +# "use llama_state_get_data instead"); +@ctypes_function( + "llama_copy_state_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ], + ctypes.c_size_t, +) +def llama_copy_state_data( + ctx: llama_context_p, dst: CtypesArray[ctypes.c_uint8], / +) -> int: + """Copies the state to the specified destination address. + Destination needs to have allocated enough memory. + Returns the number of bytes copied""" + ... 
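
`llama_state_get_size` must be queried at save time (the state can shrink later), and the destination is a plain `uint8` buffer. A sketch of taking an in-memory snapshot (not part of the diff):

```python
import ctypes
import llama_cpp

def snapshot_state(ctx):
    """Copy the full context state (logits, embeddings, KV cache) into a buffer."""
    size = llama_cpp.llama_state_get_size(ctx)
    buf = (ctypes.c_uint8 * size)()
    written = llama_cpp.llama_state_get_data(ctx, buf, size)
    assert written <= size, "state copy wrote more than the reported size"
    return buf
```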
+ + +# // Set the state reading from the specified address +# // Returns the number of bytes read +# LLAMA_API size_t llama_state_set_data( +# struct llama_context * ctx, +# const uint8_t * src, +# size_t size); +@ctypes_function( + "llama_state_set_data", + [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8), ctypes.c_size_t], + ctypes.c_size_t, +) +def llama_state_set_data( + ctx: llama_context_p, + src: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + /, +) -> int: + """Set the state reading from the specified address + Returns the number of bytes read""" + ... + + +# LLAMA_API DEPRECATED(size_t llama_set_state_data( +# struct llama_context * ctx, +# const uint8_t * src), +# "use llama_state_set_data instead"); +@ctypes_function( + "llama_set_state_data", + [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8)], + ctypes.c_size_t, +) +def llama_set_state_data( + ctx: llama_context_p, src: CtypesArray[ctypes.c_uint8], / +) -> int: + """Set the state reading from the specified address""" + ... + + +# Save/load session file +# LLAMA_API bool llama_state_load_file( +# struct llama_context * ctx, +# const char * path_session, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out); +@ctypes_function( + "llama_state_load_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_bool, +) +def llama_state_load_file( + ctx: llama_context_p, + path_session: bytes, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> bool: + ... + + +# LLAMA_API DEPRECATED(bool llama_load_session_file( +# struct llama_context * ctx, +# const char * path_session, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out), +# "use llama_state_load_file instead"); +@ctypes_function( + "llama_load_session_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_size_t, +) +def llama_load_session_file( + ctx: llama_context_p, + path_session: bytes, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> int: + ... + + +# LLAMA_API bool llama_state_save_file( +# struct llama_context * ctx, +# const char * path_session, +# const llama_token * tokens, +# size_t n_token_count); +@ctypes_function( + "llama_state_save_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_bool, +) +def llama_state_save_file( + ctx: llama_context_p, + path_session: bytes, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> bool: + ... + + +# LLAMA_API DEPRECATED(bool llama_save_session_file( +# struct llama_context * ctx, +# const char * path_session, +# const llama_token * tokens, +# size_t n_token_count), +# "use llama_state_save_file instead"); +@ctypes_function( + "llama_save_session_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_save_session_file( + ctx: llama_context_p, + path_session: bytes, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> int: + ... 
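A hedged sketch of session persistence with the file-based helpers above; it assumes `ctx` is a `llama_context_p`, `prompt_tokens` is the list of token ids already decoded into the context, and the file path is purely illustrative:

```python
import ctypes
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

path = b"./session.llama"  # hypothetical path
toks = (C.llama_token * len(prompt_tokens))(*prompt_tokens)
ok = C.llama_state_save_file(ctx, path, toks, len(toks))

# in-memory restore would use the snapshot taken above:
# C.llama_state_set_data(ctx, buf, size)

# reload the session file, recovering the tokens that were stored with it
cap = 4096
toks_out = (C.llama_token * cap)()
n_out = ctypes.c_size_t(0)
if C.llama_state_load_file(ctx, path, toks_out, cap, ctypes.byref(n_out)):
    restored_tokens = list(toks_out[: n_out.value])
```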
+ + +# // Get the exact size needed to copy the KV cache of a single sequence +# LLAMA_API size_t llama_state_seq_get_size( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_state_seq_get_size", + [llama_context_p_ctypes, llama_seq_id], + ctypes.c_size_t, +) +def llama_state_seq_get_size(ctx: llama_context_p, seq_id: llama_seq_id, /) -> int: + """Get the exact size needed to copy the KV cache of a single sequence""" + ... + + +# // Copy the KV cache of a single sequence into the specified buffer +# LLAMA_API size_t llama_state_seq_get_data( +# struct llama_context * ctx, +# uint8_t * dst, +# size_t size, +# llama_seq_id seq_id); +@ctypes_function( + "llama_state_seq_get_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + llama_seq_id, + ], + ctypes.c_size_t, +) +def llama_state_seq_get_data( + ctx: llama_context_p, + dst: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + seq_id: llama_seq_id, + /, +) -> int: + """Copy the KV cache of a single sequence into the specified buffer""" + ... + + +# // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence +# // Returns: +# // - Positive: Ok +# // - Zero: Failed to load +# LLAMA_API size_t llama_state_seq_set_data( +# struct llama_context * ctx, +# const uint8_t * src, +# size_t size, +# llama_seq_id dest_seq_id); +@ctypes_function( + "llama_state_seq_set_data", + [ + llama_context_p_ctypes, + ctypes.POINTER(ctypes.c_uint8), + ctypes.c_size_t, + llama_seq_id, + ], + ctypes.c_size_t, +) +def llama_state_seq_set_data( + ctx: llama_context_p, + src: CtypesArray[ctypes.c_uint8], + size: Union[ctypes.c_size_t, int], + dest_seq_id: llama_seq_id, + /, +) -> int: + """Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence""" + ... + + +# LLAMA_API size_t llama_state_seq_save_file( +# struct llama_context * ctx, +# const char * filepath, +# llama_seq_id seq_id, +# const llama_token * tokens, +# size_t n_token_count); +@ctypes_function( + "llama_state_seq_save_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_seq_id, + llama_token_p, + ctypes.c_size_t, + ], + ctypes.c_size_t, +) +def llama_state_seq_save_file( + ctx: llama_context_p, + filepath: bytes, + seq_id: llama_seq_id, + tokens: CtypesArray[llama_token], + n_token_count: Union[ctypes.c_size_t, int], + /, +) -> int: + ... + + +# LLAMA_API size_t llama_state_seq_load_file( +# struct llama_context * ctx, +# const char * filepath, +# llama_seq_id dest_seq_id, +# llama_token * tokens_out, +# size_t n_token_capacity, +# size_t * n_token_count_out); +@ctypes_function( + "llama_state_seq_load_file", + [ + llama_context_p_ctypes, + ctypes.c_char_p, + llama_seq_id, + llama_token_p, + ctypes.c_size_t, + ctypes.POINTER(ctypes.c_size_t), + ], + ctypes.c_size_t, +) +def llama_state_seq_load_file( + ctx: llama_context_p, + filepath: bytes, + dest_seq_id: llama_seq_id, + tokens_out: CtypesArray[llama_token], + n_token_capacity: Union[ctypes.c_size_t, int], + n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t], + /, +) -> int: + ... 
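A small sketch of duplicating one sequence's KV cache into another (for example to branch a conversation) using the per-sequence state functions above; `ctx` is assumed to be a valid `llama_context_p`:

```python
import ctypes
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

src, dst = 0, 1  # illustrative sequence ids
size = C.llama_state_seq_get_size(ctx, src)
buf = (ctypes.c_uint8 * size)()
C.llama_state_seq_get_data(ctx, buf, size, src)

# per the header comment, a zero return value means the load failed
if C.llama_state_seq_set_data(ctx, buf, size, dst) == 0:
    raise RuntimeError("failed to load sequence state into seq 1")
```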
+ + +# // +# // Decoding +# // + + +# // Return batch for single sequence of tokens +# // The sequence ID will be fixed to 0 +# // The position of the tokens will be tracked automatically by llama_decode +# // +# // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it +# // +# LLAMA_API struct llama_batch llama_batch_get_one( +# llama_token * tokens, +# int32_t n_tokens); +@ctypes_function( + "llama_batch_get_one", + [ + llama_token_p, + ctypes.c_int32, + ], + llama_batch, +) +def llama_batch_get_one( + tokens: CtypesArray[llama_token], + n_tokens: Union[ctypes.c_int, int], + /, +) -> llama_batch: + """Return batch for single sequence of tokens starting at pos_0 + + NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it + """ + ... + + +# // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens +# // Each token can be assigned up to n_seq_max sequence ids +# // The batch has to be freed with llama_batch_free() +# // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float) +# // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token +# // The rest of the llama_batch members are allocated with size n_tokens +# // All members are left uninitialized +# LLAMA_API struct llama_batch llama_batch_init( +# int32_t n_tokens, +# int32_t embd, +# int32_t n_seq_max); +@ctypes_function( + "llama_batch_init", [ctypes.c_int32, ctypes.c_int32, ctypes.c_int32], llama_batch +) +def llama_batch_init( + n_tokens: Union[ctypes.c_int32, int], + embd: Union[ctypes.c_int32, int], + n_seq_max: Union[ctypes.c_int32, int], + /, +) -> llama_batch: + """Allocates a batch of tokens on the heap that can hold a maximum of n_tokens + Each token can be assigned up to n_seq_max sequence ids + The batch has to be freed with llama_batch_free() + If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float) + Otherwise, llama_batch.token will be allocated to store n_tokens llama_token + The rest of the llama_batch members are allocated with size n_tokens + All members are left uninitialized""" + ... + + +# // Frees a batch of tokens allocated with llama_batch_init() +# LLAMA_API void llama_batch_free(struct llama_batch batch); +@ctypes_function("llama_batch_free", [llama_batch], None) +def llama_batch_free(batch: llama_batch, /): + """Frees a batch of tokens allocated with llama_batch_init()""" + ... + + +# // Processes a batch of tokens with the ecoder part of the encoder-decoder model. +# // Stores the encoder output internally for later use by the decoder cross-attention layers. +# // 0 - success +# // < 0 - error +# LLAMA_API int32_t llama_encode( +# struct llama_context * ctx, +# struct llama_batch batch); +@ctypes_function("llama_encode", [llama_context_p_ctypes, llama_batch], ctypes.c_int32) +def llama_encode(ctx: llama_context_p, batch: llama_batch, /) -> int: + """Processes a batch of tokens with the ecoder part of the encoder-decoder model. + Stores the encoder output internally for later use by the decoder cross-attention layers. + 0 - success + < 0 - error""" + ... + + +# // Positive return values does not mean a fatal error, but rather a warning. 
+# // 0 - success +# // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) +# // < 0 - error +# LLAMA_API int32_t llama_decode( +# struct llama_context * ctx, +# struct llama_batch batch); +@ctypes_function("llama_decode", [llama_context_p_ctypes, llama_batch], ctypes.c_int32) +def llama_decode(ctx: llama_context_p, batch: llama_batch, /) -> int: + """Positive return values does not mean a fatal error, but rather a warning. + 0 - success + 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) + < 0 - error""" + ... + + +# // Set the number of threads used for decoding +# // n_threads is the number of threads used for generation (single token) +# // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens) +# LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch); +@ctypes_function( + "llama_set_n_threads", + [ + llama_context_p_ctypes, + ctypes.c_int32, + ctypes.c_int32, + ], + None, +) +def llama_set_n_threads( + ctx: llama_context_p, + n_threads: Union[ctypes.c_int32, int], + n_threads_batch: Union[ctypes.c_int32, int], + /, +): + """Set the number of threads used for decoding + n_threads is the number of threads used for generation (single token) + n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens) + """ + ... + + +# // Get the number of threads used for generation of a single token. +# LLAMA_API int32_t llama_n_threads(struct llama_context * ctx); +@ctypes_function("llama_n_threads", [llama_context_p_ctypes], ctypes.c_int32) +def llama_n_threads(ctx: llama_context_p, /) -> int: + """Get the number of threads used for generation of a single token""" + ... + + +# // Get the number of threads used for prompt and batch processing (multiple token). +# LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); +@ctypes_function("llama_n_threads_batch", [llama_context_p_ctypes], ctypes.c_int32) +def llama_n_threads_batch(ctx: llama_context_p, /) -> int: + """Get the number of threads used for prompt and batch processing (multiple token)""" + ... + + +# // Set whether the model is in embeddings mode or not +# // If true, embeddings will be returned but logits will not +# LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); +@ctypes_function("llama_set_embeddings", [llama_context_p_ctypes, ctypes.c_bool], None) +def llama_set_embeddings(ctx: llama_context_p, embeddings: bool, /): + """Set whether the model is in embeddings model or not + If true, embeddings will be returned but logits will not""" + ... + + +# // Set whether to use causal attention or not +# // If set to true, the model will only attend to the past tokens +# LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn); +@ctypes_function("llama_set_causal_attn", [llama_context_p_ctypes, ctypes.c_bool], None) +def llama_set_causal_attn(ctx: llama_context_p, causal_attn: bool, /): + """Set whether to use causal attention or not + If set to true, the model will only attend to the past tokens""" + ... + + +# // Set whether the model is in warmup mode or not +# // If true, all model tensors are activated during llama_decode() to load and cache their weights. 
+# LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup); +@ctypes_function("llama_set_warmup", [llama_context_p_ctypes, ctypes.c_bool], None) +def llama_set_warmup(ctx: llama_context_p, warmup: bool, /): + """Set whether the model is in warmup mode or not + If true, all model tensors are activated during llama_decode() to load and cache their weights.""" + ... + + +# // Set abort callback +# LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); +@ctypes_function( + "llama_set_abort_callback", + [llama_context_p_ctypes, ggml_abort_callback, ctypes.c_void_p], + None, +) +def llama_set_abort_callback( + ctx: llama_context_p, + abort_callback: Callable[[ctypes.c_void_p], None], + abort_callback_data: ctypes.c_void_p, + /, +): + """Set abort callback""" + ... + + +# // Wait until all computations are finished +# // This is automatically done when using one of the functions below to obtain the computation results +# // and is not necessary to call it explicitly in most cases +# LLAMA_API void llama_synchronize(struct llama_context * ctx); +@ctypes_function("llama_synchronize", [llama_context_p_ctypes], None) +def llama_synchronize(ctx: llama_context_p, /): + """Wait until all computations are finished + This is automatically done when using one of the functions below to obtain the computation results + and is not necessary to call it explicitly in most cases""" + ... + + +# // Token logits obtained from the last call to llama_decode() +# // The logits for which llama_batch.logits[i] != 0 are stored contiguously +# // in the order they have appeared in the batch. +# // Rows: number of tokens for which llama_batch.logits[i] != 0 +# // Cols: n_vocab +# LLAMA_API float * llama_get_logits(struct llama_context * ctx); +@ctypes_function( + "llama_get_logits", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float) +) +def llama_get_logits(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]: + """Token logits obtained from the last call to llama_decode() + The logits for which llama_batch.logits[i] != 0 are stored contiguously + in the order they have appeared in the batch. + Rows: number of tokens for which llama_batch.logits[i] != 0 + Cols: n_vocab + + Returns: + Pointer to the logits buffer of shape (n_tokens, n_vocab)""" + ... + + +# // Logits for the ith token. For positive indices, Equivalent to: +# // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab +# // Negative indicies can be used to access logits in reverse order, -1 is the last logit. +# // returns NULL for invalid ids. +# LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i); +@ctypes_function( + "llama_get_logits_ith", + [llama_context_p_ctypes, ctypes.c_int32], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_logits_ith( + ctx: llama_context_p, i: Union[ctypes.c_int32, int], / +) -> CtypesArray[ctypes.c_float]: + """Logits for the ith token. Equivalent to: + llama_get_logits(ctx) + i*n_vocab""" + ... + + +# // Get all output token embeddings. +# // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model, +# // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously +# // in the order they have appeared in the batch. +# // shape: [n_outputs*n_embd] +# // Otherwise, returns NULL. 
+# LLAMA_API float * llama_get_embeddings(struct llama_context * ctx); +@ctypes_function( + "llama_get_embeddings", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float) +) +def llama_get_embeddings(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for the input + shape: [n_embd] (1-dimensional)""" + ... + + +# // Get the embeddings for the ith token. For positive indices, Equivalent to: +# // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd +# // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding. +# // shape: [n_embd] (1-dimensional) +# // returns NULL for invalid ids. +# LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i); +@ctypes_function( + "llama_get_embeddings_ith", + [llama_context_p_ctypes, ctypes.c_int32], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_embeddings_ith( + ctx: llama_context_p, i: Union[ctypes.c_int32, int], / +) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for the ith sequence + llama_get_embeddings(ctx) + i*n_embd""" + ... + + +# // Get the embeddings for a sequence id +# // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE +# // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence +# // otherwise: float[n_embd] (1-dimensional) +# LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id); +@ctypes_function( + "llama_get_embeddings_seq", + [llama_context_p_ctypes, llama_seq_id], + ctypes.POINTER(ctypes.c_float), +) +def llama_get_embeddings_seq( + ctx: llama_context_p, seq_id: Union[llama_seq_id, int], / +) -> CtypesArray[ctypes.c_float]: + """Get the embeddings for a sequence id + Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE + shape: [n_embd] (1-dimensional)""" + ... + + +# // +# // Vocab +# // + + +# LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_text", [llama_vocab_p_ctypes, llama_token], ctypes.c_char_p +) +def llama_vocab_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bytes: + ... + + +# LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_score", [llama_vocab_p_ctypes, llama_token], ctypes.c_float +) +def llama_vocab_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> float: + ... + + +# LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_get_attr", [llama_vocab_p_ctypes, llama_token], ctypes.c_int +) +def llama_vocab_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> int: + ... + + +# // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) +# LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_is_eog", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool +) +def llama_vocab_is_eog(vocab: llama_vocab_p, token: Union[llama_token, int], /) -> bool: + """Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)""" + ... 
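A sketch of a single decode step followed by reading the last token's logits and a pooled sequence embedding, using the bindings above. It assumes `ctx` is a `llama_context_p`, `prompt_tokens` is a list of token ids, `n_vocab` and `n_embd` were queried via the model/vocab helpers bound earlier in this module, and the thread counts are illustrative; the embedding read additionally assumes embeddings mode was enabled and a pooling type other than NONE:

```python
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

C.llama_set_n_threads(ctx, 8, 8)  # hypothetical thread counts

toks = (C.llama_token * len(prompt_tokens))(*prompt_tokens)
rc = C.llama_decode(ctx, C.llama_batch_get_one(toks, len(toks)))
if rc == 1:
    raise RuntimeError("no KV slot for the batch: shrink the batch or enlarge the context")
elif rc < 0:
    raise RuntimeError(f"llama_decode error: {rc}")

# logits for the last token in the batch (-1); the pointer is NULL for invalid indices
logits = C.llama_get_logits_ith(ctx, -1)
if logits:
    last_logits = [logits[i] for i in range(n_vocab)]

# pooled embedding for sequence 0; NULL when pooling_type == LLAMA_POOLING_TYPE_NONE
emb = C.llama_get_embeddings_seq(ctx, 0)
if emb:
    embedding = [emb[i] for i in range(n_embd)]
```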
+ + +# // Identify if Token Id is a control token or a render-able token +# LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token); +@ctypes_function( + "llama_vocab_is_control", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool +) +def llama_vocab_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + """Identify if Token Id is a control token or a render-able token""" + ... + + +# // Special tokens + + +# LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence +@ctypes_function("llama_vocab_bos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_bos(vocab: llama_vocab_p, /) -> llama_token: + """beginning-of-sentence""" + ... + + +# LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence +@ctypes_function("llama_vocab_eos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eos(vocab: llama_vocab_p, /) -> llama_token: + """end-of-sentence""" + ... + + +# LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn +@ctypes_function("llama_vocab_eot", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eot(vocab: llama_vocab_p, /) -> llama_token: + """end-of-turn""" + ... + + +# LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator +@ctypes_function("llama_vocab_sep", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_sep(vocab: llama_vocab_p, /) -> llama_token: + """sentence separator""" + ... + + +# LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line +@ctypes_function("llama_vocab_nl", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_nl(vocab: llama_vocab_p, /) -> llama_token: + """next-line""" + ... + + +# LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding +@ctypes_function("llama_vocab_pad", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_pad(vocab: llama_vocab_p, /) -> llama_token: + """padding""" + ... + +# LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_bos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_bos(vocab: llama_vocab_p, /) -> bool: + ... + + +# LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_eos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_eos(vocab: llama_vocab_p, /) -> bool: + ... + + +# LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pre(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_suf(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_mid(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pad(vocab: llama_vocab_p, /) -> llama_token: + ... 
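A brief sketch of querying the special-token helpers above; `vocab` is assumed to be a `llama_vocab_p` obtained from the model helpers bound earlier in this module, and models without fill-in-the-middle support typically return a negative (null) token id for the FIM getters:

```python
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

bos = C.llama_vocab_bos(vocab)
eos = C.llama_vocab_eos(vocab)
add_bos = C.llama_vocab_get_add_bos(vocab)  # whether BOS should be prepended when tokenizing

# fill-in-the-middle prompt pieces (may be negative if the model defines no FIM tokens)
fim_pre = C.llama_vocab_fim_pre(vocab)
fim_suf = C.llama_vocab_fim_suf(vocab)
fim_mid = C.llama_vocab_fim_mid(vocab)
```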
+ + +# LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_rep(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_sep(vocab: llama_vocab_p, /) -> llama_token: + ... + + + +# DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead"); +@ctypes_function( + "llama_token_get_text", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_char_p, +) +def llama_token_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bytes: + ... + + +# DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead"); +@ctypes_function( + "llama_token_get_score", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_float, +) +def llama_token_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> float: + ... + +# DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead"); +@ctypes_function( + "llama_token_get_attr", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_int, +) +def llama_token_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> int: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead"); +@ctypes_function( + "llama_token_is_eog", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_eog( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead"); +@ctypes_function( + "llama_token_is_control", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead"); +@ctypes_function( + "llama_token_bos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_bos(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead"); +@ctypes_function( + "llama_token_eos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eos(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead"); +@ctypes_function( + "llama_token_eot", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eot(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead"); +@ctypes_function( + "llama_token_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_cls(vocab: llama_vocab_p, /) -> int: + ... 
+ +# DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead"); +@ctypes_function( + "llama_token_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_sep(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead"); +@ctypes_function( + "llama_token_nl", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_nl(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead"); +@ctypes_function( + "llama_token_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_pad(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead"); +@ctypes_function( + "llama_add_bos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_bos_token(vocab: llama_vocab_p, /) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead"); +@ctypes_function( + "llama_add_eos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_eos_token(vocab: llama_vocab_p, /) -> bool: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead"); +@ctypes_function( + "llama_token_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pre(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead"); +@ctypes_function( + "llama_token_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_suf(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead"); +@ctypes_function( + "llama_token_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_mid(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead"); +@ctypes_function( + "llama_token_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pad(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead"); +@ctypes_function( + "llama_token_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_rep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead"); +@ctypes_function( + "llama_token_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_sep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# // CLS is equivalent to BOS +# DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification +# "use llama_vocab_bos instead"); +@ctypes_function( + "llama_vocab_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_cls(vocab: llama_vocab_p, /) -> llama_token: + ... + + +# // +# // Tokenization +# // +# // The API is thread-safe. +# // + + +# /// @details Convert the provided text into tokens. 
+# /// @param tokens The tokens pointer must be large enough to hold the resulting tokens. +# /// @return Returns the number of tokens on success, no more than n_tokens_max +# /// @return Returns a negative number on failure - the number of tokens that would have been returned +# /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so. +# /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated +# /// as plaintext. Does not insert a leading space. +# LLAMA_API int32_t llama_tokenize( +# const struct llama_vocab * vocab, +# const char * text, +# int32_t text_len, +# llama_token * tokens, +# int32_t n_tokens_max, +# bool add_special, +# bool parse_special); +@ctypes_function( + "llama_tokenize", + [ + llama_vocab_p_ctypes, + ctypes.c_char_p, + ctypes.c_int32, + llama_token_p, + ctypes.c_int32, + ctypes.c_bool, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_tokenize( + vocab: llama_vocab_p, + text: bytes, + text_len: Union[ctypes.c_int, int], + tokens: CtypesArray[llama_token], + n_tokens_max: Union[ctypes.c_int, int], + add_special: Union[ctypes.c_bool, bool], + parse_special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Convert the provided text into tokens. + + Args: + vocab: The vocabulary to use for tokenization. + text: The text to tokenize. + text_len: The length of the text. + tokens: The tokens pointer must be large enough to hold the resulting tokens. + n_max_tokens: The maximum number of tokens to return. + add_special: Allow adding special tokenns if the model is configured to do so. + parse_special: Allow parsing special tokens. + + Returns: + Returns the number of tokens on success, no more than n_tokens_max + Returns a negative number on failure - the number of tokens that would have been returned + """ + ... + + +# // Token Id -> Piece. +# // Uses the vocabulary in the provided context. +# // Does not write null terminator to the buffer. +# // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') +# // @param special If true, special tokens are rendered in the output. +# LLAMA_API int32_t llama_token_to_piece( +# const struct llama_vocab * vocab, +# llama_token token, +# char * buf, +# int32_t length, +# int32_t lstrip, +# bool special); +@ctypes_function( + "llama_token_to_piece", + [ + llama_vocab_p_ctypes, + llama_token, + ctypes.c_char_p, + ctypes.c_int32, + ctypes.c_int32, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_token_to_piece( + vocab: llama_vocab_p, + token: Union[llama_token, int], + buf: Union[ctypes.c_char_p, bytes, CtypesArray[ctypes.c_char]], + length: Union[ctypes.c_int, int], + lstrip: Union[ctypes.c_int, int], + special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Token Id -> Piece. + Uses the vocabulary in the provided context. + Does not write null terminator to the buffer. + User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens. + + Args: + vocab: The vocabulary to use for tokenization. + token: The token to convert. + buf: The buffer to write the token to. + length: The length of the buffer. + lstrip: The number of leading spaces to skip. + special: If true, special tokens are rendered in the output.""" + ... 
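A sketch of the usual two-pass tokenization pattern with `llama_tokenize` (a negative return value is the required buffer size), plus converting a token id back to its text piece; `vocab` is assumed to be a `llama_vocab_p`:

```python
import ctypes
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

text = b"Hello, world!"
n_max = 32
buf = (C.llama_token * n_max)()
n = C.llama_tokenize(vocab, text, len(text), buf, n_max, True, False)
if n < 0:               # buffer too small: -n is the number of tokens that would be produced
    n_max = -n
    buf = (C.llama_token * n_max)()
    n = C.llama_tokenize(vocab, text, len(text), buf, n_max, True, False)
tokens = list(buf[:n])

# Token Id -> Piece (no null terminator is written, so slice by the return value)
piece = ctypes.create_string_buffer(32)
n_piece = C.llama_token_to_piece(vocab, tokens[0], piece, len(piece), 0, False)
print(piece.raw[:n_piece])
```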
+ + +# # // check if token0 is contained as a prefix in token1 +# # LLAMA_API bool llama_token_is_prefix( +# # const struct llama_model * model, +# # llama_token token0, +# # llama_token token1); +# @ctypes_function( +# "llama_token_is_prefix", +# [llama_model_p_ctypes, llama_token, llama_token], +# ctypes.c_bool, +# ) +# def llama_token_is_prefix( +# model: llama_model_p, token0: Union[llama_token, int], token1: Union[llama_token, int], / +# ) -> bool: +# """Check if token0 is contained as a prefix in token1""" +# ... + + +# /// @details Convert the provided tokens into text (inverse of llama_tokenize()). +# /// @param text The char pointer must be large enough to hold the resulting text. +# /// @return Returns the number of chars/bytes on success, no more than text_len_max. +# /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned. +# /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. +# /// @param unparse_special If true, special tokens are rendered in the output. +# LLAMA_API int32_t llama_detokenize( +# const struct llama_model * model, +# const llama_token * tokens, +# int32_t n_tokens, +# char * text, +# int32_t text_len_max, +# bool remove_special, +# bool unparse_special); +@ctypes_function( + "llama_detokenize", + [ + llama_model_p_ctypes, + ctypes.POINTER(llama_token), + ctypes.c_int32, + ctypes.c_char_p, + ctypes.c_int32, + ctypes.c_bool, + ctypes.c_bool, + ], + ctypes.c_int32, +) +def llama_detokenize( + model: llama_model_p, + tokens: CtypesArray[llama_token], + n_tokens: Union[ctypes.c_int, int], + text: bytes, + text_len_max: Union[ctypes.c_int, int], + remove_special: Union[ctypes.c_bool, bool], + unparse_special: Union[ctypes.c_bool, bool], + /, +) -> int: + """Convert the provided tokens into text (inverse of llama_tokenize()). + + Args: + model: The model to use for tokenization. + tokens: The tokens to convert. + n_tokens: The number of tokens. + text: The buffer to write the text to. + text_len_max: The length of the buffer. + remove_special: Allow to remove BOS and EOS tokens if model is configured to do so. + unparse_special: If true, special tokens are rendered in the output.""" + ... + + +# // +# // Chat templates +# // + + +# /// Apply chat template. Inspired by hf apply_chat_template() on python. +# /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model" +# /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template +# /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead. +# /// @param chat Pointer to a list of multiple llama_chat_message +# /// @param n_msg Number of llama_chat_message in this chat +# /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message. +# /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages) +# /// @param length The size of the allocated buffer +# /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. 
+# LLAMA_API int32_t llama_chat_apply_template( +# const char * tmpl, +# const struct llama_chat_message * chat, +# size_t n_msg, +# bool add_ass, +# char * buf, +# int32_t length); +@ctypes_function( + "llama_chat_apply_template", + [ + ctypes.c_char_p, # tmpl + ctypes.POINTER(llama_chat_message), # chat + ctypes.c_size_t, # n_msg + ctypes.c_bool, # add_ass (added) + ctypes.c_char_p, # buf + ctypes.c_int32, # length + ], + ctypes.c_int32, +) +def llama_chat_apply_template( + tmpl: bytes, + chat: CtypesArray[llama_chat_message], + n_msg: int, + add_ass: bool, # Added parameter + buf: bytes, + length: int, + /, +) -> int: + """Apply chat template. + + Args: + tmpl: Template to use. If None, uses model's default + chat: Array of chat messages + n_msg: Number of messages + add_ass: Whether to end prompt with assistant token + buf: Output buffer + length: Buffer length + + Returns: + Number of bytes written, or needed if buffer too small + """ + ... + + +# // Get list of built-in chat templates +# LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); +@ctypes_function( + "llama_chat_builtin_templates", + [ + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_chat_builtin_templates( + output: CtypesArray[bytes], + len: Union[ctypes.c_size_t, int], + /, +) -> int: + """Get list of built-in chat templates. + + Args: + output: Output buffer to store template names. + len: Length of the output buffer. + + Returns: + Number of templates available. + Returns a negative number on error. + """ + ... + + +# // +# // Sampling API +# // +# // Sample usage: +# // +# // // prepare the sampling chain at the start +# // auto sparams = llama_sampler_chain_default_params(); +# // +# // llama_sampler * smpl = llama_sampler_chain_init(sparams); +# // +# // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50)); +# // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1)); +# // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8)); +# // +# // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat" +# // // this sampler will be responsible to select the actual token +# // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed)); +# // +# // ... +# // +# // // decoding loop: +# // while (...) { +# // ... +# // +# // llama_decode(ctx, batch); +# // +# // // sample from the logits of the last token in the batch +# // const llama_token id = llama_sampler_sample(smpl, ctx, -1); +# // +# // // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.) +# // llama_sampler_accept(smpl, id); +# // ... +# // } +# // +# // llama_sampler_free(smpl); +# // +# // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). 
+# // + +# typedef void * llama_sampler_context_t; +llama_sampler_context_t = ctypes.c_void_p + + +# // user code can implement the interface below in order to create custom llama_sampler +# struct llama_sampler_i { +# const char * (*name) (const struct llama_sampler * smpl); // can be NULL +# void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL +# void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required +# void (*reset) ( struct llama_sampler * smpl); // can be NULL +# struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL +# void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL +# +# // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph +# //void (*apply_ggml) (struct llama_sampler * smpl, ...); +# }; +class llama_sampler_i(ctypes.Structure): + ... + + +# struct llama_sampler { +# const struct llama_sampler_i * iface; +# llama_sampler_context_t ctx; +# }; +class llama_sampler(ctypes.Structure): + _fields_ = [ + ("iface", ctypes.POINTER(llama_sampler_i)), + ("ctx", llama_sampler_context_t), + ] + + +if TYPE_CHECKING: + llama_sampler_p = CtypesPointer[llama_sampler] + +llama_sampler_p_ctypes = ctypes.POINTER(llama_sampler) + +llama_sampler_i_name = ctypes.CFUNCTYPE(ctypes.c_char_p, llama_sampler_p_ctypes) +llama_sampler_i_accept = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes, llama_token) +llama_sampler_i_apply = ctypes.CFUNCTYPE( + None, llama_sampler_p_ctypes, llama_token_data_array_p +) +llama_sampler_i_reset = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes) +llama_sampler_i_clone = ctypes.CFUNCTYPE(llama_sampler_p_ctypes, llama_sampler_p_ctypes) +llama_sampler_i_free = ctypes.CFUNCTYPE(None, llama_sampler_p_ctypes) + +llama_sampler_i._fields_ = [ + ("name", llama_sampler_i_name), + ("accept", llama_sampler_i_accept), + ("apply", llama_sampler_i_apply), + ("reset", llama_sampler_i_reset), + ("clone", llama_sampler_i_clone), + ("free", llama_sampler_i_free), +] + + +# // mirror of llama_sampler_i: +# LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx); +@ctypes_function( + "llama_sampler_init", + [ctypes.POINTER(llama_sampler_i), llama_sampler_context_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init( + iface: ctypes.POINTER(llama_sampler_i), ctx: llama_sampler_context_t, / +) -> llama_sampler_p: + ... + + +# LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_name", + [llama_sampler_p_ctypes], + ctypes.c_char_p, +) +def llama_sampler_name(smpl: llama_sampler_p, /) -> bytes: + ... + + +# LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token); +@ctypes_function( + "llama_sampler_accept", + [llama_sampler_p_ctypes, llama_token], + None, +) +def llama_sampler_accept(smpl: llama_sampler_p, token: Union[llama_token, int], /): + ... + + +# LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p); +@ctypes_function( + "llama_sampler_apply", + [llama_sampler_p_ctypes, llama_token_data_array_p], + None, +) +def llama_sampler_apply( + smpl: llama_sampler_p, cur_p: CtypesArray[llama_token_data_array], / +): + ... + + +# LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_reset", + [llama_sampler_p_ctypes], + None, +) +def llama_sampler_reset(smpl: llama_sampler_p, /): + ... 
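A speculative sketch of wiring a custom sampler through `llama_sampler_i`, using the CFUNCTYPE aliases above. It only reports a name and leaves the candidate list untouched; per the header comment, `apply` is the only required callback and the others may stay NULL when `ctx` is NULL. The callback objects, the name bytes, and `iface` itself must be kept referenced for as long as the sampler is alive:

```python
import ctypes
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

_NAME = b"noop"  # kept at module level so the returned char* stays valid
_name_cb = C.llama_sampler_i_name(lambda smpl: _NAME)
_apply_cb = C.llama_sampler_i_apply(lambda smpl, cur_p: None)  # leaves candidates untouched

iface = C.llama_sampler_i()
iface.name = _name_cb
iface.apply = _apply_cb
# accept / reset / clone / free default to NULL, which is allowed here because ctx is NULL

noop = C.llama_sampler_init(ctypes.pointer(iface), None)
print(C.llama_sampler_name(noop))   # b'noop'
C.llama_sampler_free(noop)          # llama_sampler_free is bound just below
```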
+ + +# LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_clone", + [llama_sampler_p_ctypes], + llama_sampler_p_ctypes, +) +def llama_sampler_clone(smpl: llama_sampler_p, /) -> llama_sampler_p: + ... + + +# // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add) +# LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_free", + [llama_sampler_p_ctypes], + None, +) +def llama_sampler_free(smpl: llama_sampler_p, /): + ... + + +# // llama_sampler_chain +# // a type of llama_sampler that can chain multiple samplers one after another +# +# LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params); +@ctypes_function( + "llama_sampler_chain_init", + [llama_sampler_chain_params], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_init(params: llama_sampler_chain_params, /) -> llama_sampler_p: + ... + + +# // important: takes ownership of the sampler object and will free it when llama_sampler_free is called +# LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_chain_add", + [llama_sampler_p_ctypes, llama_sampler_p_ctypes], + None, +) +def llama_sampler_chain_add(chain: llama_sampler_p, smpl: llama_sampler_p, /): + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i); +@ctypes_function( + "llama_sampler_chain_get", + [llama_sampler_p_ctypes, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_get( + chain: llama_sampler_p, i: Union[ctypes.c_int32, int], / +) -> llama_sampler_p: + ... + + +# LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain); +@ctypes_function( + "llama_sampler_chain_n", + [llama_sampler_p_ctypes], + ctypes.c_int, +) +def llama_sampler_chain_n(chain: llama_sampler_p, /) -> int: + ... + + +# // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed +# LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i); +@ctypes_function( + "llama_sampler_chain_remove", + [llama_sampler_p_ctypes, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_chain_remove( + chain: llama_sampler_p, i: Union[ctypes.c_int32, int], / +) -> llama_sampler_p: + ... + + +# // available samplers: +# +# LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void); +@ctypes_function("llama_sampler_init_greedy", [], llama_sampler_p_ctypes) +def llama_sampler_init_greedy() -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed); +@ctypes_function("llama_sampler_init_dist", [ctypes.c_uint32], llama_sampler_p_ctypes) +def llama_sampler_init_dist(seed: int) -> llama_sampler_p: + ... + + +# /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. +# /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first. +# DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void), +# "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)"); +@ctypes_function("llama_sampler_init_softmax", [], llama_sampler_p_ctypes) +def llama_sampler_init_softmax() -> llama_sampler_p: + ... 
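A minimal sketch of the chain ownership rules spelled out in the comments above: once a sampler is added to a chain, the chain owns it and freeing the chain releases it. `llama_sampler_chain_default_params()` is assumed to be bound earlier in this module:

```python
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

params = C.llama_sampler_chain_default_params()  # assumed to be bound earlier in this file
chain = C.llama_sampler_chain_init(params)

greedy = C.llama_sampler_init_greedy()
C.llama_sampler_chain_add(chain, greedy)         # the chain now owns `greedy`
assert C.llama_sampler_chain_n(chain) == 1

# do NOT free `greedy` separately; freeing the chain releases everything it owns
C.llama_sampler_free(chain)
```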
+ + +# /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 +# /// Setting k <= 0 makes this a noop +# LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); +@ctypes_function("llama_sampler_init_top_k", [ctypes.c_int32], llama_sampler_p_ctypes) +def llama_sampler_init_top_k(k: int) -> llama_sampler_p: + ... + + +# /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 +# LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_top_p", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_top_p(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 +# LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_min_p", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_min_p(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. +# LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep); +@ctypes_function( + "llama_sampler_init_typical", + [ctypes.c_float, ctypes.c_size_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init_typical(p: float, min_keep: int) -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t); +@ctypes_function("llama_sampler_init_temp", [ctypes.c_float], llama_sampler_p_ctypes) +def llama_sampler_init_temp(t: float) -> llama_sampler_p: + ... + + +# /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772. +# LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent); +@ctypes_function( + "llama_sampler_init_temp_ext", + [ctypes.c_float, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_temp_ext( + t: float, delta: float, exponent: float +) -> llama_sampler_p: + ... + + +# /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335 +# LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed); +@ctypes_function( + "llama_sampler_init_xtc", + [ctypes.c_float, ctypes.c_float, ctypes.c_size_t, ctypes.c_uint32], + llama_sampler_p_ctypes, +) +def llama_sampler_init_xtc( + p: float, t: float, min_keep: int, seed: int, / +) -> llama_sampler_p: + ... + + +# /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641 +# LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n); +@ctypes_function( + "llama_sampler_init_top_n_sigma", + [ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_top_n_sigma(n: float, /) -> llama_sampler_p: + ... + + +# /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. +# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. 
+# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. +# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. +# /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. +# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. +# LLAMA_API struct llama_sampler * llama_sampler_init_mirostat( +# int32_t n_vocab, +# uint32_t seed, +# float tau, +# float eta, +# int32_t m); +@ctypes_function( + "llama_sampler_init_mirostat", + [ctypes.c_int32, ctypes.c_uint32, ctypes.c_float, ctypes.c_float, ctypes.c_int32], + llama_sampler_p_ctypes, +) +def llama_sampler_init_mirostat( + n_vocab: int, seed: int, tau: float, eta: float, m: int, / +) -> llama_sampler_p: + ... + + +# /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. +# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. +# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. +# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. +# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. +# LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2( +# uint32_t seed, +# float tau, +# float eta); +@ctypes_function( + "llama_sampler_init_mirostat_v2", + [ctypes.c_uint32, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_mirostat_v2( + seed: int, tau: float, eta: float, / +) -> llama_sampler_p: + ... + + +# /// @details Intializes a GBNF grammar, see grammars/README.md for details. +# /// @param vocab The vocabulary that this grammar will be used with. +# /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if empty. Returns NULL if parsing of grammar_str fails. +# /// @param grammar_root The name of the start symbol for the grammar. 
+# LLAMA_API struct llama_sampler * llama_sampler_init_grammar( +# const struct llama_vocab * vocab, +# const char * grammar_str, +# const char * grammar_root); +@ctypes_function( + "llama_sampler_init_grammar", + [llama_vocab_p_ctypes, ctypes.c_char_p, ctypes.c_char_p], + llama_sampler_p_ctypes, +) +def llama_sampler_init_grammar( + vocab: llama_vocab_p, grammar_str: bytes, grammar_root: bytes, / +) -> llama_sampler_p: + ... + + +# /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639 +# /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. Pattern will be matched from the start of the generation output, and grammar sampler will be fed content starting from its first match group. +# /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. Grammar sampler will be fed content starting from the trigger token included. +# LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns( +# const struct llama_vocab * vocab, +# const char * grammar_str, +# const char * grammar_root, +# const char ** trigger_patterns, +# size_t num_trigger_patterns, +# const llama_token * trigger_tokens, +# size_t num_trigger_tokens); +@ctypes_function( + "llama_sampler_init_grammar_lazy_patterns", + [ + llama_vocab_p_ctypes, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ctypes.POINTER(llama_token), + ctypes.c_size_t, + ], + llama_sampler_p_ctypes, +) +def llama_sampler_init_grammar_lazy_patterns( + vocab: llama_vocab_p, + grammar_str: bytes, + grammar_root: bytes, + trigger_patterns: CtypesArray[bytes], + num_trigger_patterns: int, + trigger_tokens: CtypesArray[llama_token], + num_trigger_tokens: int, + /, +) -> llama_sampler_p: + ... + + +# /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. +# LLAMA_API struct llama_sampler * llama_sampler_init_penalties( +# int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) +# float penalty_repeat, // 1.0 = disabled +# float penalty_freq, // 0.0 = disabled +# float penalty_present); // 0.0 = disabled +@ctypes_function( + "llama_sampler_init_penalties", + [ctypes.c_int32, ctypes.c_float, ctypes.c_float, ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_penalties( + penalty_last_n: int, + penalty_repeat: float, + penalty_freq: float, + penalty_present: float, + /, +) -> llama_sampler_p: + ... 
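A sketch of a grammar-constrained generation loop that combines the samplers bound above, following the usage outline in the Sampling API comment. It assumes `ctx`, `vocab`, and an already-tokenized `prompt_tokens` list exist, uses an illustrative seed, token budget, and yes/no grammar, and relies on `llama_sampler_sample` (bound a few declarations further down), which per its shorthand comment also accepts the sampled token:

```python
import ctypes
from llama_cpp import llama_cpp as C  # assumed module path for these bindings

chain = C.llama_sampler_chain_init(C.llama_sampler_chain_default_params())  # params fn assumed bound earlier
C.llama_sampler_chain_add(chain, C.llama_sampler_init_grammar(vocab, b'root ::= "yes" | "no"', b"root"))
C.llama_sampler_chain_add(chain, C.llama_sampler_init_top_k(40))
C.llama_sampler_chain_add(chain, C.llama_sampler_init_temp(0.8))
C.llama_sampler_chain_add(chain, C.llama_sampler_init_dist(1234))  # hypothetical seed

# evaluate the prompt in one batch
toks = (C.llama_token * len(prompt_tokens))(*prompt_tokens)
if C.llama_decode(ctx, C.llama_batch_get_one(toks, len(toks))) != 0:
    raise RuntimeError("llama_decode failed on the prompt")

piece_buf = ctypes.create_string_buffer(64)
for _ in range(16):  # generate at most 16 tokens
    tok = C.llama_sampler_sample(chain, ctx, -1)  # samples from the last logits and accepts the token
    if C.llama_vocab_is_eog(vocab, tok):
        break
    n = C.llama_token_to_piece(vocab, tok, piece_buf, len(piece_buf), 0, False)
    print(piece_buf.raw[:n].decode("utf-8", errors="ignore"), end="")
    one = (C.llama_token * 1)(tok)
    if C.llama_decode(ctx, C.llama_batch_get_one(one, 1)) != 0:
        break

C.llama_sampler_free(chain)  # also frees the samplers the chain owns
```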
+ + +# /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 +# LLAMA_API struct llama_sampler * llama_sampler_init_dry( +# const struct llama_vocab * vocab, +# int32_t n_ctx_train, +# float dry_multiplier, +# float dry_base, +# int32_t dry_allowed_length, +# int32_t dry_penalty_last_n, +# const char ** seq_breakers, +# size_t num_breakers); +@ctypes_function( + "llama_sampler_init_dry", + [ + llama_vocab_p_ctypes, + ctypes.c_int32, + ctypes.c_float, + ctypes.c_float, + ctypes.c_int32, + ctypes.c_int32, + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ], + llama_sampler_p_ctypes, +) +def llama_sampler_init_dry( + vocab: llama_vocab_p, + n_ctx_train: int, + dry_multiplier: float, + dry_base: float, + dry_allowed_length: int, + dry_penalty_last_n: int, + seq_breakers, + num_breakers: int, + /, +) -> llama_sampler_p: + ... + + +# LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias( +# int32_t n_vocab, +# int32_t n_logit_bias, +# const llama_logit_bias * logit_bias); +@ctypes_function( + "llama_sampler_init_logit_bias", + [ctypes.c_int32, ctypes.c_int32, llama_logit_bias_p], + llama_sampler_p_ctypes, +) +def llama_sampler_init_logit_bias( + n_vocab: int, n_logit_bias: int, logit_bias: CtypesArray[llama_logit_bias], / +) -> llama_sampler_p: + ... + + +# // this sampler is meant to be used for fill-in-the-middle infilling +# // it's supposed to be used after top_k + top_p sampling +# // +# // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG +# // 2. combine probs of tokens that have the same prefix +# // +# // example: +# // +# // - before: +# // "hel": 0.5 +# // "hell": 0.2 +# // "hello": 0.1 +# // "dummy": 0.1 +# // +# // - after: +# // "hel": 0.8 +# // "dummy": 0.1 +# // +# // 3. discard non-EOG tokens with low prob +# // 4. if no tokens are left -> pick EOT +# // +# LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab); +@ctypes_function( + "llama_sampler_init_infill", + [llama_vocab_p_ctypes], + llama_sampler_p_ctypes, +) +def llama_sampler_init_infill(vocab: llama_vocab_p, /) -> llama_sampler_p: + ... + + +# // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise +# LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); +@ctypes_function( + "llama_sampler_get_seed", + [llama_sampler_p_ctypes], + ctypes.c_uint32, +) +def llama_sampler_get_seed(smpl: llama_sampler_p, /) -> int: + ... + + +# /// @details Sample and accept a token from the idx-th output of the last evaluation +# // +# // Shorthand for: +# // const auto * logits = llama_get_logits_ith(ctx, idx); +# // llama_token_data_array cur_p = { ... init from logits ... }; +# // llama_sampler_apply(smpl, &cur_p); +# // auto token = cur_p.data[cur_p.selected].id; +# // llama_sampler_accept(smpl, token); +# // return token; +# // Returns the sampled token +# LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx); +@ctypes_function( + "llama_sampler_sample", + [llama_sampler_p_ctypes, llama_context_p_ctypes, ctypes.c_int32], + llama_token, +) +def llama_sampler_sample( + smpl: llama_sampler_p, ctx: llama_context_p, idx: int, / +) -> int: + ... + + +# // +# // Model split +# // + + +# /// @details Build a split GGUF final path for this chunk. 
+# /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf" +# // Returns the split_path length. +# LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count); +@ctypes_function( + "llama_split_path", + [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int], + ctypes.c_int, +) +def llama_split_path( + split_path: bytes, + maxlen: Union[ctypes.c_size_t, int], + path_prefix: bytes, + split_no: Union[ctypes.c_int, int], + split_count: Union[ctypes.c_int, int], + /, +) -> int: + """Build a split GGUF final path for this chunk.""" + ... + + +# /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match. +# /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0" +# // Returns the split_prefix length. +# LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count); +@ctypes_function( + "llama_split_prefix", + [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int], + ctypes.c_int, +) +def llama_split_prefix( + split_prefix: bytes, + maxlen: Union[ctypes.c_size_t, int], + split_path: bytes, + split_no: Union[ctypes.c_int, int], + split_count: Union[ctypes.c_int, int], + /, +) -> int: + """Extract the path prefix from the split_path if and only if the split_no and split_count match.""" + ... + + +# // Print system information +# LLAMA_API const char * llama_print_system_info(void); +@ctypes_function("llama_print_system_info", [], ctypes.c_char_p) +def llama_print_system_info() -> bytes: + ... + + +# // Set callback for all future logging events. +# // If this is not called, or NULL is supplied, everything is output on stderr. +# LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); +@ctypes_function( + "llama_log_set", + [ctypes.c_void_p, ctypes.c_void_p], + None, +) +def llama_log_set( + log_callback: Optional[CtypesFuncPointer], + user_data: ctypes.c_void_p, + /, +): + """Set callback for all future logging events. + + If this is not called, or NULL is supplied, everything is output on stderr.""" + ... + + +# // +# // Performance utils +# // +# // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements. +# // + + +# struct llama_perf_context_data { +# double t_start_ms; +# double t_load_ms; +# double t_p_eval_ms; +# double t_eval_ms; +# +# int32_t n_p_eval; +# int32_t n_eval; +# }; +class llama_perf_context_data(ctypes.Structure): + _fields_ = [ + ("t_start_ms", ctypes.c_double), + ("t_load_ms", ctypes.c_double), + ("t_p_eval_ms", ctypes.c_double), + ("t_eval_ms", ctypes.c_double), + ("n_p_eval", ctypes.c_int32), + ("n_eval", ctypes.c_int32), + ] + + +# struct llama_perf_sampler_data { +# double t_sample_ms; +# +# int32_t n_sample; +# }; +class llama_perf_sampler_data(ctypes.Structure): + _fields_ = [ + ("t_sample_ms", ctypes.c_double), + ("n_sample", ctypes.c_int32), + ] + + +# LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx); +@ctypes_function( + "llama_perf_context", + [llama_context_p_ctypes], + llama_perf_context_data, +) +def llama_perf_context(ctx: llama_context_p, /) -> llama_perf_context_data: + ... 
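The split-path helpers are easiest to read from a quick call sketch. The expected outputs below come from the header comments above; the only assumptions are the usual top-level `import llama_cpp` re-export and the use of a mutable `ctypes` buffer so the C side has somewhere to write.

```python
# Usage sketch for llama_split_path / llama_split_prefix. A char buffer is passed
# where the binding declares c_char_p so the C function can write into it.
import ctypes
import llama_cpp

buf = ctypes.create_string_buffer(256)
llama_cpp.llama_split_path(buf, ctypes.sizeof(buf), b"/models/ggml-model-q4_0", 2, 4)
print(buf.value)  # b"/models/ggml-model-q4_0-00002-of-00004.gguf"

prefix = ctypes.create_string_buffer(256)
llama_cpp.llama_split_prefix(prefix, ctypes.sizeof(prefix), buf.value, 2, 4)
print(prefix.value)  # b"/models/ggml-model-q4_0"
```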
+ + +# LLAMA_API void llama_perf_context_print(const struct llama_context * ctx); +@ctypes_function( + "llama_perf_context_print", + [llama_context_p_ctypes], + None, +) +def llama_perf_context_print(ctx: llama_context_p, /): + ... + + +# LLAMA_API void llama_perf_context_reset( struct llama_context * ctx); +@ctypes_function( + "llama_perf_context_reset", + [llama_context_p_ctypes], + None, +) +def llama_perf_context_reset(ctx: llama_context_p, /): + ... + + +# // NOTE: the following work only with samplers constructed via llama_sampler_chain_init +# LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler", + [llama_sampler_p_ctypes], + llama_perf_sampler_data, +) +def llama_perf_sampler(chain: llama_sampler_p, /) -> llama_perf_sampler_data: + ... + + +# LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler_print", + [llama_sampler_p_ctypes], + None, +) +def llama_perf_sampler_print(chain: llama_sampler_p, /): + ... + + +# LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain); +@ctypes_function( + "llama_perf_sampler_reset", + [llama_sampler_p_ctypes], + None, +) +def llama_perf_sampler_reset(chain: llama_sampler_p, /): + ... -_lib.llama_print_system_info.argtypes = [] -_lib.llama_print_system_info.restype = c_char_p diff --git a/llama_cpp/llama_grammar.py b/llama_cpp/llama_grammar.py new file mode 100644 index 000000000..b95c77ab5 --- /dev/null +++ b/llama_cpp/llama_grammar.py @@ -0,0 +1,953 @@ +"""Python implementation of llama grammar parser directly translated from C++ source file in vendor/llama.cpp/common/grammar-parser.cpp.""" + +# flake8: noqa +from pathlib import Path + +from itertools import groupby +from typing import ( + Any, + Set, + List, + Optional, + Tuple, + Union, +) + +LLAMA_GRAMMAR_DEFAULT_ROOT = "root" + + +class LlamaGrammar: + def __init__(self, *args, _grammar: str, **kwargs): + self._grammar = _grammar + self._root = LLAMA_GRAMMAR_DEFAULT_ROOT + + @classmethod + def from_string(cls, grammar: str, verbose: bool = True) -> "LlamaGrammar": + return cls(_grammar=grammar) + + @classmethod + def from_file(cls, file: Union[str, Path], verbose: bool = True) -> "LlamaGrammar": + try: + with open(file) as f: + grammar = f.read() + except Exception as err: + raise Exception( + f"{cls.from_file.__name__}: error reading grammar file: {err}" + ) + + if grammar: + return cls.from_string(grammar, verbose=verbose) + + raise ValueError( + f"{cls.from_file.__name__}: error parsing grammar file: params_grammer is empty" + ) + + @classmethod + def from_json_schema(cls, json_schema: str, verbose: bool = True) -> "LlamaGrammar": + return cls.from_string(json_schema_to_gbnf(json_schema), verbose=verbose) + + +"""llama.cpp gbnf rules from vendor/llama.cpp/grammars""" + +ARITHMETIC_GBNF = r""" +root ::= (expr "=" ws term "\n")+ +expr ::= term ([-+*/] term)* +term ::= ident | num | "(" ws expr ")" ws +ident ::= [a-z] [a-z0-9_]* ws +num ::= [0-9]+ ws +ws ::= [ \t\n]* +""" + +C_GBNF = r""" +root ::= (declaration)* + +declaration ::= dataType identifier "(" parameter? ")" "{" statement* "}" + +dataType ::= "int" ws | "float" ws | "char" ws +identifier ::= [a-zA-Z_] [a-zA-Z_0-9]* + +parameter ::= dataType identifier + +statement ::= + ( dataType identifier ws "=" ws expression ";" ) | + ( identifier ws "=" ws expression ";" ) | + ( identifier ws "(" argList? 
")" ";" ) | + ( "return" ws expression ";" ) | + ( "while" "(" condition ")" "{" statement* "}" ) | + ( "for" "(" forInit ";" ws condition ";" ws forUpdate ")" "{" statement* "}" ) | + ( "if" "(" condition ")" "{" statement* "}" ("else" "{" statement* "}")? ) | + ( singleLineComment ) | + ( multiLineComment ) + +forInit ::= dataType identifier ws "=" ws expression | identifier ws "=" ws expression +forUpdate ::= identifier ws "=" ws expression + +condition ::= expression relationOperator expression +relationOperator ::= ("<=" | "<" | "==" | "!=" | ">=" | ">") + +expression ::= term (("+" | "-") term)* +term ::= factor(("*" | "/") factor)* + +factor ::= identifier | number | unaryTerm | funcCall | parenExpression +unaryTerm ::= "-" factor +funcCall ::= identifier "(" argList? ")" +parenExpression ::= "(" ws expression ws ")" + +argList ::= expression ("," ws expression)* + +number ::= [0-9]+ + +singleLineComment ::= "//" [^\n]* "\n" +multiLineComment ::= "/*" ( [^*] | ("*" [^/]) )* "*/" + +ws ::= ([ \t\n]+) +""" + +CHESS_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + +JAPANESE_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + +JSON_ARR_GBNF = r""" +# This is the same as json.gbnf but we restrict whitespaces at the end of the root array +# Useful for generating JSON arrays + +root ::= arr +value ::= object | array | string | number | ("true" | "false" | "null") ws + +arr ::= + "[\n" ws ( + value + (",\n" ws value)* + )? "]" + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\\x7F\x00-\x1F] | + "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= ([ \t\n] ws)? +""" + + +JSON_GBNF = r""" +root ::= object +value ::= object | array | string | number | ("true" | "false" | "null") ws + +object ::= + "{" ws ( + string ":" ws value + ("," ws string ":" ws value)* + )? "}" ws + +array ::= + "[" ws ( + value + ("," ws value)* + )? "]" ws + +string ::= + "\"" ( + [^"\\\x7F\x00-\x1F] | + "\\" (["\\bfnrt] | "u" [0-9a-fA-F]{4}) # escapes + )* "\"" ws + +number ::= ("-"? ([0-9] | [1-9] [0-9]{0,15})) ("." [0-9]+)? ([eE] [-+]? [0-9] [1-9]{0,15})? 
ws + +# Optional space: by convention, applied in this grammar after literal chars when allowed +ws ::= | " " | "\n" [ \t]{0,20} +""" + +LIST_GBNF = r""" +root ::= item+ + +# Excludes various line break characters +item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n" +""" + +"""llama.cpp json-schema to grammar converter from vendor/llama.cpp/examples/json-schema-to-grammar.py""" +import json +import re +from typing import List, Optional + +# whitespace is constrained to a single space char to prevent model "running away" in +# whitespace. Also maybe improves generation quality? +SPACE_RULE = '" "?' + + +INVALID_RULE_CHARS_RE = re.compile(r"[^a-zA-Z0-9-]+") +GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]') +GRAMMAR_LITERAL_ESCAPES = {"\r": "\\r", "\n": "\\n", '"': '\\"'} + +# whitespace is constrained to a single space char to prevent model "running away" in +# whitespace. Also maybe improves generation quality? +SPACE_RULE = '" "?' + + +def _build_repetition( + item_rule, min_items, max_items, separator_rule=None, item_rule_is_literal=False +): + if not separator_rule: + if min_items == 0 and max_items == 1: + return f"{item_rule}?" + elif min_items == 1 and max_items is None: + return f"{item_rule}+" + + result = "" + + if min_items > 0: + if item_rule_is_literal and separator_rule is None: + result = '"' + (item_rule[1:-1] * min_items) + '"' + else: + result = (f" {separator_rule} " if separator_rule else " ").join( + [item_rule] * min_items + ) + + def opt_repetitions(up_to_n, prefix_with_sep=False): + """ + - n=4, no sep: '(a (a (a (a)?)?)?)?' + - n=4, sep=',', prefix: '("," a ("," a ("," a ("," a)?)?)?)?' + - n=4, sep=',', no prefix: '(a ("," a ("," a ("," a)?)?)?)?' + """ + + content = ( + f"{separator_rule} {item_rule}" + if prefix_with_sep and separator_rule + else item_rule + ) + if up_to_n == 0: + return "" + elif up_to_n == 1: + return f"({content})?" + elif separator_rule and not prefix_with_sep: + return f"({content} {opt_repetitions(up_to_n - 1, prefix_with_sep=True)})?" + else: + return (f"({content} " * up_to_n).rstrip() + (")?" * up_to_n) + + if min_items > 0 and max_items != min_items: + result += " " + + if max_items is not None: + result += opt_repetitions(max_items - min_items, prefix_with_sep=min_items > 0) + else: + item_operator = f'({separator_rule + " " if separator_rule else ""}{item_rule})' + + if min_items == 0 and separator_rule: + result = f"({item_rule} {item_operator}*)?" + else: + result += f"{item_operator}*" + + return result + + +class BuiltinRule: + def __init__(self, content: str, deps: list = None): + self.content = content + self.deps = deps or [] + + +_up_to_15_digits = _build_repetition("[0-9]", 0, 15) + +PRIMITIVE_RULES = { + "boolean": BuiltinRule('("true" | "false") space', []), + "decimal-part": BuiltinRule("[0-9] " + _up_to_15_digits, []), + "integral-part": BuiltinRule("[0-9] | [1-9] " + _up_to_15_digits, []), + "number": BuiltinRule( + '("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', + ["integral-part", "decimal-part"], + ), + "integer": BuiltinRule('("-"? integral-part) space', ["integral-part"]), + "value": BuiltinRule( + "object | array | string | number | boolean | null", + ["object", "array", "string", "number", "boolean", "null"], + ), + "object": BuiltinRule( + '"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', + ["string", "value"], + ), + "array": BuiltinRule( + '"[" space ( value ("," space value)* )? 
"]" space', ["value"] + ), + "uuid": BuiltinRule( + r'"\"" ' + + ' "-" '.join("[0-9a-fA-F]" * n for n in [8, 4, 4, 4, 12]) + + r' "\"" space', + [], + ), + "char": BuiltinRule( + r'[^"\\] | "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F])', + [], + ), + "string": BuiltinRule(r'"\"" char* "\"" space', ["char"]), + "null": BuiltinRule('"null" space', []), +} + +# TODO: support "uri", "email" string formats +STRING_FORMAT_RULES = { + "date": BuiltinRule( + '[0-9] [0-9] [0-9] [0-9] "-" ( "0" [1-9] | "1" [0-2] ) "-" ( "0" [1-9] | [1-2] [0-9] | "3" [0-1] )', + [], + ), + "time": BuiltinRule( + '([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9] [0-9] [0-9] )? ( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', + [], + ), + "date-time": BuiltinRule('date "T" time', ["date", "time"]), + "date-string": BuiltinRule('"\\"" date "\\"" space', ["date"]), + "time-string": BuiltinRule('"\\"" time "\\"" space', ["time"]), + "date-time-string": BuiltinRule('"\\"" date-time "\\"" space', ["date-time"]), +} + +DOTALL = "[\\U00000000-\\U0010FFFF]" +DOT = "[^\\x0A\\x0D]" + +RESERVED_NAMES = set( + ["root", "dot", *PRIMITIVE_RULES.keys(), *STRING_FORMAT_RULES.keys()] +) + + +NON_LITERAL_SET = set("|.()[]{}*+?") +ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set("[]()|{}*+?") + + +class SchemaConverter: + def __init__(self, *, prop_order, allow_fetch, dotall, raw_pattern): + self._prop_order = prop_order + self._allow_fetch = allow_fetch + self._dotall = dotall + self._raw_pattern = raw_pattern + self._rules = { + "space": SPACE_RULE, + } + self._refs = {} + self._refs_being_resolved = set() + + def _format_literal(self, literal): + escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub( + lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)), literal + ) + return f'"{escaped}"' + + def not_literal( + self, literal: str, dotall: bool = True, maybe_escaped_underscores=False + ) -> str: + """ + not_literal('a') -> '[^a]' + not_literal('abc') -> '([^a] | "a" ([^b] | "b" ([^c])?)?)?' + """ + assert len(literal) > 0, "Empty literal not supported" + + def recurse(i: int): + c = literal[i] + if maybe_escaped_underscores and c == "_": + yield f"[^{c}\\\\]" + yield " | " + yield f'"\\\\"? "{c}"' + else: + yield f"[^{c}]" + if i < len(literal) - 1: + yield " | " + yield self._format_literal(c) + yield " (" + yield from recurse(i + 1) + yield ")?" + + return "".join(("(", *recurse(0), ")")) + + def _add_rule(self, name, rule): + esc_name = INVALID_RULE_CHARS_RE.sub("-", name) + if esc_name not in self._rules or self._rules[esc_name] == rule: + key = esc_name + else: + i = 0 + while ( + f"{esc_name}{i}" in self._rules + and self._rules[f"{esc_name}{i}"] != rule + ): + i += 1 + key = f"{esc_name}{i}" + self._rules[key] = rule + return key + + def resolve_refs(self, schema: dict, url: str): + """ + Resolves all $ref fields in the given schema, fetching any remote schemas, + replacing $ref with absolute reference URL and populating self._refs with the + respective referenced (sub)schema dictionaries. 
+ """ + + def visit(n: dict): + if isinstance(n, list): + return [visit(x) for x in n] + elif isinstance(n, dict): + ref = n.get("$ref") + if ref is not None and ref not in self._refs: + if ref.startswith("https://"): + assert ( + self._allow_fetch + ), "Fetching remote schemas is not allowed (use --allow-fetch for force)" + import requests + + frag_split = ref.split("#") + base_url = frag_split[0] + + target = self._refs.get(base_url) + if target is None: + target = self.resolve_refs( + requests.get(ref).json(), base_url + ) + self._refs[base_url] = target + + if len(frag_split) == 1 or frag_split[-1] == "": + return target + elif ref.startswith("#/"): + target = schema + ref = f"{url}{ref}" + n["$ref"] = ref + else: + raise ValueError(f"Unsupported ref {ref}") + + for sel in ref.split("#")[-1].split("/")[1:]: + assert ( + target is not None and sel in target + ), f"Error resolving ref {ref}: {sel} not in {target}" + target = target[sel] + + self._refs[ref] = target + else: + for v in n.values(): + visit(v) + + return n + + return visit(schema) + + def _generate_union_rule(self, name, alt_schemas): + return " | ".join( + ( + self.visit(alt_schema, f'{name}{"-" if name else "alternative-"}{i}') + for i, alt_schema in enumerate(alt_schemas) + ) + ) + + def _visit_pattern(self, pattern, name): + """ + Transforms a regular expression pattern into a GBNF rule. + + Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions + Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + + Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers. + + Mostly a 1:1 translation, except for {x} / {x,} / {x,y} quantifiers for which + we define sub-rules to keep the output lean. + """ + + assert pattern.startswith("^") and pattern.endswith( + "$" + ), 'Pattern must start with "^" and end with "$"' + pattern = pattern[1:-1] + sub_rule_ids = {} + + i = 0 + length = len(pattern) + + def to_rule(s: Tuple[str, bool]) -> str: + (txt, is_literal) = s + return '"' + txt + '"' if is_literal else txt + + def transform() -> Tuple[str, bool]: + """ + Parse a unit at index i (advancing it), and return its string representation + whether it's a literal. + """ + nonlocal i + nonlocal pattern + nonlocal sub_rule_ids + + start = i + # For each component of this sequence, store its string representation and whether it's a literal. + # We only need a flat structure here to apply repetition operators to the last item, and + # to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially + # (GBNF's syntax is luckily very close to regular expressions!) + seq: list[Tuple[str, bool]] = [] + + def get_dot(): + if self._dotall: + rule = DOTALL + else: + # Accept any character... except \n and \r line break chars (\x0A and \xOD) + rule = DOT + return self._add_rule(f"dot", rule) + + def join_seq(): + nonlocal seq + ret = [] + for is_literal, g in groupby(seq, lambda x: x[1]): + if is_literal: + ret.append(("".join(x[0] for x in g), True)) + else: + ret.extend(g) + if len(ret) == 1: + return ret[0] + return (" ".join(to_rule(x) for x in seq), False) + + while i < length: + c = pattern[i] + if c == ".": + seq.append((get_dot(), False)) + i += 1 + elif c == "(": + i += 1 + if i < length: + assert ( + pattern[i] != "?" 
+ ), f'Unsupported pattern syntax "{pattern[i]}" at index {i} of /{pattern}/' + seq.append((f"({to_rule(transform())})", False)) + elif c == ")": + i += 1 + assert ( + start > 0 and pattern[start - 1] == "(" + ), f"Unbalanced parentheses; start = {start}, i = {i}, pattern = {pattern}" + return join_seq() + elif c == "[": + square_brackets = c + i += 1 + while i < length and pattern[i] != "]": + if pattern[i] == "\\": + square_brackets += pattern[i : i + 2] + i += 2 + else: + square_brackets += pattern[i] + i += 1 + assert ( + i < length + ), f"Unbalanced square brackets; start = {start}, i = {i}, pattern = {pattern}" + square_brackets += "]" + i += 1 + seq.append((square_brackets, False)) + elif c == "|": + seq.append(("|", False)) + i += 1 + elif c in ("*", "+", "?"): + seq[-1] = (to_rule(seq[-1]) + c, False) + i += 1 + elif c == "{": + curly_brackets = c + i += 1 + while i < length and pattern[i] != "}": + curly_brackets += pattern[i] + i += 1 + assert ( + i < length + ), f"Unbalanced curly brackets; start = {start}, i = {i}, pattern = {pattern}" + curly_brackets += "}" + i += 1 + nums = [s.strip() for s in curly_brackets[1:-1].split(",")] + min_times = 0 + max_times = None + try: + if len(nums) == 1: + min_times = int(nums[0]) + max_times = min_times + else: + assert len(nums) == 2 + min_times = int(nums[0]) if nums[0] else 0 + max_times = int(nums[1]) if nums[1] else None + except ValueError: + raise ValueError( + f"Invalid quantifier {curly_brackets} in /{pattern}/" + ) + + (sub, sub_is_literal) = seq[-1] + + if not sub_is_literal: + id = sub_rule_ids.get(sub) + if id is None: + id = self._add_rule(f"{name}-{len(sub_rule_ids) + 1}", sub) + sub_rule_ids[sub] = id + sub = id + + seq[-1] = ( + _build_repetition( + f'"{sub}"' if sub_is_literal else sub, + min_times, + max_times, + item_rule_is_literal=sub_is_literal, + ), + False, + ) + else: + literal = "" + while i < length: + if pattern[i] == "\\" and i < length - 1: + next = pattern[i + 1] + if next in ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS: + i += 1 + literal += pattern[i] + i += 1 + else: + literal += pattern[i : i + 2] + i += 2 + elif pattern[i] == '"' and not self._raw_pattern: + literal += '\\"' + i += 1 + elif pattern[i] not in NON_LITERAL_SET and ( + i == length - 1 + or literal == "" + or pattern[i + 1] == "." 
+ or pattern[i + 1] not in NON_LITERAL_SET + ): + literal += pattern[i] + i += 1 + else: + break + if literal: + seq.append((literal, True)) + + return join_seq() + + return self._add_rule( + name, + ( + to_rule(transform()) + if self._raw_pattern + else '"\\"" ' + to_rule(transform()) + ' "\\"" space' + ), + ) + + def _resolve_ref(self, ref): + ref_name = ref.split("/")[-1] + if ref_name not in self._rules and ref not in self._refs_being_resolved: + self._refs_being_resolved.add(ref) + resolved = self._refs[ref] + ref_name = self.visit(resolved, ref_name) + self._refs_being_resolved.remove(ref) + return ref_name + + def _generate_constant_rule(self, value): + return self._format_literal(json.dumps(value)) + + def visit(self, schema, name): + schema_type = schema.get("type") + schema_format = schema.get("format") + rule_name = name + "-" if name in RESERVED_NAMES else name or "root" + + if (ref := schema.get("$ref")) is not None: + return self._add_rule(rule_name, self._resolve_ref(ref)) + + elif "oneOf" in schema or "anyOf" in schema: + return self._add_rule( + rule_name, + self._generate_union_rule(name, schema.get("oneOf") or schema["anyOf"]), + ) + + elif isinstance(schema_type, list): + return self._add_rule( + rule_name, + self._generate_union_rule(name, [{"type": t} for t in schema_type]), + ) + + elif "const" in schema: + return self._add_rule( + rule_name, self._generate_constant_rule(schema["const"]) + ) + + elif "enum" in schema: + rule = " | ".join((self._generate_constant_rule(v) for v in schema["enum"])) + return self._add_rule(rule_name, rule) + + elif schema_type in (None, "object") and ( + "properties" in schema + or ( + "additionalProperties" in schema + and schema["additionalProperties"] is not True + ) + ): + required = set(schema.get("required", [])) + properties = list(schema.get("properties", {}).items()) + return self._add_rule( + rule_name, + self._build_object_rule( + properties, required, name, schema.get("additionalProperties") + ), + ) + + elif schema_type in (None, "object") and "allOf" in schema: + required = set() + properties = [] + hybrid_name = name + + def add_component(comp_schema, is_required): + if (ref := comp_schema.get("$ref")) is not None: + comp_schema = self._refs[ref] + + if "properties" in comp_schema: + for prop_name, prop_schema in comp_schema["properties"].items(): + properties.append((prop_name, prop_schema)) + if is_required: + required.add(prop_name) + + for t in schema["allOf"]: + if "anyOf" in t: + for tt in t["anyOf"]: + add_component(tt, is_required=False) + else: + add_component(t, is_required=True) + + return self._add_rule( + rule_name, + self._build_object_rule( + properties, required, hybrid_name, additional_properties=[] + ), + ) + + elif schema_type in (None, "array") and ( + "items" in schema or "prefixItems" in schema + ): + items = schema.get("items") or schema["prefixItems"] + if isinstance(items, list): + return self._add_rule( + rule_name, + '"[" space ' + + ' "," space '.join( + self.visit(item, f'{name}{"-" if name else ""}tuple-{i}') + for i, item in enumerate(items) + ) + + ' "]" space', + ) + else: + item_rule_name = self.visit(items, f'{name}{"-" if name else ""}item') + min_items = schema.get("minItems", 0) + max_items = schema.get("maxItems") + return self._add_rule( + rule_name, + '"[" space ' + + _build_repetition( + item_rule_name, min_items, max_items, separator_rule='"," space' + ) + + ' "]" space', + ) + + elif schema_type in (None, "string") and "pattern" in schema: + return 
self._visit_pattern(schema["pattern"], rule_name) + + elif schema_type in (None, "string") and re.match( + r"^uuid[1-5]?$", schema_format or "" + ): + return self._add_primitive( + "root" if rule_name == "root" else schema_format, + PRIMITIVE_RULES["uuid"], + ) + + elif ( + schema_type in (None, "string") + and f"{schema_format}-string" in STRING_FORMAT_RULES + ): + prim_name = f"{schema_format}-string" + return self._add_rule( + rule_name, + self._add_primitive(prim_name, STRING_FORMAT_RULES[prim_name]), + ) + + elif schema_type == "string" and ( + "minLength" in schema or "maxLength" in schema + ): + char_rule = self._add_primitive("char", PRIMITIVE_RULES["char"]) + min_len = schema.get("minLength", 0) + max_len = schema.get("maxLength") + + return self._add_rule( + rule_name, + r'"\"" ' + + _build_repetition(char_rule, min_len, max_len) + + r' "\"" space', + ) + + elif (schema_type == "object") or (len(schema) == 0): + return self._add_rule( + rule_name, self._add_primitive("object", PRIMITIVE_RULES["object"]) + ) + + else: + assert schema_type in PRIMITIVE_RULES, f"Unrecognized schema: {schema}" + # TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero + return self._add_primitive( + "root" if rule_name == "root" else schema_type, + PRIMITIVE_RULES[schema_type], + ) + + def _add_primitive(self, name: str, rule: BuiltinRule): + n = self._add_rule(name, rule.content) + + for dep in rule.deps: + dep_rule = PRIMITIVE_RULES.get(dep) or STRING_FORMAT_RULES.get(dep) + assert dep_rule, f"Rule {dep} not known" + if dep not in self._rules: + self._add_primitive(dep, dep_rule) + return n + + def _build_object_rule( + self, + properties: List[Tuple[str, Any]], + required: Set[str], + name: str, + additional_properties: Union[bool, Any], + ): + prop_order = self._prop_order + # sort by position in prop_order (if specified) then by original order + sorted_props = [ + kv[0] + for _, kv in sorted( + enumerate(properties), + key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]), + ) + ] + + prop_kv_rule_names = {} + for prop_name, prop_schema in properties: + prop_rule_name = self.visit( + prop_schema, f'{name}{"-" if name else ""}{prop_name}' + ) + prop_kv_rule_names[prop_name] = self._add_rule( + f'{name}{"-" if name else ""}{prop_name}-kv', + rf'{self._format_literal(json.dumps(prop_name))} space ":" space {prop_rule_name}', + ) + required_props = [k for k in sorted_props if k in required] + optional_props = [k for k in sorted_props if k not in required] + + if additional_properties == True or isinstance(additional_properties, dict): + sub_name = f'{name}{"-" if name else ""}additional' + value_rule = self.visit( + {} if additional_properties == True else additional_properties, + f"{sub_name}-value", + ) + prop_kv_rule_names["*"] = self._add_rule( + f"{sub_name}-kv", + self._add_primitive("string", PRIMITIVE_RULES["string"]) + + f' ":" space {value_rule}', + ) + optional_props.append("*") + + rule = '"{" space ' + rule += ' "," space '.join(prop_kv_rule_names[k] for k in required_props) + + if optional_props: + rule += " (" + if required_props: + rule += ' "," space ( ' + + def get_recursive_refs(ks, first_is_optional): + [k, *rest] = ks + kv_rule_name = prop_kv_rule_names[k] + if k == "*": + res = self._add_rule( + f'{name}{"-" if name else ""}additional-kvs', + f'{kv_rule_name} ( "," space ' + kv_rule_name + " )*", + ) + elif first_is_optional: + res = f'( "," space {kv_rule_name} )?' 
+ else: + res = kv_rule_name + if len(rest) > 0: + res += " " + self._add_rule( + f'{name}{"-" if name else ""}{k}-rest', + get_recursive_refs(rest, first_is_optional=True), + ) + return res + + rule += " | ".join( + get_recursive_refs(optional_props[i:], first_is_optional=False) + for i in range(len(optional_props)) + ) + if required_props: + rule += " )" + rule += " )?" + + rule += ' "}" space' + + return rule + + def format_grammar(self): + return "\n".join( + f"{name} ::= {rule}" + for name, rule in sorted(self._rules.items(), key=lambda kv: kv[0]) + ) + + +def json_schema_to_gbnf(schema: str, prop_order: Optional[List[str]] = None): + prop_order = prop_order or [] + schema = json.loads(schema) + prop_order = {name: idx for idx, name in enumerate(prop_order)} + converter = SchemaConverter( + prop_order=prop_order, allow_fetch=False, dotall=False, raw_pattern=False + ) + schema = converter.resolve_refs(schema, "stdin") + converter.visit(schema, "") + return converter.format_grammar() diff --git a/llama_cpp/llama_speculative.py b/llama_cpp/llama_speculative.py new file mode 100644 index 000000000..39dfb903b --- /dev/null +++ b/llama_cpp/llama_speculative.py @@ -0,0 +1,64 @@ +import abc + +from typing import Any + +import numpy as np +import numpy.typing as npt + + +class LlamaDraftModel(abc.ABC): + @abc.abstractmethod + def __call__( + self, input_ids: npt.NDArray[np.intc], /, **kwargs: Any + ) -> npt.NDArray[np.intc]: + raise NotImplementedError() + + +class LlamaPromptLookupDecoding(LlamaDraftModel): + """Based on https://github.com/apoorvumang/prompt-lookup-decoding""" + + def __init__(self, max_ngram_size: int = 2, num_pred_tokens: int = 10): + self.max_ngram_size = max_ngram_size + self.num_pred_tokens = num_pred_tokens + + @staticmethod + def find_candidate_pred_tokens( + input_ids: npt.NDArray[np.intc], + max_ngram_size: int, + num_pred_tokens: int, + ): + input_length = input_ids.shape[0] + + for ngram_size in range(min(max_ngram_size, input_length - 1), 0, -1): + # Create sliding windows of size ngram_size + windows = np.lib.stride_tricks.sliding_window_view(input_ids, (ngram_size,)) + + # Convert ngram to an array for comparison + ngram_array = input_ids[-ngram_size:] + + # Find where the windows match the ngram + matches = np.all(windows == ngram_array, axis=1) + + # Get the indices of matches + match_indices = np.nonzero(matches)[0] + + # Iterate through match indices to find a valid continuation + for idx in match_indices: + start_idx = idx + ngram_size + end_idx = start_idx + num_pred_tokens + end_idx = min(end_idx, input_length) + + if start_idx < end_idx: + return input_ids[start_idx:end_idx] + + # If no match is found, return an empty array + return np.array([], dtype=np.intc) + + def __call__( + self, input_ids: npt.NDArray[np.intc], /, **kwargs: Any + ) -> npt.NDArray[np.intc]: + return self.find_candidate_pred_tokens( + input_ids=input_ids, + max_ngram_size=self.max_ngram_size, + num_pred_tokens=self.num_pred_tokens, + ) diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py new file mode 100644 index 000000000..1375e1392 --- /dev/null +++ b/llama_cpp/llama_tokenizer.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import abc +from typing import ( + List, + Optional, + Any, +) + +import llama_cpp +from llama_cpp.llama_types import List + + +class BaseLlamaTokenizer(abc.ABC): + @abc.abstractmethod + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + """Tokenize the text into tokens. 
+ + Args: + text: The utf-8 encoded string to tokenize. + add_bos: Whether to add a beginning of sequence token. + special: Whether to tokenize special tokens. + """ + raise NotImplementedError + + @abc.abstractmethod + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + """Detokenize the tokens into text. + + Args: + tokens: The list of tokens to detokenize. + prev_tokens: The list of previous tokens. Offset mapping will be performed if provided. + special: Whether to detokenize special tokens. + """ + raise NotImplementedError + + +class LlamaTokenizer(BaseLlamaTokenizer): + def __init__(self, llama: llama_cpp.Llama): + self._model = llama._model # type: ignore + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self._model.tokenize(text, add_bos=add_bos, special=special) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + return self._model.detokenize(tokens, special=special) + + def encode( + self, text: str, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self.tokenize( + text.encode("utf-8", errors="ignore"), add_bos=add_bos, special=special + ) + + def decode(self, tokens: List[int]) -> str: + return self.detokenize(tokens).decode("utf-8", errors="ignore") + + @classmethod + def from_ggml_file(cls, path: str) -> "LlamaTokenizer": + return cls(llama_cpp.Llama(model_path=path, vocab_only=True)) + + +class LlamaHFTokenizer(BaseLlamaTokenizer): + def __init__(self, hf_tokenizer: Any): + self.hf_tokenizer = hf_tokenizer + + def tokenize( + self, text: bytes, add_bos: bool = True, special: bool = True + ) -> List[int]: + return self.hf_tokenizer.encode( + text.decode("utf-8", errors="ignore"), add_special_tokens=special + ) + + def detokenize( + self, + tokens: List[int], + prev_tokens: Optional[List[int]] = None, + special: bool = False, + ) -> bytes: + skip_special_tokens = not special + if prev_tokens is not None: + text = self.hf_tokenizer.decode( + prev_tokens + tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + prev_text = self.hf_tokenizer.decode( + prev_tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + return text[len(prev_text) :] + else: + return self.hf_tokenizer.decode( + tokens, skip_special_tokens=skip_special_tokens + ).encode("utf-8", errors="ignore") + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: str) -> "LlamaHFTokenizer": + try: + from transformers import AutoTokenizer + except ImportError: + raise ImportError( + "The `transformers` library is required to use the `HFTokenizer`." + "You can install it with `pip install transformers`." + ) + hf_tokenizer = AutoTokenizer.from_pretrained( + pretrained_model_name_or_path=pretrained_model_name_or_path + ) + return cls(hf_tokenizer) diff --git a/llama_cpp/llama_types.py b/llama_cpp/llama_types.py index b62ff1b19..f647822ff 100644 --- a/llama_cpp/llama_types.py +++ b/llama_cpp/llama_types.py @@ -1,37 +1,52 @@ -from typing import List, Optional, Dict, Union +"""Types and request signatures for OpenAI compatibility + +NOTE: These types may change to match the OpenAI OpenAPI specification. 
+ +Based on the OpenAI OpenAPI specification: +https://github.com/openai/openai-openapi/blob/master/openapi.yaml + +""" + +from typing import Any, List, Optional, Dict, Union from typing_extensions import TypedDict, NotRequired, Literal +# NOTE: Defining this correctly using annotations seems to break pydantic validation. +# This is a workaround until we can figure out how to do this correctly +# JsonType = Union[None, int, str, bool, List["JsonType"], Dict[str, "JsonType"]] +JsonType = Union[None, int, str, bool, List[Any], Dict[str, Any]] + + class EmbeddingUsage(TypedDict): prompt_tokens: int total_tokens: int -class EmbeddingData(TypedDict): +class Embedding(TypedDict): index: int object: str - embedding: List[float] + embedding: Union[List[float], List[List[float]]] -class Embedding(TypedDict): +class CreateEmbeddingResponse(TypedDict): object: Literal["list"] model: str - data: List[EmbeddingData] + data: List[Embedding] usage: EmbeddingUsage class CompletionLogprobs(TypedDict): text_offset: List[int] - token_logprobs: List[float] + token_logprobs: List[Optional[float]] tokens: List[str] - top_logprobs: List[Dict[str, float]] + top_logprobs: List[Optional[Dict[str, float]]] class CompletionChoice(TypedDict): text: str index: int logprobs: Optional[CompletionLogprobs] - finish_reason: Optional[str] + finish_reason: Optional[Literal["stop", "length"]] class CompletionUsage(TypedDict): @@ -40,58 +55,262 @@ class CompletionUsage(TypedDict): total_tokens: int -class CompletionChunk(TypedDict): +class CreateCompletionResponse(TypedDict): id: str object: Literal["text_completion"] created: int model: str choices: List[CompletionChoice] + usage: NotRequired[CompletionUsage] -class Completion(TypedDict): - id: str - object: Literal["text_completion"] - created: int - model: str - choices: List[CompletionChoice] - usage: CompletionUsage +class ChatCompletionResponseFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionResponseMessage(TypedDict): + content: Optional[str] + tool_calls: NotRequired["ChatCompletionMessageToolCalls"] + role: Literal["assistant", "function"] # NOTE: "function" may be incorrect here + function_call: NotRequired[ChatCompletionResponseFunctionCall] # DEPRECATED + + +class ChatCompletionFunction(TypedDict): + name: str + description: NotRequired[str] + parameters: Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionTopLogprobToken(TypedDict): + token: str + logprob: float + bytes: Optional[List[int]] + + +class ChatCompletionLogprobToken(ChatCompletionTopLogprobToken): + token: str + logprob: float + bytes: Optional[List[int]] + top_logprobs: List[ChatCompletionTopLogprobToken] -class ChatCompletionMessage(TypedDict): - role: Union[Literal["assistant"], Literal["user"], Literal["system"]] - content: str - user: NotRequired[str] +class ChatCompletionLogprobs(TypedDict): + content: Optional[List[ChatCompletionLogprobToken]] + refusal: Optional[List[ChatCompletionLogprobToken]] -class ChatCompletionChoice(TypedDict): +class ChatCompletionResponseChoice(TypedDict): index: int - message: ChatCompletionMessage + message: "ChatCompletionResponseMessage" + logprobs: Optional[ChatCompletionLogprobs] finish_reason: Optional[str] -class ChatCompletion(TypedDict): +class CreateChatCompletionResponse(TypedDict): id: str object: Literal["chat.completion"] created: int model: str - choices: List[ChatCompletionChoice] + choices: List["ChatCompletionResponseChoice"] usage: CompletionUsage -class ChatCompletionChunkDelta(TypedDict): - 
role: NotRequired[Literal["assistant"]] - content: NotRequired[str] +class ChatCompletionMessageToolCallChunkFunction(TypedDict): + name: Optional[str] + arguments: str -class ChatCompletionChunkChoice(TypedDict): +class ChatCompletionMessageToolCallChunk(TypedDict): index: int - delta: ChatCompletionChunkDelta - finish_reason: Optional[str] + id: NotRequired[str] + type: Literal["function"] + function: ChatCompletionMessageToolCallChunkFunction + + +class ChatCompletionStreamResponseDeltaEmpty(TypedDict): + pass + + +class ChatCompletionStreamResponseDeltaFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionStreamResponseDelta(TypedDict): + content: NotRequired[Optional[str]] + function_call: NotRequired[ + Optional[ChatCompletionStreamResponseDeltaFunctionCall] + ] # DEPRECATED + tool_calls: NotRequired[Optional[List[ChatCompletionMessageToolCallChunk]]] + role: NotRequired[Optional[Literal["system", "user", "assistant", "tool"]]] + + +class ChatCompletionStreamResponseChoice(TypedDict): + index: int + delta: Union[ + ChatCompletionStreamResponseDelta, ChatCompletionStreamResponseDeltaEmpty + ] + finish_reason: Optional[Literal["stop", "length", "tool_calls", "function_call"]] + logprobs: NotRequired[Optional[ChatCompletionLogprobs]] -class ChatCompletionChunk(TypedDict): +class CreateChatCompletionStreamResponse(TypedDict): id: str model: str object: Literal["chat.completion.chunk"] created: int - choices: List[ChatCompletionChunkChoice] + choices: List[ChatCompletionStreamResponseChoice] + + +class ChatCompletionFunctions(TypedDict): + name: str + description: NotRequired[str] + parameters: Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionFunctionCallOption(TypedDict): + name: str + + +class ChatCompletionRequestResponseFormat(TypedDict): + type: Literal["text", "json_object"] + schema: NotRequired[ + JsonType + ] # https://docs.endpoints.anyscale.com/guides/json_mode/ + + +class ChatCompletionRequestMessageContentPartText(TypedDict): + type: Literal["text"] + text: str + + +class ChatCompletionRequestMessageContentPartImageImageUrl(TypedDict): + url: str + detail: NotRequired[Literal["auto", "low", "high"]] + + +class ChatCompletionRequestMessageContentPartImage(TypedDict): + type: Literal["image_url"] + image_url: Union[str, ChatCompletionRequestMessageContentPartImageImageUrl] + + +ChatCompletionRequestMessageContentPart = Union[ + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartImage, +] + + +class ChatCompletionRequestSystemMessage(TypedDict): + role: Literal["system"] + content: Optional[str] + + +class ChatCompletionRequestUserMessage(TypedDict): + role: Literal["user"] + content: Optional[Union[str, List[ChatCompletionRequestMessageContentPart]]] + + +class ChatCompletionMessageToolCallFunction(TypedDict): + name: str + arguments: str + + +class ChatCompletionMessageToolCall(TypedDict): + id: str + type: Literal["function"] + function: ChatCompletionMessageToolCallFunction + + +ChatCompletionMessageToolCalls = List[ChatCompletionMessageToolCall] + + +class ChatCompletionRequestAssistantMessageFunctionCall(TypedDict): + name: str + arguments: str + + +class ChatCompletionRequestAssistantMessage(TypedDict): + role: Literal["assistant"] + content: NotRequired[str] + tool_calls: NotRequired[ChatCompletionMessageToolCalls] + function_call: NotRequired[ + ChatCompletionRequestAssistantMessageFunctionCall + ] # DEPRECATED + + +class ChatCompletionRequestToolMessage(TypedDict): + role: 
Literal["tool"] + content: Optional[str] + tool_call_id: str + + +class ChatCompletionRequestFunctionMessage(TypedDict): + role: Literal["function"] + content: Optional[str] + name: str + + +ChatCompletionRequestMessage = Union[ + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestAssistantMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestFunctionMessage, +] + + +class ChatCompletionRequestFunctionCallOption(TypedDict): + name: str + + +ChatCompletionRequestFunctionCall = Union[ + Literal["none", "auto"], ChatCompletionRequestFunctionCallOption +] + +ChatCompletionFunctionParameters = Dict[str, JsonType] # TODO: make this more specific + + +class ChatCompletionToolFunction(TypedDict): + name: str + description: NotRequired[str] + parameters: ChatCompletionFunctionParameters + + +class ChatCompletionTool(TypedDict): + type: Literal["function"] + function: ChatCompletionToolFunction + + +class ChatCompletionNamedToolChoiceFunction(TypedDict): + name: str + + +class ChatCompletionNamedToolChoice(TypedDict): + type: Literal["function"] + function: ChatCompletionNamedToolChoiceFunction + + +ChatCompletionToolChoiceOption = Union[ + Literal["none", "auto", "required"], ChatCompletionNamedToolChoice +] + + +# NOTE: The following type names are not part of the OpenAI OpenAPI specification +# and will be removed in a future major release. + +EmbeddingData = Embedding +CompletionChunk = CreateCompletionResponse +Completion = CreateCompletionResponse +CreateCompletionStreamResponse = CreateCompletionResponse +ChatCompletionMessage = ChatCompletionResponseMessage +ChatCompletionChoice = ChatCompletionResponseChoice +ChatCompletion = CreateChatCompletionResponse +ChatCompletionChunkDeltaEmpty = ChatCompletionStreamResponseDeltaEmpty +ChatCompletionChunkChoice = ChatCompletionStreamResponseChoice +ChatCompletionChunkDelta = ChatCompletionStreamResponseDelta +ChatCompletionChunk = CreateChatCompletionStreamResponse +ChatCompletionStreamResponse = CreateChatCompletionStreamResponse +ChatCompletionResponseFunction = ChatCompletionFunction +ChatCompletionFunctionCall = ChatCompletionResponseFunctionCall diff --git a/llama_cpp/llava_cpp.py b/llama_cpp/llava_cpp.py new file mode 100644 index 000000000..d9dfaf5fd --- /dev/null +++ b/llama_cpp/llava_cpp.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import os +from ctypes import ( + c_bool, + c_char_p, + c_int, + c_uint8, + c_float, + c_void_p, + POINTER, + _Pointer, # type: ignore + Structure, +) +import pathlib +from typing import ( + Union, + NewType, + Optional, + TYPE_CHECKING, +) + +import llama_cpp.llama_cpp as llama_cpp + +from llama_cpp._ctypes_extensions import ( + load_shared_library, + ctypes_function_for_shared_library, +) + +if TYPE_CHECKING: + from llama_cpp._ctypes_extensions import ( + CtypesArray, + ) + + +# Specify the base name of the shared library to load +_libllava_base_name = "llava" +_libllava_override_path = os.environ.get("LLAVA_CPP_LIB") +_libllava_base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" if _libllava_override_path is None else pathlib.Path() + +# Load the library +_libllava = load_shared_library(_libllava_base_name, _libllava_base_path) + +ctypes_function = ctypes_function_for_shared_library(_libllava) + + +################################################ +# llava.h +################################################ + +# struct clip_ctx; +clip_ctx_p = NewType("clip_ctx_p", int) 
+clip_ctx_p_ctypes = c_void_p + + +# struct llava_image_embed { +# float * embed; +# int n_image_pos; +# }; +class llava_image_embed(Structure): + _fields_ = [ + ("embed", POINTER(c_float)), + ("n_image_pos", c_int), + ] + + +# /** sanity check for clip <-> llava embed size match */ +# LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); +@ctypes_function( + "llava_validate_embed_size", + [llama_cpp.llama_context_p_ctypes, clip_ctx_p_ctypes], + c_bool, +) +def llava_validate_embed_size( + ctx_llama: llama_cpp.llama_context_p, ctx_clip: clip_ctx_p, / +) -> bool: + ... + + +# /** build an image embed from image file bytes */ +# LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +@ctypes_function( + "llava_image_embed_make_with_bytes", + [clip_ctx_p_ctypes, c_int, POINTER(c_uint8), c_int], + POINTER(llava_image_embed), +) +def llava_image_embed_make_with_bytes( + ctx_clip: clip_ctx_p, + n_threads: Union[c_int, int], + image_bytes: CtypesArray[c_uint8], + image_bytes_length: Union[c_int, int], + /, +) -> "_Pointer[llava_image_embed]": + ... + + +# /** build an image embed from a path to an image filename */ +# LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +@ctypes_function( + "llava_image_embed_make_with_filename", + [clip_ctx_p_ctypes, c_int, c_char_p], + POINTER(llava_image_embed), +) +def llava_image_embed_make_with_filename( + ctx_clip: clip_ctx_p, n_threads: Union[c_int, int], image_path: bytes, / +) -> "_Pointer[llava_image_embed]": + ... + + +# LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed); +# /** free an embedding made with llava_image_embed_make_* */ +@ctypes_function("llava_image_embed_free", [POINTER(llava_image_embed)], None) +def llava_image_embed_free(embed: "_Pointer[llava_image_embed]", /): + ... + + +# /** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ +# LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); +@ctypes_function( + "llava_eval_image_embed", + [ + llama_cpp.llama_context_p_ctypes, + POINTER(llava_image_embed), + c_int, + POINTER(c_int), + ], + c_bool, +) +def llava_eval_image_embed( + ctx_llama: llama_cpp.llama_context_p, + embed: "_Pointer[llava_image_embed]", + n_batch: Union[c_int, int], + n_past: "_Pointer[c_int]", + /, +) -> bool: + ... + + +################################################ +# clip.h +################################################ + + +# /** load mmproj model */ +# CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity); +@ctypes_function("clip_model_load", [c_char_p, c_int], clip_ctx_p_ctypes) +def clip_model_load( + fname: bytes, verbosity: Union[c_int, int], / +) -> Optional[clip_ctx_p]: + ... + + +# /** free mmproj model */ +# CLIP_API void clip_free(struct clip_ctx * ctx); +@ctypes_function("clip_free", [clip_ctx_p_ctypes], None) +def clip_free(ctx: clip_ctx_p, /): + ... 
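Because the llava helpers above are thin `ctypes` wrappers, a short sketch of the intended call order may help. This is illustrative and not part of the change: `ctx` is assumed to be a `llama_context` created through the main `llama_cpp` bindings, and the model/image file names are placeholders.

```python
# Illustrative flow for the clip/llava bindings above; `ctx` and the file names are
# placeholders. clip_model_load returns None when the mmproj model fails to load.
import ctypes
from llama_cpp import llava_cpp

clip_ctx = llava_cpp.clip_model_load(b"mmproj-model-f16.gguf", 1)  # verbosity = 1
assert clip_ctx is not None

embed = llava_cpp.llava_image_embed_make_with_filename(clip_ctx, 4, b"image.png")
n_past = ctypes.c_int(0)
try:
    # Writes the image embedding into the context in batches of 512 tokens;
    # on success, n_past.value is the next free position after the image.
    ok = llava_cpp.llava_eval_image_embed(ctx, embed, 512, ctypes.byref(n_past))
finally:
    llava_cpp.llava_image_embed_free(embed)
    llava_cpp.clip_free(clip_ctx)
```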
+ diff --git a/llama_cpp/py.typed b/llama_cpp/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/llama_cpp/server/__init__.py b/llama_cpp/server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/llama_cpp/server/__main__.py b/llama_cpp/server/__main__.py index 44ee1f0f4..bbac4957e 100644 --- a/llama_cpp/server/__main__.py +++ b/llama_cpp/server/__main__.py @@ -3,267 +3,98 @@ To run this example: ```bash -pip install fastapi uvicorn sse-starlette +pip install fastapi uvicorn sse-starlette pydantic-settings export MODEL=../models/7B/... -uvicorn fastapi_server_chat:app --reload ``` -Then visit http://localhost:8000/docs to see the interactive API docs. - -""" -import os -import json -from typing import List, Optional, Literal, Union, Iterator, Dict -from typing_extensions import TypedDict - -import llama_cpp - -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict -from sse_starlette.sse import EventSourceResponse - - -class Settings(BaseSettings): - model: str - n_ctx: int = 2048 - n_batch: int = 8 - n_threads: int = ((os.cpu_count() or 2) // 2) or 1 - f16_kv: bool = True - use_mlock: bool = False # This causes a silent failure on platforms that don't support mlock (e.g. Windows) took forever to figure out... - embedding: bool = True - last_n_tokens_size: int = 64 - - -app = FastAPI( - title="🦙 llama.cpp Python API", - version="0.0.1", -) -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) -settings = Settings() -llama = llama_cpp.Llama( - settings.model, - f16_kv=settings.f16_kv, - use_mlock=settings.use_mlock, - embedding=settings.embedding, - n_threads=settings.n_threads, - n_batch=settings.n_batch, - n_ctx=settings.n_ctx, - last_n_tokens_size=settings.last_n_tokens_size, -) - - -class CreateCompletionRequest(BaseModel): - prompt: Union[str, List[str]] - suffix: Optional[str] = Field(None) - max_tokens: int = 16 - temperature: float = 0.8 - top_p: float = 0.95 - echo: bool = False - stop: List[str] = [] - stream: bool = False - - # ignored or currently unsupported - model: Optional[str] = Field(None) - n: Optional[int] = 1 - logprobs: Optional[int] = Field(None) - presence_penalty: Optional[float] = 0 - frequency_penalty: Optional[float] = 0 - best_of: Optional[int] = 1 - logit_bias: Optional[Dict[str, float]] = Field(None) - user: Optional[str] = Field(None) - - # llama.cpp specific parameters - top_k: int = 40 - repeat_penalty: float = 1.1 - - class Config: - schema_extra = { - "example": { - "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", - "stop": ["\n", "###"], - } - } - - -CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion) - - -@app.post( - "/v1/completions", - response_model=CreateCompletionResponse, -) -def create_completion(request: CreateCompletionRequest): - if isinstance(request.prompt, list): - request.prompt = "".join(request.prompt) +Then run: +``` +uvicorn llama_cpp.server.app:create_app --reload +``` - completion_or_chunks = llama( - **request.dict( - exclude={ - "model", - "n", - "logprobs", - "frequency_penalty", - "presence_penalty", - "best_of", - "logit_bias", - "user", - } - ) - ) - if request.stream: - chunks: Iterator[llama_cpp.CompletionChunk] = completion_or_chunks # type: ignore - return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in chunks) - 
completion: llama_cpp.Completion = completion_or_chunks # type: ignore - return completion +or +``` +python3 -m llama_cpp.server +``` -class CreateEmbeddingRequest(BaseModel): - model: Optional[str] - input: str - user: Optional[str] +Then visit http://localhost:8000/docs to see the interactive API docs. - class Config: - schema_extra = { - "example": { - "input": "The food was delicious and the waiter...", - } - } +""" +from __future__ import annotations -CreateEmbeddingResponse = create_model_from_typeddict(llama_cpp.Embedding) +import os +import sys +import argparse +import uvicorn -@app.post( - "/v1/embeddings", - response_model=CreateEmbeddingResponse, +from llama_cpp.server.app import create_app +from llama_cpp.server.settings import ( + Settings, + ServerSettings, + ModelSettings, + ConfigFileSettings, ) -def create_embedding(request: CreateEmbeddingRequest): - return llama.create_embedding(**request.dict(exclude={"model", "user"})) - - -class ChatCompletionRequestMessage(BaseModel): - role: Union[Literal["system"], Literal["user"], Literal["assistant"]] - content: str - user: Optional[str] = None - - -class CreateChatCompletionRequest(BaseModel): - model: Optional[str] - messages: List[ChatCompletionRequestMessage] - temperature: float = 0.8 - top_p: float = 0.95 - stream: bool = False - stop: List[str] = [] - max_tokens: int = 128 - - # ignored or currently unsupported - model: Optional[str] = Field(None) - n: Optional[int] = 1 - presence_penalty: Optional[float] = 0 - frequency_penalty: Optional[float] = 0 - logit_bias: Optional[Dict[str, float]] = Field(None) - user: Optional[str] = Field(None) - - # llama.cpp specific parameters - repeat_penalty: float = 1.1 +from llama_cpp.server.cli import add_args_from_model, parse_model_from_args - class Config: - schema_extra = { - "example": { - "messages": [ - ChatCompletionRequestMessage( - role="system", content="You are a helpful assistant." - ), - ChatCompletionRequestMessage( - role="user", content="What is the capital of France?" - ), - ] - } - } +def main(): + description = "🦙 Llama.cpp python server. 
Host your own LLMs!🚀" + parser = argparse.ArgumentParser(description=description) -CreateChatCompletionResponse = create_model_from_typeddict(llama_cpp.ChatCompletion) - - -@app.post( - "/v1/chat/completions", - response_model=CreateChatCompletionResponse, -) -async def create_chat_completion( - request: CreateChatCompletionRequest, -) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]: - completion_or_chunks = llama.create_chat_completion( - **request.dict( - exclude={ - "model", - "n", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "user", - } - ), + add_args_from_model(parser, Settings) + parser.add_argument( + "--config_file", + type=str, + help="Path to a config file to load.", + ) + server_settings: ServerSettings | None = None + model_settings: list[ModelSettings] = [] + args = parser.parse_args() + try: + # Load server settings from config_file if provided + config_file = os.environ.get("CONFIG_FILE", args.config_file) + if config_file: + if not os.path.exists(config_file): + raise ValueError(f"Config file {config_file} not found!") + with open(config_file, "rb") as f: + # Check if yaml file + if config_file.endswith(".yaml") or config_file.endswith(".yml"): + import yaml + import json + + config_file_settings = ConfigFileSettings.model_validate_json( + json.dumps(yaml.safe_load(f)) + ) + else: + config_file_settings = ConfigFileSettings.model_validate_json( + f.read() + ) + server_settings = ServerSettings.model_validate(config_file_settings) + model_settings = config_file_settings.models + else: + server_settings = parse_model_from_args(ServerSettings, args) + model_settings = [parse_model_from_args(ModelSettings, args)] + except Exception as e: + print(e, file=sys.stderr) + parser.print_help() + sys.exit(1) + assert server_settings is not None + assert model_settings is not None + app = create_app( + server_settings=server_settings, + model_settings=model_settings, + ) + uvicorn.run( + app, + host=os.getenv("HOST", server_settings.host), + port=int(os.getenv("PORT", server_settings.port)), + ssl_keyfile=server_settings.ssl_keyfile, + ssl_certfile=server_settings.ssl_certfile, ) - - if request.stream: - - async def server_sent_events( - chat_chunks: Iterator[llama_cpp.ChatCompletionChunk], - ): - for chat_chunk in chat_chunks: - yield dict(data=json.dumps(chat_chunk)) - yield dict(data="[DONE]") - - chunks: Iterator[llama_cpp.ChatCompletionChunk] = completion_or_chunks # type: ignore - - return EventSourceResponse( - server_sent_events(chunks), - ) - completion: llama_cpp.ChatCompletion = completion_or_chunks # type: ignore - return completion - - -class ModelData(TypedDict): - id: str - object: Literal["model"] - owned_by: str - permissions: List[str] - - -class ModelList(TypedDict): - object: Literal["list"] - data: List[ModelData] - - -GetModelResponse = create_model_from_typeddict(ModelList) - - -@app.get("/v1/models", response_model=GetModelResponse) -def get_models() -> ModelList: - return { - "object": "list", - "data": [ - { - "id": llama.model_path, - "object": "model", - "owned_by": "me", - "permissions": [], - } - ], - } if __name__ == "__main__": - import os - import uvicorn - - uvicorn.run( - app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000)) - ) + main() diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py new file mode 100644 index 000000000..5120f2416 --- /dev/null +++ b/llama_cpp/server/app.py @@ -0,0 +1,597 @@ +from __future__ import annotations + +import os +import json +import typing +import contextlib + 
+from anyio import Lock +from functools import partial +from typing import List, Optional, Union, Dict + +import llama_cpp + +import anyio +from anyio.streams.memory import MemoryObjectSendStream +from starlette.concurrency import run_in_threadpool, iterate_in_threadpool +from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body +from fastapi.middleware import Middleware +from fastapi.middleware.cors import CORSMiddleware +from fastapi.security import HTTPBearer +from sse_starlette.sse import EventSourceResponse +from starlette_context.plugins import RequestIdPlugin # type: ignore +from starlette_context.middleware import RawContextMiddleware + +from llama_cpp.server.model import ( + LlamaProxy, +) +from llama_cpp.server.settings import ( + ConfigFileSettings, + Settings, + ModelSettings, + ServerSettings, +) +from llama_cpp.server.types import ( + CreateCompletionRequest, + CreateEmbeddingRequest, + CreateChatCompletionRequest, + ModelList, + TokenizeInputRequest, + TokenizeInputResponse, + TokenizeInputCountResponse, + DetokenizeInputRequest, + DetokenizeInputResponse, +) +from llama_cpp.server.errors import RouteErrorHandler + + +router = APIRouter(route_class=RouteErrorHandler) + +_server_settings: Optional[ServerSettings] = None + + +def set_server_settings(server_settings: ServerSettings): + global _server_settings + _server_settings = server_settings + + +def get_server_settings(): + yield _server_settings + + +_llama_proxy: Optional[LlamaProxy] = None + +llama_outer_lock = Lock() +llama_inner_lock = Lock() + + +def set_llama_proxy(model_settings: List[ModelSettings]): + global _llama_proxy + _llama_proxy = LlamaProxy(models=model_settings) + + +async def get_llama_proxy(): + # NOTE: This double lock allows the currently streaming llama model to + # check if any other requests are pending in the same thread and cancel + # the stream if so. 
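# A short usage sketch (hedged) for the module-level wiring above: the same
# set_llama_proxy()/get_llama_proxy() pair that FastAPI consumes via Depends()
# can be driven by hand, which is exactly what the streaming endpoints below do
# through contextlib.asynccontextmanager. The model path is illustrative and
# the run call is left commented out because it would load a real model.
import contextlib

import anyio

from llama_cpp.server.settings import ModelSettings


async def _demo() -> None:
    set_llama_proxy([ModelSettings(model="models/llama-2-7b.Q4_K_M.gguf")])
    async with contextlib.asynccontextmanager(get_llama_proxy)() as proxy:
        llama = proxy(None)  # None or an unknown alias falls back to the default model
        print(llama.model_path)


# anyio.run(_demo)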
+ await llama_outer_lock.acquire() + release_outer_lock = True + try: + await llama_inner_lock.acquire() + try: + llama_outer_lock.release() + release_outer_lock = False + yield _llama_proxy + finally: + llama_inner_lock.release() + finally: + if release_outer_lock: + llama_outer_lock.release() + + +_ping_message_factory: typing.Optional[typing.Callable[[], bytes]] = None + + +def set_ping_message_factory(factory: typing.Callable[[], bytes]): + global _ping_message_factory + _ping_message_factory = factory + + +def create_app( + settings: Settings | None = None, + server_settings: ServerSettings | None = None, + model_settings: List[ModelSettings] | None = None, +): + config_file = os.environ.get("CONFIG_FILE", None) + if config_file is not None: + if not os.path.exists(config_file): + raise ValueError(f"Config file {config_file} not found!") + with open(config_file, "rb") as f: + # Check if yaml file + if config_file.endswith(".yaml") or config_file.endswith(".yml"): + import yaml + + config_file_settings = ConfigFileSettings.model_validate_json( + json.dumps(yaml.safe_load(f)) + ) + else: + config_file_settings = ConfigFileSettings.model_validate_json(f.read()) + server_settings = ServerSettings.model_validate(config_file_settings) + model_settings = config_file_settings.models + + if server_settings is None and model_settings is None: + if settings is None: + settings = Settings() + server_settings = ServerSettings.model_validate(settings) + model_settings = [ModelSettings.model_validate(settings)] + + assert ( + server_settings is not None and model_settings is not None + ), "server_settings and model_settings must be provided together" + + set_server_settings(server_settings) + middleware = [Middleware(RawContextMiddleware, plugins=(RequestIdPlugin(),))] + app = FastAPI( + middleware=middleware, + title="🦙 llama.cpp Python API", + version=llama_cpp.__version__, + root_path=server_settings.root_path, + ) + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + app.include_router(router) + + assert model_settings is not None + set_llama_proxy(model_settings=model_settings) + + if server_settings.disable_ping_events: + set_ping_message_factory(lambda: bytes()) + + return app + + +def prepare_request_resources( + body: CreateCompletionRequest | CreateChatCompletionRequest, + llama_proxy: LlamaProxy, + body_model: str | None, + kwargs, +) -> llama_cpp.Llama: + if llama_proxy is None: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Service is not available", + ) + llama = llama_proxy(body_model) + if body.logit_bias is not None: + kwargs["logit_bias"] = ( + _logit_bias_tokens_to_input_ids(llama, body.logit_bias) + if body.logit_bias_type == "tokens" + else body.logit_bias + ) + + if body.grammar is not None: + kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar) + + if body.min_tokens > 0: + _min_tokens_logits_processor = llama_cpp.LogitsProcessorList( + [llama_cpp.MinTokensLogitsProcessor(body.min_tokens, llama.token_eos())] + ) + if "logits_processor" not in kwargs: + kwargs["logits_processor"] = _min_tokens_logits_processor + else: + kwargs["logits_processor"].extend(_min_tokens_logits_processor) + return llama + + +async def get_event_publisher( + request: Request, + inner_send_chan: MemoryObjectSendStream[typing.Any], + body: CreateCompletionRequest | CreateChatCompletionRequest, + body_model: str | None, + llama_call, + kwargs, +): + server_settings = 
next(get_server_settings()) + interrupt_requests = ( + server_settings.interrupt_requests if server_settings else False + ) + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + async with inner_send_chan: + try: + iterator = await run_in_threadpool(llama_call, llama, **kwargs) + async for chunk in iterate_in_threadpool(iterator): + await inner_send_chan.send(dict(data=json.dumps(chunk))) + if await request.is_disconnected(): + raise anyio.get_cancelled_exc_class()() + if interrupt_requests and llama_outer_lock.locked(): + await inner_send_chan.send(dict(data="[DONE]")) + raise anyio.get_cancelled_exc_class()() + await inner_send_chan.send(dict(data="[DONE]")) + except anyio.get_cancelled_exc_class() as e: + print("disconnected") + with anyio.move_on_after(1, shield=True): + print( + f"Disconnected from client (via refresh/close) {request.client}" + ) + raise e + + +def _logit_bias_tokens_to_input_ids( + llama: llama_cpp.Llama, + logit_bias: Dict[str, float], +) -> Dict[str, float]: + to_bias: Dict[str, float] = {} + for token, score in logit_bias.items(): + token = token.encode("utf-8") + for input_id in llama.tokenize(token, add_bos=False, special=True): + to_bias[str(input_id)] = score + return to_bias + + +# Setup Bearer authentication scheme +bearer_scheme = HTTPBearer(auto_error=False) + + +async def authenticate( + settings: Settings = Depends(get_server_settings), + authorization: Optional[str] = Depends(bearer_scheme), +): + # Skip API key check if it's not set in settings + if settings.api_key is None: + return True + + # check bearer credentials against the api_key + if authorization and authorization.credentials == settings.api_key: + # api key is valid + return authorization.credentials + + # raise http error 401 + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + +openai_v1_tag = "OpenAI V1" + + +@router.post( + "/v1/completions", + summary="Completion", + dependencies=[Depends(authenticate)], + response_model=Union[ + llama_cpp.CreateCompletionResponse, + str, + ], + responses={ + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + {"$ref": "#/components/schemas/CreateCompletionResponse"} + ], + "title": "Completion response, when stream=False", + } + }, + "text/event-stream": { + "schema": { + "type": "string", + "title": "Server Side Streaming response, when stream=True. " + + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format", # noqa: E501 + "example": """data: {... see CreateCompletionResponse ...} \\n\\n data: ... \\n\\n ... 
data: [DONE]""", + } + }, + }, + } + }, + tags=[openai_v1_tag], +) +@router.post( + "/v1/engines/copilot-codex/completions", + include_in_schema=False, + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def create_completion( + request: Request, + body: CreateCompletionRequest, +) -> llama_cpp.Completion: + if isinstance(body.prompt, list): + assert len(body.prompt) <= 1 + body.prompt = body.prompt[0] if len(body.prompt) > 0 else "" + + body_model = ( + body.model + if request.url.path != "/v1/engines/copilot-codex/completions" + else "copilot-codex" + ) + + exclude = { + "n", + "best_of", + "logit_bias_type", + "user", + "min_tokens", + } + kwargs = body.model_dump(exclude=exclude) + + # handle streaming request + if kwargs.get("stream", False): + send_chan, recv_chan = anyio.create_memory_object_stream(10) + return EventSourceResponse( + recv_chan, + data_sender_callable=partial( # type: ignore + get_event_publisher, + request=request, + inner_send_chan=send_chan, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.__call__, + kwargs=kwargs, + ), + sep="\n", + ping_message_factory=_ping_message_factory, + ) + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama, **kwargs) + + +@router.post( + "/v1/embeddings", + summary="Embedding", + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def create_embedding( + request: CreateEmbeddingRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +): + return await run_in_threadpool( + llama_proxy(request.model).create_embedding, + **request.model_dump(exclude={"user"}), + ) + + +@router.post( + "/v1/chat/completions", + summary="Chat", + dependencies=[Depends(authenticate)], + response_model=Union[llama_cpp.ChatCompletion, str], + responses={ + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreateChatCompletionResponse" + } + ], + "title": "Completion response, when stream=False", + } + }, + "text/event-stream": { + "schema": { + "type": "string", + "title": "Server Side Streaming response, when stream=True" + + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format", # noqa: E501 + "example": """data: {... see CreateChatCompletionResponse ...} \\n\\n data: ... \\n\\n ... 
data: [DONE]""", + } + }, + }, + } + }, + tags=[openai_v1_tag], +) +async def create_chat_completion( + request: Request, + body: CreateChatCompletionRequest = Body( + openapi_examples={ + "normal": { + "summary": "Chat Completion", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + ], + }, + }, + "json_mode": { + "summary": "JSON Mode", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020"}, + ], + "response_format": {"type": "json_object"}, + }, + }, + "tool_calling": { + "summary": "Tool Calling", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Extract Jason is 30 years old."}, + ], + "tools": [ + { + "type": "function", + "function": { + "name": "User", + "description": "User record", + "parameters": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "age": {"type": "number"}, + }, + "required": ["name", "age"], + }, + }, + } + ], + "tool_choice": { + "type": "function", + "function": { + "name": "User", + }, + }, + }, + }, + "logprobs": { + "summary": "Logprobs", + "value": { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + ], + "logprobs": True, + "top_logprobs": 10, + }, + }, + } + ), +) -> llama_cpp.ChatCompletion: + # This is a workaround for an issue in FastAPI dependencies + # where the dependency is cleaned up before a StreamingResponse + # is complete. 
+ # https://github.com/tiangolo/fastapi/issues/11143 + + body_model = body.model + exclude = { + "n", + "logit_bias_type", + "user", + "min_tokens", + } + kwargs = body.model_dump(exclude=exclude) + + # handle streaming request + if kwargs.get("stream", False): + send_chan, recv_chan = anyio.create_memory_object_stream(10) + return EventSourceResponse( + recv_chan, + data_sender_callable=partial( # type: ignore + get_event_publisher, + request=request, + inner_send_chan=send_chan, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.create_chat_completion, + kwargs=kwargs, + ), + sep="\n", + ping_message_factory=_ping_message_factory, + ) + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama.create_chat_completion, **kwargs) + + +@router.get( + "/v1/models", + summary="Models", + dependencies=[Depends(authenticate)], + tags=[openai_v1_tag], +) +async def get_models( + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> ModelList: + return { + "object": "list", + "data": [ + { + "id": model_alias, + "object": "model", + "owned_by": "me", + "permissions": [], + } + for model_alias in llama_proxy + ], + } + + +extras_tag = "Extras" + + +@router.post( + "/extras/tokenize", + summary="Tokenize", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def tokenize( + body: TokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> TokenizeInputResponse: + tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True) + + return TokenizeInputResponse(tokens=tokens) + + +@router.post( + "/extras/tokenize/count", + summary="Tokenize Count", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def count_query_tokens( + body: TokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> TokenizeInputCountResponse: + tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True) + + return TokenizeInputCountResponse(count=len(tokens)) + + +@router.post( + "/extras/detokenize", + summary="Detokenize", + dependencies=[Depends(authenticate)], + tags=[extras_tag], +) +async def detokenize( + body: DetokenizeInputRequest, + llama_proxy: LlamaProxy = Depends(get_llama_proxy), +) -> DetokenizeInputResponse: + text = llama_proxy(body.model).detokenize(body.tokens).decode("utf-8") + + return DetokenizeInputResponse(text=text) diff --git a/llama_cpp/server/cli.py b/llama_cpp/server/cli.py new file mode 100644 index 000000000..3dd007676 --- /dev/null +++ b/llama_cpp/server/cli.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import argparse + +from typing import List, Literal, Union, Any, Type, TypeVar + +from pydantic import BaseModel + + +def _get_base_type(annotation: Type[Any]) -> Type[Any]: + if getattr(annotation, "__origin__", None) is Literal: + assert hasattr(annotation, "__args__") and len(annotation.__args__) >= 1 # type: ignore + return type(annotation.__args__[0]) # type: ignore + elif getattr(annotation, "__origin__", None) is Union: + assert hasattr(annotation, "__args__") and len(annotation.__args__) >= 1 # type: ignore + non_optional_args: 
List[Type[Any]] = [ + arg for arg in annotation.__args__ if arg is not type(None) # type: ignore + ] + if non_optional_args: + return _get_base_type(non_optional_args[0]) + elif ( + getattr(annotation, "__origin__", None) is list + or getattr(annotation, "__origin__", None) is List + ): + assert hasattr(annotation, "__args__") and len(annotation.__args__) >= 1 # type: ignore + return _get_base_type(annotation.__args__[0]) # type: ignore + return annotation + + +def _contains_list_type(annotation: Type[Any] | None) -> bool: + origin = getattr(annotation, "__origin__", None) + + if origin is list or origin is List: + return True + elif origin in (Literal, Union): + return any(_contains_list_type(arg) for arg in annotation.__args__) # type: ignore + else: + return False + + +def _parse_bool_arg(arg: str | bytes | bool) -> bool: + if isinstance(arg, bytes): + arg = arg.decode("utf-8") + + true_values = {"1", "on", "t", "true", "y", "yes"} + false_values = {"0", "off", "f", "false", "n", "no"} + + arg_str = str(arg).lower().strip() + + if arg_str in true_values: + return True + elif arg_str in false_values: + return False + else: + raise ValueError(f"Invalid boolean argument: {arg}") + + +def add_args_from_model(parser: argparse.ArgumentParser, model: Type[BaseModel]): + """Add arguments from a pydantic model to an argparse parser.""" + + for name, field in model.model_fields.items(): + description = field.description + if field.default and description and not field.is_required(): + description += f" (default: {field.default})" + base_type = ( + _get_base_type(field.annotation) if field.annotation is not None else str + ) + list_type = _contains_list_type(field.annotation) + if base_type is not bool: + parser.add_argument( + f"--{name}", + dest=name, + nargs="*" if list_type else None, + type=base_type, + help=description, + ) + if base_type is bool: + parser.add_argument( + f"--{name}", + dest=name, + type=_parse_bool_arg, + help=f"{description}", + ) + + +T = TypeVar("T", bound=Type[BaseModel]) + + +def parse_model_from_args(model: T, args: argparse.Namespace) -> T: + """Parse a pydantic model from an argparse namespace.""" + return model( + **{ + k: v + for k, v in vars(args).items() + if v is not None and k in model.model_fields + } + ) diff --git a/llama_cpp/server/errors.py b/llama_cpp/server/errors.py new file mode 100644 index 000000000..d0eda5664 --- /dev/null +++ b/llama_cpp/server/errors.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +import sys +import traceback +import time +from re import compile, Match, Pattern +from typing import Callable, Coroutine, Optional, Tuple, Union, Dict +from typing_extensions import TypedDict + + +from fastapi import ( + Request, + Response, + HTTPException, +) +from fastapi.responses import JSONResponse +from fastapi.routing import APIRoute + +from llama_cpp.server.types import ( + CreateCompletionRequest, + CreateEmbeddingRequest, + CreateChatCompletionRequest, +) + + +class ErrorResponse(TypedDict): + """OpenAI style error response""" + + message: str + type: str + param: Optional[str] + code: Optional[str] + + +class ErrorResponseFormatters: + """Collection of formatters for error responses. 
+ + Args: + request (Union[CreateCompletionRequest, CreateChatCompletionRequest]): + Request body + match (Match[str]): Match object from regex pattern + + Returns: + Tuple[int, ErrorResponse]: Status code and error response + """ + + @staticmethod + def context_length_exceeded( + request: Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + match, # type: Match[str] # type: ignore + ) -> Tuple[int, ErrorResponse]: + """Formatter for context length exceeded error""" + + context_window = int(match.group(2)) + prompt_tokens = int(match.group(1)) + completion_tokens = request.max_tokens + if hasattr(request, "messages"): + # Chat completion + message = ( + "This model's maximum context length is {} tokens. " + "However, you requested {} tokens " + "({} in the messages, {} in the completion). " + "Please reduce the length of the messages or completion." + ) + else: + # Text completion + message = ( + "This model's maximum context length is {} tokens, " + "however you requested {} tokens " + "({} in your prompt; {} for the completion). " + "Please reduce your prompt; or completion length." + ) + return 400, ErrorResponse( + message=message.format( + context_window, + (completion_tokens or 0) + prompt_tokens, + prompt_tokens, + completion_tokens, + ), # type: ignore + type="invalid_request_error", + param="messages", + code="context_length_exceeded", + ) + + @staticmethod + def model_not_found( + request: Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + match, # type: Match[str] # type: ignore + ) -> Tuple[int, ErrorResponse]: + """Formatter for model_not_found error""" + + model_path = str(match.group(1)) + message = f"The model `{model_path}` does not exist" + return 400, ErrorResponse( + message=message, + type="invalid_request_error", + param=None, + code="model_not_found", + ) + + +class RouteErrorHandler(APIRoute): + """Custom APIRoute that handles application errors and exceptions""" + + # key: regex pattern for original error message from llama_cpp + # value: formatter function + pattern_and_formatters: Dict[ + "Pattern[str]", + Callable[ + [ + Union["CreateCompletionRequest", "CreateChatCompletionRequest"], + "Match[str]", + ], + Tuple[int, ErrorResponse], + ], + ] = { + compile( + r"Requested tokens \((\d+)\) exceed context window of (\d+)" + ): ErrorResponseFormatters.context_length_exceeded, + compile( + r"Model path does not exist: (.+)" + ): ErrorResponseFormatters.model_not_found, + } + + def error_message_wrapper( + self, + error: Exception, + body: Optional[ + Union[ + "CreateChatCompletionRequest", + "CreateCompletionRequest", + "CreateEmbeddingRequest", + ] + ] = None, + ) -> Tuple[int, ErrorResponse]: + """Wraps error message in OpenAI style error response""" + if body is not None and isinstance( + body, + ( + CreateCompletionRequest, + CreateChatCompletionRequest, + ), + ): + # When text completion or chat completion + for pattern, callback in self.pattern_and_formatters.items(): + match = pattern.search(str(error)) + if match is not None: + return callback(body, match) + + # Only print the trace on unexpected exceptions + print(f"Exception: {str(error)}", file=sys.stderr) + traceback.print_exc(file=sys.stderr) + + # Wrap other errors as internal server error + return 500, ErrorResponse( + message=str(error), + type="internal_server_error", + param=None, + code=None, + ) + + def get_route_handler( + self, + ) -> Callable[[Request], Coroutine[None, None, Response]]: + """Defines custom route handler that catches exceptions and formats + in 
OpenAI style error response""" + + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + try: + start_sec = time.perf_counter() + response = await original_route_handler(request) + elapsed_time_ms = int((time.perf_counter() - start_sec) * 1000) + response.headers["openai-processing-ms"] = f"{elapsed_time_ms}" + return response + except HTTPException as unauthorized: + # api key check failed + raise unauthorized + except Exception as exc: + json_body = await request.json() + try: + if "messages" in json_body: + # Chat completion + body: Optional[ + Union[ + CreateChatCompletionRequest, + CreateCompletionRequest, + CreateEmbeddingRequest, + ] + ] = CreateChatCompletionRequest(**json_body) + elif "prompt" in json_body: + # Text completion + body = CreateCompletionRequest(**json_body) + else: + # Embedding + body = CreateEmbeddingRequest(**json_body) + except Exception: + # Invalid request body + body = None + + # Get proper error message from the exception + ( + status_code, + error_message, + ) = self.error_message_wrapper(error=exc, body=body) + return JSONResponse( + {"error": error_message}, + status_code=status_code, + ) + + return custom_route_handler diff --git a/llama_cpp/server/model.py b/llama_cpp/server/model.py new file mode 100644 index 000000000..c6716f919 --- /dev/null +++ b/llama_cpp/server/model.py @@ -0,0 +1,298 @@ +from __future__ import annotations + +import json + +from typing import Dict, Optional, Union, List + +import llama_cpp +import llama_cpp.llama_speculative as llama_speculative +import llama_cpp.llama_tokenizer as llama_tokenizer + +from llama_cpp.server.settings import ModelSettings + + +class LlamaProxy: + def __init__(self, models: List[ModelSettings]) -> None: + assert len(models) > 0, "No models provided!" 
+ + self._model_settings_dict: dict[str, ModelSettings] = {} + for model in models: + if not model.model_alias: + model.model_alias = model.model + self._model_settings_dict[model.model_alias] = model + + self._current_model: Optional[llama_cpp.Llama] = None + self._current_model_alias: Optional[str] = None + + self._default_model_settings: ModelSettings = models[0] + self._default_model_alias: str = self._default_model_settings.model_alias # type: ignore + + # Load default model + self._current_model = self.load_llama_from_model_settings( + self._default_model_settings + ) + self._current_model_alias = self._default_model_alias + + def __call__(self, model: Optional[str] = None) -> llama_cpp.Llama: + if model is None: + model = self._default_model_alias + + if model not in self._model_settings_dict: + model = self._default_model_alias + + if model == self._current_model_alias: + if self._current_model is not None: + return self._current_model + + if self._current_model: + self._current_model.close() + self._current_model = None + + settings = self._model_settings_dict[model] + self._current_model = self.load_llama_from_model_settings(settings) + self._current_model_alias = model + return self._current_model + + def __getitem__(self, model: str): + return self._model_settings_dict[model].model_dump() + + def __setitem__(self, model: str, settings: Union[ModelSettings, str, bytes]): + if isinstance(settings, (bytes, str)): + settings = ModelSettings.model_validate_json(settings) + self._model_settings_dict[model] = settings + + def __iter__(self): + for model in self._model_settings_dict: + yield model + + def free(self): + if self._current_model: + self._current_model.close() + del self._current_model + + @staticmethod + def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama: + chat_handler = None + if settings.chat_format == "llava-1-5": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llava15ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llava15ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "obsidian": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.ObsidianChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.ObsidianChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "llava-1-6": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llava16ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llava16ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "moondream": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + 
llama_cpp.llama_chat_format.MoondreamChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.MoondreamChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "nanollava": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.NanoLlavaChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.NanoLlavaChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "llama-3-vision-alpha": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.Llama3VisionAlpha.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.Llama3VisionAlpha( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "minicpm-v-2.6": + assert settings.clip_model_path is not None, "clip model not found" + if settings.hf_model_repo_id is not None: + chat_handler = ( + llama_cpp.llama_chat_format.MiniCPMv26ChatHandler.from_pretrained( + repo_id=settings.hf_model_repo_id, + filename=settings.clip_model_path, + verbose=settings.verbose, + ) + ) + else: + chat_handler = llama_cpp.llama_chat_format.MiniCPMv26ChatHandler( + clip_model_path=settings.clip_model_path, verbose=settings.verbose + ) + elif settings.chat_format == "hf-autotokenizer": + assert ( + settings.hf_pretrained_model_name_or_path is not None + ), "hf_pretrained_model_name_or_path must be set for hf-autotokenizer" + chat_handler = ( + llama_cpp.llama_chat_format.hf_autotokenizer_to_chat_completion_handler( + settings.hf_pretrained_model_name_or_path + ) + ) + elif settings.chat_format == "hf-tokenizer-config": + assert ( + settings.hf_tokenizer_config_path is not None + ), "hf_tokenizer_config_path must be set for hf-tokenizer-config" + chat_handler = llama_cpp.llama_chat_format.hf_tokenizer_config_to_chat_completion_handler( + json.load(open(settings.hf_tokenizer_config_path)) + ) + + tokenizer: Optional[llama_cpp.BaseLlamaTokenizer] = None + if settings.hf_pretrained_model_name_or_path is not None: + tokenizer = llama_tokenizer.LlamaHFTokenizer.from_pretrained( + settings.hf_pretrained_model_name_or_path + ) + + draft_model = None + if settings.draft_model is not None: + draft_model = llama_speculative.LlamaPromptLookupDecoding( + num_pred_tokens=settings.draft_model_num_pred_tokens + ) + + kv_overrides: Optional[Dict[str, Union[bool, int, float, str]]] = None + if settings.kv_overrides is not None: + assert isinstance(settings.kv_overrides, list) + kv_overrides = {} + for kv in settings.kv_overrides: + key, value = kv.split("=") + if ":" in value: + value_type, value = value.split(":") + if value_type == "bool": + kv_overrides[key] = value.lower() in ["true", "1"] + elif value_type == "int": + kv_overrides[key] = int(value) + elif value_type == "float": + kv_overrides[key] = float(value) + elif value_type == "str": + kv_overrides[key] = value + else: + raise ValueError(f"Unknown value type 
{value_type}") + + import functools + + kwargs = {} + + if settings.hf_model_repo_id is not None: + create_fn = functools.partial( + llama_cpp.Llama.from_pretrained, + repo_id=settings.hf_model_repo_id, + filename=settings.model, + ) + else: + create_fn = llama_cpp.Llama + kwargs["model_path"] = settings.model + + _model = create_fn( + **kwargs, + # Model Params + n_gpu_layers=settings.n_gpu_layers, + split_mode=settings.split_mode, + main_gpu=settings.main_gpu, + tensor_split=settings.tensor_split, + vocab_only=settings.vocab_only, + use_mmap=settings.use_mmap, + use_mlock=settings.use_mlock, + kv_overrides=kv_overrides, + rpc_servers=settings.rpc_servers, + # Context Params + seed=settings.seed, + n_ctx=settings.n_ctx, + n_batch=settings.n_batch, + n_ubatch=settings.n_ubatch, + n_threads=settings.n_threads, + n_threads_batch=settings.n_threads_batch, + rope_scaling_type=settings.rope_scaling_type, + rope_freq_base=settings.rope_freq_base, + rope_freq_scale=settings.rope_freq_scale, + yarn_ext_factor=settings.yarn_ext_factor, + yarn_attn_factor=settings.yarn_attn_factor, + yarn_beta_fast=settings.yarn_beta_fast, + yarn_beta_slow=settings.yarn_beta_slow, + yarn_orig_ctx=settings.yarn_orig_ctx, + mul_mat_q=settings.mul_mat_q, + logits_all=settings.logits_all, + embedding=settings.embedding, + offload_kqv=settings.offload_kqv, + flash_attn=settings.flash_attn, + # Sampling Params + last_n_tokens_size=settings.last_n_tokens_size, + # LoRA Params + lora_base=settings.lora_base, + lora_path=settings.lora_path, + # Backend Params + numa=settings.numa, + # Chat Format Params + chat_format=settings.chat_format, + chat_handler=chat_handler, + # Speculative Decoding + draft_model=draft_model, + # KV Cache Quantization + type_k=settings.type_k, + type_v=settings.type_v, + # Tokenizer + tokenizer=tokenizer, + # Misc + verbose=settings.verbose, + ) + if settings.cache: + if settings.cache_type == "disk": + if settings.verbose: + print(f"Using disk cache with size {settings.cache_size}") + cache = llama_cpp.LlamaDiskCache(capacity_bytes=settings.cache_size) + else: + if settings.verbose: + print(f"Using ram cache with size {settings.cache_size}") + cache = llama_cpp.LlamaRAMCache(capacity_bytes=settings.cache_size) + _model.set_cache(cache) + return _model diff --git a/llama_cpp/server/settings.py b/llama_cpp/server/settings.py new file mode 100644 index 000000000..13c951241 --- /dev/null +++ b/llama_cpp/server/settings.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +import multiprocessing + +from typing import Optional, List, Literal, Union, Dict, cast +from typing_extensions import Self + +from pydantic import Field, model_validator +from pydantic_settings import BaseSettings + +import llama_cpp + +# Disable warning for model and model_alias settings +BaseSettings.model_config["protected_namespaces"] = () + + +class ModelSettings(BaseSettings): + """Model settings used to load a Llama model.""" + + model: str = Field( + description="The path to the model to use for generating completions." + ) + model_alias: Optional[str] = Field( + default=None, + description="The alias of the model to use for generating completions.", + ) + # Model Params + n_gpu_layers: int = Field( + default=0, + ge=-1, + description="The number of layers to put on the GPU. The rest will be on the CPU. 
Set -1 to move all to GPU.", + ) + split_mode: int = Field( + default=llama_cpp.LLAMA_SPLIT_MODE_LAYER, + description="The split mode to use.", + ) + main_gpu: int = Field( + default=0, + ge=0, + description="Main GPU to use.", + ) + tensor_split: Optional[List[float]] = Field( + default=None, + description="Split layers across multiple GPUs in proportion.", + ) + vocab_only: bool = Field( + default=False, description="Whether to only return the vocabulary." + ) + use_mmap: bool = Field( + default=llama_cpp.llama_supports_mmap(), + description="Use mmap.", + ) + use_mlock: bool = Field( + default=llama_cpp.llama_supports_mlock(), + description="Use mlock.", + ) + kv_overrides: Optional[List[str]] = Field( + default=None, + description="List of model kv overrides in the format key=type:value where type is one of (bool, int, float). Valid true values are (true, TRUE, 1), otherwise false.", + ) + rpc_servers: Optional[str] = Field( + default=None, + description="comma seperated list of rpc servers for offloading", + ) + # Context Params + seed: int = Field( + default=llama_cpp.LLAMA_DEFAULT_SEED, description="Random seed. -1 for random." + ) + n_ctx: int = Field(default=2048, ge=0, description="The context size.") + n_batch: int = Field( + default=512, ge=1, description="The batch size to use per eval." + ) + n_ubatch: int = Field( + default=512, ge=1, description="The physical batch size used by llama.cpp" + ) + n_threads: int = Field( + default=max(multiprocessing.cpu_count() // 2, 1), + ge=1, + description="The number of threads to use. Use -1 for max cpu threads", + ) + n_threads_batch: int = Field( + default=max(multiprocessing.cpu_count(), 1), + ge=0, + description="The number of threads to use when batch processing. Use -1 for max cpu threads", + ) + rope_scaling_type: int = Field( + default=llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED + ) + rope_freq_base: float = Field(default=0.0, description="RoPE base frequency") + rope_freq_scale: float = Field( + default=0.0, description="RoPE frequency scaling factor" + ) + yarn_ext_factor: float = Field(default=-1.0) + yarn_attn_factor: float = Field(default=1.0) + yarn_beta_fast: float = Field(default=32.0) + yarn_beta_slow: float = Field(default=1.0) + yarn_orig_ctx: int = Field(default=0) + mul_mat_q: bool = Field( + default=True, description="if true, use experimental mul_mat_q kernels" + ) + logits_all: bool = Field(default=True, description="Whether to return logits.") + embedding: bool = Field(default=False, description="Whether to use embeddings.") + offload_kqv: bool = Field( + default=True, description="Whether to offload kqv to the GPU." + ) + flash_attn: bool = Field( + default=False, description="Whether to use flash attention." 
+ ) + # Sampling Params + last_n_tokens_size: int = Field( + default=64, + ge=0, + description="Last n tokens to keep for repeat penalty calculation.", + ) + # LoRA Params + lora_base: Optional[str] = Field( + default=None, + description="Optional path to base model, useful if using a quantized base model and you want to apply LoRA to an f16 model.", + ) + lora_path: Optional[str] = Field( + default=None, + description="Path to a LoRA file to apply to the model.", + ) + # Backend Params + numa: Union[bool, int] = Field( + default=False, + description="Enable NUMA support.", + ) + # Chat Format Params + chat_format: Optional[str] = Field( + default=None, + description="Chat format to use.", + ) + clip_model_path: Optional[str] = Field( + default=None, + description="Path to a CLIP model to use for multi-modal chat completion.", + ) + # Cache Params + cache: bool = Field( + default=False, + description="Use a cache to reduce processing times for evaluated prompts.", + ) + cache_type: Literal["ram", "disk"] = Field( + default="ram", + description="The type of cache to use. Only used if cache is True.", + ) + cache_size: int = Field( + default=2 << 30, + description="The size of the cache in bytes. Only used if cache is True.", + ) + # Tokenizer Options + hf_tokenizer_config_path: Optional[str] = Field( + default=None, + description="The path to a HuggingFace tokenizer_config.json file.", + ) + hf_pretrained_model_name_or_path: Optional[str] = Field( + default=None, + description="The model name or path to a pretrained HuggingFace tokenizer model. Same as you would pass to AutoTokenizer.from_pretrained().", + ) + # Loading from HuggingFace Model Hub + hf_model_repo_id: Optional[str] = Field( + default=None, + description="The model repo id to use for the HuggingFace tokenizer model.", + ) + # Speculative Decoding + draft_model: Optional[str] = Field( + default=None, + description="Method to use for speculative decoding. One of (prompt-lookup-decoding).", + ) + draft_model_num_pred_tokens: int = Field( + default=10, + description="Number of tokens to predict using the draft model.", + ) + # KV Cache Quantization + type_k: Optional[int] = Field( + default=None, + description="Type of the key cache quantization.", + ) + type_v: Optional[int] = Field( + default=None, + description="Type of the value cache quantization.", + ) + # Misc + verbose: bool = Field( + default=True, description="Whether to print debug information." + ) + + @model_validator( + mode="before" + ) # pre=True to ensure this runs before any other validation + def set_dynamic_defaults(self) -> Self: + # If n_threads or n_threads_batch is -1, set it to multiprocessing.cpu_count() + cpu_count = multiprocessing.cpu_count() + values = cast(Dict[str, int], self) + if values.get("n_threads", 0) == -1: + values["n_threads"] = cpu_count + if values.get("n_threads_batch", 0) == -1: + values["n_threads_batch"] = cpu_count + return self + + +class ServerSettings(BaseSettings): + """Server settings used to configure the FastAPI and Uvicorn server.""" + + # Uvicorn Settings + host: str = Field(default="localhost", description="Listen address") + port: int = Field(default=8000, description="Listen port") + ssl_keyfile: Optional[str] = Field( + default=None, description="SSL key file for HTTPS" + ) + ssl_certfile: Optional[str] = Field( + default=None, description="SSL certificate file for HTTPS" + ) + # FastAPI Settings + api_key: Optional[str] = Field( + default=None, + description="API key for authentication. 
If set all requests need to be authenticated.", + ) + interrupt_requests: bool = Field( + default=True, + description="Whether to interrupt requests when a new request is received.", + ) + disable_ping_events: bool = Field( + default=False, + description="Disable EventSource pings (may be needed for some clients).", + ) + root_path: str = Field( + default="", + description="The root path for the server. Useful when running behind a reverse proxy.", + ) + + +class Settings(ServerSettings, ModelSettings): + pass + + +class ConfigFileSettings(ServerSettings): + """Configuration file format settings.""" + + models: List[ModelSettings] = Field(default=[], description="Model configs") diff --git a/llama_cpp/server/types.py b/llama_cpp/server/types.py new file mode 100644 index 000000000..fdd164456 --- /dev/null +++ b/llama_cpp/server/types.py @@ -0,0 +1,316 @@ +from __future__ import annotations + +from typing import List, Optional, Union, Dict +from typing_extensions import TypedDict, Literal + +from pydantic import BaseModel, Field + +import llama_cpp + + +model_field = Field( + description="The model to use for generating completions.", default=None +) + +max_tokens_field = Field( + default=16, ge=1, description="The maximum number of tokens to generate." +) + +min_tokens_field = Field( + default=0, + ge=0, + description="The minimum number of tokens to generate. It may return fewer tokens if another condition is met (e.g. max_tokens, stop).", +) + +temperature_field = Field( + default=0.8, + description="Adjust the randomness of the generated text.\n\n" + + "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.", +) + +top_p_field = Field( + default=0.95, + ge=0.0, + le=1.0, + description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" + + "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.", +) + +min_p_field = Field( + default=0.05, + ge=0.0, + le=1.0, + description="Sets a minimum base probability threshold for token selection.\n\n" + + "The Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. The parameter min_p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with min_p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.", +) + +stop_field = Field( + default=None, + description="A list of tokens at which to stop generation. 
If None, no stop tokens are used.", +) + +stream_field = Field( + default=False, + description="Whether to stream the results as they are generated. Useful for chatbots.", +) + +top_k_field = Field( + default=40, + ge=0, + description="Limit the next token selection to the K most probable tokens.\n\n" + + "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.", +) + +repeat_penalty_field = Field( + default=1.1, + ge=0.0, + description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" + + "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.", +) + +presence_penalty_field = Field( + default=0.0, + ge=-2.0, + le=2.0, + description="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", +) + +frequency_penalty_field = Field( + default=0.0, + ge=-2.0, + le=2.0, + description="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", +) + +mirostat_mode_field = Field( + default=0, + ge=0, + le=2, + description="Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)", +) + +mirostat_tau_field = Field( + default=5.0, + ge=0.0, + le=10.0, + description="Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text", +) + +mirostat_eta_field = Field( + default=0.1, ge=0.001, le=1.0, description="Mirostat learning rate" +) + +grammar = Field( + default=None, + description="A CBNF grammar (as string) to be used for formatting the model's output.", +) + + +class CreateCompletionRequest(BaseModel): + prompt: Union[str, List[str]] = Field( + default="", description="The prompt to generate completions for." + ) + suffix: Optional[str] = Field( + default=None, + description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots.", + ) + max_tokens: Optional[int] = Field( + default=16, ge=0, description="The maximum number of tokens to generate." + ) + min_tokens: int = min_tokens_field + temperature: float = temperature_field + top_p: float = top_p_field + min_p: float = min_p_field + echo: bool = Field( + default=False, + description="Whether to echo the prompt in the generated text. Useful for chatbots.", + ) + stop: Optional[Union[str, List[str]]] = stop_field + stream: bool = stream_field + logprobs: Optional[int] = Field( + default=None, + ge=0, + description="The number of logprobs to generate. 
If None, no logprobs are generated.", + ) + presence_penalty: Optional[float] = presence_penalty_field + frequency_penalty: Optional[float] = frequency_penalty_field + logit_bias: Optional[Dict[str, float]] = Field(None) + seed: Optional[int] = Field(None) + + # ignored or currently unsupported + model: Optional[str] = model_field + n: Optional[int] = 1 + best_of: Optional[int] = 1 + user: Optional[str] = Field(default=None) + + # llama.cpp specific parameters + top_k: int = top_k_field + repeat_penalty: float = repeat_penalty_field + logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) + mirostat_mode: int = mirostat_mode_field + mirostat_tau: float = mirostat_tau_field + mirostat_eta: float = mirostat_eta_field + grammar: Optional[str] = None + + model_config = { + "json_schema_extra": { + "examples": [ + { + "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", + "stop": ["\n", "###"], + } + ] + } + } + + +class CreateEmbeddingRequest(BaseModel): + model: Optional[str] = model_field + input: Union[str, List[str]] = Field(description="The input to embed.") + user: Optional[str] = Field(default=None) + + model_config = { + "json_schema_extra": { + "examples": [ + { + "input": "The food was delicious and the waiter...", + } + ] + } + } + + +class ChatCompletionRequestMessage(BaseModel): + role: Literal["system", "user", "assistant", "function"] = Field( + default="user", description="The role of the message." + ) + content: Optional[str] = Field( + default="", description="The content of the message." + ) + + +class CreateChatCompletionRequest(BaseModel): + messages: List[llama_cpp.ChatCompletionRequestMessage] = Field( + default=[], description="A list of messages to generate completions for." + ) + functions: Optional[List[llama_cpp.ChatCompletionFunction]] = Field( + default=None, + description="A list of functions to apply to the generated completions.", + ) + function_call: Optional[llama_cpp.ChatCompletionRequestFunctionCall] = Field( + default=None, + description="A function to apply to the generated completions.", + ) + tools: Optional[List[llama_cpp.ChatCompletionTool]] = Field( + default=None, + description="A list of tools to apply to the generated completions.", + ) + tool_choice: Optional[llama_cpp.ChatCompletionToolChoiceOption] = Field( + default=None, + description="A tool to apply to the generated completions.", + ) # TODO: verify + max_tokens: Optional[int] = Field( + default=None, + description="The maximum number of tokens to generate. Defaults to inf", + ) + min_tokens: int = min_tokens_field + logprobs: Optional[bool] = Field( + default=False, + description="Whether to output the logprobs or not. Default is True", + ) + top_logprobs: Optional[int] = Field( + default=None, + ge=0, + description="The number of logprobs to generate. If None, no logprobs are generated. 
logprobs need to set to True.", + ) + temperature: float = temperature_field + top_p: float = top_p_field + min_p: float = min_p_field + stop: Optional[Union[str, List[str]]] = stop_field + stream: bool = stream_field + presence_penalty: Optional[float] = presence_penalty_field + frequency_penalty: Optional[float] = frequency_penalty_field + logit_bias: Optional[Dict[str, float]] = Field(None) + seed: Optional[int] = Field(None) + response_format: Optional[llama_cpp.ChatCompletionRequestResponseFormat] = Field( + default=None, + ) + + # ignored or currently unsupported + model: Optional[str] = model_field + n: Optional[int] = 1 + user: Optional[str] = Field(None) + + # llama.cpp specific parameters + top_k: int = top_k_field + repeat_penalty: float = repeat_penalty_field + logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) + mirostat_mode: int = mirostat_mode_field + mirostat_tau: float = mirostat_tau_field + mirostat_eta: float = mirostat_eta_field + grammar: Optional[str] = None + + model_config = { + "json_schema_extra": { + "examples": [ + { + "messages": [ + ChatCompletionRequestMessage( + role="system", content="You are a helpful assistant." + ).model_dump(), + ChatCompletionRequestMessage( + role="user", content="What is the capital of France?" + ).model_dump(), + ] + } + ] + } + } + + +class ModelData(TypedDict): + id: str + object: Literal["model"] + owned_by: str + permissions: List[str] + + +class ModelList(TypedDict): + object: Literal["list"] + data: List[ModelData] + + +class TokenizeInputRequest(BaseModel): + model: Optional[str] = model_field + input: str = Field(description="The input to tokenize.") + + model_config = { + "json_schema_extra": {"examples": [{"input": "How many tokens in this query?"}]} + } + + +class TokenizeInputResponse(BaseModel): + tokens: List[int] = Field(description="A list of tokens.") + + model_config = {"json_schema_extra": {"example": {"tokens": [123, 321, 222]}}} + + +class TokenizeInputCountResponse(BaseModel): + count: int = Field(description="The number of tokens in the input.") + + model_config = {"json_schema_extra": {"example": {"count": 5}}} + + +class DetokenizeInputRequest(BaseModel): + model: Optional[str] = model_field + tokens: List[int] = Field(description="A list of toekns to detokenize.") + + model_config = {"json_schema_extra": {"example": [{"tokens": [123, 321, 222]}]}} + + +class DetokenizeInputResponse(BaseModel): + text: str = Field(description="The detokenized text.") + + model_config = { + "json_schema_extra": {"example": {"text": "How many tokens in this query?"}} + } diff --git a/mkdocs.yml b/mkdocs.yml index 286581176..79a9e67a1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,20 +2,73 @@ site_name: llama-cpp-python repo_url: https://github.com/abetlen/llama-cpp-python theme: - name: "material" + name: material + palette: + + # Palette toggle for light mode + - scheme: default + primary: indigo + toggle: + icon: material/brightness-7 + name: Switch to dark mode + + # Palette toggle for dark mode + - scheme: slate + primary: indigo + toggle: + icon: material/brightness-4 + name: Switch to light mode plugins: - - mkdocstrings - search + - mkdocstrings: + handlers: + python: + options: + members_order: source + group_by_category: false + signature_crossrefs: true + show_signature: true + docstring_section_style: list + show_root_heading: true + heading_level: 3 + preload_modules: + - typing + - typing_extensions + - ctypes + import: + - https://docs.python.org/3/objects.inv + - 
https://numpy.org/doc/stable/objects.inv watch: - llama_cpp + - README.md + +nav: + - "Getting Started": "index.md" + - "Installation Guides": + - "macOS (Metal)": "install/macos.md" + - "API Reference": "api-reference.md" + - "OpenAI Compatible Web Server": "server.md" + - "Changelog": "changelog.md" markdown_extensions: + - attr_list + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg - pymdownx.highlight: anchor_linenums: true line_spans: __span pygments_lang_class: true - pymdownx.inlinehilite + - pymdownx.magiclink: + repo_url_shorthand: true + user: abetlen + repo: llama-cpp-python - pymdownx.snippets - - pymdownx.superfences \ No newline at end of file + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tilde + - tables diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 8a74d2fbd..000000000 --- a/poetry.lock +++ /dev/null @@ -1,1370 +0,0 @@ -# This file is automatically @generated by Poetry 1.4.1 and should not be changed by hand. - -[[package]] -name = "attrs" -version = "22.2.0" -description = "Classes Without Boilerplate" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] -tests = ["attrs[tests-no-zope]", "zope.interface"] -tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] - -[[package]] -name = "black" -version = "23.1.0" -description = "The uncompromising code formatter." 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, - {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, - {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, - {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, - {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, - {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, - {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, - {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, - {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, - {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, - {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, - {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, - {file = "black-23.1.0-py3-none-any.whl", hash = 
"sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, - {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - -[[package]] -name = "bleach" -version = "6.0.0" -description = "An easy safelist-based HTML-sanitizing tool." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, - {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, -] - -[package.dependencies] -six = ">=1.9.0" -webencodings = "*" - -[package.extras] -css = ["tinycss2 (>=1.1.0,<1.2)"] - -[[package]] -name = "certifi" -version = "2022.12.7" -description = "Python package for providing Mozilla's CA Bundle." -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] - -[[package]] -name = "cffi" -version = "1.15.1" -description = "Foreign Function Interface for Python calling C code." 
-category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "charset-normalizer" -version = "3.1.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "dev" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, - {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, - {file = 
"charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, - {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, -] - -[[package]] -name = "click" -version = "8.1.3" -description = "Composable command line interface toolkit" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -category = "dev" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "cryptography" -version = "39.0.2" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"}, - {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"}, - {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"}, - {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"}, - {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"}, - {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"}, - {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"}, - {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"}, - {file = "cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"}, - {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"}, -] - -[package.dependencies] -cffi = ">=1.12" - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] -sdist = ["setuptools-rust (>=0.11.4)"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] -test-randomorder = ["pytest-randomly"] -tox = ["tox"] - -[[package]] -name = "docutils" -version = "0.19" -description = "Docutils -- Python Documentation Utilities" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, - {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.1.1" -description = "Backport of PEP 654 (exception groups)" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, - {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "ghp-import" -version = "2.1.0" -description = "Copy your docs directly to the gh-pages branch." -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, - {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, -] - -[package.dependencies] -python-dateutil = ">=2.8.1" - -[package.extras] -dev = ["flake8", "markdown", "twine", "wheel"] - -[[package]] -name = "griffe" -version = "0.25.5" -description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "griffe-0.25.5-py3-none-any.whl", hash = "sha256:1fb9edff48e66d4873014a2ebf21aca5f271d0006a4c937826e3cf592ffb3706"}, - {file = "griffe-0.25.5.tar.gz", hash = "sha256:11ea3403ef0560a1cbcf7f302eb5d21cf4c1d8ed3f8a16a75aa9f6f458caf3f1"}, -] - -[package.dependencies] -colorama = ">=0.4" - -[package.extras] -async = ["aiofiles (>=0.7,<1.0)"] - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -category = "dev" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "importlib-metadata" -version = "6.1.0" -description = "Read metadata from Python packages" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, - {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, -] - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] - -[[package]] -name = "importlib-resources" -version = "5.12.0" -description = "Read resources from Python packages" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, - {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "jaraco-classes" -version = "3.2.3" -description = "Utility functions for Python class constructs" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"}, - {file = "jaraco.classes-3.2.3.tar.gz", hash = 
"sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"}, -] - -[package.dependencies] -more-itertools = "*" - -[package.extras] -docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[[package]] -name = "jeepney" -version = "0.8.0" -description = "Low-level, pure Python DBus protocol wrapper." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, - {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, -] - -[package.extras] -test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] -trio = ["async_generator", "trio"] - -[[package]] -name = "jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "keyring" -version = "23.13.1" -description = "Store and access your passwords safely." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"}, - {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} -"jaraco.classes" = "*" -jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} -pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} -SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} - -[package.extras] -completion = ["shtab"] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[[package]] -name = "markdown" -version = "3.3.7" -description = "Python implementation of Markdown." -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, - {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} - -[package.extras] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "markdown-it-py" -version = "2.2.0" -description = "Python port of markdown-it. Markdown parsing, done right!" 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, - {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "markupsafe" -version = "2.1.2" -description = "Safely add untrusted strings to HTML/XML markup." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, - {file = 
"MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, - {file = 
"MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, - {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, -] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - -[[package]] -name = "mergedeep" -version = "1.3.4" -description = "A deep merge function for 🐍." -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, - {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, -] - -[[package]] -name = "mkdocs" -version = "1.4.2" -description = "Project documentation with Markdown." 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs-1.4.2-py3-none-any.whl", hash = "sha256:c8856a832c1e56702577023cd64cc5f84948280c1c0fcc6af4cd39006ea6aa8c"}, - {file = "mkdocs-1.4.2.tar.gz", hash = "sha256:8947af423a6d0facf41ea1195b8e1e8c85ad94ac95ae307fe11232e0424b11c5"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} -ghp-import = ">=1.0" -importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} -jinja2 = ">=2.11.1" -markdown = ">=3.2.1,<3.4" -mergedeep = ">=1.3.4" -packaging = ">=20.5" -pyyaml = ">=5.1" -pyyaml-env-tag = ">=0.1" -watchdog = ">=2.0" - -[package.extras] -i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] - -[[package]] -name = "mkdocs-autorefs" -version = "0.4.1" -description = "Automatically link across pages in MkDocs." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs-autorefs-0.4.1.tar.gz", hash = "sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84"}, - {file = "mkdocs_autorefs-0.4.1-py3-none-any.whl", hash = "sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b"}, -] - -[package.dependencies] -Markdown = ">=3.3" -mkdocs = ">=1.1" - -[[package]] -name = "mkdocs-material" -version = "9.1.4" -description = "Documentation that simply works" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs_material-9.1.4-py3-none-any.whl", hash = "sha256:4c92dcf9365068259bef3eed8e0dd5410056b6f7187bdea2d52848c0f94cd94c"}, - {file = "mkdocs_material-9.1.4.tar.gz", hash = "sha256:c3a8943e9e4a7d2624291da365bbccf0b9f88688aa6947a46260d8c165cd4389"}, -] - -[package.dependencies] -colorama = ">=0.4" -jinja2 = ">=3.0" -markdown = ">=3.2" -mkdocs = ">=1.4.2" -mkdocs-material-extensions = ">=1.1" -pygments = ">=2.14" -pymdown-extensions = ">=9.9.1" -regex = ">=2022.4.24" -requests = ">=2.26" - -[[package]] -name = "mkdocs-material-extensions" -version = "1.1.1" -description = "Extension pack for Python Markdown and MkDocs Material." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, - {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, -] - -[[package]] -name = "mkdocstrings" -version = "0.20.0" -description = "Automatic documentation from sources, for MkDocs." 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocstrings-0.20.0-py3-none-any.whl", hash = "sha256:f17fc2c4f760ec302b069075ef9e31045aa6372ca91d2f35ded3adba8e25a472"}, - {file = "mkdocstrings-0.20.0.tar.gz", hash = "sha256:c757f4f646d4f939491d6bc9256bfe33e36c5f8026392f49eaa351d241c838e5"}, -] - -[package.dependencies] -Jinja2 = ">=2.11.1" -Markdown = ">=3.3" -MarkupSafe = ">=1.1" -mkdocs = ">=1.2" -mkdocs-autorefs = ">=0.3.1" -mkdocstrings-python = {version = ">=0.5.2", optional = true, markers = "extra == \"python\""} -pymdown-extensions = ">=6.3" - -[package.extras] -crystal = ["mkdocstrings-crystal (>=0.3.4)"] -python = ["mkdocstrings-python (>=0.5.2)"] -python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] - -[[package]] -name = "mkdocstrings-python" -version = "0.8.3" -description = "A Python handler for mkdocstrings." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocstrings-python-0.8.3.tar.gz", hash = "sha256:9ae473f6dc599339b09eee17e4d2b05d6ac0ec29860f3fc9b7512d940fc61adf"}, - {file = "mkdocstrings_python-0.8.3-py3-none-any.whl", hash = "sha256:4e6e1cd6f37a785de0946ced6eb846eb2f5d891ac1cc2c7b832943d3529087a7"}, -] - -[package.dependencies] -griffe = ">=0.24" -mkdocstrings = ">=0.19" - -[[package]] -name = "more-itertools" -version = "9.1.0" -description = "More routines for operating on iterables, beyond itertools" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, - {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, -] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" -optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "packaging" -version = "23.0" -description = "Core utilities for Python packages" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, - {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, -] - -[[package]] -name = "pathspec" -version = "0.11.1" -description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, -] - -[[package]] -name = "pkginfo" -version = "1.9.6" -description = "Query metadata from sdists / bdists / installed packages." 
-category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"}, - {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"}, -] - -[package.extras] -testing = ["pytest", "pytest-cov"] - -[[package]] -name = "platformdirs" -version = "3.1.1" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, - {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - -[[package]] -name = "pluggy" -version = "1.0.0" -description = "plugin and hook calling mechanisms for python" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, - {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] - -[[package]] -name = "pygments" -version = "2.14.0" -description = "Pygments is a syntax highlighting package written in Python." -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, - {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, -] - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pymdown-extensions" -version = "9.10" -description = "Extension pack for Python Markdown." 
-category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pymdown_extensions-9.10-py3-none-any.whl", hash = "sha256:31eaa76ce6f96aabfcea98787c2fff2c5c0611b20a53a94213970cfbf05f02b8"}, - {file = "pymdown_extensions-9.10.tar.gz", hash = "sha256:562c38eee4ce3f101ce631b804bfc2177a8a76c7e4dc908871fb6741a90257a7"}, -] - -[package.dependencies] -markdown = ">=3.2" -pyyaml = "*" - -[[package]] -name = "pytest" -version = "7.2.2" -description = "pytest: simple powerful testing with Python" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, - {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, -] - -[package.dependencies] -attrs = ">=19.2.0" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -category = "dev" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "pywin32-ctypes" -version = "0.2.0" -description = "" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, - {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, -] - -[[package]] -name = "pyyaml" -version = "6.0" -description = "YAML parser and emitter for Python" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = 
"PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] - -[[package]] -name = "pyyaml-env-tag" -version = "0.1" -description = "A custom YAML tag for referencing environment variables in YAML files. " -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, - {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, -] - -[package.dependencies] -pyyaml = "*" - -[[package]] -name = "readme-renderer" -version = "37.3" -description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "readme_renderer-37.3-py3-none-any.whl", hash = "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"}, - {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"}, -] - -[package.dependencies] -bleach = ">=2.1.0" -docutils = ">=0.13.1" -Pygments = ">=2.5.1" - -[package.extras] -md = ["cmarkgfm (>=0.8.0)"] - -[[package]] -name = "regex" -version = "2023.3.23" -description = "Alternative regular expression module, to replace re." 
-category = "dev" -optional = false -python-versions = ">=3.8" -files = [ - {file = "regex-2023.3.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:845a5e2d84389c4ddada1a9b95c055320070f18bb76512608374aca00d22eca8"}, - {file = "regex-2023.3.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87d9951f5a538dd1d016bdc0dcae59241d15fa94860964833a54d18197fcd134"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae17d3be44c0b3f782c28ae9edd8b47c1f1776d4cabe87edc0b98e1f12b021"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b8eb1e3bca6b48dc721818a60ae83b8264d4089a4a41d62be6d05316ec38e15"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df45fac182ebc3c494460c644e853515cc24f5ad9da05f8ffb91da891bfee879"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7006105b10b59971d3b248ad75acc3651c7e4cf54d81694df5a5130a3c3f7ea"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93f3f1aa608380fe294aa4cb82e2afda07a7598e828d0341e124b8fd9327c715"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787954f541ab95d8195d97b0b8cf1dc304424adb1e07365967e656b92b38a699"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:20abe0bdf03630fe92ccafc45a599bca8b3501f48d1de4f7d121153350a2f77d"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11d00c31aeab9a6e0503bc77e73ed9f4527b3984279d997eb145d7c7be6268fd"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d5bbe0e1511b844794a3be43d6c145001626ba9a6c1db8f84bdc724e91131d9d"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ea3c0cb56eadbf4ab2277e7a095676370b3e46dbfc74d5c383bd87b0d6317910"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d895b4c863059a4934d3e874b90998df774644a41b349ebb330f85f11b4ef2c0"}, - {file = "regex-2023.3.23-cp310-cp310-win32.whl", hash = "sha256:9d764514d19b4edcc75fd8cb1423448ef393e8b6cbd94f38cab983ab1b75855d"}, - {file = "regex-2023.3.23-cp310-cp310-win_amd64.whl", hash = "sha256:11d1f2b7a0696dc0310de0efb51b1f4d813ad4401fe368e83c0c62f344429f98"}, - {file = "regex-2023.3.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a9c63cde0eaa345795c0fdeb19dc62d22e378c50b0bc67bf4667cd5b482d98b"}, - {file = "regex-2023.3.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dd7200b4c27b68cf9c9646da01647141c6db09f48cc5b51bc588deaf8e98a797"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22720024b90a6ba673a725dcc62e10fb1111b889305d7c6b887ac7466b74bedb"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b190a339090e6af25f4a5fd9e77591f6d911cc7b96ecbb2114890b061be0ac1"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e76b6fc0d8e9efa39100369a9b3379ce35e20f6c75365653cf58d282ad290f6f"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7868b8f218bf69a2a15402fde08b08712213a1f4b85a156d90473a6fb6b12b09"}, - {file = 
"regex-2023.3.23-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2472428efc4127374f494e570e36b30bb5e6b37d9a754f7667f7073e43b0abdd"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c37df2a060cb476d94c047b18572ee2b37c31f831df126c0da3cd9227b39253d"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4479f9e2abc03362df4045b1332d4a2b7885b245a30d4f4b051c4083b97d95d8"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2396e0678167f2d0c197da942b0b3fb48fee2f0b5915a0feb84d11b6686afe6"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75f288c60232a5339e0ff2fa05779a5e9c74e9fc085c81e931d4a264501e745b"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c869260aa62cee21c5eb171a466c0572b5e809213612ef8d495268cd2e34f20d"}, - {file = "regex-2023.3.23-cp311-cp311-win32.whl", hash = "sha256:25f0532fd0c53e96bad84664171969de9673b4131f2297f1db850d3918d58858"}, - {file = "regex-2023.3.23-cp311-cp311-win_amd64.whl", hash = "sha256:5ccfafd98473e007cebf7da10c1411035b7844f0f204015efd050601906dbb53"}, - {file = "regex-2023.3.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6572ff287176c0fb96568adb292674b421fa762153ed074d94b1d939ed92c253"}, - {file = "regex-2023.3.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a610e0adfcb0fc84ea25f6ea685e39e74cbcd9245a72a9a7aab85ff755a5ed27"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086afe222d58b88b62847bdbd92079b4699350b4acab892f88a935db5707c790"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79e29fd62fa2f597a6754b247356bda14b866131a22444d67f907d6d341e10f3"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c07ce8e9eee878a48ebeb32ee661b49504b85e164b05bebf25420705709fdd31"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b036f401895e854de9fefe061518e78d506d8a919cc250dc3416bca03f6f9a"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78ac8dd8e18800bb1f97aad0d73f68916592dddf233b99d2b5cabc562088503a"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:539dd010dc35af935b32f248099e38447bbffc10b59c2b542bceead2bed5c325"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9bf4a5626f2a0ea006bf81e8963f498a57a47d58907eaa58f4b3e13be68759d8"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf86b4328c204c3f315074a61bc1c06f8a75a8e102359f18ce99fbcbbf1951f0"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:2848bf76673c83314068241c8d5b7fa9ad9bed866c979875a0e84039349e8fa7"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c125a02d22c555e68f7433bac8449992fa1cead525399f14e47c2d98f2f0e467"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cd1671e9d5ac05ce6aa86874dd8dfa048824d1dbe73060851b310c6c1a201a96"}, - {file = "regex-2023.3.23-cp38-cp38-win32.whl", hash = "sha256:fffe57312a358be6ec6baeb43d253c36e5790e436b7bf5b7a38df360363e88e9"}, - {file = "regex-2023.3.23-cp38-cp38-win_amd64.whl", hash = 
"sha256:dbb3f87e15d3dd76996d604af8678316ad2d7d20faa394e92d9394dfd621fd0c"}, - {file = "regex-2023.3.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c88e8c226473b5549fe9616980ea7ca09289246cfbdf469241edf4741a620004"}, - {file = "regex-2023.3.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6560776ec19c83f3645bbc5db64a7a5816c9d8fb7ed7201c5bcd269323d88072"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b1fc2632c01f42e06173d8dd9bb2e74ab9b0afa1d698058c867288d2c7a31f3"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdf7ad455f1916b8ea5cdbc482d379f6daf93f3867b4232d14699867a5a13af7"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fc33b27b1d800fc5b78d7f7d0f287e35079ecabe68e83d46930cf45690e1c8c"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c49552dc938e3588f63f8a78c86f3c9c75301e813bca0bef13bdb4b87ccf364"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e152461e9a0aedec7d37fc66ec0fa635eca984777d3d3c3e36f53bf3d3ceb16e"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db034255e72d2995cf581b14bb3fc9c00bdbe6822b49fcd4eef79e1d5f232618"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:55ae114da21b7a790b90255ea52d2aa3a0d121a646deb2d3c6a3194e722fc762"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ef3f528fe1cc3d139508fe1b22523745aa77b9d6cb5b0bf277f48788ee0b993f"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:a81c9ec59ca2303acd1ccd7b9ac409f1e478e40e96f8f79b943be476c5fdb8bb"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cde09c4fdd070772aa2596d97e942eb775a478b32459e042e1be71b739d08b77"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3cd9f5dd7b821f141d3a6ca0d5d9359b9221e4f051ca3139320adea9f1679691"}, - {file = "regex-2023.3.23-cp39-cp39-win32.whl", hash = "sha256:7304863f3a652dab5e68e6fb1725d05ebab36ec0390676d1736e0571ebb713ef"}, - {file = "regex-2023.3.23-cp39-cp39-win_amd64.whl", hash = "sha256:54c3fa855a3f7438149de3211738dd9b5f0c733f48b54ae05aa7fce83d48d858"}, - {file = "regex-2023.3.23.tar.gz", hash = "sha256:dc80df325b43ffea5cdea2e3eaa97a44f3dd298262b1c7fe9dbb2a9522b956a7"}, -] - -[[package]] -name = "requests" -version = "2.28.2" -description = "Python HTTP for Humans." 
-category = "dev" -optional = false -python-versions = ">=3.7, <4" -files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "requests-toolbelt" -version = "0.10.1" -description = "A utility belt for advanced users of python-requests" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, - {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, -] - -[package.dependencies] -requests = ">=2.0.1,<3.0.0" - -[[package]] -name = "rfc3986" -version = "2.0.0" -description = "Validating URI References per RFC 3986" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, - {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, -] - -[package.extras] -idna2008 = ["idna"] - -[[package]] -name = "rich" -version = "13.3.2" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "dev" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "rich-13.3.2-py3-none-any.whl", hash = "sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f"}, - {file = "rich-13.3.2.tar.gz", hash = "sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0,<3.0.0" -pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - -[[package]] -name = "secretstorage" -version = "3.3.3" -description = "Python bindings to FreeDesktop.org Secret Service API" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, - {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, -] - -[package.dependencies] -cryptography = ">=2.0" -jeepney = ">=0.6" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = 
"sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "twine" -version = "4.0.2" -description = "Collection of utilities for publishing packages on PyPI" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"}, - {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"}, -] - -[package.dependencies] -importlib-metadata = ">=3.6" -keyring = ">=15.1" -pkginfo = ">=1.8.1" -readme-renderer = ">=35.0" -requests = ">=2.20" -requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0" -rfc3986 = ">=1.4.0" -rich = ">=12.0.0" -urllib3 = ">=1.26.0" - -[[package]] -name = "typing-extensions" -version = "4.5.0" -description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, -] - -[[package]] -name = "urllib3" -version = "1.26.15" -description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, - {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "watchdog" -version = "3.0.0" -description = "Filesystem events monitoring" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, - {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, - {file = 
"watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, - {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, - {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, - {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, - {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, - {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, - {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, - {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - -[[package]] -name = "webencodings" -version = "0.5.1" -description = "Character encoding aliases for legacy web content" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] - -[[package]] -name = "zipp" -version = "3.15.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" -optional = false -python-versions = ">=3.7" -files = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = 
"zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.8.1" -content-hash = "cc9babcdfdc3679a4d84f68912408a005619a576947b059146ed1b428850ece9" diff --git a/pyproject.toml b/pyproject.toml index 020bd5d8d..9983ef777 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,35 +1,81 @@ -[tool.poetry] +[build-system] +requires = ["scikit-build-core[pyproject]>=0.9.2"] +build-backend = "scikit_build_core.build" + +[project] name = "llama_cpp_python" -version = "0.1.27" +dynamic = ["version"] description = "Python bindings for the llama.cpp library" -authors = ["Andrei Betlen "] -license = "MIT" readme = "README.md" -homepage = "https://github.com/abetlen/llama-cpp-python" -repository = "https://github.com/abetlen/llama-cpp-python" -packages = [{include = "llama_cpp"}] -include = [ - "LICENSE.md", +license = { text = "MIT" } +authors = [ + { name = "Andrei Betlen", email = "abetlen@gmail.com" }, +] +dependencies = [ + "typing-extensions>=4.5.0", + "numpy>=1.20.0", + "diskcache>=5.6.1", + "jinja2>=2.11.3", +] +requires-python = ">=3.8" +classifiers = [ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] + + +[project.optional-dependencies] +server = [ + "uvicorn>=0.22.0", + "fastapi>=0.100.0", + "pydantic-settings>=2.0.1", + "sse-starlette>=1.6.1", + "starlette-context>=0.3.6,<0.4", + "PyYAML>=5.1", +] +test = [ + "pytest>=7.4.0", + "httpx>=0.24.1", + "scipy>=1.10", + "fastapi>=0.100.0", + "sse-starlette>=1.6.1", + "starlette-context>=0.3.6,<0.4", + "pydantic-settings>=2.0.1", + "huggingface-hub>=0.23.0" +] +dev = [ + "black>=23.3.0", + "twine>=4.0.2", + "mkdocs>=1.4.3", + "mkdocstrings[python]>=0.22.0", + "mkdocs-material>=9.1.18", + "pytest>=7.4.0", + "httpx>=0.24.1", +] +all = [ + "llama_cpp_python[server,test,dev]", ] -[tool.poetry.dependencies] -python = "^3.8.1" -typing-extensions = "^4.5.0" +[tool.scikit-build] +wheel.packages = ["llama_cpp"] +cmake.verbose = true +cmake.minimum-version = "3.21" +minimum-version = "0.5.1" +sdist.include = [".git", "vendor/llama.cpp/*"] +[tool.scikit-build.metadata.version] +provider = "scikit_build_core.metadata.regex" +input = "llama_cpp/__init__.py" -[tool.poetry.group.dev.dependencies] -black = "^23.1.0" -twine = "^4.0.2" -mkdocs = "^1.4.2" -mkdocstrings = {extras = ["python"], version = "^0.20.0"} -mkdocs-material = "^9.1.4" -pytest = "^7.2.2" +[project.urls] +Homepage = "https://github.com/abetlen/llama-cpp-python" +Issues = "https://github.com/abetlen/llama-cpp-python/issues" +Documentation = "https://llama-cpp-python.readthedocs.io/en/latest/" +Changelog = "https://llama-cpp-python.readthedocs.io/en/latest/changelog/" -[build-system] -requires = [ - "setuptools>=42", - "scikit-build>=0.13", - "cmake>=3.18", - "ninja", -] -build-backend = "setuptools.build_meta" +[tool.pytest.ini_options] +testpaths = "tests" diff --git a/scripts/get-releases.sh 
b/scripts/get-releases.sh new file mode 100755 index 000000000..4c904da78 --- /dev/null +++ b/scripts/get-releases.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Function to get all releases +get_all_releases() { + local page=1 + local per_page=100 + local releases="" + local new_releases + + # Prepare headers + local headers=(-H "Accept: application/vnd.github.v3+json") + if [ -n "$GITHUB_TOKEN" ]; then + headers+=(-H "Authorization: Bearer $GITHUB_TOKEN") + fi + + while true; do + response=$(curl -s "${headers[@]}" \ + "https://api.github.com/repos/abetlen/llama-cpp-python/releases?page=$page&per_page=$per_page") + + # Check if the response is valid JSON + if ! echo "$response" | jq empty > /dev/null 2>&1; then + echo "Error: Invalid response from GitHub API" >&2 + echo "Response: $response" >&2 + return 1 + fi + + new_releases=$(echo "$response" | jq -r '.[].tag_name') + if [ -z "$new_releases" ]; then + break + fi + releases="$releases $new_releases" + ((page++)) + done + + echo $releases +} + +# Get all releases and save to file +releases=$(get_all_releases) +if [ $? -ne 0 ]; then + echo "Failed to fetch releases. Please check your internet connection and try again later." >&2 + exit 1 +fi + +echo "$releases" | tr ' ' '\n' > all_releases.txt + +echo "All releases have been saved to all_releases.txt"
diff --git a/scripts/releases-to-pep-503.sh b/scripts/releases-to-pep-503.sh new file mode 100755 index 000000000..71910efcb --- /dev/null +++ b/scripts/releases-to-pep-503.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +# Enable exit on error +set -e + +# Function for logging +log_error() { + echo "ERROR: $1" >&2 +} + +log_info() { + echo "INFO: $1" +} + +# Get output directory or default to index/whl/cpu +output_dir=${1:-"index/whl/cpu"} + +# Get pattern from second arg or default to valid python package version pattern +pattern=${2:-"^[v]?[0-9]+\.[0-9]+\.[0-9]+$"} + +# Get the current directory (where the script is run from) +current_dir="$(pwd)" + +# Check if all_releases.txt exists +if [ ! -f "$current_dir/all_releases.txt" ]; then + log_error "all_releases.txt not found in the current directory." + exit 1 +fi + +# Create output directory +mkdir -p "$output_dir"
+# Create an index html file +cat << EOF > "$output_dir/index.html" +<!DOCTYPE html> +<html> +  <head></head> +  <body> +    <a href="llama-cpp-python/">llama-cpp-python</a> +    <br> +  </body> +</html> + +EOF + +# Create llama-cpp-python directory +mkdir -p "$output_dir/llama-cpp-python" + +# Create an index html file in llama-cpp-python directory +cat << EOF > "$output_dir/llama-cpp-python/index.html" +<!DOCTYPE html> +<html> +  <head></head> +  <body> +    <h1>Links for llama-cpp-python</h1> +EOF
+# Filter releases by pattern +releases=$(grep -E "$pattern" "$current_dir/all_releases.txt") + +# Prepare curl headers +headers=('--header' 'Accept: application/vnd.github.v3+json') +if [ -n "$GITHUB_TOKEN" ]; then + headers+=('--header' "authorization: Bearer $GITHUB_TOKEN") +fi +headers+=('--header' 'content-type: application/json') + +# For each release, get all assets +for release in $releases; do + log_info "Processing release: $release" + response=$(curl -s "${headers[@]}" \ + "https://api.github.com/repos/abetlen/llama-cpp-python/releases/tags/$release") + + if [ -z "$response" ]; then + log_error "Empty response from GitHub API for release $release" + continue + fi + + if ! echo "$response" | jq -e '.assets' > /dev/null 2>&1; then + log_error "Invalid or unexpected response from GitHub API for release $release" + log_error "Response: $response" + continue + fi + + # Get release version from release ie v0.1.0-cu121 -> v0.1.0 + release_version=$(echo "$release" | grep -oE "^[v]?[0-9]+\.[0-9]+\.[0-9]+") + echo "    <h2>$release_version</h2>" >> "$output_dir/llama-cpp-python/index.html" + + wheel_urls=$(echo "$response" | jq -r '.assets[] | select(.name | endswith(".whl")) | .browser_download_url') + if [ -z "$wheel_urls" ]; then + log_error "No wheel files found for release $release" + continue + fi + + echo "$wheel_urls" | while read -r asset; do + echo "    <a href=\"$asset\">$asset</a>" >> "$output_dir/llama-cpp-python/index.html" + echo "    <br>" >> "$output_dir/llama-cpp-python/index.html" + done +done + +echo "  </body>" >> "$output_dir/llama-cpp-python/index.html" +echo "</html>" >> "$output_dir/llama-cpp-python/index.html" +echo "" >> "$output_dir/llama-cpp-python/index.html" + +log_info "Index generation complete. Output directory: $output_dir"
diff --git a/setup.py b/setup.py deleted file mode 100644 index c53bb293c..000000000 --- a/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -from skbuild import setup - -from pathlib import Path - -this_directory = Path(__file__).parent -long_description = (this_directory / "README.md").read_text() - -setup( - name="llama_cpp_python", - description="A Python wrapper for llama.cpp", - long_description=long_description, - long_description_content_type="text/markdown", - version="0.1.27", - author="Andrei Betlen", - author_email="abetlen@gmail.com", - license="MIT", - package_dir={"llama_cpp": "llama_cpp", "llama_cpp.server": "llama_cpp/server"}, - packages=["llama_cpp", "llama_cpp.server"], - install_requires=[ - "typing-extensions>=4.5.0", - ], - extras_require={ - "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3"], - }, - python_requires=">=3.7", - classifiers=[ - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - ], -)
diff --git a/tests/test_llama.py b/tests/test_llama.py index 6a50256d4..fc182ae20 100644 --- a/tests/test_llama.py +++ b/tests/test_llama.py @@ -1,96 +1,218 @@ -import llama_cpp - -MODEL = "./vendor/llama.cpp/models/ggml-vocab.bin" +import ctypes +import multiprocessing +import numpy as np +from scipy.special import log_softmax -def test_llama(): - llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) +from huggingface_hub import hf_hub_download - assert llama - assert llama.ctx is not None +import pytest - text = b"Hello World" +import llama_cpp +import llama_cpp._internals as internals - assert llama.detokenize(llama.tokenize(text)) == text +MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama-spm.gguf" -def test_llama_patch(monkeypatch): - llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) - ## Set up mock function - def mock_eval(*args, **kwargs): - return 0 +def test_llama_cpp_version(): + assert llama_cpp.__version__ - monkeypatch.setattr("llama_cpp.llama_cpp.llama_eval", mock_eval) - output_text = " jumps over the lazy dog."
- output_tokens = llama.tokenize(output_text.encode("utf-8")) - token_eos = llama.token_eos() - n = 0 +def test_llama_cpp_tokenization(): + llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False) - def mock_sample(*args, **kwargs): - nonlocal n - if n < len(output_tokens): - n += 1 - return output_tokens[n - 1] - else: - return token_eos + assert llama + assert llama._ctx.ctx is not None - monkeypatch.setattr("llama_cpp.llama_cpp.llama_sample_top_p_top_k", mock_sample) + text = b"Hello World" - text = "The quick brown fox" + tokens = llama.tokenize(text) + assert tokens[0] == llama.token_bos() + assert tokens == [1, 15043, 2787] + detokenized = llama.detokenize(tokens) + assert detokenized == text + + tokens = llama.tokenize(text, add_bos=False) + assert tokens[0] != llama.token_bos() + assert tokens == [15043, 2787] + + detokenized = llama.detokenize(tokens) + assert detokenized != text + + text = b"Hello World" + tokens = llama.tokenize(text) + assert tokens[-1] != llama.token_eos() + assert tokens == [1, 15043, 2787, 829, 29879, 29958] + + tokens = llama.tokenize(text, special=True) + assert tokens[-1] == llama.token_eos() + assert tokens == [1, 15043, 2787, 2] + + text = b"" + tokens = llama.tokenize(text, add_bos=True, special=True) + assert tokens[-1] != llama.token_eos() + assert tokens == [llama.token_bos()] + assert text == llama.detokenize(tokens) + + +@pytest.fixture +def llama_cpp_model_path(): + repo_id = "Qwen/Qwen2-0.5B-Instruct-GGUF" + filename = "qwen2-0_5b-instruct-q8_0.gguf" + model_path = hf_hub_download(repo_id, filename) + return model_path + + +def test_real_model(llama_cpp_model_path): + import os + assert os.path.exists(llama_cpp_model_path) + + params = llama_cpp.llama_model_default_params() + params.use_mmap = llama_cpp.llama_supports_mmap() + params.use_mlock = llama_cpp.llama_supports_mlock() + params.check_tensors = False + + model = internals.LlamaModel(path_model=llama_cpp_model_path, params=params) + + cparams = llama_cpp.llama_context_default_params() + cparams.n_ctx = 16 + cparams.n_batch = 16 + cparams.n_ubatch = 16 + cparams.n_threads = multiprocessing.cpu_count() + cparams.n_threads_batch = multiprocessing.cpu_count() + cparams.logits_all = False + cparams.flash_attn = True + + context = internals.LlamaContext(model=model, params=cparams) + tokens = model.tokenize(b"Hello, world!", add_bos=True, special=True) + + assert tokens == [9707, 11, 1879, 0] + + tokens = model.tokenize(b"The quick brown fox jumps", add_bos=True, special=True) + + batch = internals.LlamaBatch(n_tokens=len(tokens), embd=0, n_seq_max=1) + + seed = 1337 + sampler = internals.LlamaSampler() + sampler.add_top_k(50) + sampler.add_top_p(0.9, 1) + sampler.add_temp(0.8) + sampler.add_dist(seed) + + result = tokens + n_eval = 0 + for _ in range(4): + batch.set_batch(tokens, n_past=n_eval, logits_all=False) + context.decode(batch) + n_eval += len(tokens) + token_id = sampler.sample(context, -1) + tokens = [token_id] + result += tokens + + output = result[5:] + output_text = model.detokenize(output, special=True) + assert output_text == b" over the lazy dog" + +def test_real_llama(llama_cpp_model_path): + model = llama_cpp.Llama( + llama_cpp_model_path, + n_ctx=32, + n_batch=32, + n_ubatch=32, + n_threads=multiprocessing.cpu_count(), + n_threads_batch=multiprocessing.cpu_count(), + logits_all=False, + flash_attn=True, + ) - ## Test basic completion until eos - n = 0 # reset - completion = llama.create_completion(text, max_tokens=20) - assert 
completion["choices"][0]["text"] == output_text - assert completion["choices"][0]["finish_reason"] == "stop" + output = model.create_completion( + "The quick brown fox jumps", + max_tokens=4, + top_k=50, + top_p=0.9, + temperature=0.8, + seed=1337 + ) + assert output["choices"][0]["text"] == " over the lazy dog" + + + output = model.create_completion( + "The capital of france is paris, 'true' or 'false'?:\n", + max_tokens=4, + top_k=50, + top_p=0.9, + temperature=0.8, + seed=1337, + grammar=llama_cpp.LlamaGrammar.from_string(""" +root ::= "true" | "false" +""") + ) + assert output["choices"][0]["text"] == "true" - ## Test streaming completion until eos - n = 0 # reset - chunks = llama.create_completion(text, max_tokens=20, stream=True) - assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == output_text - assert completion["choices"][0]["finish_reason"] == "stop" + suffix = b"rot" + tokens = model.tokenize(suffix, add_bos=True, special=True) + def logit_processor_func(input_ids, logits): + for token in tokens: + logits[token] *= 1000 + return logits - ## Test basic completion until stop sequence - n = 0 # reset - completion = llama.create_completion(text, max_tokens=20, stop=["lazy"]) - assert completion["choices"][0]["text"] == " jumps over the " - assert completion["choices"][0]["finish_reason"] == "stop" + logit_processors = llama_cpp.LogitsProcessorList( + [logit_processor_func] + ) - ## Test streaming completion until stop sequence - n = 0 # reset - chunks = llama.create_completion(text, max_tokens=20, stream=True, stop=["lazy"]) - assert ( - "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps over the " + output = model.create_completion( + "The capital of france is par", + max_tokens=4, + top_k=50, + top_p=0.9, + temperature=0.8, + seed=1337, + logits_processor=logit_processors ) - assert completion["choices"][0]["finish_reason"] == "stop" - - ## Test basic completion until length - n = 0 # reset - completion = llama.create_completion(text, max_tokens=2) - assert completion["choices"][0]["text"] == " j" - assert completion["choices"][0]["finish_reason"] == "length" - - ## Test streaming completion until length - n = 0 # reset - chunks = llama.create_completion(text, max_tokens=2, stream=True) - assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == " j" - assert completion["choices"][0]["finish_reason"] == "length" - - -def test_llama_pickle(): - import pickle - import tempfile - fp = tempfile.TemporaryFile() - llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) - pickle.dump(llama, fp) - fp.seek(0) - llama = pickle.load(fp) + assert output["choices"][0]["text"].lower().startswith("rot") - assert llama - assert llama.ctx is not None + model.set_seed(1337) - text = b"Hello World" + state = model.save_state() + + output = model.create_completion( + "Pick a number from 1 to 10?:\n", + max_tokens=4, + top_k=50, + top_p=0.9, + temperature=0.8, + grammar=llama_cpp.LlamaGrammar.from_string(""" +root ::= "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" | "10" +""") + ) + number_1 = output["choices"][0]["text"] + + output = model.create_completion( + "Pick a number from 1 to 10?:\n", + max_tokens=4, + top_k=50, + top_p=0.9, + temperature=0.8, + grammar=llama_cpp.LlamaGrammar.from_string(""" +root ::= "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" | "10" +""") + ) + number_2 = output["choices"][0]["text"] + + model.load_state(state) + + output = model.create_completion( + "Pick a number from 1 to 10?:\n", + max_tokens=4, + top_k=50, + 
top_p=0.9, + temperature=0.8, + grammar=llama_cpp.LlamaGrammar.from_string(""" +root ::= "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" | "10" +""") + ) + number_3 = output["choices"][0]["text"] - assert llama.detokenize(llama.tokenize(text)) == text \ No newline at end of file + assert number_1 != number_2 + assert number_1 == number_3 diff --git a/tests/test_llama_chat_format.py b/tests/test_llama_chat_format.py new file mode 100644 index 000000000..f031bf72b --- /dev/null +++ b/tests/test_llama_chat_format.py @@ -0,0 +1,89 @@ +import json + +import jinja2 + +from llama_cpp import ( + ChatCompletionRequestUserMessage, +) +import llama_cpp.llama_types as llama_types +import llama_cpp.llama_chat_format as llama_chat_format + +from llama_cpp.llama_chat_format import hf_tokenizer_config_to_chat_formatter + +def test_mistral_instruct(): + chat_template = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" + chat_formatter = jinja2.Template(chat_template) + messages = [ + llama_types.ChatCompletionRequestUserMessage(role="user", content="Instruction"), + llama_types.ChatCompletionRequestAssistantMessage(role="assistant", content="Model answer"), + llama_types.ChatCompletionRequestUserMessage(role="user", content="Follow-up instruction"), + ] + response = llama_chat_format.format_mistral_instruct( + messages=messages, + ) + prompt = ("" if response.added_special else "") + response.prompt + reference = chat_formatter.render( + messages=messages, + bos_token="", + eos_token="", + ) + assert prompt == reference + + +mistral_7b_tokenizer_config = """{ + "add_bos_token": true, + "add_eos_token": false, + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [], + "bos_token": "", + "clean_up_tokenization_spaces": false, + "eos_token": "", + "legacy": true, + "model_max_length": 1000000000000000019884624838656, + "pad_token": null, + "sp_model_kwargs": {}, + "spaces_between_special_tokens": false, + "tokenizer_class": "LlamaTokenizer", + "unk_token": "", + "use_default_system_prompt": false, + "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" +}""" + + +def test_hf_tokenizer_config_str_to_chat_formatter(): + tokenizer_config = json.loads(mistral_7b_tokenizer_config) + chat_formatter = hf_tokenizer_config_to_chat_formatter( + tokenizer_config + ) + 
chat_formatter_respoonse = chat_formatter( + messages=[ + ChatCompletionRequestUserMessage(role="user", content="Hello, world!"), + ] + ) + + assert chat_formatter_respoonse.prompt == ("[INST] Hello, world! [/INST]" "") diff --git a/tests/test_llama_grammar.py b/tests/test_llama_grammar.py new file mode 100644 index 000000000..34ef2874d --- /dev/null +++ b/tests/test_llama_grammar.py @@ -0,0 +1,78 @@ +import llama_cpp +import json + +tree = """ +leaf ::= "." +node ::= leaf | "(" node node ")" +root ::= node +""" + + +def test_grammar_from_string(): + grammar = llama_cpp.LlamaGrammar.from_string(tree) + # assert grammar._n_rules == 3 + # assert grammar._start_rule_index == 2 + # assert grammar.grammar is not None + + +def test_composed_pydantic_grammar(): + """ + from pydantic import BaseModel + + class A(BaseModel): + a: int + + class B(BaseModel): + a: A + b: int + """ + + # This schema corresponds to the grammar in the comment above. + # We don't use the pydantic models directly to avoid the dependency. + schema = { + "$defs": { + "A": { + "properties": {"a": {"title": "A", "type": "integer"}}, + "required": ["a"], + "title": "A", + "type": "object", + } + }, + "properties": { + "a": {"$ref": "#/$defs/A"}, + "b": {"title": "B", "type": "integer"}, + }, + "required": ["a", "b"], + "title": "B", + "type": "object", + } + + grammar = llama_cpp.LlamaGrammar.from_json_schema(json.dumps(schema)) + + # assert grammar.grammar is not None + + +def test_grammar_anyof(): + sch = { + "properties": { + "temperature": { + "description": "The temperature mentioned", + "type": "number", + }, + "unit": { + "anyOf": [ + { + "description": "Unit for temperature", + "enum": ["celsius", "fahrenheit"], + "type": "string", + }, + {"type": "null"}, + ], + }, + }, + "type": "object", + } + + grammar = llama_cpp.LlamaGrammar.from_json_schema(json.dumps(sch)) + + # assert grammar.grammar is not None diff --git a/tests/test_llama_speculative.py b/tests/test_llama_speculative.py new file mode 100644 index 000000000..b5d450567 --- /dev/null +++ b/tests/test_llama_speculative.py @@ -0,0 +1,16 @@ +import numpy as np + +from llama_cpp.llama_speculative import LlamaPromptLookupDecoding + +def test_find_candidate_pred_tokens(): + find_candidate_pred_tokens = LlamaPromptLookupDecoding.find_candidate_pred_tokens + + # Test Case 1: Matching ngram is found + input_ids1 = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + result1 = find_candidate_pred_tokens(input_ids1, max_ngram_size=3, num_pred_tokens=2) + assert np.array_equal(result1, np.array([1, 2])) + + # Test Case 2: Matching ngram is not found + input_ids2 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + result2 = find_candidate_pred_tokens(input_ids2, max_ngram_size=3, num_pred_tokens=2) + assert np.array_equal(result2, np.array([])) diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 62cfc54f7..8733e0cf6 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 62cfc54f77e519057110265b52b0d614fa363e2a +Subproject commit 8733e0cf6eefc7c7752297cc22d0836706f4222c
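
Usage sketch for the two release-index scripts added above (illustrative only, not part of the diff): they are intended to run from the repository root and need curl and jq installed; GITHUB_TOKEN is optional and only raises the GitHub API rate limit. The local server, port, and URL below are assumptions for demonstration, since the published index URL depends on where the generated files end up being hosted.

    # assumes: repository root, curl + jq available, network access to api.github.com
    ./scripts/get-releases.sh                        # writes all_releases.txt in the current directory
    ./scripts/releases-to-pep-503.sh index/whl/cpu   # writes index/whl/cpu/llama-cpp-python/index.html
    python -m http.server --directory index 8000 &   # serve the generated PEP 503 index locally (hypothetical host/port)
    pip install llama-cpp-python --extra-index-url http://localhost:8000/whl/cpu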