diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..132fdff097 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,91 @@ +name: CI + +on: [pull_request] + +jobs: + Windows: + name: 'Windows (${{ matrix.python }}, ${{ matrix.arch }}${{ matrix.extra_name }})' + timeout-minutes: 20 + runs-on: 'windows-latest' + strategy: + matrix: + python: ['3.5', '3.6', '3.7', '3.8'] + arch: ['x86', 'x64'] + lsp: [''] + extra_name: [''] + include: + - python: '3.8' + arch: 'x64' + lsp: 'http://www.proxifier.com/download/ProxifierSetup.exe' + extra_name: ', with IFS LSP' + - python: '3.8' + arch: 'x64' + lsp: 'http://download.pctools.com/mirror/updates/9.0.0.2308-SDavfree-lite_en.exe' + extra_name: ', with non-IFS LSP' + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup python + uses: actions/setup-python@v1 + with: + python-version: '${{ matrix.python }}' + architecture: '${{ matrix.arch }}' + - name: Run tests + run: ./ci.sh + shell: bash + env: + LSP: '${{ matrix.lsp }}' + # Should match 'name:' up above + JOB_NAME: 'Windows (${{ matrix.python }}, ${{ matrix.arch }}${{ matrix.extra_name }})' + + Linux: + name: 'Linux (${{ matrix.python }}${{ matrix.extra_name }})' + timeout-minutes: 10 + runs-on: 'ubuntu-latest' + strategy: + matrix: + python: ['3.5', '3.6', '3.7', '3.8'] + check_docs: ['0'] + check_formatting: ['0'] + extra_name: [''] + include: + - python: '3.8' + check_docs: '1' + extra_name: ', check docs' + - python: '3.8' + check_formatting: '1' + extra_name: ', check formatting' + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup python + uses: actions/setup-python@v1 + with: + python-version: '${{ matrix.python }}' + - name: Run tests + run: ./ci.sh + env: + CHECK_DOCS: '${{ matrix.check_docs }}' + CHECK_FORMATTING: '${{ matrix.check_formatting }}' + # Should match 'name:' up above + JOB_NAME: 'Linux (${{ matrix.python }}${{ matrix.extra_name }})' + + macOS: + name: 'macOS (${{ matrix.python }})' + timeout-minutes: 10 + runs-on: 'macos-latest' + strategy: + matrix: + python: ['3.5', '3.6', '3.7', '3.8'] + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup python + uses: actions/setup-python@v1 + with: + python-version: '${{ matrix.python }}' + - name: Run tests + run: ./ci.sh + env: + # Should match 'name:' up above + JOB_NAME: 'macOS (${{ matrix.python }})' diff --git a/.gitignore b/.gitignore index 555e03c6e8..a50c10b8a3 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ __pycache__/ .installed.cfg *.egg /.pybuild +pip-wheel-metadata/ # Installer logs pip-log.txt @@ -64,3 +65,6 @@ coverage.xml # Sphinx documentation doc/_build/ + +# PyCharm +.idea/ diff --git a/.travis.yml b/.travis.yml index e3fdb79e6c..685b4a8e93 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,28 +1,37 @@ +os: linux language: python -dist: xenial +dist: bionic -matrix: +jobs: include: - - python: 3.6 - env: CHECK_DOCS=1 - - python: 3.6 - env: CHECK_FORMATTING=1 # The pypy tests are slow, so we list them first - - python: pypy3 + - python: pypy3.6-7.2.0 - language: generic env: PYPY_NIGHTLY_BRANCH=py3.6 + # Qemu tests are also slow + # The unique thing this provides is testing on the given distro's + # kernel, which is important when we use new kernel features. This + # is also good for testing the latest openssl etc., and getting + # early warning of any issues that might happen in the next Ubuntu + # LTS. 
+ - language: generic + # We use bionic for the host, b/c rumor says that Travis's + # 'bionic' systems have nested KVM enabled. + dist: bionic + env: + - "JOB_NAME='Ubuntu 19.10, full VM'" + - "VM_IMAGE=https://cloud-images.ubuntu.com/eoan/current/eoan-server-cloudimg-amd64.img" # 3.5.0 and 3.5.1 have different __aiter__ semantics than all # other versions, so we need to test them specially. Travis's - # xenial dist only provides 3.5.2+. + # newer images only provide 3.5.2+, so we have to request the old + # 'trusty' images. - python: 3.5.0 dist: trusty - - python: 3.5 - - python: 3.6 - - python: 3.7 - python: 3.5-dev - python: 3.6-dev - python: 3.7-dev - python: 3.8-dev + - python: nightly script: - ./ci.sh diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 228e8398e6..dabc5cb2d2 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,44 +1,13 @@ -# Will want to adjust this if we ever start having non-master branches -# in the main repo: -# https://docs.microsoft.com/en-us/azure/devops/pipelines/build/triggers trigger: - - master + branches: + exclude: + - 'dependabot/*' jobs: -# Special job that uses a container to check we work on the latest -# tippy-tip ubuntu. The main thing this adds is openssl 1.1.1/TLS 1.3. -# -# Azure has fancy stuff to let you run directly inside a container: -# -# https://docs.microsoft.com/en-us/azure/devops/pipelines/process/container-phases -# -# Unfortunately it's useless for us. Azure carefully sets everything -# up so that you run as a non-root user, but can sudo to get root. But -# the standard images that docker maintains like 'ubuntu' or 'debian' -# don't have sudo installed, which means that if you use 'container: -# ubuntu:rolling' then you simply cannot get root. And it's definitely -# not worth maintaining our own container image just so we can -# preinstall sudo: -# -# https://github.com/MicrosoftDocs/vsts-docs/issues/2939 -- job: "py37_latest_ubuntu" - pool: - vmImage: "ubuntu-16.04" - timeoutInMinutes: 10 - steps: - # This actually reveals the CODECOV_TOKEN in the logs, but - # AFAICT the only thing the token lets you do is upload coverage - # reports, which doesn't seem like a very tempting target for - # malicious hackers. - - bash: | - set -ex - env | sort - sudo docker run -e SYSTEM_JOBIDENTIFIER="$SYSTEM_JOBIDENTIFIER" -e CODECOV_TOKEN="$CODECOV_TOKEN" -v "$PWD:/t" ubuntu:rolling /bin/bash -c "set -ex; cd /t; apt update; apt install -y python3.7-dev python3-virtualenv git build-essential curl; python3.7 -m virtualenv -p python3.7 venv; source venv/bin/activate; source ci.sh" - - job: 'Windows' pool: - vmImage: 'vs2017-win2016' + vmImage: 'windows-latest' timeoutInMinutes: 20 strategy: # Python version list: @@ -50,11 +19,11 @@ jobs: # installer. So we put them at the top, so they can get started # earlier. 
"with IFS LSP, Python 3.7, 64 bit": - python.version: '3.7.2' + python.version: '3.7.5' python.pkg: 'python' lsp: 'http://www.proxifier.com/download/ProxifierSetup.exe' "with non-IFS LSP, Python 3.7, 64 bit": - python.version: '3.7.2' + python.version: '3.7.5' python.pkg: 'python' lsp: 'http://download.pctools.com/mirror/updates/9.0.0.2308-SDavfree-lite_en.exe' "Python 3.5, 32 bit": @@ -70,10 +39,16 @@ jobs: python.version: '3.6.8' python.pkg: 'python' "Python 3.7, 32 bit": - python.version: '3.7.2' + python.version: '3.7.5' python.pkg: 'pythonx86' "Python 3.7, 64 bit": - python.version: '3.7.2' + python.version: '3.7.5' + python.pkg: 'python' + "Python 3.8, 32 bit": + python.version: '3.8.0' + python.pkg: 'pythonx86' + "Python 3.8, 64 bit": + python.version: '3.8.0' python.pkg: 'python' steps: @@ -88,38 +63,43 @@ jobs: testRunTitle: 'Windows $(python.pkg) $(python.version)' condition: succeededOrFailed() -# Currently broken for unclear reasons probably related to openssl v1.1.1: -# https://github.com/python-trio/trio/pull/827#issuecomment-457139883 -# -# - job: 'Linux' -# pool: -# vmImage: 'ubuntu-16.04' -# timeoutInMinutes: 10 -# strategy: -# matrix: -# "Python 3.5": -# python.version: '3.5' -# "Python 3.6": -# python.version: '3.6' -# "Python 3.7": -# python.version: '3.7' -# -# steps: -# - task: UsePythonVersion@0 -# inputs: -# versionSpec: '$(python.version)' -# -# - bash: ci.sh -# displayName: "Run the actual tests" -# -# - task: PublishTestResults@2 -# inputs: -# testResultsFiles: 'test-results.xml' -# condition: succeededOrFailed() +- job: 'Linux' + pool: + vmImage: 'ubuntu-latest' + timeoutInMinutes: 10 + strategy: + matrix: + "Check docs": + python.version: '3.8' + CHECK_DOCS: 1 + "Formatting and linting": + python.version: '3.8' + CHECK_FORMATTING: 1 + "Python 3.5": + python.version: '3.5' + "Python 3.6": + python.version: '3.6' + "Python 3.7": + python.version: '3.7' + "Python 3.8": + python.version: '3.8' + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(python.version)' + + - bash: ./ci.sh + displayName: "Run the actual tests" + + - task: PublishTestResults@2 + inputs: + testResultsFiles: 'test-results.xml' + condition: succeededOrFailed() - job: 'macOS' pool: - vmImage: 'macOS-10.13' + vmImage: 'macOS-latest' timeoutInMinutes: 10 strategy: matrix: @@ -129,6 +109,8 @@ jobs: python.version: '3.6' "Python 3.7": python.version: '3.7' + "Python 3.8": + python.version: '3.8' steps: - task: UsePythonVersion@0 diff --git a/ci.sh b/ci.sh index 1daf728aff..70f85ce1aa 100755 --- a/ci.sh +++ b/ci.sh @@ -5,17 +5,30 @@ set -ex -o pipefail # Log some general info about the environment env | sort -if [ "$SYSTEM_JOBIDENTIFIER" != "" ]; then - # azure pipelines - CODECOV_NAME="$SYSTEM_JOBDISPLAYNAME" -else - CODECOV_NAME="${TRAVIS_OS_NAME}-${TRAVIS_PYTHON_VERSION:-unknown}" +if [ "$JOB_NAME" = "" ]; then + if [ "$SYSTEM_JOBIDENTIFIER" != "" ]; then + # azure pipelines + JOB_NAME="$SYSTEM_JOBDISPLAYNAME" + else + JOB_NAME="${TRAVIS_OS_NAME}-${TRAVIS_PYTHON_VERSION:-unknown}" + fi fi -# We always want to retry on failure, and we have to set --connect-timeout to -# work around a curl bug: -# https://github.com/curl/curl/issues/4461 -CURL="curl --connect-timeout 5 --retry 5" +# Curl's built-in retry system is not very robust; it gives up on lots of +# network errors that we want to retry on. Wget might work better, but it's +# not installed on azure pipelines's windows boxes. So... let's try some good +# old-fashioned brute force. 
(This is also a convenient place to put options +# we always want, like -f to tell curl to give an error if the server sends an +# error response, and -L to follow redirects.) +function curl-harder() { + for BACKOFF in 0 1 2 4 8 15 15 15 15; do + sleep $BACKOFF + if curl -fL --connect-timeout 5 "$@"; then + return 0 + fi + done + return 1 +} ################################################################ # Bootstrap python environment, if necessary @@ -51,13 +64,14 @@ fi ### Travis + macOS ### if [ "$TRAVIS_OS_NAME" = "osx" ]; then - CODECOV_NAME="osx_${MACPYTHON}" - $CURL -Lo macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg + JOB_NAME="osx_${MACPYTHON}" + curl-harder -o macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg sudo installer -pkg macpython.pkg -target / ls /Library/Frameworks/Python.framework/Versions/*/bin/ PYTHON_EXE=/Library/Frameworks/Python.framework/Versions/*/bin/python3 # The pip in older MacPython releases doesn't support a new enough TLS - $CURL https://bootstrap.pypa.io/get-pip.py | sudo $PYTHON_EXE + curl-harder -o get-pip.py https://bootstrap.pypa.io/get-pip.py + sudo $PYTHON_EXE get-pip.py sudo $PYTHON_EXE -m pip install virtualenv $PYTHON_EXE -m virtualenv testenv source testenv/bin/activate @@ -66,12 +80,11 @@ fi ### PyPy nightly (currently on Travis) ### if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then - CODECOV_NAME="pypy_nightly_${PYPY_NIGHTLY_BRANCH}" - $CURL -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-linux64.tar.bz2 + JOB_NAME="pypy_nightly_${PYPY_NIGHTLY_BRANCH}" + curl-harder -o pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-linux64.tar.bz2 if [ ! -s pypy.tar.bz2 ]; then # We know: - # - curl succeeded (200 response code; -f means "exit with error if - # server returns 4xx or 5xx") + # - curl succeeded (200 response code) # - nonetheless, pypy.tar.bz2 does not exist, or contains no data # This isn't going to work, and the failure is not informative of # anything involving Trio. @@ -94,6 +107,105 @@ if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then source testenv/bin/activate fi +### Qemu virtual-machine inception, on Travis + +if [ "$VM_IMAGE" != "" ]; then + VM_CPU=${VM_CPU:-x86_64} + + sudo apt update + sudo apt install cloud-image-utils qemu-system-x86 + + # If the base image is already present, we don't try downloading it again; + # and we use a scratch image for the actual run, in order to keep the base + # image file pristine. None of this matters when running in CI, but it + # makes local testing much easier. + BASEIMG=$(basename $VM_IMAGE) + if [ ! -e $BASEIMG ]; then + curl-harder "$VM_IMAGE" -o $BASEIMG + fi + rm -f os-working.img + qemu-img create -f qcow2 -b $BASEIMG os-working.img + + # This is the test script, that runs inside the VM, using cloud-init. + # + # This script goes through shell expansion, so use \ to quote any + # $variables you want to expand inside the guest. 
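+ #
+ # For example, in the script below, `export JOB_NAME="$JOB_NAME"` is
+ # expanded right here on the host, baking the host's value into the
+ # generated script, while `echo \$PWD` keeps a literal $PWD that the
+ # guest evaluates when it runs.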
+ cloud-localds -H test-host seed.img /dev/stdin << EOF +#!/bin/bash + +set -xeuo pipefail + +# When this script exits, we shut down the machine, which causes the qemu on +# the host to exit +trap "poweroff" exit + +uname -a +echo \$PWD +id +cat /etc/lsb-release +cat /proc/cpuinfo + +# Pass-through JOB_NAME + the env vars that codecov-bash looks at +export JOB_NAME="$JOB_NAME" +export CI="$CI" +export TRAVIS="$TRAVIS" +export TRAVIS_COMMIT="$TRAVIS_COMMIT" +export TRAVIS_PULL_REQUEST_SHA="$TRAVIS_PULL_REQUEST_SHA" +export TRAVIS_JOB_NUMBER="$TRAVIS_JOB_NUMBER" +export TRAVIS_PULL_REQUEST="$TRAVIS_PULL_REQUEST" +export TRAVIS_JOB_ID="$TRAVIS_JOB_ID" +export TRAVIS_REPO_SLUG="$TRAVIS_REPO_SLUG" +export TRAVIS_TAG="$TRAVIS_TAG" +export TRAVIS_BRANCH="$TRAVIS_BRANCH" + +env + +mkdir /host-files +mount -t 9p -o trans=virtio,version=9p2000.L host-files /host-files + +# Install and set up the system Python (assumes Debian/Ubuntu) +apt update +apt install -y python3-dev python3-virtualenv git build-essential curl +python3 -m virtualenv -p python3 /venv +# Uses unbound shell variable PS1, so have to allow that temporarily +set +u +source /venv/bin/activate +set -u + +# And then we re-invoke ourselves! +cd /host-files +./ci.sh + +# We can't pass our exit status out. So if we got this far without error, make +# a marker file where the host can see it. +touch /host-files/SUCCESS +EOF + + rm -f SUCCESS + # Apparently Travis's bionic images have nested virtualization enabled, so + # we can use KVM... but the default user isn't in the appropriate groups + # to use KVM, so we have to use 'sudo' to add that. And then a second + # 'sudo', because by default we have rights to run arbitrary commands as + # root, but we don't have rights to run a command as ourselves but with a + # tweaked group setting. + # + # Travis Linux VMs have 7.5 GiB RAM, so we give our nested VM 6 GiB RAM + # (-m 6144). + sudo sudo -u $USER -g kvm qemu-system-$VM_CPU \ + -enable-kvm \ + -M pc \ + -m 6144 \ + -nographic \ + -drive "file=./os-working.img,if=virtio" \ + -drive "file=./seed.img,if=virtio,format=raw" \ + -net nic \ + -net "user,hostfwd=tcp:127.0.0.1:50022-:22" \ + -virtfs local,path=$PWD,security_model=mapped-file,mount_tag=host-files + + test -e SUCCESS + exit +fi + ################################################################ # We have a Python environment! ################################################################ @@ -125,7 +237,7 @@ else # up. if [ "$LSP" != "" ]; then echo "Installing LSP from ${LSP}" - $CURL -o lsp-installer.exe "$LSP" + curl-harder -o lsp-installer.exe "$LSP" # Double-slashes are how you tell windows-bash that you want a single # slash, and don't treat this as a unix-style filename that needs to # be replaced by a windows-style filename. @@ -155,30 +267,13 @@ else netsh winsock reset fi - # Disable coverage on 3.8 until we run 3.8 on Windows CI too - # https://github.com/python-trio/trio/pull/784#issuecomment-446438407 - if [[ "$(python -V)" = Python\ 3.8* ]]; then - true; - # coverage is broken in pypy3 7.1.1, but is fixed in nightly and should be - # fixed in the next release after 7.1.1. - # See: https://bitbucket.org/pypy/pypy/issues/2943/ - elif [[ "$TRAVIS_PYTHON_VERSION" = "pypy3" ]]; then - true; - else - # Flag pypy and cpython coverage differently, until it settles down... - FLAG="cpython" - if [[ "$PYPY_NIGHTLY_BRANCH" == "py3.6" ]]; then - FLAG="pypy36nightly" - elif [[ "$(python -V)" == *PyPy* ]]; then - FLAG="pypy36release" - fi - # It's more common to do - # bash <(curl ...) 
- # but azure is broken: - # https://developercommunity.visualstudio.com/content/problem/743824/bash-task-on-windows-suddenly-fails-with-bash-devf.html - $CURL -o codecov.sh https://codecov.io/bash - bash codecov.sh -n "${CODECOV_NAME}" -F "$FLAG" - fi + # The codecov docs recommend something like 'bash <(curl ...)' to pipe the + # script directly into bash as it's being downloaded. But, the codecov + # server is flaky, so we instead save to a temp file with retries, and + # wait until we've successfully fetched the whole script before trying to + # run it. + curl-harder -o codecov.sh https://codecov.io/bash + bash codecov.sh -n "${JOB_NAME}" $PASSED fi diff --git a/docs-requirements.txt b/docs-requirements.txt index 53eefd1d38..f6b3a6359e 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -5,38 +5,38 @@ # pip-compile --output-file docs-requirements.txt docs-requirements.in # alabaster==0.7.12 # via sphinx -async-generator==1.10 -attrs==19.3.0 -babel==2.7.0 # via sphinx -certifi==2019.9.11 # via requests +async-generator==1.10 # via -r docs-requirements.in +attrs==19.3.0 # via -r docs-requirements.in, outcome +babel==2.8.0 # via sphinx +certifi==2020.4.5.1 # via requests chardet==3.0.4 # via requests -click==7.0 # via towncrier -docutils==0.15.2 # via sphinx -idna==2.8 -imagesize==1.1.0 # via sphinx -immutables==0.11 +click==7.1.1 # via towncrier +docutils==0.16 # via sphinx +idna==2.9 # via -r docs-requirements.in, requests +imagesize==1.2.0 # via sphinx +immutables==0.12 # via -r docs-requirements.in incremental==17.5.0 # via towncrier -jinja2==2.10.3 # via sphinx, towncrier +jinja2==2.11.2 # via sphinx, towncrier markupsafe==1.1.1 # via jinja2 -outcome==1.0.1 -packaging==19.2 # via sphinx -pygments==2.4.2 # via sphinx -pyparsing==2.4.2 # via packaging +outcome==1.0.1 # via -r docs-requirements.in +packaging==20.3 # via sphinx +pygments==2.6.1 # via sphinx +pyparsing==2.4.7 # via packaging pytz==2019.3 # via babel -requests==2.22.0 # via sphinx -six==1.12.0 # via packaging -sniffio==1.1.0 +requests==2.23.0 # via sphinx +six==1.14.0 # via packaging +sniffio==1.1.0 # via -r docs-requirements.in snowballstemmer==2.0.0 # via sphinx -sortedcontainers==2.1.0 -sphinx-rtd-theme==0.4.3 -sphinx==2.2.1 -sphinxcontrib-applehelp==1.0.1 # via sphinx -sphinxcontrib-devhelp==1.0.1 # via sphinx -sphinxcontrib-htmlhelp==1.0.2 # via sphinx +sortedcontainers==2.1.0 # via -r docs-requirements.in +sphinx-rtd-theme==0.4.3 # via -r docs-requirements.in +sphinx==2.4.4 # via -r docs-requirements.in, sphinx-rtd-theme, sphinxcontrib-trio +sphinxcontrib-applehelp==1.0.2 # via sphinx +sphinxcontrib-devhelp==1.0.2 # via sphinx +sphinxcontrib-htmlhelp==1.0.3 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.2 # via sphinx -sphinxcontrib-serializinghtml==1.1.3 # via sphinx -sphinxcontrib-trio==1.1.0 +sphinxcontrib-qthelp==1.0.3 # via sphinx +sphinxcontrib-serializinghtml==1.1.4 # via sphinx +sphinxcontrib-trio==1.1.1 # via -r docs-requirements.in toml==0.10.0 # via towncrier -towncrier==19.2.0 -urllib3==1.25.6 # via requests +towncrier==19.2.0 # via -r docs-requirements.in +urllib3==1.25.9 # via requests diff --git a/docs/source/awesome-trio-libraries.rst b/docs/source/awesome-trio-libraries.rst new file mode 100644 index 0000000000..6d0d649a59 --- /dev/null +++ b/docs/source/awesome-trio-libraries.rst @@ -0,0 +1,85 @@ +Awesome Trio Libraries +====================== + +..
List of Trio Libraries + + A list of libraries that support Trio, similar to the awesome-python + list here: https://github.com/vinta/awesome-python/ + + +.. currentmodule:: trio + +You have completed the tutorial, and are enthusiastic about building +great new applications and libraries with async functionality. +However, to get much useful work done you will want to use some of +the great libraries that support Trio-flavoured concurrency. This list +is not complete, but gives a starting point. Another great way to find +Trio-compatible libraries is to search on PyPI for the ``Framework :: Trio`` +tag -> `PyPI Search `__ + + +Getting Started +--------------- +* `cookiecutter-trio `__ - This is a cookiecutter template for Python projects that use Trio. It makes it easy to start a new project, by providing a bunch of preconfigured boilerplate. +* `pytest-trio `__ - Pytest plugin to test async-enabled Trio functions. +* `sphinxcontrib-trio `__ - Make Sphinx better at documenting Python functions and methods. In particular, it makes it easy to document async functions. + + +Web and HTML +------------ +* `asks `__ - asks is an async requests-like http library. +* `trio-websocket `__ - This library implements the WebSocket protocol, striving for safety, correctness, and ergonomics. +* `quart-trio `__ - Like Flask, but for Trio. A simple and powerful framework for building async web applications and REST APIs. Tip: this is an ASGI-based framework, so you'll also need an HTTP server with ASGI support. +* `hypercorn `__ - An HTTP server for hosting your ASGI apps. Supports HTTP/1.1, HTTP/2, HTTP/3, and Websockets. Can be run as a standalone server, or embedded in a larger Trio app. Use it with ``quart-trio``, or any other Trio-compatible ASGI framework. +* `httpx `__ - HTTPX is a fully featured HTTP client for Python 3, which provides sync and async APIs, and support for both HTTP/1.1 and HTTP/2. + + +Database +-------- + +* `triopg `__ - PostgreSQL client for Trio based on asyncpg. +* `trio-mysql `__ - Pure Python MySQL Client. +* `sqlalchemy_aio `__ - Add asyncio and Trio support to SQLAlchemy core, derived from alchimia. +* `redio `__ - Redis client, pure Python and Trio. + + +IOT +--- +* `DistMQTT `__ - DistMQTT is an open source MQTT client and broker implementation. It is a fork of hbmqtt with support for anyio and DistKV. +* `asyncgpio `__ - Allows easy access to the GPIO pins on your Raspberry Pi or similar embedded computer. + + + + +Building Command Line Apps +-------------------------- +* `trio-click `__ - Python composable command line utility, trio-compatible version. +* `urwid `__ - Urwid is a console user interface library for Python. + + +Multi-Core/Multiprocessing +-------------------------- +* `tractor `__ - tractor is an attempt to bring trionic structured concurrency to distributed multi-core Python. +* `Trio run_in_process `__ - Trio based API for running code in a separate process. + + +Testing +------- +* `pytest-trio `__ - Pytest plugin for trio. +* `hypothesis-trio `__ - Hypothesis plugin for trio. +* `trustme `__ - #1 quality TLS certs while you wait, for the discerning tester. + + +Tools and Utilities +------------------- +* `trio-typing `__ - Type hints for Trio and related projects. +* `trio-util `__ - An assortment of utilities for the Trio async/await framework. +* `tricycle `__ - This is a library of interesting-but-maybe-not-yet-fully-proven extensions to Trio. +* `tenacity `__ - Retrying library for Python with async/await support. 
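+
+To give a taste of how these libraries fit into a project, here is a
+minimal test written against the ``pytest-trio`` plugin listed under
+Testing above (a sketch, assuming pytest-trio is installed; it uses the
+plugin's documented ``trio`` marker)::
+
+    import pytest
+    import trio
+
+    @pytest.mark.trio
+    async def test_sleep_really_sleeps():
+        start = trio.current_time()
+        await trio.sleep(0.1)
+        assert trio.current_time() - start >= 0.1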
+ + +Trio/Asyncio Interoperability +----------------------------- +* `anyio `__ - AnyIO is an asynchronous compatibility API that allows applications and libraries written against it to run unmodified on asyncio, curio and trio. +* `sniffio `__ - This is a tiny package whose only purpose is to let you detect which async library your code is running under. +* `trio-asyncio `__ - Trio-Asyncio lets you use many asyncio libraries from your Trio app. diff --git a/docs/source/history.rst b/docs/source/history.rst index ed6cb2a7b7..65bb5dd54c 100644 --- a/docs/source/history.rst +++ b/docs/source/history.rst @@ -5,6 +5,51 @@ Release history .. towncrier release notes start +Trio 0.14.0 (2020-04-27) +------------------------ + +Features +~~~~~~~~ + +- If you're using Trio's low-level interfaces like + `trio.hazmat.wait_readable` or similar, and then you close a socket or + file descriptor, you're supposed to call `trio.hazmat.notify_closing` + first so Trio can clean up properly. But what if you forget? In the + past, Trio would tend to either deadlock or explode spectacularly. + Now, it's much more robust to this situation, and should generally + survive. (But note that "survive" is not the same as "give you the + results you were expecting", so you should still call + `~trio.hazmat.notify_closing` when appropriate. This is about harm + reduction and making it easier to debug this kind of mistake, not + something you should rely on.) + + If you're using higher-level interfaces outside of the `trio.hazmat` + module, then you don't need to worry about any of this; those + interfaces already take care of calling `~trio.hazmat.notify_closing` + for you. (`#1272 `__) + + +Bugfixes +~~~~~~~~ + +- A bug related to the following methods was introduced in version 0.12.0: + + - `trio.Path.iterdir` + - `trio.Path.glob` + - `trio.Path.rglob` + + The iteration of the blocking generators produced by pathlib was performed in + the trio thread. With this fix, the previous behavior is restored: the blocking + generators are converted into lists in a thread dedicated to blocking IO calls. (`#1308 `__) + + +Deprecations and Removals +~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Deprecate Python 3.5 (`#1408 `__) +- Remove ``trio.open_cancel_scope`` which was deprecated in 0.11.0.
(`#1458 `__) + + Trio 0.13.0 (2019-11-02) ------------------------ diff --git a/docs/source/index.rst b/docs/source/index.rst index a1395e3980..4f20067785 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -73,6 +73,7 @@ Vital statistics: :caption: Trio's friendly, yet comprehensive, manual: tutorial.rst + awesome-trio-libraries.rst reference-core.rst reference-io.rst reference-testing.rst diff --git a/docs/source/reference-core.rst b/docs/source/reference-core.rst index 09266a9986..d4bdd74edd 100644 --- a/docs/source/reference-core.rst +++ b/docs/source/reference-core.rst @@ -355,7 +355,7 @@ Here's an example:: print("starting...") with trio.move_on_after(5): with trio.move_on_after(10): - await sleep(20) + await trio.sleep(20) print("sleep finished without error") print("move_on_after(10) finished without error") print("move_on_after(5) finished without error") @@ -382,7 +382,7 @@ object representing this cancel scope, which we can use to check whether this scope caught a :exc:`Cancelled` exception:: with trio.move_on_after(5) as cancel_scope: - await sleep(10) + await trio.sleep(10) print(cancel_scope.cancelled_caught) # prints "True" The ``cancel_scope`` object also allows you to check or adjust this @@ -1351,7 +1351,7 @@ after an hour, that grows to ~32,400. Eventually, the program will run out of memory. And well before we run out of memory, our latency on handling individual messages will become abysmal. For example, at the one minute mark, the producer is sending message ~600, but the -producer is still processing message ~60. Message 600 will have to sit +consumer is still processing message ~60. Message 600 will have to sit in the channel for ~9 minutes before the consumer catches up and processes it. diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst index 48f19074ae..7c700b1328 100644 --- a/docs/source/reference-io.rst +++ b/docs/source/reference-io.rst @@ -24,7 +24,7 @@ create complex transport configurations. Here's some examples: speak SSL over the network is to wrap an :class:`~trio.SSLStream` around a :class:`~trio.SocketStream`. -* If you spawn a :ref:`subprocess`, you can get a +* If you spawn a :ref:`subprocess `, you can get a :class:`~trio.abc.SendStream` that lets you write to its stdin, and a :class:`~trio.abc.ReceiveStream` that lets you read from its stdout. If for some reason you wanted to speak SSL to a subprocess, @@ -479,7 +479,7 @@ you can `jump down to the API overview Background: Why is async file I/O useful? The answer may surprise you ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Many people expect that switching to from synchronous file I/O to +Many people expect that switching from synchronous file I/O to async file I/O will always make their program faster. This is not true! If we just look at total throughput, then async file I/O might be faster, slower, or about the same, and it depends in a complicated diff --git a/docs/source/reference-testing.rst b/docs/source/reference-testing.rst index e6ddde2156..40a275bbeb 100644 --- a/docs/source/reference-testing.rst +++ b/docs/source/reference-testing.rst @@ -118,7 +118,7 @@ some interesting hooks you can set, that let you customize the behavior of their methods. This is where you can insert the evil, if you want it. 
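For example, here is a minimal sketch (relying only on the documented
``send_all_hook`` attribute and :func:`memory_stream_pump`) of a hook that
logs every send before passing the data along::

    import trio
    from trio.testing import (
        MemorySendStream, MemoryReceiveStream, memory_stream_pump,
    )

    async def main():
        send_stream = MemorySendStream()
        receive_stream = MemoryReceiveStream()

        async def log_and_pump():
            # called on every send_all(); this is where the evil would go
            print("send_all called")
            memory_stream_pump(send_stream, receive_stream)

        send_stream.send_all_hook = log_and_pump

        await send_stream.send_all(b"hello")
        print(await receive_stream.receive_some(10))

    trio.run(main)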
:func:`memory_stream_one_way_pair` takes advantage of these hooks in a relatively boring way: it just sets it up so that -when you call ``sendall``, or when you close the send stream, then it +when you call ``send_all``, or when you close the send stream, then it automatically triggers a call to :func:`memory_stream_pump`, which is a convenience function that takes data out of a :class:`MemorySendStream`\´s buffer and puts it into a diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst index 5896933871..3b9255b236 100644 --- a/docs/source/tutorial.rst +++ b/docs/source/tutorial.rst @@ -770,11 +770,12 @@ programming, and – to add insult to injury – `pretty poor scalability Python just aren't that appealing. Trio doesn't make your code run on multiple cores; in fact, as we saw -above, it's baked into Trio's design that you never have two tasks -running at the same time. We're not so much overcoming the GIL as -embracing it. But if you're willing to accept that, plus a bit of -extra work to put these new ``async`` and ``await`` keywords in the -right places, then in exchange you get: +above, it's baked into Trio's design that when it has multiple tasks, +they take turns, so at each moment only one of them is actively running. +We're not so much overcoming the GIL as embracing it. But if you're +willing to accept that, plus a bit of extra work to put these new +``async`` and ``await`` keywords in the right places, then in exchange +you get: * Excellent scalability: Trio can run 10,000+ tasks simultaneously without breaking a sweat, so long as their total CPU demands don't diff --git a/setup.cfg b/setup.cfg index 0e814ba376..e6579a4f07 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,5 @@ [tool:pytest] xfail_strict = true faulthandler_timeout=60 +markers = + redistributors_should_skip: tests that should be skipped by downstream redistributors diff --git a/setup.py b/setup.py index 45964b34b2..9ef41cb3e0 100644 --- a/setup.py +++ b/setup.py @@ -85,22 +85,14 @@ "idna", "outcome", "sniffio", - # PEP 508 style, but: - # https://bitbucket.org/pypa/wheel/issues/181/bdist_wheel-silently-discards-pep-508 - #"cffi; os_name == 'nt'", # "cffi is required on windows" + # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release() + # cffi 1.14 fixes memory leak inside ffi.getwinerror() + "cffi>=1.14; os_name == 'nt'", # "cffi is required on windows" + "contextvars>=2.1; python_version < '3.7'" ], # This means, just install *everything* you see under trio/, even if it # doesn't look like a source file, so long as it appears in MANIFEST.in: include_package_data=True, - # Quirky bdist_wheel-specific way: - # https://wheel.readthedocs.io/en/latest/#defining-conditional-dependencies - # also supported by pip and setuptools, as long as they're vaguely - # recent - extras_require={ - # cffi 1.12 adds from_buffer(require_writable=True) and ffi.release() - ":os_name == 'nt'": ["cffi >= 1.12"], # "cffi is required on windows", - ":python_version < '3.7'": ["contextvars>=2.1"] - }, python_requires=">=3.5", keywords=["async", "io", "networking", "trio"], classifiers=[ diff --git a/test-requirements.in b/test-requirements.in index 5d1bda2aa3..f03409d484 100644 --- a/test-requirements.in +++ b/test-requirements.in @@ -8,7 +8,7 @@ pylint # for pylint finding all symbols tests jedi # for jedi code completion tests # Tools -yapf ==0.28.0 # formatting +yapf ==0.30.0 # formatting flake8 astor # code generation diff --git a/test-requirements.txt b/test-requirements.txt index 339b47e5b2..e23ce7acd0 100644 
--- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,53 +4,49 @@ # # pip-compile --output-file test-requirements.txt test-requirements.in # -astor==0.8.0 -astroid==2.3.2 # via pylint -async-generator==1.10 -atomicwrites==1.3.0 # via pytest -attrs==19.3.0 +astor==0.8.1 # via -r test-requirements.in +astroid==2.3.3 # via pylint +async-generator==1.10 # via -r test-requirements.in +attrs==19.3.0 # via -r test-requirements.in, outcome, pytest backcall==0.1.0 # via ipython -cffi==1.13.1 # via cryptography -coverage==4.5.4 # via pytest-cov -cryptography==2.8 # via pyopenssl, trustme -decorator==4.4.1 # via ipython, traitlets +cffi==1.14.0 # via cryptography +coverage==5.1 # via pytest-cov +cryptography==2.9.2 # via pyopenssl, trustme +decorator==4.4.2 # via ipython, traitlets entrypoints==0.3 # via flake8 -flake8==3.7.9 -idna==2.8 -immutables==0.11 -importlib-metadata==0.23 # via pluggy, pytest +flake8==3.7.9 # via -r test-requirements.in +idna==2.9 # via -r test-requirements.in, trustme +immutables==0.12 # via -r test-requirements.in ipython-genutils==0.2.0 # via traitlets -ipython==7.9.0 +ipython==7.9.0 # via -r test-requirements.in isort==4.3.21 # via pylint -jedi==0.15.1 +jedi==0.16.0 # via -r test-requirements.in, ipython lazy-object-proxy==1.4.3 # via astroid mccabe==0.6.1 # via flake8, pylint -more-itertools==7.2.0 # via pytest, zipp -outcome==1.0.1 -packaging==19.2 # via pytest -parso==0.5.1 # via jedi -pexpect==4.7.0 # via ipython +more-itertools==8.2.0 # via pytest +outcome==1.0.1 # via -r test-requirements.in +packaging==20.3 # via pytest +parso==0.7.0 # via jedi +pexpect==4.8.0 # via ipython pickleshare==0.7.5 # via ipython -pluggy==0.13.0 # via pytest +pluggy==0.13.1 # via pytest prompt-toolkit==2.0.10 # via ipython ptyprocess==0.6.0 # via pexpect -py==1.8.0 # via pytest +py==1.8.1 # via pytest pycodestyle==2.5.0 # via flake8 -pycparser==2.19 # via cffi +pycparser==2.20 # via cffi pyflakes==2.1.1 # via flake8 -pygments==2.4.2 # via ipython -pylint==2.4.2 -pyopenssl==19.0.0 -pyparsing==2.4.2 # via packaging -pytest-cov==2.8.1 -pytest==5.2.2 -six==1.12.0 # via astroid, cryptography, packaging, prompt-toolkit, pyopenssl, traitlets -sniffio==1.1.0 -sortedcontainers==2.1.0 +pygments==2.6.1 # via ipython +pylint==2.4.2 # via -r test-requirements.in +pyopenssl==19.1.0 # via -r test-requirements.in +pyparsing==2.4.7 # via packaging +pytest-cov==2.8.1 # via -r test-requirements.in +pytest==5.4.1 # via -r test-requirements.in, pytest-cov +six==1.14.0 # via astroid, cryptography, packaging, prompt-toolkit, pyopenssl, traitlets +sniffio==1.1.0 # via -r test-requirements.in +sortedcontainers==2.1.0 # via -r test-requirements.in traitlets==4.3.3 # via ipython -trustme==0.5.2 -typed-ast==1.4.0 ; python_version < "3.8" and implementation_name == "cpython" -wcwidth==0.1.7 # via prompt-toolkit, pytest +trustme==0.6.0 # via -r test-requirements.in +wcwidth==0.1.9 # via prompt-toolkit, pytest wrapt==1.11.2 # via astroid -yapf==0.28.0 -zipp==0.6.0 # via importlib-metadata +yapf==0.30.0 # via -r test-requirements.in diff --git a/trio/__init__.py b/trio/__init__.py index 6e9035d8fd..679c8509be 100644 --- a/trio/__init__.py +++ b/trio/__init__.py @@ -18,9 +18,8 @@ from ._core import ( TrioInternalError, RunFinishedError, WouldBlock, Cancelled, BusyResourceError, ClosedResourceError, MultiError, run, open_nursery, - CancelScope, open_cancel_scope, current_effective_deadline, - TASK_STATUS_IGNORED, current_time, BrokenResourceError, EndOfChannel, - Nursery + CancelScope, current_effective_deadline, 
TASK_STATUS_IGNORED, current_time, + BrokenResourceError, EndOfChannel, Nursery ) from ._timeouts import ( @@ -170,3 +169,10 @@ __name__ + ".subprocess", _deprecated_subprocess_reexports.__dict__ ) del fixup_module_metadata + +import sys +if sys.version_info < (3, 6): + _deprecate.warn_deprecated( + "Support for Python 3.5", "0.14", issue=75, instead="Python 3.6+" + ) +del sys diff --git a/trio/_abc.py b/trio/_abc.py index 126a65711f..88c2ff1f70 100644 --- a/trio/_abc.py +++ b/trio/_abc.py @@ -19,6 +19,7 @@ def start_clock(self): Called at the beginning of the run. """ + @abstractmethod def current_time(self): """Return the current time, according to this clock. @@ -30,6 +31,7 @@ def current_time(self): float: The current time. """ + @abstractmethod def deadline_to_sleep_time(self, deadline): """Compute the real time until the given deadline. @@ -68,10 +70,12 @@ def before_run(self): """Called at the beginning of :func:`trio.run`. """ + def after_run(self): """Called just before :func:`trio.run` returns. """ + def task_spawned(self, task): """Called when the given task is created. @@ -79,6 +83,7 @@ def task_spawned(self, task): task (trio.hazmat.Task): The new task. """ + def task_scheduled(self, task): """Called when the given task becomes runnable. @@ -89,6 +94,7 @@ def task_scheduled(self, task): task (trio.hazmat.Task): The task that became runnable. """ + def before_task_step(self, task): """Called immediately before we resume running the given task. @@ -96,6 +102,7 @@ def before_task_step(self, task): task (trio.hazmat.Task): The task that is about to run. """ + def after_task_step(self, task): """Called when we return to the main run loop after a task has yielded. @@ -103,6 +110,7 @@ def after_task_step(self, task): task (trio.hazmat.Task): The task that just ran. """ + def task_exited(self, task): """Called when the given task exits. @@ -110,6 +118,7 @@ def task_exited(self, task): task (trio.hazmat.Task): The finished task. """ + def before_io_wait(self, timeout): """Called before blocking to wait for I/O readiness. @@ -117,6 +126,7 @@ def before_io_wait(self, timeout): timeout (float): The number of seconds we are willing to wait. """ + def after_io_wait(self, timeout): """Called after handling pending I/O. @@ -155,6 +165,7 @@ async def getaddrinfo( ``b"xn--caf-dma.com"``. """ + @abstractmethod async def getnameinfo(self, sockaddr, flags): """A custom implementation of :func:`~trio.socket.getnameinfo`. @@ -244,6 +255,7 @@ async def aclose(self): See also: :func:`trio.aclose_forcefully`. """ + async def __aenter__(self): return self @@ -293,6 +305,7 @@ async def send_all(self, data): or none of the requested data, and there is no way to know which. """ + @abstractmethod async def wait_send_all_might_not_block(self): """Block until it's possible that :meth:`send_all` might not block. @@ -400,6 +413,7 @@ async def receive_some(self, max_bytes=None): :meth:`receive_some` is running. """ + @aiter_compat def __aiter__(self): return self @@ -545,7 +559,7 @@ class SendChannel(AsyncResource, Generic[SendType]): with`` block. If you want to send raw bytes rather than Python objects, see - `ReceiveStream`. + `SendStream`. """ __slots__ = () @@ -614,6 +628,7 @@ async def receive(self) -> ReceiveType: doesn't support it, then you can get `~trio.BusyResourceError`. 
""" + @aiter_compat def __aiter__(self): return self diff --git a/trio/_channel.py b/trio/_channel.py index 58802b585f..c902c485fa 100644 --- a/trio/_channel.py +++ b/trio/_channel.py @@ -27,6 +27,11 @@ def open_memory_channel(max_buffer_size): on a channel when there's no-one on the other side. See :ref:`channel-shutdown` for details. + Memory channel operations are all atomic with respect to + cancellation, either `~trio.abc.ReceiveChannel.receive` will + successfully return an object, or it will raise :exc:`Cancelled` + while leaving the channel unchanged. + Args: max_buffer_size (int or math.inf): The maximum number of items that can be buffered in the channel before :meth:`~trio.abc.SendChannel.send` diff --git a/trio/_core/__init__.py b/trio/_core/__init__.py index fda027e193..cb3d5d714f 100644 --- a/trio/_core/__init__.py +++ b/trio/_core/__init__.py @@ -17,12 +17,12 @@ # Imports that always exist from ._run import ( - Task, CancelScope, run, open_nursery, open_cancel_scope, checkpoint, - current_task, current_effective_deadline, checkpoint_if_cancelled, - TASK_STATUS_IGNORED, current_statistics, current_trio_token, reschedule, - remove_instrument, add_instrument, current_clock, current_root_task, - spawn_system_task, current_time, wait_all_tasks_blocked, wait_readable, - wait_writable, notify_closing, Nursery + Task, CancelScope, run, open_nursery, checkpoint, current_task, + current_effective_deadline, checkpoint_if_cancelled, TASK_STATUS_IGNORED, + current_statistics, current_trio_token, reschedule, remove_instrument, + add_instrument, current_clock, current_root_task, spawn_system_task, + current_time, wait_all_tasks_blocked, wait_readable, wait_writable, + notify_closing, Nursery ) # Has to come after _run to resolve a circular import diff --git a/trio/_core/_generated_run.py b/trio/_core/_generated_run.py index 9d9439ec76..834346c0bf 100644 --- a/trio/_core/_generated_run.py +++ b/trio/_core/_generated_run.py @@ -193,21 +193,21 @@ async def test_lock_fairness(): lock = trio.Lock() await lock.acquire() async with trio.open_nursery() as nursery: - child = nursery.start_soon(lock_taker, lock) + nursery.start_soon(lock_taker, lock) # child hasn't run yet, we have the lock assert lock.locked() - assert lock._owner is trio.current_task() + assert lock._owner is trio.hazmat.current_task() await trio.testing.wait_all_tasks_blocked() # now the child has run and is blocked on lock.acquire(), we # still have the lock assert lock.locked() - assert lock._owner is trio.current_task() + assert lock._owner is trio.hazmat.current_task() lock.release() try: # The child has a prior claim, so we can't have it lock.acquire_nowait() except trio.WouldBlock: - assert lock._owner is child + assert lock._owner is not trio.hazmat.current_task() print("PASS") else: print("FAIL") diff --git a/trio/_core/_io_common.py b/trio/_core/_io_common.py new file mode 100644 index 0000000000..9891849bc9 --- /dev/null +++ b/trio/_core/_io_common.py @@ -0,0 +1,22 @@ +import copy +import outcome +from .. 
import _core + + +# Utility function shared between _io_epoll and _io_windows +def wake_all(waiters, exc): + try: + current_task = _core.current_task() + except RuntimeError: + current_task = None + raise_at_end = False + for attr_name in ["read_task", "write_task"]: + task = getattr(waiters, attr_name) + if task is not None: + if task is current_task: + raise_at_end = True + else: + _core.reschedule(task, outcome.Error(copy.copy(exc))) + setattr(waiters, attr_name, None) + if raise_at_end: + raise exc diff --git a/trio/_core/_io_epoll.py b/trio/_core/_io_epoll.py index 9fae320b23..5d73a58c84 100644 --- a/trio/_core/_io_epoll.py +++ b/trio/_core/_io_epoll.py @@ -1,9 +1,10 @@ import select import attr -import outcome +from collections import defaultdict from .. import _core from ._run import _public +from ._io_common import wake_all @attr.s(slots=True, eq=False, frozen=True) @@ -13,38 +14,176 @@ class _EpollStatistics: backend = attr.ib(default="epoll") +# Some facts about epoll +# ---------------------- +# +# Internally, an epoll object is sort of like a WeakKeyDictionary where the +# keys are tuples of (fd number, file object). When you call epoll_ctl, you +# pass in an fd; that gets converted to an (fd number, file object) tuple by +# looking up the fd in the process's fd table at the time of the call. When an +# event happens on the file object, epoll_wait drops the file object part, and +# just returns the fd number in its event. So from the outside it looks like +# it's keeping a table of fds, but really it's a bit more complicated. This +# has some subtle consequences. +# +# In general, file objects inside the kernel are reference counted. Each entry +# in a process's fd table holds a strong reference to the corresponding file +# object, and most operations that use file objects take a temporary strong +# reference while they're working. So when you call close() on an fd, that +# might or might not cause the file object to be deallocated -- it depends on +# whether there are any other references to that file object. Some common ways +# this can happen: +# +# - after calling dup(), you have two fds in the same process referring to the +# same file object. Even if you close one fd (= remove that entry from the +# fd table), the file object will be kept alive by the other fd. +# - when calling fork(), the child inherits a copy of the parent's fd table, +# so all the file objects get another reference. (But if the fork() is +# followed by exec(), then all of the child's fds that have the CLOEXEC flag +# set will be closed at that point.) +# - most syscalls that work on fds take a strong reference to the underlying +# file object while they're using it. So if there's one thread blocked in +# read(fd), and then another thread calls close() on the last fd referring +# to that object, the underlying file won't actually be closed until +# after read() returns. +# +# However, epoll does *not* take a reference to any of the file objects in its +# interest set (that's what makes it similar to a WeakKeyDictionary). File +# objects inside an epoll interest set will be deallocated if all *other* +# references to them are closed. And when that happens, the epoll object will +# automatically deregister that file object and stop reporting events on it. +# So that's quite handy. +# +# But, what happens if we do this? +# +# fd1 = open(...) +# epoll_ctl(EPOLL_CTL_ADD, fd1, ...)
+# fd2 = dup(fd1) +# close(fd1) +# +# In this case, the dup() keeps the underlying file object alive, so it +# remains registered in the epoll object's interest set, as the tuple (fd1, +# file object). But, fd1 no longer refers to this file object! You might think +# there was some magic to handle this, but unfortunately no; the consequences +# are totally predictable from what I said above: +# +# If any events occur on the file object, then epoll will report them as +# happening on fd1, even though that doesn't make sense. +# +# Perhaps we would like to deregister fd1 to stop getting nonsensical events. +# But how? When we call epoll_ctl, we have to pass an fd number, which will +# get expanded to an (fd number, file object) tuple. We can't pass fd1, +# because when epoll_ctl tries to look it up, it won't find our file object. +# And we can't pass fd2, because that will get expanded to (fd2, file object), +# which is a different lookup key. In fact, it's *impossible* to de-register +# this fd! +# +# We could even have fd1 get assigned to another file object, and then we can +# have multiple keys registered simultaneously using the same fd number, like: +# (fd1, file object 1), (fd1, file object 2). And if events happen on either +# file object, then epoll will happily report that something happened to +# "fd1". +# +# Now here's what makes this especially nasty: suppose the old file object +# becomes, say, readable. That means that every time we call epoll_wait, it +# will return immediately to tell us that "fd1" is readable. Normally, we +# would handle this by de-registering fd1, waking up the corresponding call to +# wait_readable, then the user will call read() or recv() or something, and +# we're fine. But if this happens on a stale fd where we can't remove the +# registration, then we might get stuck in a state where epoll_wait *always* +# returns immediately, so our event loop becomes unable to sleep, and now our +# program is burning 100% of the CPU doing nothing, with no way out. +# +# +# What does this mean for Trio? +# ----------------------------- +# +# Since we don't control the user's code, we have no way to guarantee that we +# don't get stuck with stale fd's in our epoll interest set. For example, a +# user could call wait_readable(fd) in one task, and then while that's +# running, they might close(fd) from another task. In this situation, they're +# *supposed* to call notify_closing(fd) to let us know what's happening, so we +# can interrupt the wait_readable() call and avoid getting into this mess. And +# that's the only thing that can possibly work correctly in all cases. But +# sometimes user code has bugs. So if this does happen, we'd like to degrade +# gracefully, and survive without corrupting Trio's internal state or +# otherwise causing the whole program to explode messily. +# +# Our solution: we always use EPOLLONESHOT. This way, we might get *one* +# spurious event on a stale fd, but then epoll will automatically silence it +# until we explicitly say that we want more events... and if we have a stale +# fd, then we actually can't re-enable it! So we can't get stuck in an +# infinite busy-loop. If there's a stale fd hanging around, then it might +# cause a spurious `BusyResourceError`, or cause one wait_* call to return +# before it should have... but in general, the wait_* functions are allowed to +# have some spurious wakeups; the user code will just attempt the operation, +# get EWOULDBLOCK, and call wait_* again. 
And the program as a whole will +# survive, any exceptions will propagate, etc. +# +# As a bonus, EPOLLONESHOT also saves us having to explicitly deregister fds +# on the normal wakeup path, so it's a bit more efficient in general. +# +# However, EPOLLONESHOT has a few trade-offs to consider: +# +# First, you can't combine EPOLLONESHOT with EPOLLEXCLUSIVE. This is a bit sad +# in one somewhat rare case: if you have a multi-process server where a group +# of processes all share the same listening socket, then EPOLLEXCLUSIVE can be +# used to avoid "thundering herd" problems when a new connection comes in. But +# this isn't too bad. It's not clear if EPOLLEXCLUSIVE even works for us +# anyway: +# +# https://stackoverflow.com/questions/41582560/how-does-epolls-epollexclusive-mode-interact-with-level-triggering +# +# And it's not clear that EPOLLEXCLUSIVE is a great approach either: +# +# https://blog.cloudflare.com/the-sad-state-of-linux-socket-balancing/ +# +# And if we do need to support this, we could always add support through some +# more-specialized API in the future. So this isn't a blocker to using +# EPOLLONESHOT. +# +# Second, EPOLLONESHOT does not actually *deregister* the fd after delivering +# an event (EPOLL_CTL_DEL). Instead, it keeps the fd registered, but +# effectively does an EPOLL_CTL_MOD to set the fd's interest flags to +# all-zeros. So we could still end up with an fd hanging around in the +# interest set for a long time, even if we're not using it. +# +# Fortunately, this isn't a problem, because it's only a weak reference – if +# we have a stale fd that's been silenced by EPOLLONESHOT, then it wastes a +# tiny bit of kernel memory remembering this fd that can never be revived, but +# when the underlying file object is eventually closed, that memory will be +# reclaimed. So that's OK. +# +# The other issue is that when someone calls wait_*, using EPOLLONESHOT means +# that if we have ever waited for this fd before, we have to use EPOLL_CTL_MOD +# to re-enable it; but if it's a new fd, we have to use EPOLL_CTL_ADD. How do +# we know which one to use? There's no reasonable way to track which fds are +# currently registered -- remember, we're assuming the user might have gone +# and rearranged their fds without telling us! +# +# Fortunately, this also has a simple solution: if we wait on a socket or +# other fd once, then we'll probably wait on it lots of times. And the epoll +# object itself knows which fds it already has registered. So when an fd comes +# in, we optimistically assume that it's been waited on before, and try doing +# EPOLL_CTL_MOD. And if that fails with an ENOENT error, then we try again +# with EPOLL_CTL_ADD. +# +# So that's why this code is the way it is. And now you know more than you +# wanted to about how epoll works. + + @attr.s(slots=True, eq=False) class EpollWaiters: read_task = attr.ib(default=None) write_task = attr.ib(default=None) - - def flags(self): - flags = 0 - if self.read_task is not None: - flags |= select.EPOLLIN - if self.write_task is not None: - flags |= select.EPOLLOUT - if not flags: - return None - # XX not sure if EPOLLEXCLUSIVE is actually safe... 
I think
-        # probably we should use it here unconditionally, but:
-        # https://stackoverflow.com/questions/41582560/how-does-epolls-epollexclusive-mode-interact-with-level-triggering
-
-        # flags |= select.EPOLLEXCLUSIVE
-        # We used to use ONESHOT here also, but it turns out that it's
-        # confusing/complicated: you can't use ONESHOT+EPOLLEXCLUSIVE
-        # together, you ONESHOT doesn't delete the registration but just
-        # "disables" it so you re-enable with CTL rather than ADD (or
-        # something?)...
-        # https://lkml.org/lkml/2016/2/4/541
-        return flags
+    current_flags = attr.ib(default=0)
 
 
 @attr.s(slots=True, eq=False, hash=False)
 class EpollIOManager:
     _epoll = attr.ib(factory=select.epoll)
     # {fd: EpollWaiters}
-    _registered = attr.ib(factory=dict)
+    _registered = attr.ib(factory=lambda: defaultdict(EpollWaiters))
 
     def statistics(self):
         tasks_waiting_read = 0
@@ -69,6 +208,8 @@ def handle_io(self, timeout):
         events = self._epoll.poll(timeout, max_events)
         for fd, flags in events:
             waiters = self._registered[fd]
+            # EPOLLONESHOT always clears the flags when an event is delivered
+            waiters.current_flags = 0
             # Clever hack stolen from selectors.EpollSelector: an event
             # with EPOLLHUP or EPOLLERR flags wakes both readers and
             # writers.
@@ -78,40 +219,53 @@
             if flags & ~select.EPOLLOUT and waiters.read_task is not None:
                 _core.reschedule(waiters.read_task)
                 waiters.read_task = None
-            self._update_registrations(fd, True)
+            self._update_registrations(fd)
 
-    def _update_registrations(self, fd, currently_registered):
+    def _update_registrations(self, fd):
         waiters = self._registered[fd]
-        flags = waiters.flags()
-        if flags is None:
-            assert currently_registered
+        wanted_flags = 0
+        if waiters.read_task is not None:
+            wanted_flags |= select.EPOLLIN
+        if waiters.write_task is not None:
+            wanted_flags |= select.EPOLLOUT
+        if wanted_flags != waiters.current_flags:
+            try:
+                try:
+                    # First try EPOLL_CTL_MOD
+                    self._epoll.modify(fd, wanted_flags | select.EPOLLONESHOT)
+                except OSError:
+                    # If that fails, it might be a new fd; try EPOLL_CTL_ADD
+                    self._epoll.register(
+                        fd, wanted_flags | select.EPOLLONESHOT
+                    )
+                waiters.current_flags = wanted_flags
+            except OSError as exc:
+                # If everything fails, probably it's a bad fd, e.g. because
+                # the fd was closed behind our back. In this case we don't
+                # want to try to unregister the fd, because that will probably
+                # fail too. Just clear our state and wake everyone up.
+                del self._registered[fd]
+                # This could raise (in case we're calling this inside one of
+                # the to-be-woken tasks), so we have to do it last.
+                wake_all(waiters, exc)
+                return
+        if not wanted_flags:
             del self._registered[fd]
-            self._epoll.unregister(fd)
-        else:
-            if currently_registered:
-                self._epoll.modify(fd, flags)
-            else:
-                self._epoll.register(fd, flags)
-
-    # Public (hazmat) API:
 
     async def _epoll_wait(self, fd, attr_name):
         if not isinstance(fd, int):
             fd = fd.fileno()
-        currently_registered = (fd in self._registered)
-        if not currently_registered:
-            self._registered[fd] = EpollWaiters()
         waiters = self._registered[fd]
         if getattr(waiters, attr_name) is not None:
             raise _core.BusyResourceError(
                 "another task is already reading / writing this fd"
             )
         setattr(waiters, attr_name, _core.current_task())
-        self._update_registrations(fd, currently_registered)
+        self._update_registrations(fd)
 
         def abort(_):
-            setattr(self._registered[fd], attr_name, None)
-            self._update_registrations(fd, True)
+            setattr(waiters, attr_name, None)
+            self._update_registrations(fd)
             return _core.Abort.SUCCEEDED
 
         await _core.wait_task_rescheduled(abort)
@@ -128,21 +282,12 @@ async def wait_writable(self, fd):
     def notify_closing(self, fd):
         if not isinstance(fd, int):
             fd = fd.fileno()
-        if fd not in self._registered:
-            return
-
-        waiters = self._registered[fd]
-
-        def interrupt(task):
-            exc = _core.ClosedResourceError("another task closed this fd")
-            _core.reschedule(task, outcome.Error(exc))
-
-        if waiters.write_task is not None:
-            interrupt(waiters.write_task)
-            waiters.write_task = None
-
-        if waiters.read_task is not None:
-            interrupt(waiters.read_task)
-            waiters.read_task = None
-
-        self._update_registrations(fd, True)
+        wake_all(
+            self._registered[fd],
+            _core.ClosedResourceError("another task closed this fd")
+        )
+        del self._registered[fd]
+        try:
+            self._epoll.unregister(fd)
+        except (OSError, ValueError):
+            pass
diff --git a/trio/_core/_io_kqueue.py b/trio/_core/_io_kqueue.py
index 82d134712a..e3989152ea 100644
--- a/trio/_core/_io_kqueue.py
+++ b/trio/_core/_io_kqueue.py
@@ -120,7 +120,18 @@ async def _wait_common(self, fd, filter):
 
         def abort(_):
             event = select.kevent(fd, filter, select.KQ_EV_DELETE)
-            self._kqueue.control([event], 0)
+            try:
+                self._kqueue.control([event], 0)
+            except FileNotFoundError:
+                # kqueue tracks individual fds (*not* the underlying file
+                # object, see _io_epoll.py for a long discussion of why this
+                # distinction matters), and automatically deregisters an event
+                # if the fd is closed. So if kqueue.control says that it
+                # doesn't know about this event, then probably it's because
+                # the fd was closed behind our backs. (Too bad it doesn't tell
+                # us that this happened... oh well, you can't have
+                # everything.)
+                pass
             return _core.Abort.SUCCEEDED
 
         await self.wait_kevent(fd, filter, abort)
diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py
index c4a5d176e4..7e452177d1 100644
--- a/trio/_core/_io_windows.py
+++ b/trio/_core/_io_windows.py
@@ -3,11 +3,11 @@
 import enum
 import socket
 
-import outcome
 import attr
 
 from .. import _core
 from ._run import _public
+from ._io_common import wake_all
 from ._windows_cffi import (
     ffi,
@@ -264,13 +264,14 @@ def _afd_helper_handle():
     )
 
 
-# Annoyingly, while the API makes it *seem* like you can happily issue
-# as many independent AFD_POLL operations as you want without them interfering
-# with each other, in fact if you issue two AFD_POLL operations for the same
-# socket at the same time, then Windows gets super confused. For example, if
-# we issue one operation from wait_readable, and another independent operation
-# from wait_writable, then Windows may complete the wait_writable operation
-# when the socket becomes readable.
+# Annoyingly, while the API makes it *seem* like you can happily issue as many
+# independent AFD_POLL operations as you want without them interfering with
+# each other, in fact if you issue two AFD_POLL operations for the same socket
+# at the same time with notification going to the same IOCP port, then Windows
+# gets super confused. For example, if we issue one operation from
+# wait_readable, and another independent operation from wait_writable, then
+# Windows may complete the wait_writable operation when the socket becomes
+# readable.
 #
 # To avoid this, we have to coalesce all the operations on a single socket
 # into one, and when the set of waiters changes we have to throw away the old
@@ -511,8 +512,10 @@ def _refresh_afd(self, base_handle):
                 )
             )
         except OSError as exc:
-            if exc.winerror != ErrorCodes.ERROR_NOT_FOUND:  # pragma: no cover
-                raise
+            if exc.winerror != ErrorCodes.ERROR_NOT_FOUND:
+                # I don't think this is possible, so if it happens let's
+                # crash noisily.
+                raise  # pragma: no cover
         waiters.current_op = None
 
     flags = 0
@@ -548,9 +551,15 @@ def _refresh_afd(self, base_handle):
                 )
             )
         except OSError as exc:
-            if exc.winerror != ErrorCodes.ERROR_IO_PENDING:  # pragma: no cover
-                raise
-
+            if exc.winerror != ErrorCodes.ERROR_IO_PENDING:
+                # This could happen if the socket handle got closed behind
+                # our back while a wait_* call was pending, and we tried
+                # to re-issue the call. Clear our state and wake up any
+                # pending calls.
+                del self._afd_waiters[base_handle]
+                # Do this last, because it could raise.
+                wake_all(waiters, exc)
+                return
         op = AFDPollOp(lpOverlapped, poll_info, waiters)
         waiters.current_op = op
         self._afd_ops[lpOverlapped] = op
@@ -564,6 +573,8 @@ async def _afd_poll(self, sock, mode):
         if getattr(waiters, mode) is not None:
             raise _core.BusyResourceError
         setattr(waiters, mode, _core.current_task())
+        # Could potentially raise if the handle is somehow invalid; that's OK,
+        # we let it escape.
         self._refresh_afd(base_handle)
 
         def abort_fn(_):
@@ -586,18 +597,7 @@ def notify_closing(self, handle):
         handle = _get_base_socket(handle)
         waiters = self._afd_waiters.get(handle)
         if waiters is not None:
-            if waiters.read_task is not None:
-                _core.reschedule(
-                    waiters.read_task,
-                    outcome.Error(_core.ClosedResourceError())
-                )
-                waiters.read_task = None
-            if waiters.write_task is not None:
-                _core.reschedule(
-                    waiters.write_task,
-                    outcome.Error(_core.ClosedResourceError())
-                )
-                waiters.write_task = None
+            wake_all(waiters, _core.ClosedResourceError())
             self._refresh_afd(handle)
 
     ################################################################
diff --git a/trio/_core/_run.py b/trio/_core/_run.py
index 905f477165..3ddfa5d524 100644
--- a/trio/_core/_run.py
+++ b/trio/_core/_run.py
@@ -619,12 +619,6 @@ def cancel_called(self):
         return self._cancel_called
 
 
-@deprecated("0.11.0", issue=607, instead="trio.CancelScope")
-def open_cancel_scope(*, deadline=inf, shield=False):
-    """Returns a context manager which creates a new cancellation scope."""
-    return CancelScope(deadline=deadline, shield=shield)
-
-
 ################################################################
 # Nursery and friends
 ################################################################
@@ -856,7 +850,7 @@ def aborted(raise_cancel):
         return MultiError(self._pending_excs)
 
     def start_soon(self, async_fn, *args, name=None):
-        """ Creates a child task, scheduling ``await async_fn(*args)``.
+        """Creates a child task, scheduling ``await async_fn(*args)``.
 
         This and :meth:`start` are the two fundamental methods for creating
         concurrent tasks in Trio.
@@ -1581,21 +1575,21 @@ async def test_lock_fairness():
            lock = trio.Lock()
            await lock.acquire()
            async with trio.open_nursery() as nursery:
-               child = nursery.start_soon(lock_taker, lock)
+               nursery.start_soon(lock_taker, lock)
                # child hasn't run yet, we have the lock
                assert lock.locked()
-               assert lock._owner is trio.current_task()
+               assert lock._owner is trio.hazmat.current_task()
                await trio.testing.wait_all_tasks_blocked()
                # now the child has run and is blocked on lock.acquire(), we
                # still have the lock
                assert lock.locked()
-               assert lock._owner is trio.current_task()
+               assert lock._owner is trio.hazmat.current_task()
                lock.release()
                try:
                    # The child has a prior claim, so we can't have it
                    lock.acquire_nowait()
                except trio.WouldBlock:
-                   assert lock._owner is child
+                   assert lock._owner is not trio.hazmat.current_task()
                    print("PASS")
                else:
                    print("FAIL")
diff --git a/trio/_core/tests/test_io.py b/trio/_core/tests/test_io.py
index 466558d8a7..2adb3f9a65 100644
--- a/trio/_core/tests/test_io.py
+++ b/trio/_core/tests/test_io.py
@@ -4,6 +4,7 @@
 import select
 import random
 import errno
+from contextlib import suppress
 
 from ... import _core
 from ...testing import wait_all_tasks_blocked, Sequencer, assert_checkpoints
@@ -300,10 +301,8 @@ async def test_wait_on_invalid_object():
         fileno = s.fileno()
         # We just closed the socket and don't do anything else in between, so
         # we can be confident that the fileno hasn't been reassigned.
-        with pytest.raises(OSError) as excinfo:
+        with pytest.raises(OSError):
             await wait(fileno)
-        exc = excinfo.value
-        assert exc.errno == errno.EBADF or exc.winerror == errno.ENOTSOCK
 
 
 async def test_io_manager_statistics():
@@ -352,3 +351,99 @@ def check(*, expected_readers, expected_writers):
 
     # 1 for call_soon_task
     check(expected_readers=1, expected_writers=0)
+
+
+async def test_can_survive_unnotified_close():
+    # An "unnotified" close is when the user closes an fd/socket/handle
+    # directly, without calling notify_closing first. This should never happen
+    # -- users should call notify_closing before closing things. But, just in
+    # case they don't, we would still like to avoid exploding.
+    #
+    # Acceptable behaviors:
+    # - wait_* never return, but can be cancelled cleanly
+    # - wait_* exit cleanly
+    # - wait_* raise an OSError
+    #
+    # Not acceptable:
+    # - getting stuck in an uncancellable state
+    # - TrioInternalError blowing up the whole run
+    #
+    # This test exercises some tricky "unnotified close" scenarios, to make
+    # sure we get the "acceptable" behaviors.
+
+    async def allow_OSError(async_func, *args):
+        with suppress(OSError):
+            await async_func(*args)
+
+    with stdlib_socket.socket() as s:
+        async with trio.open_nursery() as nursery:
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_readable, s)
+            await wait_all_tasks_blocked()
+            s.close()
+            await wait_all_tasks_blocked()
+            nursery.cancel_scope.cancel()
+
+    # We hit different paths on Windows depending on whether we close the last
+    # handle to the object (which produces a LOCAL_CLOSE notification and
+    # wakes up wait_readable), or only close one of the handles (which leaves
+    # wait_readable pending until cancelled).
+    with stdlib_socket.socket() as s, s.dup() as s2:  # noqa: F841
+        async with trio.open_nursery() as nursery:
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_readable, s)
+            await wait_all_tasks_blocked()
+            s.close()
+            await wait_all_tasks_blocked()
+            nursery.cancel_scope.cancel()
+
+    # A more elaborate case, with two tasks waiting. On windows and epoll,
+    # the two tasks get muxed together onto a single underlying wait
+    # operation. So when they're cancelled, there's a brief moment where one
+    # of the tasks is cancelled but the other isn't, so we try to re-issue the
+    # underlying wait operation. But here, the handle we were going to use to
+    # do that has been pulled out from under our feet... so test that we can
+    # survive this.
+    a, b = stdlib_socket.socketpair()
+    with a, b, a.dup() as a2:  # noqa: F841
+        a.setblocking(False)
+        b.setblocking(False)
+        fill_socket(a)
+        async with trio.open_nursery() as nursery:
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_readable, a)
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_writable, a)
+            await wait_all_tasks_blocked()
+            a.close()
+            nursery.cancel_scope.cancel()
+
+    # A similar case, but now the single-task-wakeup happens due to I/O
+    # arriving, not a cancellation, so the operation gets re-issued from
+    # handle_io context rather than abort context.
+    a, b = stdlib_socket.socketpair()
+    with a, b, a.dup() as a2:  # noqa: F841
+        print("a={}, b={}, a2={}".format(a.fileno(), b.fileno(), a2.fileno()))
+        a.setblocking(False)
+        b.setblocking(False)
+        fill_socket(a)
+        e = trio.Event()
+
+        # We want to wait for the kernel to process the wakeup on 'a', if any.
+        # But depending on the platform, we might not get a wakeup on 'a'. So
+        # we put one task to sleep waiting on 'a', and we put a second task to
+        # sleep waiting on 'a2', with the idea that the 'a2' notification will
+        # definitely arrive, and when it does then we can assume that whatever
+        # notification was going to arrive for 'a' has also arrived.
+        async def wait_readable_a2_then_set():
+            await trio.hazmat.wait_readable(a2)
+            e.set()
+
+        async with trio.open_nursery() as nursery:
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_readable, a)
+            nursery.start_soon(allow_OSError, trio.hazmat.wait_writable, a)
+            nursery.start_soon(wait_readable_a2_then_set)
+            await wait_all_tasks_blocked()
+            a.close()
+            b.send(b"x")
+            # Make sure that the wakeup has been received and everything has
+            # settled before cancelling the wait_writable.
+            await e.wait()
+            await wait_all_tasks_blocked()
+            nursery.cancel_scope.cancel()
diff --git a/trio/_core/tests/test_run.py b/trio/_core/tests/test_run.py
index fc03197133..3593793fe1 100644
--- a/trio/_core/tests/test_run.py
+++ b/trio/_core/tests/test_run.py
@@ -805,13 +805,6 @@ async def test_basic_timeout(mock_clock):
         await _core.checkpoint()
 
 
-@pytest.mark.filterwarnings(
-    "ignore:.*trio.open_cancel_scope:trio.TrioDeprecationWarning"
-)
-async def test_cancel_scope_deprecated(recwarn):
-    assert isinstance(_core.open_cancel_scope(), _core.CancelScope)
-
-
 async def test_cancel_scope_nesting():
     # Nested scopes: if two triggering at once, the outer one wins
     with _core.CancelScope() as scope1:
diff --git a/trio/_deprecate.py b/trio/_deprecate.py
index 8f7fc56118..b1362cbc38 100644
--- a/trio/_deprecate.py
+++ b/trio/_deprecate.py
@@ -122,7 +122,8 @@ def __getattr__(self, name):
             )
             return info.value
 
-        raise AttributeError(name)
+        msg = "module '{}' has no attribute '{}'"
+        raise AttributeError(msg.format(self.__name__, name))
 
 
 def enable_attribute_deprecations(module_name):
diff --git a/trio/_highlevel_open_tcp_listeners.py b/trio/_highlevel_open_tcp_listeners.py
index 9120a45d18..2625238803 100644
--- a/trio/_highlevel_open_tcp_listeners.py
+++ b/trio/_highlevel_open_tcp_listeners.py
@@ -89,7 +89,7 @@ async def open_tcp_listeners(port, *, host=None, backlog=None):
     # doesn't:
     # http://klickverbot.at/blog/2012/01/getaddrinfo-edge-case-behavior-on-windows-linux-and-osx/
     if not isinstance(port, int):
-        raise TypeError("port must be an int or str, not {!r}".format(port))
+        raise TypeError("port must be an int, not {!r}".format(port))
 
     backlog = _compute_backlog(backlog)
diff --git a/trio/_highlevel_open_tcp_stream.py b/trio/_highlevel_open_tcp_stream.py
index 81c191e15c..71418b12d3 100644
--- a/trio/_highlevel_open_tcp_stream.py
+++ b/trio/_highlevel_open_tcp_stream.py
@@ -212,7 +212,7 @@ async def open_tcp_stream(
       connection attempt to succeed or fail before getting impatient and
      starting another one in parallel. Set to `math.inf` if you want to
      limit to only one connection attempt at a time (like
-      :func:`socket.create_connection`). Default: 0.3 (300 ms).
+      :func:`socket.create_connection`). Default: 0.25 (250 ms).
 
     Returns:
       SocketStream: a :class:`~trio.abc.Stream` connected to the given server.
diff --git a/trio/_highlevel_open_unix_stream.py b/trio/_highlevel_open_unix_stream.py
index 5992c4f73e..59141ebc38 100644
--- a/trio/_highlevel_open_unix_stream.py
+++ b/trio/_highlevel_open_unix_stream.py
@@ -40,13 +40,10 @@ async def open_unix_socket(filename,):
     if not has_unix:
         raise RuntimeError("Unix sockets are not supported on this platform")
 
-    if filename is None:
-        raise ValueError("Filename cannot be None")
-
     # much more simplified logic vs tcp sockets - one socket type and only one
     # possible location to connect to
     sock = socket(AF_UNIX, SOCK_STREAM)
     with close_on_error(sock):
-        await sock.connect(filename)
+        await sock.connect(trio._util.fspath(filename))
 
     return trio.SocketStream(sock)
diff --git a/trio/_highlevel_ssl_helpers.py b/trio/_highlevel_ssl_helpers.py
index ce78832087..9b68e942f4 100644
--- a/trio/_highlevel_ssl_helpers.py
+++ b/trio/_highlevel_ssl_helpers.py
@@ -84,7 +84,7 @@ async def open_ssl_over_tcp_listeners(
       host (str, bytes, or None): The address to bind to; use ``None`` to bind
          to the wildcard address. See :func:`open_tcp_listeners`.
      https_compatible (bool): See :class:`~trio.SSLStream` for details.
-      backlog (int or None): See :class:`~trio.SSLStream` for details.
+      backlog (int or None): See :func:`open_tcp_listeners` for details.
 
     """
     tcp_listeners = await trio.open_tcp_listeners(
diff --git a/trio/_path.py b/trio/_path.py
index 89fa93a586..bbadf9d874 100644
--- a/trio/_path.py
+++ b/trio/_path.py
@@ -58,7 +58,9 @@ def iter_wrapper_factory(cls, meth_name):
     async def wrapper(self, *args, **kwargs):
         meth = getattr(self._wrapped, meth_name)
         func = partial(meth, *args, **kwargs)
-        items = await trio.to_thread.run_sync(func)
+        # Make sure that the full iteration is performed in the thread
+        # by converting the generator produced by pathlib into a list
+        items = await trio.to_thread.run_sync(lambda: list(func()))
         return (rewrap_path(item) for item in items)
 
     return wrapper
diff --git a/trio/_sync.py b/trio/_sync.py
index 6574d8c3ae..f34acd0858 100644
--- a/trio/_sync.py
+++ b/trio/_sync.py
@@ -676,6 +676,8 @@ class StrictFIFOLock(Lock):
     on this property.
 
     """
+
+
 @attr.s(frozen=True)
 class _ConditionStatistics:
     tasks_waiting = attr.ib()
@@ -737,7 +739,7 @@ def release(self):
 
     @enable_ki_protection
     async def wait(self):
-        """Wait for another thread to call :meth:`notify` or
+        """Wait for another task to call :meth:`notify` or
         :meth:`notify_all`.
 
         When calling this method, you must hold the lock. It releases the lock
diff --git a/trio/_threads.py b/trio/_threads.py
index 52c19ac486..e5ffb74b7c 100644
--- a/trio/_threads.py
+++ b/trio/_threads.py
@@ -210,7 +210,7 @@ async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None):
      background** – we just abandon it to do whatever it's going to do, and
      silently discard any return value or errors that it raises. Only use
      this if you know that the operation is safe and side-effect free. (For
-      example: :func:`trio.socket.getaddrinfo` is uses a thread with
+      example: :func:`trio.socket.getaddrinfo` uses a thread with
      ``cancellable=True``, because it doesn't really affect anything if a
      stray hostname lookup keeps running in the background.)
diff --git a/trio/_timeouts.py b/trio/_timeouts.py
index ccdf075946..25cef08d0d 100644
--- a/trio/_timeouts.py
+++ b/trio/_timeouts.py
@@ -93,6 +93,8 @@ class TooSlowError(Exception):
     expires.
 
     """
+
+
 @contextmanager
 def fail_at(deadline):
     """Creates a cancel scope with the given deadline, and raises an error if it
diff --git a/trio/_tools/gen_exports.py b/trio/_tools/gen_exports.py
index cc0738177a..1340d049a2 100755
--- a/trio/_tools/gen_exports.py
+++ b/trio/_tools/gen_exports.py
@@ -10,7 +10,6 @@
 import os
 from pathlib import Path
 import sys
-import yapf.yapflib.yapf_api as formatter
 
 from textwrap import indent
diff --git a/trio/_util.py b/trio/_util.py
index bfef624dab..9204b83349 100644
--- a/trio/_util.py
+++ b/trio/_util.py
@@ -72,10 +72,13 @@ def signal_raise(signum):
 if sys.version_info < (3, 5, 2):
 
     def aiter_compat(aiter_impl):
-        @wraps(aiter_impl)
+        # de-sugar decorator to fix Python 3.8 coverage issue
+        # https://github.com/python-trio/trio/pull/784#issuecomment-446438407
         async def __aiter__(*args, **kwargs):
             return aiter_impl(*args, **kwargs)
 
+        __aiter__ = wraps(aiter_impl)(__aiter__)
+
         return __aiter__
 
 else:
diff --git a/trio/_version.py b/trio/_version.py
index 52bca16bec..0705c99035 100644
--- a/trio/_version.py
+++ b/trio/_version.py
@@ -1,3 +1,3 @@
 # This file is imported from __init__.py and exec'd from setup.py
 
-__version__ = "0.13.0"
+__version__ = "0.14.0"
diff --git a/trio/socket.py b/trio/socket.py
index 3464679394..266fc0dbda 100644
--- a/trio/socket.py
+++ b/trio/socket.py
@@ -104,7 +104,9 @@
         CAN_BCM_RX_CHECK_DLC, CAN_BCM_RX_FILTER_ID, CAN_BCM_RX_NO_AUTOTIMER,
         CAN_BCM_RX_RTR_FRAME, CAN_BCM_SETTIMER, CAN_BCM_STARTTIMER,
         CAN_BCM_TX_ANNOUNCE, CAN_BCM_TX_COUNTEVT, CAN_BCM_TX_CP_CAN_ID,
-        CAN_BCM_TX_RESET_MULTI_IDX
+        CAN_BCM_TX_RESET_MULTI_IDX, IPPROTO_CBT, IPPROTO_ICLFXBM, IPPROTO_IGP,
+        IPPROTO_L2TP, IPPROTO_PGM, IPPROTO_RDP, IPPROTO_ST, AF_QIPCRTR,
+        CAN_BCM_CAN_FD_FRAME
     )
 except ImportError:
     pass
diff --git a/trio/testing/_memory_streams.py b/trio/testing/_memory_streams.py
index ac70d561e9..d86e301888 100644
--- a/trio/testing/_memory_streams.py
+++ b/trio/testing/_memory_streams.py
@@ -393,7 +393,7 @@ def memory_stream_pair():
        async def trickle():
            # left is a StapledStream, and left.send_stream is a MemorySendStream
            # right is a StapledStream, and right.recv_stream is a MemoryReceiveStream
-           while memory_stream_pump(left.send_stream, right.recv_stream, max_byes=1):
+           while memory_stream_pump(left.send_stream, right.recv_stream, max_bytes=1):
                # Pause between each byte
                await trio.sleep(1)
 
    # Normally this send_all_hook calls memory_stream_pump directly without
diff --git a/trio/tests/test_deprecate.py b/trio/tests/test_deprecate.py
index 98e57ec706..6ecd00003e 100644
--- a/trio/tests/test_deprecate.py
+++ b/trio/tests/test_deprecate.py
@@ -157,16 +157,22 @@ def docstring_test1():  # pragma: no cover
     """Hello!
     """
+
+
 @deprecated("2.1", issue=None, instead="hi")
 def docstring_test2():  # pragma: no cover
     """Hello!
     """
+
+
 @deprecated("2.1", issue=1, instead=None)
 def docstring_test3():  # pragma: no cover
     """Hello!
     """
+
+
 @deprecated("2.1", issue=None, instead=None)
 def docstring_test4():  # pragma: no cover
     """Hello!
diff --git a/trio/tests/test_exports.py b/trio/tests/test_exports.py
index a9b7179f5f..abc6f07963 100644
--- a/trio/tests/test_exports.py
+++ b/trio/tests/test_exports.py
@@ -49,6 +49,11 @@ def public_namespaces(module):
 NAMESPACES = list(public_namespaces(trio))
 
 
+# It doesn't make sense for downstream redistributors to run this test, since
+# they might be using a newer version of Python with additional symbols which
+# won't be reflected in trio.socket, and this shouldn't cause downstream test
+# runs to start failing.
+@pytest.mark.redistributors_should_skip
 # pylint/jedi often have trouble with alpha releases, where Python's internals
 # are in flux, grammar may not have settled down, etc.
 @pytest.mark.skipif(
diff --git a/trio/tests/test_highlevel_open_unix_stream.py b/trio/tests/test_highlevel_open_unix_stream.py
index c66028b14f..872a43dd6d 100644
--- a/trio/tests/test_highlevel_open_unix_stream.py
+++ b/trio/tests/test_highlevel_open_unix_stream.py
@@ -30,6 +30,12 @@ def close(self):
     assert c.closed
 
 
+@pytest.mark.parametrize('filename', [4, 4.5])
+async def test_open_with_bad_filename_type(filename):
+    with pytest.raises(TypeError):
+        await open_unix_socket(filename)
+
+
 async def test_open_bad_socket():
     # mktemp is marked as insecure, but that's okay, we don't want the file to
     # exist
diff --git a/trio/tests/test_ssl.py b/trio/tests/test_ssl.py
index dbdd882c43..16b323e98b 100644
--- a/trio/tests/test_ssl.py
+++ b/trio/tests/test_ssl.py
@@ -107,6 +107,22 @@ def ssl_echo_serve_sync(sock, *, expect_fail=False):
                         pass
                     return
                 wrapped.sendall(data)
+    # This is an obscure workaround for an openssl bug. In server mode, in
+    # some versions, openssl sends some extra data at the end of do_handshake
+    # that it shouldn't send. Normally this is harmless, but, if the other
+    # side shuts down the connection before it reads that data, it might cause
+    # the OS to report an ECONNRESET or even ECONNABORTED (which is just wrong,
+    # since ECONNABORTED is supposed to mean that connect() failed, but what
+    # can you do). In this case the other side did nothing wrong, but there's
+    # no way to recover, so we let it pass, and just cross our fingers it's not
+    # hiding any (other) real bugs. For more details see:
+    #
+    # https://github.com/python-trio/trio/issues/1293
+    #
+    # Also, this happens frequently but non-deterministically, so we have to
+    # 'no cover' it to avoid coverage flapping.
+    except (ConnectionResetError, ConnectionAbortedError):  # pragma: no cover
+        return
     except Exception as exc:
         if expect_fail:
             print("ssl_echo_serve_sync got error as expected:", exc)
@@ -1239,8 +1255,8 @@ async def test_getpeercert(client_ctx):
     assert server.getpeercert() is None
     print(client.getpeercert())
     assert (
-        ("DNS",
-         "trio-test-1.example.org") in client.getpeercert()["subjectAltName"]
+        ("DNS", "trio-test-1.example.org")
+        in client.getpeercert()["subjectAltName"]
     )
diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py
index fe55b85aba..7fe64564c7 100644
--- a/trio/tests/test_subprocess.py
+++ b/trio/tests/test_subprocess.py
@@ -14,9 +14,9 @@
 posix = os.name == "posix"
 if posix:
-    from signal import SIGKILL, SIGTERM, SIGINT
+    from signal import SIGKILL, SIGTERM, SIGUSR1
 else:
-    SIGKILL, SIGTERM, SIGINT = None, None, None
+    SIGKILL, SIGTERM, SIGUSR1 = None, None, None
 
 
 # Since Windows has very few command-line utilities generally available,
@@ -362,8 +362,15 @@ async def test_one_signal(send_it, signum):
     await test_one_signal(Process.kill, SIGKILL)
     await test_one_signal(Process.terminate, SIGTERM)
 
+    # Test that we can send arbitrary signals.
+    #
+    # We used to use SIGINT here, but it turns out that the Python interpreter
+    # has race conditions that can cause it to explode in weird ways if it
+    # tries to handle SIGINT during startup. SIGUSR1's default disposition is
+    # to terminate the target process, and Python doesn't try to do anything
+    # clever to handle it.
     if posix:
-        await test_one_signal(lambda proc: proc.send_signal(SIGINT), SIGINT)
+        await test_one_signal(lambda proc: proc.send_signal(SIGUSR1), SIGUSR1)
 
 
 @pytest.mark.skipif(not posix, reason="POSIX specific")
diff --git a/trio/tests/test_unix_pipes.py b/trio/tests/test_unix_pipes.py
index 46f28d5471..9349139e23 100644
--- a/trio/tests/test_unix_pipes.py
+++ b/trio/tests/test_unix_pipes.py
@@ -1,6 +1,7 @@
 import errno
 import select
 import os
+import tempfile
 
 import pytest
 
@@ -98,8 +99,11 @@ async def test_pipe_errors():
     with pytest.raises(TypeError):
         FdStream(None)
 
-    with pytest.raises(ValueError):
-        await FdStream(0).receive_some(0)
+    r, w = os.pipe()
+    os.close(w)
+    async with FdStream(r) as s:
+        with pytest.raises(ValueError):
+            await s.receive_some(0)
 
 
 async def test_del():
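Several hunks above import and call wake_all from trio/_core/_io_common.py, but that new file's hunk is not visible in this part of the diff. From the call sites -- it has to wake any pending read/write waiter with the given exception, and the epoll comment notes that it "could raise (in case we're calling this inside one of the to-be-woken tasks)" -- a rough reconstruction might look like this (a sketch of the missing module, not the actual hunk):

    import copy

    import outcome

    from .. import _core

    def wake_all(waiters, exc):
        try:
            current_task = _core.current_task()
        except RuntimeError:
            current_task = None
        raise_at_end = False
        for attr_name in ["read_task", "write_task"]:
            task = getattr(waiters, attr_name)
            if task is not None:
                if task is current_task:
                    # We can't reschedule ourselves; re-raise at the end
                    # instead, which is why callers must invoke wake_all last.
                    raise_at_end = True
                else:
                    _core.reschedule(task, outcome.Error(copy.copy(exc)))
                setattr(waiters, attr_name, None)
        if raise_at_end:
            raise exc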
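For readers unfamiliar with EPOLLONESHOT, here is a standalone, Linux-only sketch (plain stdlib, not trio code) of the re-arming pattern that the new _update_registrations uses: a oneshot registration is disabled, not removed, once an event fires, so re-arming goes through modify() (EPOLL_CTL_MOD), with register() (EPOLL_CTL_ADD) only as a fallback for fds this epoll object has never seen:

    import select
    import socket

    ep = select.epoll()

    def arm(fd, flags):
        try:
            ep.modify(fd, flags | select.EPOLLONESHOT)
        except OSError:
            # Unknown fd: fall back to adding a fresh registration.
            ep.register(fd, flags | select.EPOLLONESHOT)

    a, b = socket.socketpair()
    arm(a.fileno(), select.EPOLLIN)
    b.send(b"x")
    for fd, event_flags in ep.poll(1):
        print("event on fd", fd, hex(event_flags))
        # Without this re-arm, the now-disabled registration would never
        # deliver another event.
        arm(fd, select.EPOLLIN)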