diff --git a/.asf.yaml b/.asf.yaml index 75b2262de..cb0520c17 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -31,8 +31,11 @@ github: issues: true protected_branches: main: - required_pull_request_reviews: - required_approving_review_count: 1 + required_status_checks: + # require branches to be up-to-date before merging + strict: true + # don't require any jobs to pass + contexts: [] staging: whoami: asf-staging diff --git a/.cargo/config.toml b/.cargo/config.toml index 91a099a61..af951327f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,12 +1,5 @@ [target.x86_64-apple-darwin] -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] [target.aarch64-apple-darwin] -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] - +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 61896e43d..b86b37c6e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,19 +15,54 @@ # specific language governing permissions and limitations # under the License. -name: Python Release Build +# Reusable workflow for building +# This ensures the same build steps run for both debug (PRs) and release (main/tags) builds + +name: Build + on: - pull_request: - branches: ["main"] - push: - tags: ["*-rc*"] - branches: ["branch-*"] + workflow_call: + inputs: + build_mode: + description: 'Build mode: debug or release' + required: true + type: string + run_wheels: + description: 'Whether to build distribution wheels' + required: false + type: boolean + default: false + +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 jobs: - build: + # ============================================ + # Linting Jobs + # ============================================ + lint-rust: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: "nightly" + components: rustfmt + + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + + - name: Check formatting + run: cargo +nightly fmt --all -- --check + + lint-python: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Install Python uses: actions/setup-python@v5 with: @@ -35,37 +70,184 @@ jobs: - uses: astral-sh/setup-uv@v6 with: - enable-cache: true + enable-cache: true - # Use the --no-install-package to only install the dependencies - # but do not yet build the rust library - name: Install dependencies run: uv sync --dev --no-install-package datafusion - # Update output format to enable automatic inline annotations. - name: Run Ruff run: | uv run --no-project ruff check --output-format=github python/ uv run --no-project ruff format --check python/ + - name: Run codespell + run: | + uv run --no-project codespell --toml pyproject.toml + + lint-toml: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Install taplo + uses: taiki-e/install-action@v2 + with: + tool: taplo-cli + + # If you encounter an error, try running 'taplo format' to fix the formatting automatically.
+ - name: Check Cargo.toml formatting + run: taplo format --check + generate-license: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 + - uses: astral-sh/setup-uv@v6 with: - enable-cache: true + enable-cache: true + + - name: Install cargo-license + uses: taiki-e/install-action@v2 + with: + tool: cargo-license - name: Generate license file run: uv run --no-project python ./dev/create_license.py - - uses: actions/upload-artifact@v4 + + - uses: actions/upload-artifact@v6 with: name: python-wheel-license path: LICENSE.txt + # ============================================ + # Build - Linux x86_64 + # ============================================ + build-manylinux-x86_64: + needs: [generate-license, lint-rust, lint-python] + name: ManyLinux x86_64 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - run: rm LICENSE.txt + - name: Download LICENSE.txt + uses: actions/download-artifact@v7 + with: + name: python-wheel-license + path: . + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + with: + key: ${{ inputs.build_mode }} + + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + + - name: Build (release mode) + uses: PyO3/maturin-action@v1 + if: inputs.build_mode == 'release' + with: + target: x86_64-unknown-linux-gnu + manylinux: "2_28" + args: --release --strip --features protoc,substrait --out dist + rustup-components: rust-std + + - name: Build (debug mode) + uses: PyO3/maturin-action@v1 + if: inputs.build_mode == 'debug' + with: + target: x86_64-unknown-linux-gnu + manylinux: "2_28" + args: --features protoc,substrait --out dist + rustup-components: rust-std + + - name: Build FFI test library + uses: PyO3/maturin-action@v1 + with: + target: x86_64-unknown-linux-gnu + manylinux: "2_28" + working-directory: examples/datafusion-ffi-example + args: --out dist + rustup-components: rust-std + + - name: Archive wheels + uses: actions/upload-artifact@v6 + with: + name: dist-manylinux-x86_64 + path: dist/* + + - name: Archive FFI test wheel + uses: actions/upload-artifact@v6 + with: + name: test-ffi-manylinux-x86_64 + path: examples/datafusion-ffi-example/dist/* + + # ============================================ + # Build - Linux ARM64 + # ============================================ + build-manylinux-aarch64: + needs: [generate-license, lint-rust, lint-python] + name: ManyLinux arm64 + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + + - run: rm LICENSE.txt + - name: Download LICENSE.txt + uses: actions/download-artifact@v7 + with: + name: python-wheel-license + path: . 
+ + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + with: + key: ${{ inputs.build_mode }} + + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + + - name: Build (release mode) + uses: PyO3/maturin-action@v1 + if: inputs.build_mode == 'release' + with: + target: aarch64-unknown-linux-gnu + manylinux: "2_28" + args: --release --strip --features protoc,substrait --out dist + rustup-components: rust-std + + - name: Build (debug mode) + uses: PyO3/maturin-action@v1 + if: inputs.build_mode == 'debug' + with: + target: aarch64-unknown-linux-gnu + manylinux: "2_28" + args: --features protoc,substrait --out dist + rustup-components: rust-std + + - name: Archive wheels + uses: actions/upload-artifact@v6 + if: inputs.build_mode == 'release' + with: + name: dist-manylinux-aarch64 + path: dist/* + + # ============================================ + # Build - macOS arm64 / Windows + # ============================================ build-python-mac-win: - needs: [generate-license] - name: Mac/Win + needs: [generate-license, lint-rust, lint-python] + name: macOS arm64 & Windows runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -73,35 +255,49 @@ jobs: python-version: ["3.10"] os: [macos-latest, windows-latest] steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable - run: rm LICENSE.txt - name: Download LICENSE.txt - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: python-wheel-license path: . + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + with: + key: ${{ inputs.build_mode }} + + - uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + - name: Install Protoc uses: arduino/setup-protoc@v3 with: version: "27.4" repo-token: ${{ secrets.GITHUB_TOKEN }} - - uses: astral-sh/setup-uv@v6 - with: - enable-cache: true + - name: Install dependencies + run: uv sync --dev --no-install-package datafusion - - name: Build Python package - run: | - uv sync --dev --no-install-package datafusion - uv run --no-project maturin build --release --strip --features substrait + # Run clippy BEFORE maturin so we can avoid rebuilding. The features must match + # exactly the features used by maturin. Linux maturin builds need to happen in a + # container, so only run this for our macOS runner.
+ - name: Run Clippy + if: matrix.os != 'windows-latest' + run: cargo clippy --no-deps --all-targets --features substrait -- -D warnings + + - name: Build Python package (release mode) + if: inputs.build_mode == 'release' + run: uv run --no-project maturin build --release --strip --features substrait + + - name: Build Python package (debug mode) + if: inputs.build_mode != 'release' + run: uv run --no-project maturin build --features substrait - name: List Windows wheels if: matrix.os == 'windows-latest' @@ -115,127 +311,80 @@ jobs: run: find target/wheels/ - name: Archive wheels - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 + if: inputs.build_mode == 'release' with: name: dist-${{ matrix.os }} path: target/wheels/* + # ============================================ + # Build - macOS x86_64 (release only) + # ============================================ build-macos-x86_64: - needs: [generate-license] - name: Mac x86_64 - runs-on: macos-13 + if: inputs.build_mode == 'release' + needs: [generate-license, lint-rust, lint-python] + runs-on: macos-15-intel strategy: fail-fast: false matrix: python-version: ["3.10"] steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable - run: rm LICENSE.txt - name: Download LICENSE.txt - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: python-wheel-license path: . + - name: Cache Cargo + uses: Swatinem/rust-cache@v2 + with: + key: ${{ inputs.build_mode }} + + - uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + - name: Install Protoc uses: arduino/setup-protoc@v3 with: version: "27.4" repo-token: ${{ secrets.GITHUB_TOKEN }} - - uses: astral-sh/setup-uv@v6 - with: - enable-cache: true + - name: Install dependencies + run: uv sync --dev --no-install-package datafusion - - name: Build Python package + - name: Build (release mode) run: | - uv sync --dev --no-install-package datafusion uv run --no-project maturin build --release --strip --features substrait - name: List Mac wheels run: find target/wheels/ - name: Archive wheels - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: dist-macos-aarch64 path: target/wheels/* - build-manylinux-x86_64: - needs: [generate-license] - name: Manylinux x86_64 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: rm LICENSE.txt - - name: Download LICENSE.txt - uses: actions/download-artifact@v4 - with: - name: python-wheel-license - path: . - - run: cat LICENSE.txt - - name: Build wheels - uses: PyO3/maturin-action@v1 - env: - RUST_BACKTRACE: 1 - with: - rust-toolchain: nightly - target: x86_64 - manylinux: auto - rustup-components: rust-std rustfmt # Keep them in one line due to https://github.com/PyO3/maturin-action/issues/153 - args: --release --manylinux 2014 --features protoc,substrait - - name: Archive wheels - uses: actions/upload-artifact@v4 - with: - name: dist-manylinux-x86_64 - path: target/wheels/* - - build-manylinux-aarch64: - needs: [generate-license] - name: Manylinux arm64 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: rm LICENSE.txt - - name: Download LICENSE.txt - uses: actions/download-artifact@v4 - with: - name: python-wheel-license - path: . 
- - run: cat LICENSE.txt - - name: Build wheels - uses: PyO3/maturin-action@v1 - env: - RUST_BACKTRACE: 1 - with: - rust-toolchain: nightly - target: aarch64 - # Use manylinux_2_28-cross because the manylinux2014-cross has GCC 4.8.5, which causes the build to fail - manylinux: 2_28 - rustup-components: rust-std rustfmt # Keep them in one line due to https://github.com/PyO3/maturin-action/issues/153 - args: --release --features protoc,substrait - - name: Archive wheels - uses: actions/upload-artifact@v4 - with: - name: dist-manylinux-aarch64 - path: target/wheels/* + # ============================================ + # Build - Source Distribution + # ============================================ build-sdist: needs: [generate-license] name: Source distribution + if: inputs.build_mode == 'release' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - run: rm LICENSE.txt - name: Download LICENSE.txt - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v7 with: name: python-wheel-license path: . @@ -249,16 +398,22 @@ jobs: args: --release --sdist --out dist --features protoc,substrait - name: Assert sdist build does not generate wheels run: | - if [ "$(ls -A target/wheels)" ]; then - echo "Error: Sdist build generated wheels" - exit 1 - else - echo "Directory is clean" - fi + if [ "$(ls -A target/wheels)" ]; then + echo "Error: Sdist build generated wheels" + exit 1 + else + echo "Directory is clean" + fi shell: bash - + + # ============================================ + # Merge Build Artifacts + # ============================================ + merge-build-artifacts: runs-on: ubuntu-latest + name: Merge build artifacts + if: inputs.build_mode == 'release' needs: - build-python-mac-win - build-macos-x86_64 @@ -267,20 +422,104 @@ jobs: - build-sdist steps: - name: Merge Build Artifacts - uses: actions/upload-artifact/merge@v4 + uses: actions/upload-artifact/merge@v6 with: name: dist pattern: dist-* - - # NOTE: PyPI publish needs to be done manually for now after release passed the vote - # release: - # name: Publish in PyPI - # needs: [build-manylinux, build-python-mac-win] - # runs-on: ubuntu-latest - # steps: - # - uses: actions/download-artifact@v4 - # - name: Publish to PyPI - # uses: pypa/gh-action-pypi-publish@master - # with: - # user: __token__ - # password: ${{ secrets.pypi_password }} + + # ============================================ + # Build - Documentation + # ============================================ + # Documentation build job that runs after wheels are built + build-docs: + name: Build docs + runs-on: ubuntu-latest + needs: [build-manylinux-x86_64] # Only need the Linux wheel for docs + # Only run docs on main branch pushes, tags, or PRs + if: github.event_name == 'push' || github.event_name == 'pull_request' + steps: + - name: Set target branch + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') + id: target-branch + run: | + set -x + if test '${{ github.ref }}' = 'refs/heads/main'; then + echo "value=asf-staging" >> "$GITHUB_OUTPUT" + elif test '${{ github.ref_type }}' = 'tag'; then + echo "value=asf-site" >> "$GITHUB_OUTPUT" + else + echo "Unsupported input: ${{ github.ref }} / ${{ github.ref_type }}" + exit 1 + fi + + - name: Checkout docs sources + uses: actions/checkout@v6 + + - name: Checkout docs target branch + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') + uses: actions/checkout@v6 + with: + fetch-depth: 0 +
ref: ${{ steps.target-branch.outputs.value }} + path: docs-target + + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: "3.10" + + - name: Install dependencies + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + + # Download the Linux wheel built in the previous job + - name: Download pre-built Linux wheel + uses: actions/download-artifact@v7 + with: + name: dist-manylinux-x86_64 + path: wheels/ + + # Install from the pre-built wheels + - name: Install from pre-built wheels + run: | + set -x + uv venv + # Install documentation dependencies + uv sync --dev --no-install-package datafusion --group docs + # Install all pre-built wheels + WHEELS=$(find wheels/ -name "*.whl") + if [ -n "$WHEELS" ]; then + echo "Installing wheels:" + echo "$WHEELS" + uv pip install wheels/*.whl + else + echo "ERROR: No wheels found!" + exit 1 + fi + + - name: Build docs + run: | + set -x + cd docs + curl -O https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv + curl -O https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2021-01.parquet + uv run --no-project make html + + - name: Copy & push the generated HTML + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') + run: | + set -x + cd docs-target + # delete anything but: 1) '.'; 2) '..'; 3) .git/ + find ./ | grep -vE "^./$|^../$|^./.git" | xargs rm -rf + cp ../.asf.yaml . + cp -r ../docs/build/html/* . + git status --porcelain + if [ "$(git status --porcelain)" != "" ]; then + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add --all + git commit -m 'Publish built docs triggered by ${{ github.sha }}' + git push || git push --force + fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..ab284b522 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# CI workflow for pull requests - runs tests in DEBUG mode for faster feedback + +name: CI + +on: + pull_request: + branches: ["main"] + +concurrency: + group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + build: + uses: ./.github/workflows/build.yml + with: + build_mode: debug + run_wheels: false + secrets: inherit + + test: + needs: build + uses: ./.github/workflows/test.yml + secrets: inherit diff --git a/.github/workflows/dev.yml b/.github/workflows/dev.yml index 44481818e..2c8ecbc5e 100644 --- a/.github/workflows/dev.yml +++ b/.github/workflows/dev.yml @@ -25,10 +25,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: - python-version: "3.10" + python-version: "3.14" - name: Audit licenses run: ./dev/release/run-rat.sh . diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml deleted file mode 100644 index 9341488a0..000000000 --- a/.github/workflows/docs.yaml +++ /dev/null @@ -1,95 +0,0 @@ -on: - push: - branches: - - main - tags-ignore: - - "**-rc**" - pull_request: - branches: - - main - -name: Deploy DataFusion Python site - -jobs: - debug-github-context: - name: Print github context - runs-on: ubuntu-latest - steps: - - name: Dump GitHub context - env: - GITHUB_CONTEXT: ${{ toJson(github) }} - run: | - echo "$GITHUB_CONTEXT" - build-docs: - name: Build docs - runs-on: ubuntu-latest - steps: - - name: Set target branch - if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') - id: target-branch - run: | - set -x - if test '${{ github.ref }}' = 'refs/heads/main'; then - echo "value=asf-staging" >> "$GITHUB_OUTPUT" - elif test '${{ github.ref_type }}' = 'tag'; then - echo "value=asf-site" >> "$GITHUB_OUTPUT" - else - echo "Unsupported input: ${{ github.ref }} / ${{ github.ref_type }}" - exit 1 - fi - - name: Checkout docs sources - uses: actions/checkout@v4 - - name: Checkout docs target branch - if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: ${{ steps.target-branch.outputs.value }} - path: docs-target - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install Protoc - uses: arduino/setup-protoc@v3 - with: - version: '27.4' - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Install dependencies and build - uses: astral-sh/setup-uv@v6 - with: - enable-cache: true - - - name: Build repo - run: | - uv venv - uv sync --dev --no-install-package datafusion --group docs - uv run --no-project maturin develop --uv - - - name: Build docs - run: | - set -x - cd docs - curl -O https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv - curl -O https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2021-01.parquet - uv run --no-project make html - - - name: Copy & push the generated HTML - if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag') - run: | - set -x - cd docs-target - # delete anything but: 1) '.'; 2) '..'; 3) .git/ - find ./ | grep -vE "^./$|^../$|^./.git" | xargs rm -rf - cp ../.asf.yaml . - cp -r ../docs/build/html/* . 
- git status --porcelain - if [ "$(git status --porcelain)" != "" ]; then - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - git add --all - git commit -m 'Publish built docs triggered by ${{ github.sha }}' - git push || git push --force - fi diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..bddc89eac --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Release workflow - runs tests in RELEASE mode and builds distribution wheels +# Triggered on: +# - Merges to main +# - Release candidate tags (*-rc*) +# - Release tags (e.g., 45.0.0) + +name: Release Build + +on: + push: + branches: + - "main" + tags: + - "*-rc*" # Release candidates (e.g., 45.0.0-rc1) + - "[0-9]+.*" # Release tags (e.g., 45.0.0) + +concurrency: + group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + build: + uses: ./.github/workflows/build.yml + with: + build_mode: release + run_wheels: true + secrets: inherit + + test: + needs: build + uses: ./.github/workflows/test.yml + secrets: inherit diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yml similarity index 53% rename from .github/workflows/test.yaml rename to .github/workflows/test.yml index 4ae081406..55248b6bf 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yml @@ -15,16 +15,13 @@ # specific language governing permissions and limitations # under the License. 
-name: Python test -on: - push: - branches: [main] - pull_request: - branches: [main] +# Reusable workflow for running tests +# This ensures the same tests run for both debug (PRs) and release (main/tags) builds + +name: Test -concurrency: - group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }} - cancel-in-progress: true +on: + workflow_call: jobs: test-matrix: @@ -33,78 +30,92 @@ jobs: fail-fast: false matrix: python-version: - - "3.9" - "3.10" - "3.11" - "3.12" - "3.13" + - "3.14" toolchain: - "stable" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - name: Setup Rust Toolchain - uses: dtolnay/rust-toolchain@stable - id: rust-toolchain - with: - components: clippy,rustfmt + - name: Verify example datafusion version + run: | + MAIN_VERSION=$(grep -A 1 "name = \"datafusion-common\"" Cargo.lock | grep "version = " | head -1 | sed 's/.*version = "\(.*\)"/\1/') + EXAMPLE_VERSION=$(grep -A 1 "name = \"datafusion-common\"" examples/datafusion-ffi-example/Cargo.lock | grep "version = " | head -1 | sed 's/.*version = "\(.*\)"/\1/') + echo "Main crate datafusion version: $MAIN_VERSION" + echo "FFI example datafusion version: $EXAMPLE_VERSION" - - name: Install Protoc - uses: arduino/setup-protoc@v3 - with: - version: '27.4' - repo-token: ${{ secrets.GITHUB_TOKEN }} + if [ "$MAIN_VERSION" != "$EXAMPLE_VERSION" ]; then + echo "❌ Error: FFI example datafusion versions don't match!" + exit 1 + fi - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} - name: Cache Cargo - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cargo key: cargo-cache-${{ steps.rust-toolchain.outputs.cachekey }}-${{ hashFiles('Cargo.lock') }} - - name: Check Formatting - if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }} - run: cargo fmt -- --check + - name: Install dependencies + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true - - name: Run Clippy - if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }} - run: cargo clippy --all-targets --all-features -- -D clippy::all -D warnings -A clippy::redundant_closure + # Download the Linux wheel built in the build workflow + - name: Download pre-built Linux wheel + uses: actions/download-artifact@v7 + with: + name: dist-manylinux-x86_64 + path: wheels/ - - name: Install dependencies and build - uses: astral-sh/setup-uv@v6 + # Download the FFI test wheel + - name: Download pre-built FFI test wheel + uses: actions/download-artifact@v7 with: - enable-cache: true + name: test-ffi-manylinux-x86_64 + path: wheels/ - - name: Check documentation - if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }} + # Install from the pre-built wheels + - name: Install from pre-built wheels run: | - uv sync --dev --group docs --no-install-package datafusion - uv run --no-project maturin develop --uv - uv run --no-project docs/build.sh + set -x + uv venv + # Install development dependencies + uv sync --dev --no-install-package datafusion + # Install all pre-built wheels + WHEELS=$(find wheels/ -name "*.whl") + if [ -n "$WHEELS" ]; then + echo "Installing wheels:" + echo "$WHEELS" + uv pip install wheels/*.whl + else + echo "ERROR: No wheels found!" + exit 1 + fi - name: Run tests env: RUST_BACKTRACE: 1 run: | git submodule update --init - uv sync --dev --no-install-package datafusion - uv run --no-project maturin develop --uv - uv run --no-project pytest -v . 
+ uv run --no-project pytest -v . --import-mode=importlib - name: FFI unit tests run: | cd examples/datafusion-ffi-example - uv run --no-project maturin develop --uv uv run --no-project pytest python/tests/_test*.py - name: Cache the generated dataset id: cache-tpch-dataset - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: benchmarks/tpch/data key: tpch-data-2.18.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index abcfcf321..bcefa405d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: - id: rust-fmt name: Rust fmt description: Run cargo fmt on files included in the commit. rustfmt should be installed before-hand. - entry: cargo fmt --all -- + entry: cargo +nightly fmt --all -- pass_filenames: true types: [file, rust] language: system @@ -45,5 +45,13 @@ repos: types: [file, rust] language: system + - repo: https://github.com/codespell-project/codespell + rev: v2.4.1 + hooks: + - id: codespell + args: [ --toml, "pyproject.toml"] + additional_dependencies: + - tomli + default_language_version: python: python3 diff --git a/Cargo.lock b/Cargo.lock index 112167cb4..cd853a03f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "abi_stable" @@ -50,26 +50,11 @@ dependencies = [ "core_extensions", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - -[[package]] -name = "adler32" -version = "1.2.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" @@ -79,7 +64,7 @@ checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "const-random", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "version_check", "zerocopy", @@ -87,9 +72,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -115,12 +100,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -132,25 +111,27 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = 
"apache-avro" -version = "0.17.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aef82843a0ec9f8b19567445ad2421ceeb1d711514384bdd3d49fe37102ee13" +checksum = "36fa98bc79671c7981272d91a8753a928ff6a1cd8e4f20a44c45bd5d313840bf" dependencies = [ "bigdecimal", - "bzip2 0.4.4", + "bon", + "bzip2", "crc32fast", "digest", - "libflate", + "liblzma", "log", + "miniz_oxide", "num-bigint", "quad-rand", - "rand 0.8.5", + "rand", "regex-lite", "serde", "serde_bytes", @@ -158,13 +139,29 @@ dependencies = [ "snap", "strum", "strum_macros", - "thiserror 1.0.69", - "typed-builder", + "thiserror", "uuid", - "xz2", "zstd", ] +[[package]] +name = "ar_archive_writer" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eb93bbb63b9c227414f6eb3a0adfddca591a8ce1e9b60661bb08969b87e340b" +dependencies = [ + "object", +] + +[[package]] +name = "arc-swap" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +dependencies = [ + "rustversion", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -179,9 +176,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "arrow" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bb018b6960c87fd9d025009820406f74e83281185a8bdcb44880d2aa5c9a87" +checksum = "e4754a624e5ae42081f464514be454b39711daae0458906dacde5f4c632f33a8" dependencies = [ "arrow-arith", "arrow-array", @@ -192,32 +189,32 @@ dependencies = [ "arrow-ipc", "arrow-json", "arrow-ord", + "arrow-pyarrow", "arrow-row", "arrow-schema", "arrow-select", "arrow-string", - "pyo3", ] [[package]] name = "arrow-arith" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44de76b51473aa888ecd6ad93ceb262fb8d40d1f1154a4df2f069b3590aa7575" +checksum = "f7b3141e0ec5145a22d8694ea8b6d6f69305971c4fa1c1a13ef0195aef2d678b" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", "chrono", - "num", + "num-traits", ] [[package]] name = "arrow-array" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ed77e22744475a9a53d00026cf8e166fe73cf42d89c4c4ae63607ee1cfcc3f" +checksum = "4c8955af33b25f3b175ee10af580577280b4bd01f7e823d94c7cdef7cf8c9aef" dependencies = [ "ahash", "arrow-buffer", @@ -226,47 +223,51 @@ dependencies = [ "chrono", "chrono-tz", "half", - "hashbrown 0.15.3", - "num", + "hashbrown 0.16.1", + "num-complex", + "num-integer", + "num-traits", ] [[package]] name = "arrow-buffer" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0391c96eb58bf7389171d1e103112d3fc3e5625ca6b372d606f2688f1ea4cce" +checksum = "c697ddca96183182f35b3a18e50b9110b11e916d7b7799cbfd4d34662f2c56c2" dependencies = [ "bytes", "half", - "num", + "num-bigint", + "num-traits", ] [[package]] name = "arrow-cast" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39e1d774ece9292697fcbe06b5584401b26bd34be1bec25c33edae65c2420ff" +checksum = "646bbb821e86fd57189c10b4fcdaa941deaf4181924917b0daa92735baa6ada5" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", + "arrow-ord", "arrow-schema", "arrow-select", "atoi", - "base64 0.22.1", + "base64", "chrono", 
"comfy-table", "half", "lexical-core", - "num", + "num-traits", "ryu", ] [[package]] name = "arrow-csv" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9055c972a07bf12c2a827debfd34f88d3b93da1941d36e1d9fee85eebe38a12a" +checksum = "8da746f4180004e3ce7b83c977daf6394d768332349d3d913998b10a120b790a" dependencies = [ "arrow-array", "arrow-cast", @@ -274,41 +275,43 @@ dependencies = [ "chrono", "csv", "csv-core", - "lazy_static", "regex", ] [[package]] name = "arrow-data" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf75ac27a08c7f48b88e5c923f267e980f27070147ab74615ad85b5c5f90473d" +checksum = "1fdd994a9d28e6365aa78e15da3f3950c0fdcea6b963a12fa1c391afb637b304" dependencies = [ "arrow-buffer", "arrow-schema", "half", - "num", + "num-integer", + "num-traits", ] [[package]] name = "arrow-ipc" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a222f0d93772bd058d1268f4c28ea421a603d66f7979479048c429292fac7b2e" +checksum = "abf7df950701ab528bf7c0cf7eeadc0445d03ef5d6ffc151eaae6b38a58feff1" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", + "arrow-select", "flatbuffers", "lz4_flex", + "zstd", ] [[package]] name = "arrow-json" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9085342bbca0f75e8cb70513c0807cc7351f1fbf5cb98192a67d5e3044acb033" +checksum = "0ff8357658bedc49792b13e2e862b80df908171275f8e6e075c460da5ee4bf86" dependencies = [ "arrow-array", "arrow-buffer", @@ -318,19 +321,21 @@ dependencies = [ "chrono", "half", "indexmap", + "itoa", "lexical-core", "memchr", - "num", - "serde", + "num-traits", + "ryu", + "serde_core", "serde_json", "simdutf8", ] [[package]] name = "arrow-ord" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2f1065a5cad7b9efa9e22ce5747ce826aa3855766755d4904535123ef431e7" +checksum = "f7d8f1870e03d4cbed632959498bcc84083b5a24bded52905ae1695bd29da45b" dependencies = [ "arrow-array", "arrow-buffer", @@ -339,11 +344,23 @@ dependencies = [ "arrow-select", ] +[[package]] +name = "arrow-pyarrow" +version = "57.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d18c442b4c266aaf3d7f7dd40fd7ae058cef7f113b00ff0cd8256e1e218ec544" +dependencies = [ + "arrow-array", + "arrow-data", + "arrow-schema", + "pyo3", +] + [[package]] name = "arrow-row" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3703a0e3e92d23c3f756df73d2dc9476873f873a76ae63ef9d3de17fda83b2d8" +checksum = "18228633bad92bff92a95746bbeb16e5fc318e8382b75619dec26db79e4de4c0" dependencies = [ "arrow-array", "arrow-buffer", @@ -354,34 +371,34 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73a47aa0c771b5381de2b7f16998d351a6f4eb839f1e13d48353e17e873d969b" +checksum = "8c872d36b7bf2a6a6a2b40de9156265f0242910791db366a2c17476ba8330d68" dependencies = [ "bitflags", - "serde", + "serde_core", "serde_json", ] [[package]] name = "arrow-select" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b7b85575702b23b85272b01bc1c25a01c9b9852305e5d0078c79ba25d995d4" +checksum = 
"68bf3e3efbd1278f770d67e5dc410257300b161b93baedb3aae836144edcaf4b" dependencies = [ "ahash", "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", - "num", + "num-traits", ] [[package]] name = "arrow-string" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9260fddf1cdf2799ace2b4c2fc0356a9789fa7551e0953e35435536fecefebbd" +checksum = "85e968097061b3c0e9fe3079cf2e703e487890700546b5b0647f60fca1b5a8d8" dependencies = [ "arrow-array", "arrow-buffer", @@ -389,7 +406,7 @@ dependencies = [ "arrow-schema", "arrow-select", "memchr", - "num", + "num-traits", "regex", "regex-syntax", ] @@ -408,19 +425,14 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.19" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" +checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" dependencies = [ - "bzip2 0.5.2", - "flate2", - "futures-core", - "memchr", + "compression-codecs", + "compression-core", "pin-project-lite", "tokio", - "xz2", - "zstd", - "zstd-safe", ] [[package]] @@ -440,18 +452,18 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -471,30 +483,9 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "backtrace" -version = "0.3.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - -[[package]] -name = "base64" -version = "0.21.7" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "base64" @@ -504,9 +495,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -518,9 +509,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "blake2" @@ -533,15 +524,16 @@ dependencies = [ [[package]] name = 
"blake3" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", + "cpufeatures", ] [[package]] @@ -553,11 +545,36 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bon" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d13a61f2963b88eef9c1be03df65d42f6996dfeac1054870d950fcf66686f83" +dependencies = [ + "bon-macros", + "rustversion", +] + +[[package]] +name = "bon-macros" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d314cc62af2b6b0c65780555abb4d02a03dd3b799cd42419044f0c38d99738c0" +dependencies = [ + "darling", + "ident_case", + "prettyplease", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.116", +] + [[package]] name = "brotli" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -576,9 +593,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "c81d250916401487680ed13b8b675660281dcfc3ab0121fe44c94bcab9eae2fb" [[package]] name = "byteorder" @@ -588,45 +605,26 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "bzip2" -version = "0.4.4" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" -dependencies = [ - "bzip2-sys", - "libc", -] +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "bzip2" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49ecfb22d906f800d4fe833b6282cf4dc1c298f5057ca0b5445e5c209735ca47" -dependencies = [ - "bzip2-sys", -] - -[[package]] -name = "bzip2-sys" -version = "0.1.13+1.0.8" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +checksum = "f3a53fac24f34a81bc9954b5d6cfce0c21e18ec6959f44f56e8e90e4bb7c346c" dependencies = [ - "cc", - "pkg-config", + "libbz2-rs-sys", ] [[package]] name = "cc" -version = "1.2.23" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4ac86a9e5bc1e2b3449ab9d7d3a6a405e3d1bb28d7b9be8614f55846ae3766" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -634,9 +632,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = 
"9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -646,11 +644,10 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "serde", @@ -659,44 +656,54 @@ dependencies = [ [[package]] name = "chrono-tz" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efdce149c370f133a071ca8ef6ea340b7b88748ab0810097a9e2976eaa34b4f3" +checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3" dependencies = [ "chrono", - "chrono-tz-build", "phf", ] -[[package]] -name = "chrono-tz-build" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402" -dependencies = [ - "parse-zoneinfo", - "phf_codegen", -] - [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] [[package]] name = "comfy-table" -version = "7.1.4" +version = "7.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" dependencies = [ "unicode-segmentation", "unicode-width", ] +[[package]] +name = "compression-codecs" +version = "0.4.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7" +dependencies = [ + "bzip2", + "compression-core", + "flate2", + "liblzma", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "const-random" version = "0.1.18" @@ -712,28 +719,31 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "tiny-keccak", ] [[package]] name = "const_panic" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" +checksum = "e262cdaac42494e3ae34c43969f9cdeb7da178bdb4b66fa6a1ea2edb4c8ae652" +dependencies = [ + "typewit", +] [[package]] name = "constant_time_eq" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -745,29 +755,20 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" -[[package]] -name = "core2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" -dependencies = [ - "memchr", -] - [[package]] name = "core_extensions" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c71dc07c9721607e7a16108336048ee978c3a8b129294534272e8bac96c0ee" +checksum = "42bb5e5d0269fd4f739ea6cedaf29c16d81c27a7ce7582008e90eb50dcd57003" dependencies = [ "core_extensions_proc_macros", ] [[package]] name = "core_extensions_proc_macros" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f3b219d28b6e3b4ac87bc1fc522e0803ab22e055da177bff0068c4150c61a6" +checksum = "533d38ecd2709b7608fb8e18e4504deb99e9a72879e6aa66373a76d8dc4259ea" [[package]] name = "cpufeatures" @@ -780,9 +781,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -804,46 +805,84 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", ] +[[package]] +name = "cstr" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68523903c8ae5aacfa32a0d9ae60cadeb764e1da14ee0d26b1f3089f13a54636" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "csv" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" dependencies = [ "csv-core", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "csv-core" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" dependencies = [ "memchr", ] [[package]] -name = "dary_heap" -version = "0.3.7" +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" 
+dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.116", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.116", +] [[package]] name = "dashmap" @@ -861,22 +900,22 @@ dependencies = [ [[package]] name = "datafusion" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6cb8c2c81eada072059983657d6c9caf3fddefc43b4a65551d243253254a96" +checksum = "d12ee9fdc6cdb5898c7691bb994f0ba606c4acc93a2258d78bb9f26ff8158bb3" dependencies = [ "arrow", - "arrow-ipc", "arrow-schema", "async-trait", "bytes", - "bzip2 0.5.2", + "bzip2", "chrono", "datafusion-catalog", "datafusion-catalog-listing", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", + "datafusion-datasource-arrow", "datafusion-datasource-avro", "datafusion-datasource-csv", "datafusion-datasource-json", @@ -891,6 +930,7 @@ dependencies = [ "datafusion-functions-window", "datafusion-optimizer", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", "datafusion-physical-optimizer", "datafusion-physical-plan", @@ -898,27 +938,27 @@ dependencies = [ "datafusion-sql", "flate2", "futures", - "itertools 0.14.0", + "itertools", + "liblzma", "log", "object_store", "parking_lot", "parquet", - "rand 0.9.1", + "rand", "regex", "sqlparser", "tempfile", "tokio", "url", "uuid", - "xz2", "zstd", ] [[package]] name = "datafusion-catalog" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7be8d1b627843af62e447396db08fe1372d882c0eb8d0ea655fd1fbc33120ee" +checksum = "462dc9ef45e5d688aeaae49a7e310587e81b6016b9d03bace5626ad0043e5a9e" dependencies = [ "arrow", "async-trait", @@ -931,9 +971,8 @@ dependencies = [ "datafusion-physical-expr", "datafusion-physical-plan", "datafusion-session", - "datafusion-sql", "futures", - "itertools 0.14.0", + "itertools", "log", "object_store", "parking_lot", @@ -942,9 +981,9 @@ dependencies = [ [[package]] name = "datafusion-catalog-listing" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ab16c5ae43f65ee525fc493ceffbc41f40dee38b01f643dfcfc12959e92038" +checksum = "1b96dbf1d728fc321817b744eb5080cdd75312faa6980b338817f68f3caa4208" dependencies = [ "arrow", "async-trait", @@ -954,28 +993,28 @@ dependencies = [ "datafusion-execution", "datafusion-expr", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", "datafusion-physical-plan", - "datafusion-session", "futures", + "itertools", "log", "object_store", - "tokio", ] [[package]] name = "datafusion-common" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3d56b2ac9f476b93ca82e4ef5fb00769c8a3f248d12b4965af7e27635fa7e12" +checksum = "3237a6ff0d2149af4631290074289cae548c9863c885d821315d54c6673a074a" dependencies = [ "ahash", 
"apache-avro", "arrow", "arrow-ipc", - "base64 0.22.1", + "chrono", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", "libc", "log", @@ -990,9 +1029,9 @@ dependencies = [ [[package]] name = "datafusion-common-runtime" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16015071202d6133bc84d72756176467e3e46029f3ce9ad2cb788f9b1ff139b2" +checksum = "70b5e34026af55a1bfccb1ef0a763cf1f64e77c696ffcf5a128a278c31236528" dependencies = [ "futures", "log", @@ -1001,81 +1040,97 @@ dependencies = [ [[package]] name = "datafusion-datasource" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b77523c95c89d2a7eb99df14ed31390e04ab29b43ff793e562bdc1716b07e17b" +checksum = "1b2a6be734cc3785e18bbf2a7f2b22537f6b9fb960d79617775a51568c281842" dependencies = [ "arrow", "async-compression", "async-trait", "bytes", - "bzip2 0.5.2", + "bzip2", "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", "flate2", "futures", "glob", - "itertools 0.14.0", + "itertools", + "liblzma", "log", "object_store", - "parquet", - "rand 0.9.1", - "tempfile", + "rand", "tokio", "tokio-util", "url", - "xz2", "zstd", ] +[[package]] +name = "datafusion-datasource-arrow" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1739b9b07c9236389e09c74f770e88aff7055250774e9def7d3f4f56b3dcc7be" +dependencies = [ + "arrow", + "arrow-ipc", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "itertools", + "object_store", + "tokio", +] + [[package]] name = "datafusion-datasource-avro" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1371cb4ef13c2e3a15685d37a07398cf13e3b0a85e705024b769fc4c511f5fef" +checksum = "828088c2fb681cc0e06fb42f541f76c82a0c10278f9fd6334e22c8d1e3574ee7" dependencies = [ "apache-avro", "arrow", "async-trait", "bytes", - "chrono", - "datafusion-catalog", "datafusion-common", "datafusion-datasource", - "datafusion-execution", - "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", "futures", "num-traits", "object_store", - "tokio", ] [[package]] name = "datafusion-datasource-csv" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40d25c5e2c0ebe8434beeea997b8e88d55b3ccc0d19344293f2373f65bc524fc" +checksum = "61c73bc54b518bbba7c7650299d07d58730293cfba4356f6f428cc94c20b7600" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", @@ -1087,73 +1142,71 @@ dependencies = [ [[package]] name = "datafusion-datasource-json" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc6959e1155741ab35369e1dc7673ba30fc45ed568fad34c01b7cb1daeb4d4c" 
+checksum = "37812c8494c698c4d889374ecfabbff780f1f26d9ec095dd1bddfc2a8ca12559" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", "futures", "object_store", - "serde_json", "tokio", ] [[package]] name = "datafusion-datasource-parquet" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7a6afdfe358d70f4237f60eaef26ae5a1ce7cb2c469d02d5fc6c7fd5d84e58b" +checksum = "2210937ecd9f0e824c397e73f4b5385c97cd1aff43ab2b5836fcfd2d321523fb" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-functions-aggregate", + "datafusion-functions-aggregate-common", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", - "datafusion-physical-optimizer", "datafusion-physical-plan", + "datafusion-pruning", "datafusion-session", "futures", - "itertools 0.14.0", + "itertools", "log", "object_store", "parking_lot", "parquet", - "rand 0.9.1", "tokio", ] [[package]] name = "datafusion-doc" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bcd8a3e3e3d02ea642541be23d44376b5d5c37c2938cce39b3873cdf7186eea" +checksum = "2c825f969126bc2ef6a6a02d94b3c07abff871acf4d6dd759ce1255edb7923ce" [[package]] name = "datafusion-execution" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670da1d45d045eee4c2319b8c7ea57b26cf48ab77b630aaa50b779e406da476a" +checksum = "fa03ef05a2c2f90dd6c743e3e111078e322f4b395d20d4b4d431a245d79521ae" dependencies = [ "arrow", + "async-trait", + "chrono", "dashmap", "datafusion-common", "datafusion-expr", @@ -1161,18 +1214,19 @@ dependencies = [ "log", "object_store", "parking_lot", - "rand 0.9.1", + "rand", "tempfile", "url", ] [[package]] name = "datafusion-expr" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a577f64bdb7e2cc4043cd97f8901d8c504711fde2dbcb0887645b00d7c660b" +checksum = "ef33934c1f98ee695cc51192cc5f9ed3a8febee84fdbcd9131bf9d3a9a78276f" dependencies = [ "arrow", + "async-trait", "chrono", "datafusion-common", "datafusion-doc", @@ -1181,6 +1235,7 @@ dependencies = [ "datafusion-functions-window-common", "datafusion-physical-expr-common", "indexmap", + "itertools", "paste", "recursive", "serde_json", @@ -1189,32 +1244,40 @@ dependencies = [ [[package]] name = "datafusion-expr-common" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b7916806ace3e9f41884f230f7f38ebf0e955dfbd88266da1826f29a0b9a6a" +checksum = "000c98206e3dd47d2939a94b6c67af4bfa6732dd668ac4fafdbde408fd9134ea" dependencies = [ "arrow", "datafusion-common", "indexmap", - "itertools 0.14.0", + "itertools", "paste", ] [[package]] name = "datafusion-ffi" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980cca31de37f5dadf7ea18e4ffc2b6833611f45bed5ef9de0831d2abb50f1ef" +checksum = "30f57f7f63a25a0b78b3f2a5e18c0ecbd54851b64064ac0d5a9eb05efd5586d2" dependencies = [ "abi_stable", 
"arrow", "arrow-schema", "async-ffi", "async-trait", - "datafusion", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", "datafusion-proto", "datafusion-proto-common", + "datafusion-session", "futures", "log", "prost", @@ -1224,16 +1287,17 @@ dependencies = [ [[package]] name = "datafusion-functions" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb31c9dc73d3e0c365063f91139dc273308f8a8e124adda9898db8085d68357" +checksum = "379b01418ab95ca947014066248c22139fe9af9289354de10b445bd000d5d276" dependencies = [ "arrow", "arrow-buffer", - "base64 0.22.1", + "base64", "blake2", "blake3", "chrono", + "chrono-tz", "datafusion-common", "datafusion-doc", "datafusion-execution", @@ -1241,10 +1305,11 @@ dependencies = [ "datafusion-expr-common", "datafusion-macros", "hex", - "itertools 0.14.0", + "itertools", "log", "md-5", - "rand 0.9.1", + "num-traits", + "rand", "regex", "sha2", "unicode-segmentation", @@ -1253,9 +1318,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb72c6940697eaaba9bd1f746a697a07819de952b817e3fb841fb75331ad5d4" +checksum = "fd00d5454ba4c3f8ebbd04bd6a6a9dc7ced7c56d883f70f2076c188be8459e4c" dependencies = [ "ahash", "arrow", @@ -1274,9 +1339,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate-common" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fdc54656659e5ecd49bf341061f4156ab230052611f4f3609612a0da259696" +checksum = "aec06b380729a87210a4e11f555ec2d729a328142253f8d557b87593622ecc9f" dependencies = [ "ahash", "arrow", @@ -1287,9 +1352,9 @@ dependencies = [ [[package]] name = "datafusion-functions-nested" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad94598e3374938ca43bca6b675febe557e7a14eb627d617db427d70d65118b" +checksum = "904f48d45e0f1eb7d0eb5c0f80f2b5c6046a85454364a6b16a2e0b46f62e7dff" dependencies = [ "arrow", "arrow-ord", @@ -1297,20 +1362,22 @@ dependencies = [ "datafusion-doc", "datafusion-execution", "datafusion-expr", + "datafusion-expr-common", "datafusion-functions", "datafusion-functions-aggregate", + "datafusion-functions-aggregate-common", "datafusion-macros", "datafusion-physical-expr-common", - "itertools 0.14.0", + "itertools", "log", "paste", ] [[package]] name = "datafusion-functions-table" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2fc6c2946da5cab8364fb28b5cac3115f0f3a87960b235ed031c3f7e2e639b" +checksum = "e9a0d20e2b887e11bee24f7734d780a2588b925796ac741c3118dd06d5aa77f0" dependencies = [ "arrow", "async-trait", @@ -1324,9 +1391,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5746548a8544870a119f556543adcd88fe0ba6b93723fe78ad0439e0fbb8b4" +checksum = "d3414b0a07e39b6979fe3a69c7aa79a9f1369f1d5c8e52146e66058be1b285ee" dependencies = [ "arrow", "datafusion-common", @@ -1342,9 +1409,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window-common" -version 
= "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbe9404382cda257c434f22e13577bee7047031dfdb6216dd5e841b9465e6fe" +checksum = "5bf2feae63cd4754e31add64ce75cae07d015bce4bb41cd09872f93add32523a" dependencies = [ "datafusion-common", "datafusion-physical-expr-common", @@ -1352,28 +1419,29 @@ dependencies = [ [[package]] name = "datafusion-macros" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dce50e3b637dab0d25d04d2fe79dfdca2b257eabd76790bffd22c7f90d700c8" +checksum = "c4fe888aeb6a095c4bcbe8ac1874c4b9a4c7ffa2ba849db7922683ba20875aaf" dependencies = [ - "datafusion-expr", + "datafusion-doc", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "datafusion-optimizer" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfaacf06445dc3bbc1e901242d2a44f2cae99a744f49f3fefddcee46240058" +checksum = "8a6527c063ae305c11be397a86d8193936f4b84d137fe40bd706dfc178cf733c" dependencies = [ "arrow", "chrono", "datafusion-common", "datafusion-expr", + "datafusion-expr-common", "datafusion-physical-expr", "indexmap", - "itertools 0.14.0", + "itertools", "log", "recursive", "regex", @@ -1382,9 +1450,9 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1908034a89d7b2630898e06863583ae4c00a0dd310c1589ca284195ee3f7f8a6" +checksum = "0bb028323dd4efd049dd8a78d78fe81b2b969447b39c51424167f973ac5811d9" dependencies = [ "ahash", "arrow", @@ -1394,33 +1462,53 @@ dependencies = [ "datafusion-functions-aggregate-common", "datafusion-physical-expr-common", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", - "itertools 0.14.0", - "log", + "itertools", + "parking_lot", "paste", - "petgraph 0.8.2", + "petgraph", + "recursive", + "tokio", ] [[package]] -name = "datafusion-physical-expr-common" -version = "48.0.0" +name = "datafusion-physical-expr-adapter" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b7a12dd59ea07614b67dbb01d85254fbd93df45bcffa63495e11d3bdf847df" +checksum = "78fe0826aef7eab6b4b61533d811234a7a9e5e458331ebbf94152a51fc8ab433" dependencies = [ - "ahash", "arrow", "datafusion-common", - "datafusion-expr-common", - "hashbrown 0.14.5", - "itertools 0.14.0", + "datafusion-expr", + "datafusion-functions", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "itertools", +] + +[[package]] +name = "datafusion-physical-expr-common" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfccd388620734c661bd8b7ca93c44cdd59fecc9b550eea416a78ffcbb29475f" +dependencies = [ + "ahash", + "arrow", + "chrono", + "datafusion-common", + "datafusion-expr-common", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "parking_lot", ] [[package]] name = "datafusion-physical-optimizer" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4371cc4ad33978cc2a8be93bd54a232d3f2857b50401a14631c0705f3f910aae" +checksum = "bde5fa10e73259a03b705d5fddc136516814ab5f441b939525618a4070f5a059" dependencies = [ "arrow", "datafusion-common", @@ -1430,35 +1518,36 @@ dependencies = [ "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", - "itertools 0.14.0", - "log", + "datafusion-pruning", + 
"itertools", "recursive", ] [[package]] name = "datafusion-physical-plan" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc47bc33025757a5c11f2cd094c5b6b5ed87f46fa33c023e6fdfa25fcbfade23" +checksum = "0e1098760fb29127c24cc9ade3277051dc73c9ed0ac0131bd7bcd742e0ad7470" dependencies = [ "ahash", "arrow", "arrow-ord", "arrow-schema", "async-trait", - "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", + "datafusion-functions", + "datafusion-functions-aggregate-common", "datafusion-functions-window-common", "datafusion-physical-expr", "datafusion-physical-expr-common", "futures", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", - "itertools 0.14.0", + "itertools", "log", "parking_lot", "pin-project-lite", @@ -1467,15 +1556,26 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8f5d9acd7d96e3bf2a7bb04818373cab6e51de0356e3694b94905fee7b4e8b6" +checksum = "0cf75daf56aa6b1c6867cc33ff0fb035d517d6d06737fd355a3e1ef67cba6e7a" dependencies = [ "arrow", "chrono", - "datafusion", + "datafusion-catalog", + "datafusion-catalog-listing", "datafusion-common", + "datafusion-datasource", + "datafusion-datasource-arrow", + "datafusion-datasource-csv", + "datafusion-datasource-json", + "datafusion-datasource-parquet", + "datafusion-execution", "datafusion-expr", + "datafusion-functions-table", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", "datafusion-proto-common", "object_store", "prost", @@ -1483,33 +1583,56 @@ dependencies = [ [[package]] name = "datafusion-proto-common" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ecb5ec152c4353b60f7a5635489834391f7a291d2b39a4820cd469e318b78e" +checksum = "12a0cb3cce232a3de0d14ef44b58a6537aeb1362cfb6cf4d808691ddbb918956" dependencies = [ "arrow", "datafusion-common", "prost", ] +[[package]] +name = "datafusion-pruning" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d0fef4201777b52951edec086c21a5b246f3c82621569ddb4a26f488bc38a9" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-datasource", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "itertools", + "log", +] + [[package]] name = "datafusion-python" -version = "47.0.0" +version = "51.0.0" dependencies = [ "arrow", + "arrow-select", "async-trait", + "cstr", "datafusion", "datafusion-ffi", "datafusion-proto", "datafusion-substrait", "futures", + "log", "mimalloc", "object_store", + "parking_lot", "prost", "prost-types", "pyo3", "pyo3-async-runtimes", "pyo3-build-config", + "pyo3-log", + "serde_json", "tokio", "url", "uuid", @@ -1517,36 +1640,27 @@ dependencies = [ [[package]] name = "datafusion-session" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7485da32283985d6b45bd7d13a65169dcbe8c869e25d01b2cfbc425254b4b49" +checksum = "f71f1e39e8f2acbf1c63b0e93756c2e970a64729dab70ac789587d6237c4fde0" dependencies = [ - "arrow", "async-trait", - "dashmap", "datafusion-common", - "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", "datafusion-physical-plan", - "datafusion-sql", - "futures", - 
"itertools 0.14.0", - "log", - "object_store", "parking_lot", - "tokio", ] [[package]] name = "datafusion-sql" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a466b15632befddfeac68c125f0260f569ff315c6831538cbb40db754134e0df" +checksum = "f44693cfcaeb7a9f12d71d1c576c3a6dc025a12cef209375fa2d16fb3b5670ee" dependencies = [ "arrow", "bigdecimal", + "chrono", "datafusion-common", "datafusion-expr", "indexmap", @@ -1558,21 +1672,23 @@ dependencies = [ [[package]] name = "datafusion-substrait" -version = "48.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f3973b1a4f6e9ee7fd99a22d58e1c06e6723a28dc911a60df575974c8339aa" +checksum = "6042adacd0bd64e56c22f6a7f9ce0ce1793dd367c899d868179d029f110d9215" dependencies = [ "async-recursion", "async-trait", "chrono", "datafusion", - "itertools 0.14.0", + "half", + "itertools", "object_store", "pbjson-types", "prost", "substrait", "tokio", "url", + "uuid", ] [[package]] @@ -1594,14 +1710,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "dyn-clone" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "either" @@ -1617,12 +1733,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1631,6 +1747,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + [[package]] name = "fixedbitset" version = "0.5.7" @@ -1639,9 +1761,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flatbuffers" -version = "25.2.10" +version = "25.12.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1045398c1bfd89168b5fd3f1fc11f6e70b34f6f66300c87d44d3de849463abf1" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" dependencies = [ "bitflags", "rustc_version", @@ -1649,13 +1771,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", - "libz-rs-sys", "miniz_oxide", + "zlib-rs", ] [[package]] @@ -1670,20 +1792,26 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = 
"0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1696,9 +1824,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1706,15 +1834,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1723,38 +1851,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", 
"futures-core", @@ -1764,7 +1892,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -1789,48 +1916,55 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "js-sys", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.31.1" +name = "getrandom" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", + "wasip3", +] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "h2" -version = "0.4.10" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -1847,13 +1981,14 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", "num-traits", + "zerocopy", ] [[package]] @@ -1861,20 +1996,25 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "ahash", - "allocator-api2", + "foldhash 0.1.5", ] [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.2.0", ] [[package]] @@ -1891,12 +2031,11 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -1931,25 +2070,27 @@ checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", + "futures-core", "h2", "http", "http-body", "httparse", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -1957,11 +2098,10 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", @@ -1975,17 +2115,20 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.11" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ + "base64", "bytes", "futures-channel", "futures-util", "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2", "tokio", @@ -1995,9 +2138,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2019,9 +2162,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", @@ -2032,9 +2175,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -2045,11 +2188,10 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", 
"icu_collections", "icu_normalizer_data", "icu_properties", @@ -2060,42 +2202,38 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ - "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", - "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -2103,11 +2241,23 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2126,19 +2276,24 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] name = "indoc" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" +checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706" +dependencies = [ + "rustversion", +] [[package]] name = "integer-encoding" @@ -2153,12 +2308,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] -name = "itertools" -version = "0.13.0" +name = "iri-string" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ - "either", + "memchr", + "serde", ] [[package]] @@ -2172,41 +2328,41 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] -name = "lazy_static" -version = "1.5.0" +name = "leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "lexical-core" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b765c31809609075565a70b4b71402281283aeda7ecaf4818ac14a7b2ade8958" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" dependencies = [ "lexical-parse-float", "lexical-parse-integer", @@ -2217,84 +2373,59 @@ dependencies = [ [[package]] name = "lexical-parse-float" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de6f9cb01fb0b08060209a057c048fcbab8717b4c1ecd2eac66ebfe39a65b0f2" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" dependencies = [ "lexical-parse-integer", "lexical-util", - "static_assertions", ] [[package]] name = "lexical-parse-integer" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72207aae22fc0a121ba7b6d479e42cbfea549af1479c3f3a4f12c70dd66df12e" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" dependencies = [ "lexical-util", - "static_assertions", ] [[package]] name = "lexical-util" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a82e24bf537fd24c177ffbbdc6ebcc8d54732c35b50a3f28cc3f4e4c949a0b3" -dependencies = [ - "static_assertions", -] +checksum = "2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" [[package]] name = "lexical-write-float" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5afc668a27f460fb45a81a757b6bf2f43c2d7e30cb5a2dcd3abf294c78d62bd" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" dependencies = [ "lexical-util", "lexical-write-integer", - "static_assertions", ] [[package]] name = "lexical-write-integer" -version = "1.0.5" +version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "629ddff1a914a836fb245616a7888b62903aae58fa771e1d83943035efa0f978" +checksum = "409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" dependencies = [ "lexical-util", - "static_assertions", ] [[package]] -name = "libc" -version = "0.2.172" +name = "libbz2-rs-sys" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "2c4a545a15244c7d945065b5d392b2d2d7f21526fba56ce51467b06ed445e8f7" [[package]] -name = "libflate" -version = "2.1.0" +name = "libc" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e" -dependencies = [ - "adler32", - "core2", - "crc32fast", - "dary_heap", - "libflate_lz77", -] - -[[package]] -name = "libflate_lz77" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d" -dependencies = [ - "core2", - "hashbrown 0.14.5", - "rle-decode-fast", -] +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -2307,57 +2438,67 @@ dependencies = [ ] [[package]] -name = "libm" -version = "0.2.15" +name = "liblzma" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6033b77c21d1f56deeae8014eb9fbe7bdf1765185a6c508b5ca82eeaed7f899" +dependencies = [ + "liblzma-sys", +] [[package]] -name = "libmimalloc-sys" -version = "0.1.42" +name = "liblzma-sys" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d6fac27761dabcd4ee73571cdb06b7022dc99089acbe5435691edffaac0f4" +checksum = "9f2db66f3268487b5033077f266da6777d057949b8f93c8ad82e441df25e6186" dependencies = [ "cc", "libc", + "pkg-config", ] [[package]] -name = "libz-rs-sys" -version = "0.5.0" +name = "libm" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + +[[package]] +name = "libmimalloc-sys" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6489ca9bd760fe9642d7644e827b0c9add07df89857b0416ee15c1cc1a3b8c5a" +checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870" dependencies = [ - "zlib-rs", + "cc", + "libc", ] [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] 
[[package]] name = "log" -version = "0.4.27" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "lru-slab" @@ -2367,22 +2508,11 @@ checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "lz4_flex" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" -dependencies = [ - "twox-hash 1.6.3", -] - -[[package]] -name = "lzma-sys" -version = "0.1.20" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +checksum = "ab6473172471198271ff72e9379150e9dfd70d8e533e0752a27e515b48dd375e" dependencies = [ - "cc", - "libc", - "pkg-config", + "twox-hash", ] [[package]] @@ -2397,9 +2527,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memoffset" @@ -2412,37 +2542,32 @@ dependencies = [ [[package]] name = "mimalloc" -version = "0.1.46" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995942f432bbb4822a7e9c3faa87a695185b0d09273ba85f097b54f4e458f2af" +checksum = "e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8" dependencies = [ "libmimalloc-sys", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] @@ -2451,20 +2576,6 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.4.6" @@ -2494,28 +2605,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg", 
- "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -2528,21 +2617,21 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] [[package]] name = "object_store" -version = "0.12.1" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ac16b433c0ccf75326388c893d2835ab7457ea35ab8ba5d745c053ef5fa16" +checksum = "fbfbfff40aeccab00ec8a910b57ca8ecf4319b335c542f2edcd19dd25a1e2a00" dependencies = [ "async-trait", - "base64 0.22.1", + "base64", "bytes", "chrono", "form_urlencoded", @@ -2552,19 +2641,19 @@ dependencies = [ "httparse", "humantime", "hyper", - "itertools 0.14.0", + "itertools", "md-5", "parking_lot", "percent-encoding", "quick-xml", - "rand 0.9.1", + "rand", "reqwest", "ring", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", - "thiserror 2.0.12", + "thiserror", "tokio", "tracing", "url", @@ -2581,9 +2670,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" -version = "0.1.6" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "ordered-float" @@ -2596,9 +2685,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -2606,22 +2695,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "parquet" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be7b2d778f6b841d37083ebdf32e33a524acde1266b5884a8ca29bf00dfa1231" +checksum = "6ee96b29972a257b855ff2341b37e61af5f12d6af1158b6dcdb5b31ea07bb3cb" dependencies = [ "ahash", "arrow-array", @@ -2631,17 +2720,18 @@ dependencies = [ "arrow-ipc", "arrow-schema", "arrow-select", - "base64 0.22.1", + "base64", "brotli", "bytes", "chrono", "flate2", "futures", "half", - "hashbrown 0.15.3", + "hashbrown 0.16.1", "lz4_flex", - "num", "num-bigint", + "num-integer", + "num-traits", "object_store", "paste", "seq-macro", @@ -2649,19 +2739,10 @@ dependencies = [ "snap", "thrift", "tokio", - "twox-hash 2.1.0", + "twox-hash", "zstd", ] -[[package]] -name = "parse-zoneinfo" -version = 
"0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" -dependencies = [ - "regex", -] - [[package]] name = "paste" version = "1.0.15" @@ -2670,31 +2751,31 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbjson" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7e6349fa080353f4a597daffd05cb81572a9c031a6d4fff7e504947496fcc68" +checksum = "898bac3fa00d0ba57a4e8289837e965baa2dee8c3749f3b11d45a64b4223d9c3" dependencies = [ - "base64 0.21.7", + "base64", "serde", ] [[package]] name = "pbjson-build" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eea3058763d6e656105d1403cb04e0a41b7bbac6362d413e7c33be0c32279c9" +checksum = "af22d08a625a2213a78dbb0ffa253318c5c79ce3133d32d296655a7bdfb02095" dependencies = [ "heck", - "itertools 0.13.0", + "itertools", "prost", "prost-types", ] [[package]] name = "pbjson-types" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54e5e7bfb1652f95bc361d76f3c780d8e526b134b85417e774166ee941f0887" +checksum = "8e748e28374f10a330ee3bb9f29b828c0ac79831a32bab65015ad9b661ead526" dependencies = [ "bytes", "chrono", @@ -2707,66 +2788,36 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" -version = "0.7.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", - "indexmap", -] - -[[package]] -name = "petgraph" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54acf3a685220b533e437e264e4d932cfbdc4cc7ec0cd232ed73c08d03b8a7ca" -dependencies = [ - "fixedbitset", - "hashbrown 0.15.3", + "hashbrown 0.15.5", "indexmap", "serde", ] [[package]] name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" dependencies = [ "phf_shared", - "rand 0.8.5", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" dependencies = [ "siphasher", ] @@ -2791,15 
+2842,15 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -2815,28 +2866,28 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -2844,42 +2895,41 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ "heck", - "itertools 0.14.0", + "itertools", "log", "multimap", - "once_cell", - "petgraph 0.7.1", + "petgraph", "prettyplease", "prost", "prost-types", "regex", - "syn 2.0.101", + "syn 2.0.116", "tempfile", ] [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "prost-types" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ "prost", ] @@ -2895,20 +2945,20 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.26" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" +checksum = "3852766467df634d74f0b2d7819bf8dc483a0eb2e3b0f50f756f9cfe8b0d18d8" dependencies = [ + "ar_archive_writer", "cc", ] [[package]] name = "pyo3" -version = "0.24.2" +version = "0.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5203598f366b11a02b13aa20cab591229ff0a89fd121a308a5df751d5fc9219" +checksum = "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" dependencies = [ - "cfg-if", "indoc", "libc", "memoffset", @@ -2922,9 +2972,9 @@ dependencies = [ [[package]] name = "pyo3-async-runtimes" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0b83dc42f9d41f50d38180dad65f0c99763b65a3ff2a81bf351dd35a1df8bf" +checksum = "e6ee6d4cb3e8d5b925f5cdb38da183e0ff18122eb2048d4041c9e7034d026e23" dependencies = [ "futures", "once_cell", @@ -2935,47 +2985,57 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" dependencies = [ - "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78f9cf92ba9c409279bc3305b5409d90db2d2c22392d443a87df3a1adad59e33" +checksum = "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" dependencies = [ "libc", "pyo3-build-config", ] +[[package]] +name = "pyo3-log" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c2ec80932c5c3b2d4fbc578c9b56b2d4502098587edb8bef5b6bfcad43682e" +dependencies = [ + "arc-swap", + "log", + "pyo3", +] + [[package]] name = "pyo3-macros" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b999cb1a6ce21f9a6b147dcf1be9ffedf02e0043aec74dc390f3007047cecd9" +checksum = "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "pyo3-macros-backend" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "822ece1c7e1012745607d5cf0bcb2874769f0f7cb34c4cde03b9358eb9ef911a" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" dependencies = [ "heck", "proc-macro2", "pyo3-build-config", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -2986,9 +3046,9 @@ checksum = "5a651516ddc9168ebd67b24afd085a718be02f8858fe406591b013d101ce2f40" [[package]] name = "quick-xml" -version = "0.37.5" +version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb" +checksum = "b66c2058c55a409d601666cffe35f04333cf1013010882cec174a7467cd4e21c" dependencies = [ "memchr", "serde", @@ -2996,9 +3056,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -3008,7 +3068,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror 2.0.12", + "thiserror", "tokio", "tracing", "web-time", @@ -3016,20 +3076,20 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.12" +version = "0.11.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.3.3", + "getrandom 0.3.4", "lru-slab", - "rand 0.9.1", + "rand", "ring", "rustc-hash", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.12", + "thiserror", "tinyvec", "tracing", "web-time", @@ -3037,90 +3097,60 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", "socket2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" - -[[package]] -name = "rand" -version = "0.8.5" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", + "rand_chacha", + "rand_core", ] [[package]] name = "rand_chacha" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ - "getrandom 0.2.16", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", ] [[package]] @@ -3140,23 +3170,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -3166,9 +3196,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -3177,23 +3207,23 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "regress" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ef7fa9ed0256d64a688a3747d0fef7a88851c18a5e1d57f115f38ec2e09366" +checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.16.1", "memchr", ] @@ -3208,11 +3238,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "futures-core", "futures-util", @@ -3223,17 +3253,13 @@ dependencies = [ "hyper", "hyper-rustls", "hyper-util", - "ipnet", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -3243,13 +3269,13 @@ dependencies = [ "tokio-rustls", "tokio-util", "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "windows-registry", ] [[package]] @@ -3260,24 +3286,12 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", ] -[[package]] -name = "rle-decode-fast" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" - -[[package]] -name = 
"rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustc-hash" version = "2.1.1" @@ -3295,22 +3309,22 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "once_cell", "ring", @@ -3322,9 +3336,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ "openssl-probe", "rustls-pki-types", @@ -3343,9 +3357,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "web-time", "zeroize", @@ -3353,9 +3367,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -3364,15 +3378,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -3385,11 +3399,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3413,7 +3427,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -3424,9 +3438,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "security-framework" -version = "3.2.0" +version = "3.6.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" dependencies = [ "bitflags", "core-foundation", @@ -3437,9 +3451,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" dependencies = [ "core-foundation-sys", "libc", @@ -3447,11 +3461,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -3462,31 +3477,42 @@ checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -3497,19 +3523,20 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", + "serde_core", + "zmij", ] [[package]] @@ -3521,7 +3548,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -3566,6 +3593,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + 
[[package]] name = "simdutf8" version = "0.1.5" @@ -3574,24 +3607,21 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "snap" @@ -3601,19 +3631,19 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "socket2" -version = "0.5.9" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] name = "sqlparser" -version = "0.55.0" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4521174166bac1ff04fe16ef4524c70144cd29682a45978978ca3d7f4e0be11" +checksum = "4591acadbcf52f0af60eafbb2c003232b2b4cd8de5f0e9437cb8b1b59046cc0f" dependencies = [ "log", "recursive", @@ -3628,20 +3658,20 @@ checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "stacker" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cddb07e32ddb770749da91081d8d0ac3a16f1a569a18b20348cd371f5dead06b" +checksum = "08d74a23609d509411d10e2176dc2a4346e3b4aea2e7b1869f19fdedbc71c013" dependencies = [ "cc", "cfg-if", @@ -3651,35 +3681,34 @@ dependencies = [ ] [[package]] -name = "static_assertions" -version = "1.1.0" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.26.3" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" [[package]] name = "strum_macros" -version = "0.26.4" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "substrait" -version = "0.56.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de2e20128f2a018dab1cfa30be83ae069219a65968c6f89df66ad124de2397" +checksum = "62fc4b483a129b9772ccb9c3f7945a472112fdd9140da87f8a4e7f1d44e045d0" dependencies = [ "heck", "pbjson", @@ -3696,7 +3725,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "syn 2.0.101", + "syn 2.0.116", "typify", "walkdir", ] @@ -3720,9 +3749,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" dependencies = [ "proc-macro2", "quote", @@ -3746,66 +3775,46 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "target-lexicon" -version = "0.13.2" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" +checksum = "adb6935a6f5c20170eeceb1a3835a49e12e19d792f6dd344ccc76a985ca5a6ca" [[package]] name = "tempfile" -version = "3.20.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.4.1", "once_cell", "rustix", - "windows-sys 0.59.0", -] - -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", + "windows-sys 0.61.2", ] [[package]] name = "thiserror" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" -dependencies = [ - "thiserror-impl 2.0.12", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -3830,9 +3839,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = 
"42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -3840,9 +3849,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -3855,36 +3864,35 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ - "backtrace", "bytes", "libc", "mio", "pin-project-lite", "socket2", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", "tokio", @@ -3892,9 +3900,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -3905,9 +3913,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", @@ -3918,6 +3926,24 @@ dependencies = [ "tower-service", ] +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -3932,9 +3958,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -3943,20 +3969,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", ] @@ -3984,19 +4010,9 @@ checksum = "e78122066b0cb818b8afd08f7ed22f7fdbc3e90815035726f0840d0d26c0747a" [[package]] name = "twox-hash" -version = "1.6.3" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", - "static_assertions", -] - -[[package]] -name = "twox-hash" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7b17f197b3050ba473acf9181f7b1d3b66d1cf7356c6cc57886662276e65908" +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" [[package]] name = "typed-arena" @@ -4005,36 +4021,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" [[package]] -name = "typed-builder" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06fbd5b8de54c5f7c91f6fe4cebb949be2125d7758e630bb58b1d831dbce600" -dependencies = [ - "typed-builder-macro", -] - -[[package]] -name = "typed-builder-macro" -version = "0.19.1" +name = "typenum" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9534daa9fd3ed0bd911d462a37f172228077e7abf18c18a5f67199d959205f8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] -name = "typenum" -version = "1.18.0" +name = "typewit" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71" [[package]] name = "typify" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6c647a34e851cf0260ccc14687f17cdcb8302ff1a8a687a24b97ca0f82406f" +checksum = "e6d5bcc6f62eb1fa8aa4098f39b29f93dcb914e17158b76c50360911257aa629" dependencies = [ "typify-impl", "typify-macro", @@ -4042,9 +4044,9 @@ dependencies = [ [[package]] name = "typify-impl" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741b7f1e2e1338c0bee5ad5a7d3a9bbd4e24c33765c08b7691810e68d879365d" +checksum = "a1eb359f7ffa4f9ebe947fa11a1b2da054564502968db5f317b7e37693cb2240" dependencies = [ "heck", "log", @@ -4055,16 +4057,16 @@ dependencies = [ "semver", "serde", "serde_json", - "syn 2.0.101", - "thiserror 2.0.12", + "syn 2.0.116", + "thiserror", "unicode-ident", ] [[package]] name = "typify-macro" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7560adf816a1e8dad7c63d8845ef6e31e673e39eab310d225636779230cbedeb" +checksum = 
"911c32f3c8514b048c1b228361bebb5e6d73aeec01696e8cc0e82e2ffef8ab7a" dependencies = [ "proc-macro2", "quote", @@ -4073,15 +4075,15 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream", - "syn 2.0.101", + "syn 2.0.116", "typify-impl", ] [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-segmentation" @@ -4091,9 +4093,15 @@ checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unindent" @@ -4115,13 +4123,14 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] @@ -4132,13 +4141,13 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.17.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.4.1", "js-sys", - "serde", + "serde_core", "wasm-bindgen", ] @@ -4169,52 +4178,49 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = "wasip2" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] -name = "wasm-bindgen" -version = "0.2.100" +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", + "wit-bindgen", ] [[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" +name = "wasm-bindgen" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.101", + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -4223,9 +4229,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4233,26 +4239,48 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.101", - "wasm-bindgen-backend", + "syn 2.0.116", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + [[package]] name = "wasm-streams" version = "0.4.2" @@ -4266,11 +4294,23 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -4304,11 +4344,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4319,99 +4359,97 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.1", + "windows-strings", ] [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] -name = "windows-registry" -version = "0.4.0" +name = "windows-result" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-result", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-link", ] [[package]] -name = "windows-result" -version = "0.3.3" +name = "windows-strings" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] [[package]] -name = "windows-strings" -version = "0.3.1" +name = "windows-sys" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-link", + "windows-targets 0.52.6", ] [[package]] -name = "windows-strings" -version = "0.4.1" +name = "windows-sys" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-link", + "windows-targets 0.52.6", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.53.5", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -4432,18 +4470,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -4454,9 +4493,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -4466,9 +4505,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -4478,9 +4517,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -4490,9 +4529,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -4502,9 +4541,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -4514,9 +4553,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -4526,9 +4565,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -4538,41 +4577,110 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" dependencies = [ - "bitflags", + "wit-bindgen-rust-macro", ] [[package]] -name = "writeable" -version = "0.6.1" +name = "wit-bindgen-core" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] [[package]] -name = "xz2" -version = "0.1.7" +name = "wit-bindgen-rust" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" dependencies = [ - "lzma-sys", + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.116", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", ] [[package]] -name = "yoke" -version = "0.8.0" +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.116", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = 
"0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ "stable_deref_trait", "yoke-derive", "zerofrom", @@ -4580,34 +4688,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -4627,21 +4735,21 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -4650,9 +4758,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -4661,20 +4769,26 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "zlib-rs" -version = "0.5.0" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c745c48e1007337ed136dc99df34128b9faa6ed542d80a1c673cf55a6d7236c8" + +[[package]] +name = "zmij" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868b928d7949e09af2f6086dfc1e01936064cc7a819253bce650d4e2a2d63ba8" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" @@ -4696,9 +4810,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 4135e64e2..3e632bafc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,42 +17,72 @@ [package] name = "datafusion-python" -version = "47.0.0" +version = "51.0.0" homepage = "https://datafusion.apache.org/python" repository = "https://github.com/apache/datafusion-python" authors = ["Apache DataFusion "] description = "Apache DataFusion DataFrame and SQL Query Engine" readme = "README.md" license = "Apache-2.0" -edition = "2021" -rust-version = "1.78" -include = ["/src", "/datafusion", "/LICENSE.txt", "pyproject.toml", "Cargo.toml", "Cargo.lock"] +edition = "2024" +rust-version = "1.88" +include = [ + "/src", + "/datafusion", + "/LICENSE.txt", + "build.rs", + "pyproject.toml", + "Cargo.toml", + "Cargo.lock", +] [features] default = ["mimalloc"] -protoc = [ "datafusion-substrait/protoc" ] +protoc = ["datafusion-substrait/protoc"] substrait = ["dep:datafusion-substrait"] [dependencies] -tokio = { version = "1.45", features = ["macros", "rt", "rt-multi-thread", "sync"] } -pyo3 = { version = "0.24", features = ["extension-module", "abi3", "abi3-py39"] } -pyo3-async-runtimes = { version = "0.24", features = ["tokio-runtime"]} -arrow = { version = "55.1.0", features = ["pyarrow"] } -datafusion = { version = "48.0.0", features = ["avro", "unicode_expressions"] } -datafusion-substrait = { version = "48.0.0", optional = true } -datafusion-proto = { version = "48.0.0" } -datafusion-ffi = { version = "48.0.0" } -prost = "0.13.1" # keep in line with `datafusion-substrait` -uuid = { version = "1.16", features = ["v4"] } -mimalloc = { version = "0.1", optional = true, default-features = false, features = ["local_dynamic_tls"] } -async-trait = "0.1.88" +tokio = { version = "1.47", features = [ + "macros", + "rt", + "rt-multi-thread", + "sync", +] } +pyo3 = { version = "0.26", features = [ + "extension-module", + "abi3", + "abi3-py310", +] } +pyo3-async-runtimes = { version = "0.26", features = ["tokio-runtime"] } +pyo3-log = "0.13.2" +arrow = { version = "57", features = ["pyarrow"] } +arrow-select = { version = "57" } +datafusion = { version = "52", features = ["avro", "unicode_expressions"] } +datafusion-substrait = { version = "52", optional = true } +datafusion-proto = { version = "52" } +datafusion-ffi = { version = "52" } +prost = "0.14.1" # keep in line with `datafusion-substrait` +serde_json = "1" +uuid = { version = "1.18", features = ["v4"] } +mimalloc = { version = "0.1", optional = true, default-features = false, features = [ + "local_dynamic_tls", +] } +async-trait = "0.1.89" futures = "0.3" -object_store = { version = 
"0.12.1", features = ["aws", "gcp", "azure", "http"] } +cstr = "0.2" +object_store = { version = "0.12.4", features = [ + "aws", + "gcp", + "azure", + "http", +] } url = "2" +log = "0.4.27" +parking_lot = "0.12" [build-dependencies] -prost-types = "0.13.1" # keep in line with `datafusion-substrait` -pyo3-build-config = "0.24" +prost-types = "0.14.1" # keep in line with `datafusion-substrait` +pyo3-build-config = "0.26" [lib] name = "datafusion_python" diff --git a/README.md b/README.md index 4f80dbe18..810ac8710 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,10 @@ DataFusion's Python bindings can be used as a foundation for building new data s - Serialize and deserialize query plans in Substrait format. - Experimental support for transpiling SQL queries to DataFrame calls with Polars, Pandas, and cuDF. +For tips on tuning parallelism, see +[Maximizing CPU Usage](docs/source/user-guide/configuration.rst#maximizing-cpu-usage) +in the configuration guide. + ## Example Usage The following example demonstrates running a SQL query against a Parquet file using DataFusion, storing the results @@ -216,6 +220,8 @@ You can verify the installation by running: This assumes that you have rust and cargo installed. We use the workflow recommended by [pyo3](https://github.com/PyO3/pyo3) and [maturin](https://github.com/PyO3/maturin). The Maturin tools used in this workflow can be installed either via `uv` or `pip`. Both approaches should offer the same experience. It is recommended to use `uv` since it has significant performance improvements over `pip`. +Currently for protobuf support either [protobuf](https://protobuf.dev/installation/) or cmake must be installed. + Bootstrap (`uv`): By default `uv` will attempt to build the datafusion python package. For our development we prefer to build manually. This means @@ -225,7 +231,9 @@ and for `uv run` commands the additional parameter `--no-project` ```bash # fetch this repo git clone git@github.com:apache/datafusion-python.git -# create the virtual enviornment +# cd to the repo root +cd datafusion-python/ +# create the virtual environment uv sync --dev --no-install-package datafusion # activate the environment source .venv/bin/activate @@ -236,6 +244,8 @@ Bootstrap (`pip`): ```bash # fetch this repo git clone git@github.com:apache/datafusion-python.git +# cd to the repo root +cd datafusion-python/ # prepare development environment (used to build wheel / install in development) python3 -m venv .venv # activate the venv @@ -265,7 +275,7 @@ needing to activate the virtual environment: ```bash uv run --no-project maturin develop --uv -uv --no-project pytest . +uv run --no-project pytest . ``` ### Running & Installing pre-commit hooks @@ -278,7 +288,9 @@ Our pre-commit hooks can be installed by running `pre-commit install`, which wil your DATAFUSION_PYTHON_ROOT/.github directory and run each time you perform a commit, failing to complete the commit if an offending lint is found allowing you to make changes locally before pushing. -The pre-commit hooks can also be run adhoc without installing them by simply running `pre-commit run --all-files` +The pre-commit hooks can also be run adhoc without installing them by simply running `pre-commit run --all-files`. + +NOTE: the current `pre-commit` hooks require docker, and cmake. See note on protobuf above. 
## Running linters without using pre-commit
diff --git a/benchmarks/db-benchmark/groupby-datafusion.py b/benchmarks/db-benchmark/groupby-datafusion.py
index f9e8d638b..533166695 100644
--- a/benchmarks/db-benchmark/groupby-datafusion.py
+++ b/benchmarks/db-benchmark/groupby-datafusion.py
@@ -18,6 +18,7 @@
import gc
import os
import timeit
+from pathlib import Path

import datafusion as df
import pyarrow as pa
@@ -34,7 +35,7 @@

print("# groupby-datafusion.py", flush=True)

-exec(open("./_helpers/helpers.py").read())
+exec(Path("./_helpers/helpers.py").read_text())


def ans_shape(batches) -> tuple[int, int]:
@@ -65,7 +66,7 @@ def execute(df) -> list:
sql = True

data_name = os.environ["SRC_DATANAME"]
-src_grp = os.path.join("data", data_name + ".csv")
+src_grp = Path("data") / f"{data_name}.csv"
print("loading dataset %s" % src_grp, flush=True)

schema = pa.schema(
diff --git a/benchmarks/db-benchmark/join-datafusion.py b/benchmarks/db-benchmark/join-datafusion.py
index 039868031..3be296c81 100755
--- a/benchmarks/db-benchmark/join-datafusion.py
+++ b/benchmarks/db-benchmark/join-datafusion.py
@@ -18,6 +18,7 @@
import gc
import os
import timeit
+from pathlib import Path

import datafusion as df
from datafusion import col
@@ -26,7 +27,7 @@

print("# join-datafusion.py", flush=True)

-exec(open("./_helpers/helpers.py").read())
+exec(Path("./_helpers/helpers.py").read_text())


def ans_shape(batches) -> tuple[int, int]:
@@ -49,12 +50,12 @@ def ans_shape(batches) -> tuple[int, int]:
on_disk = "FALSE"

data_name = os.environ["SRC_DATANAME"]
-src_jn_x = os.path.join("data", data_name + ".csv")
+src_jn_x = Path("data") / f"{data_name}.csv"
y_data_name = join_to_tbls(data_name)
src_jn_y = [
-    os.path.join("data", y_data_name[0] + ".csv"),
-    os.path.join("data", y_data_name[1] + ".csv"),
-    os.path.join("data", y_data_name[2] + ".csv"),
+    Path("data") / f"{y_data_name[0]}.csv",
+    Path("data") / f"{y_data_name[1]}.csv",
+    Path("data") / f"{y_data_name[2]}.csv",
]
if len(src_jn_y) != 3:
    error_msg = "Something went wrong in preparing files used for join"
diff --git a/benchmarks/max_cpu_usage.py b/benchmarks/max_cpu_usage.py
new file mode 100644
index 000000000..ae73baad6
--- /dev/null
+++ b/benchmarks/max_cpu_usage.py
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Benchmark script showing how to maximize CPU usage.
+
+This script demonstrates one example of tuning DataFusion for improved parallelism
+and CPU utilization. It uses synthetic in-memory data and performs simple aggregation
+operations to showcase the impact of partitioning configuration.
+
+IMPORTANT: This is a simplified example designed to illustrate partitioning concepts.
+Actual performance in your applications may vary significantly based on many factors:
+
+- Type of table providers (Parquet files, CSV, databases, etc.)
+- I/O operations and storage characteristics (local disk, network, cloud storage) +- Query complexity and operation types (joins, window functions, complex expressions) +- Data distribution and size characteristics +- Memory available and hardware specifications +- Network latency for distributed data sources + +It is strongly recommended that you create similar benchmarks tailored to your specific: +- Hardware configuration +- Data sources and formats +- Typical query patterns and workloads +- Performance requirements + +This will give you more accurate insights into how DataFusion configuration options +will affect your particular use case. +""" + +from __future__ import annotations + +import argparse +import multiprocessing +import time + +import pyarrow as pa +from datafusion import SessionConfig, SessionContext, col +from datafusion import functions as f + + +def main(num_rows: int, partitions: int) -> None: + """Run a simple aggregation after repartitioning. + + This function demonstrates basic partitioning concepts using synthetic data. + Real-world performance will depend on your specific data sources, query types, + and system configuration. + """ + # Create some example data (synthetic in-memory data for demonstration) + # Note: Real applications typically work with files, databases, or other + # data sources that have different I/O and distribution characteristics + array = pa.array(range(num_rows)) + batch = pa.record_batch([array], names=["a"]) + + # Configure the session to use a higher target partition count and + # enable automatic repartitioning. + config = ( + SessionConfig() + .with_target_partitions(partitions) + .with_repartition_joins(enabled=True) + .with_repartition_aggregations(enabled=True) + .with_repartition_windows(enabled=True) + ) + ctx = SessionContext(config) + + # Register the input data and repartition manually to ensure that all + # partitions are used. 
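+    # A single in-memory batch would otherwise sit in one partition, leaving
+    # the remaining target partitions empty and most cores idle.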
+ df = ctx.create_dataframe([[batch]]).repartition(partitions) + + start = time.time() + df = df.aggregate([], [f.sum(col("a"))]) + df.collect() + end = time.time() + + print( + f"Processed {num_rows} rows using {partitions} partitions in {end - start:.3f}s" + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--rows", + type=int, + default=1_000_000, + help="Number of rows in the generated dataset", + ) + parser.add_argument( + "--partitions", + type=int, + default=multiprocessing.cpu_count(), + help="Target number of partitions to use", + ) + args = parser.parse_args() + main(args.rows, args.partitions) diff --git a/benchmarks/tpch/tpch.py b/benchmarks/tpch/tpch.py index 2d1bbae5b..ffee5554c 100644 --- a/benchmarks/tpch/tpch.py +++ b/benchmarks/tpch/tpch.py @@ -17,12 +17,13 @@ import argparse import time +from pathlib import Path from datafusion import SessionContext def bench(data_path, query_path) -> None: - with open("results.csv", "w") as results: + with Path("results.csv").open("w") as results: # register tables start = time.time() total_time_millis = 0 @@ -45,7 +46,7 @@ def bench(data_path, query_path) -> None: print("Configuration:\n", ctx) # register tables - with open("create_tables.sql") as f: + with Path("create_tables.sql").open() as f: sql = "" for line in f.readlines(): if line.startswith("--"): @@ -65,7 +66,7 @@ def bench(data_path, query_path) -> None: # run queries for query in range(1, 23): - with open(f"{query_path}/q{query}.sql") as f: + with Path(f"{query_path}/q{query}.sql").open() as f: text = f.read() tmp = text.split(";") queries = [s.strip() for s in tmp if len(s.strip()) > 0] diff --git a/ci/scripts/rust_fmt.sh b/ci/scripts/rust_fmt.sh index 9d8325877..05cb6b208 100755 --- a/ci/scripts/rust_fmt.sh +++ b/ci/scripts/rust_fmt.sh @@ -18,4 +18,4 @@ # under the License. set -ex -cargo fmt --all -- --check +cargo +nightly fmt --all -- --check diff --git a/dev/changelog/48.0.0.md b/dev/changelog/48.0.0.md new file mode 100644 index 000000000..80bc61aca --- /dev/null +++ b/dev/changelog/48.0.0.md @@ -0,0 +1,59 @@ + + +# Apache DataFusion Python 48.0.0 Changelog + +This release consists of 15 commits from 6 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: upgrade df48 dependency [#1143](https://github.com/apache/datafusion-python/pull/1143) (timsaucer) +- feat: Support Parquet writer options [#1123](https://github.com/apache/datafusion-python/pull/1123) (nuno-faria) +- feat: dataframe string formatter [#1170](https://github.com/apache/datafusion-python/pull/1170) (timsaucer) +- feat: collect once during display() in jupyter notebooks [#1167](https://github.com/apache/datafusion-python/pull/1167) (timsaucer) +- feat: python based catalog and schema provider [#1156](https://github.com/apache/datafusion-python/pull/1156) (timsaucer) +- feat: add FFI support for user defined functions [#1145](https://github.com/apache/datafusion-python/pull/1145) (timsaucer) + +**Other:** + +- Release DataFusion 47.0.0 [#1130](https://github.com/apache/datafusion-python/pull/1130) (timsaucer) +- Add a documentation build step in CI [#1139](https://github.com/apache/datafusion-python/pull/1139) (crystalxyz) +- Add DataFrame API Documentation for DataFusion Python [#1132](https://github.com/apache/datafusion-python/pull/1132) (kosiew) +- Add Interruptible Query Execution in Jupyter via KeyboardInterrupt Support [#1141](https://github.com/apache/datafusion-python/pull/1141) (kosiew) +- Support types other than String and Int for partition columns [#1154](https://github.com/apache/datafusion-python/pull/1154) (miclegr) +- Fix signature of `__arrow_c_stream__` [#1168](https://github.com/apache/datafusion-python/pull/1168) (kylebarron) +- Consolidate DataFrame Docs: Merge HTML Rendering Section as Subpage [#1161](https://github.com/apache/datafusion-python/pull/1161) (kosiew) +- Add compression_level support to ParquetWriterOptions and enhance write_parquet to accept full options object [#1169](https://github.com/apache/datafusion-python/pull/1169) (kosiew) +- Simplify HTML Formatter Style Handling Using Script Injection [#1177](https://github.com/apache/datafusion-python/pull/1177) (kosiew) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 6 Tim Saucer + 5 kosiew + 1 Crystal Zhou + 1 Kyle Barron + 1 Michele Gregori + 1 Nuno Faria +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/49.0.0.md b/dev/changelog/49.0.0.md new file mode 100644 index 000000000..008bd43bc --- /dev/null +++ b/dev/changelog/49.0.0.md @@ -0,0 +1,61 @@ + + +# Apache DataFusion Python 49.0.0 Changelog + +This release consists of 16 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Fixed bugs:** + +- fix(build): Include build.rs in published crates [#1199](https://github.com/apache/datafusion-python/pull/1199) (colinmarc) + +**Other:** + +- 48.0.0 Release [#1175](https://github.com/apache/datafusion-python/pull/1175) (timsaucer) +- Update CI rules [#1188](https://github.com/apache/datafusion-python/pull/1188) (timsaucer) +- Fix Python UDAF Accumulator Interface example to Properly Handle State and Updates with List[Array] Types [#1192](https://github.com/apache/datafusion-python/pull/1192) (kosiew) +- chore: Upgrade datafusion to version 49 [#1200](https://github.com/apache/datafusion-python/pull/1200) (nuno-faria) +- Update how to dev instructions [#1179](https://github.com/apache/datafusion-python/pull/1179) (ntjohnson1) +- build(deps): bump object_store from 0.12.2 to 0.12.3 [#1189](https://github.com/apache/datafusion-python/pull/1189) (dependabot[bot]) +- build(deps): bump uuid from 1.17.0 to 1.18.0 [#1202](https://github.com/apache/datafusion-python/pull/1202) (dependabot[bot]) +- build(deps): bump async-trait from 0.1.88 to 0.1.89 [#1203](https://github.com/apache/datafusion-python/pull/1203) (dependabot[bot]) +- build(deps): bump slab from 0.4.10 to 0.4.11 [#1205](https://github.com/apache/datafusion-python/pull/1205) (dependabot[bot]) +- Improved window and aggregate function signature [#1187](https://github.com/apache/datafusion-python/pull/1187) (timsaucer) +- Optional improvements in verification instructions [#1183](https://github.com/apache/datafusion-python/pull/1183) (paleolimbot) +- Improve `show()` output for empty DataFrames [#1208](https://github.com/apache/datafusion-python/pull/1208) (kosiew) +- build(deps): bump actions/download-artifact from 4 to 5 [#1201](https://github.com/apache/datafusion-python/pull/1201) (dependabot[bot]) +- build(deps): bump url from 2.5.4 to 2.5.7 [#1210](https://github.com/apache/datafusion-python/pull/1210) (dependabot[bot]) +- build(deps): bump actions/checkout from 4 to 5 [#1204](https://github.com/apache/datafusion-python/pull/1204) (dependabot[bot]) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 7 dependabot[bot] + 3 Tim Saucer + 2 kosiew + 1 Colin Marc + 1 Dewey Dunnington + 1 Nick + 1 Nuno Faria +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/50.0.0.md b/dev/changelog/50.0.0.md new file mode 100644 index 000000000..c3f09d180 --- /dev/null +++ b/dev/changelog/50.0.0.md @@ -0,0 +1,60 @@ + + +# Apache DataFusion Python 50.0.0 Changelog + +This release consists of 12 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: allow passing a slice to and expression with the [] indexing [#1215](https://github.com/apache/datafusion-python/pull/1215) (timsaucer) + +**Documentation updates:** + +- docs: fix CaseBuilder documentation example [#1225](https://github.com/apache/datafusion-python/pull/1225) (IndexSeek) +- docs: update link to user example for custom table provider [#1224](https://github.com/apache/datafusion-python/pull/1224) (IndexSeek) +- docs: add apache iceberg as datafusion data source [#1240](https://github.com/apache/datafusion-python/pull/1240) (kevinjqliu) + +**Other:** + +- 49.0.0 release [#1211](https://github.com/apache/datafusion-python/pull/1211) (timsaucer) +- Update development guide in README.md [#1213](https://github.com/apache/datafusion-python/pull/1213) (YKoustubhRao) +- Add benchmark script and documentation for maximizing CPU usage in DataFusion Python [#1216](https://github.com/apache/datafusion-python/pull/1216) (kosiew) +- Fixing a few Typos [#1220](https://github.com/apache/datafusion-python/pull/1220) (ntjohnson1) +- Set fail on warning for documentation generation [#1218](https://github.com/apache/datafusion-python/pull/1218) (timsaucer) +- chore: remove redundant error transformation [#1232](https://github.com/apache/datafusion-python/pull/1232) (mesejo) +- Support string column identifiers for sort/aggregate/window and stricter Expr validation [#1221](https://github.com/apache/datafusion-python/pull/1221) (kosiew) +- Prepare for DF50 [#1231](https://github.com/apache/datafusion-python/pull/1231) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 4 Tim Saucer + 2 Tyler White + 2 kosiew + 1 Daniel Mesejo + 1 Kevin Liu + 1 Koustubh Rao + 1 Nick +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/50.1.0.md b/dev/changelog/50.1.0.md new file mode 100644 index 000000000..3b9ff84ff --- /dev/null +++ b/dev/changelog/50.1.0.md @@ -0,0 +1,57 @@ + + +# Apache DataFusion Python 50.1.0 Changelog + +This release consists of 11 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Breaking changes:** + +- Unify Table representations [#1256](https://github.com/apache/datafusion-python/pull/1256) (timsaucer) + +**Implemented enhancements:** + +- feat: expose DataFrame.write_table [#1264](https://github.com/apache/datafusion-python/pull/1264) (timsaucer) +- feat: expose` DataFrame.parse_sql_expr` [#1274](https://github.com/apache/datafusion-python/pull/1274) (milenkovicm) + +**Other:** + +- Update version number, add changelog [#1249](https://github.com/apache/datafusion-python/pull/1249) (timsaucer) +- Fix drop() method to handle quoted column names consistently [#1242](https://github.com/apache/datafusion-python/pull/1242) (H0TB0X420) +- Make Session Context `pyclass` frozen so interior mutability is only managed by rust [#1248](https://github.com/apache/datafusion-python/pull/1248) (ntjohnson1) +- macos-13 is deprecated [#1259](https://github.com/apache/datafusion-python/pull/1259) (kevinjqliu) +- Freeze PyO3 wrappers & introduce interior mutability to avoid PyO3 borrow errors [#1253](https://github.com/apache/datafusion-python/pull/1253) (kosiew) +- chore: update dependencies [#1269](https://github.com/apache/datafusion-python/pull/1269) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 4 Tim Saucer + 2 Siew Kam Onn + 1 H0TB0X420 + 1 Kevin Liu + 1 Marko Milenković + 1 Nick + 1 kosiew +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/51.0.0.md b/dev/changelog/51.0.0.md new file mode 100644 index 000000000..cc157eb0d --- /dev/null +++ b/dev/changelog/51.0.0.md @@ -0,0 +1,74 @@ + + +# Apache DataFusion Python 51.0.0 Changelog + +This release consists of 23 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Breaking changes:** + +- feat: reduce duplicate fields on join [#1184](https://github.com/apache/datafusion-python/pull/1184) (timsaucer) + +**Implemented enhancements:** + +- feat: expose `select_exprs` method on DataFrame [#1271](https://github.com/apache/datafusion-python/pull/1271) (milenkovicm) +- feat: allow DataFrame.filter to accept SQL strings [#1276](https://github.com/apache/datafusion-python/pull/1276) (K-dash) +- feat: add temporary view option for into_view [#1267](https://github.com/apache/datafusion-python/pull/1267) (timsaucer) +- feat: support session token parameter for AmazonS3 [#1275](https://github.com/apache/datafusion-python/pull/1275) (GCHQDeveloper028) +- feat: `with_column` supports SQL expression [#1284](https://github.com/apache/datafusion-python/pull/1284) (milenkovicm) +- feat: Add SQL expression for `repartition_by_hash` [#1285](https://github.com/apache/datafusion-python/pull/1285) (milenkovicm) +- feat: Add SQL expression support for `with_columns` [#1286](https://github.com/apache/datafusion-python/pull/1286) (milenkovicm) + +**Fixed bugs:** + +- fix: use coalesce instead of drop_duplicate_keys for join [#1318](https://github.com/apache/datafusion-python/pull/1318) (mesejo) +- fix: Inconsistent schemas when converting to pyarrow [#1315](https://github.com/apache/datafusion-python/pull/1315) (nuno-faria) + +**Other:** + +- Release 50.1 [#1281](https://github.com/apache/datafusion-python/pull/1281) (timsaucer) +- Update python minimum version to 3.10 [#1296](https://github.com/apache/datafusion-python/pull/1296) (timsaucer) +- chore: update datafusion minor version [#1297](https://github.com/apache/datafusion-python/pull/1297) (timsaucer) +- Enable remaining pylints [#1298](https://github.com/apache/datafusion-python/pull/1298) (timsaucer) +- Add Arrow C streaming, DataFrame iteration, and OOM-safe streaming execution [#1222](https://github.com/apache/datafusion-python/pull/1222) (kosiew) +- Add PyCapsule Type Support and Type Hint Enhancements for AggregateUDF in DataFusion Python Bindings [#1277](https://github.com/apache/datafusion-python/pull/1277) (kosiew) +- Add collect_column to dataframe [#1302](https://github.com/apache/datafusion-python/pull/1302) (timsaucer) +- chore: apply cargo fmt with import organization [#1303](https://github.com/apache/datafusion-python/pull/1303) (timsaucer) +- Feat/parameterized sql queries [#964](https://github.com/apache/datafusion-python/pull/964) (timsaucer) +- Upgrade to Datafusion 51 [#1311](https://github.com/apache/datafusion-python/pull/1311) (nuno-faria) +- minor: resolve build errors after latest merge into main [#1325](https://github.com/apache/datafusion-python/pull/1325) (timsaucer) +- Update build workflow link [#1330](https://github.com/apache/datafusion-python/pull/1330) (timsaucer) +- Do not convert pyarrow scalar values to plain python types when passing as `lit` [#1319](https://github.com/apache/datafusion-python/pull/1319) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 12 Tim Saucer + 4 Marko Milenković + 2 Nuno Faria + 2 kosiew + 1 Daniel Mesejo + 1 GCHQDeveloper028 + 1 𝕂 +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. 
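Two of the enhancements above add SQL-string ergonomics to the DataFrame API. A minimal sketch, assuming the behavior matches the PR titles for #1276 (`filter` accepts SQL strings) and #1284 (`with_column` supports SQL expressions):

```python
# Sketch only; assumes the API matches PR titles #1276 and #1284.
from datafusion import SessionContext

ctx = SessionContext()
df = ctx.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]})

df = df.filter("a > 1")            # SQL string predicate (#1276)
df = df.with_column("c", "a + b")  # SQL expression column (#1284)
df.show()
```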
+
diff --git a/dev/create_license.py b/dev/create_license.py
index 2a67cb8fd..acbf8587c 100644
--- a/dev/create_license.py
+++ b/dev/create_license.py
@@ -20,12 +20,11 @@
 import json
 import subprocess
+from pathlib import Path

-subprocess.check_output(["cargo", "install", "cargo-license"])
 data = subprocess.check_output(
     [
-        "cargo",
-        "license",
+        "cargo-license",
         "--avoid-build-deps",
         "--avoid-dev-deps",
         "--do-not-bundle",
@@ -248,5 +247,5 @@
 result += "------------------\n\n"
 result += f"### {name} {version}\n* source: [{repository}]({repository})\n* license: {license}\n\n"

-with open("LICENSE.txt", "w") as f:
+with Path("LICENSE.txt").open("w") as f:
     f.write(result)
diff --git a/dev/release/README.md b/dev/release/README.md
index 692473930..5d2fae5a7 100644
--- a/dev/release/README.md
+++ b/dev/release/README.md
@@ -81,6 +81,9 @@ Generating changelog content
 ### Update the version number

 The only place you should need to update the version is in the root `Cargo.toml`.
+After updating the toml file, run `cargo update` to update the cargo lock file.
+If you do not want to update all the dependencies, you can instead run `cargo build`
+which should only update the version number for `datafusion-python`.

 ### Tag the Repository
@@ -101,7 +104,7 @@ git push apache 0.8.0-rc1
 ./dev/release/create-tarball.sh 0.8.0 1
 ```

-This will also create the email template to send to the mailing list.
+This will also create the email template to send to the mailing list. Create a draft email using this content, but do not send until after completing the next step.
@@ -157,7 +160,14 @@ Send the email to start the vote.

 ## Verifying a Release

-Running the unit tests against a testpypi release candidate:
+Releases may be verified using `verify-release-candidate.sh`:
+
+```bash
+git clone https://github.com/apache/datafusion-python.git
+dev/release/verify-release-candidate.sh 48.0.0 1
+```
+
+Alternatively, one can run unit tests against a testpypi release candidate:

 ```bash
 # clone a fresh repo
@@ -176,11 +186,11 @@ source .venv/bin/activate

 # install release candidate
 pip install --extra-index-url https://test.pypi.org/simple/ datafusion==40.0.0

-# only dep needed to run tests is pytest
-pip install pytest
+# install test dependencies
+pip install pytest numpy pytest-asyncio

 # run the tests
-pytest --import-mode=importlib python/tests
+pytest --import-mode=importlib python/tests -vv
 ```

 Try running one of the examples from the top-level README, or write some custom Python code to query some available
@@ -233,7 +243,7 @@ git push apache 0.8.0
 ```

 Add the release to https://reporter.apache.org/addrelease.html?datafusion with a version name prefixed with `DATAFUSION-PYTHON`,
 for example `DATAFUSION-PYTHON-31.0.0`.

-The release information is used to generate a template for a board report (see example from Apache Arrow
+The release information is used to generate a template for a board report (see example from Apache Arrow
 [here](https://github.com/apache/arrow/pull/14357)).
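As a concrete version of the suggestion above to write some custom Python code against the release candidate, a minimal smoke test might look like the following sketch (illustrative only; any small query will do):

```python
# Minimal release-candidate smoke test (illustrative).
from datafusion import SessionContext

ctx = SessionContext()
ctx.sql("SELECT 1 + 1 AS answer").show()  # expect one row with answer = 2
```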
### Delete old RCs and Releases

diff --git a/dev/release/check-rat-report.py b/dev/release/check-rat-report.py
index 0c9f4c326..72a35212e 100644
--- a/dev/release/check-rat-report.py
+++ b/dev/release/check-rat-report.py
@@ -21,6 +21,7 @@
 import re
 import sys
 import xml.etree.ElementTree as ET
+from pathlib import Path

 if len(sys.argv) != 3:
     sys.stderr.write("Usage: %s exclude_globs.lst rat_report.xml\n" % sys.argv[0])
@@ -29,7 +30,7 @@
 exclude_globs_filename = sys.argv[1]
 xml_filename = sys.argv[2]

-globs = [line.strip() for line in open(exclude_globs_filename)]
+globs = [line.strip() for line in Path(exclude_globs_filename).open()]

 tree = ET.parse(xml_filename)
 root = tree.getroot()
diff --git a/dev/release/release-tarball.sh b/dev/release/release-tarball.sh
index 8c305a676..2b82d1bac 100755
--- a/dev/release/release-tarball.sh
+++ b/dev/release/release-tarball.sh
@@ -43,6 +43,13 @@ fi
 version=$1
 rc=$2

+read -r -p "Proceed to release tarball for ${version}-rc${rc}? [y/N]: " answer
+answer=${answer:-no}
+if [ "${answer}" != "y" ]; then
+  echo "Cancelled tarball release!"
+  exit 1
+fi
+
 tmp_dir=tmp-apache-datafusion-python-dist

 echo "Recreate temporary directory: ${tmp_dir}"
diff --git a/docs/Makefile b/docs/Makefile
index e65c8e250..49ebae372 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -35,4 +35,4 @@ help:
 # Catch-all target: route all unknown targets to Sphinx using the new
 # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
 %: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) --fail-on-warning
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
index 2bffea9bd..502f1c2a1 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -59,7 +59,7 @@ firefox docs/build/html/index.html

 This documentation is hosted at https://datafusion.apache.org/python

 When the PR is merged to the `main` branch of the DataFusion
-repository, a [github workflow](https://github.com/apache/datafusion-python/blob/main/.github/workflows/docs.yaml) which:
+repository, a [github workflow](https://github.com/apache/datafusion-python/blob/main/.github/workflows/build.yml) runs which:

 1. Builds the html content
 2. Pushes the html content to the [`asf-site`](https://github.com/apache/datafusion-python/tree/asf-site) branch in this repository.
@@ -67,4 +67,4 @@
 The Apache Software Foundation provides https://arrow.apache.org/, which serves content based
 on the configuration in [.asf.yaml](https://github.com/apache/datafusion-python/blob/main/.asf.yaml),
-which specifies the target as https://datafusion.apache.org/python.
\ No newline at end of file
+which specifies the target as https://datafusion.apache.org/python.
diff --git a/docs/source/api/dataframe.rst b/docs/source/api/dataframe.rst
deleted file mode 100644
index a9e9e47c8..000000000
--- a/docs/source/api/dataframe.rst
+++ /dev/null
@@ -1,387 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-
-.. http://www.apache.org/licenses/LICENSE-2.0
-
-..
Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -================= -DataFrame API -================= - -Overview --------- - -The ``DataFrame`` class is the core abstraction in DataFusion that represents tabular data and operations -on that data. DataFrames provide a flexible API for transforming data through various operations such as -filtering, projection, aggregation, joining, and more. - -A DataFrame represents a logical plan that is lazily evaluated. The actual execution occurs only when -terminal operations like ``collect()``, ``show()``, or ``to_pandas()`` are called. - -Creating DataFrames -------------------- - -DataFrames can be created in several ways: - -* From SQL queries via a ``SessionContext``: - - .. code-block:: python - - from datafusion import SessionContext - - ctx = SessionContext() - df = ctx.sql("SELECT * FROM your_table") - -* From registered tables: - - .. code-block:: python - - df = ctx.table("your_table") - -* From various data sources: - - .. code-block:: python - - # From CSV files (see :ref:`io_csv` for detailed options) - df = ctx.read_csv("path/to/data.csv") - - # From Parquet files (see :ref:`io_parquet` for detailed options) - df = ctx.read_parquet("path/to/data.parquet") - - # From JSON files (see :ref:`io_json` for detailed options) - df = ctx.read_json("path/to/data.json") - - # From Avro files (see :ref:`io_avro` for detailed options) - df = ctx.read_avro("path/to/data.avro") - - # From Pandas DataFrame - import pandas as pd - pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) - df = ctx.from_pandas(pandas_df) - - # From Arrow data - import pyarrow as pa - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"] - ) - df = ctx.from_arrow(batch) - - For detailed information about reading from different data sources, see the :doc:`I/O Guide <../user-guide/io/index>`. - For custom data sources, see :ref:`io_custom_table_provider`. - -Common DataFrame Operations ---------------------------- - -DataFusion's DataFrame API offers a wide range of operations: - -.. code-block:: python - - from datafusion import column, literal - - # Select specific columns - df = df.select("col1", "col2") - - # Select with expressions - df = df.select(column("a") + column("b"), column("a") - column("b")) - - # Filter rows - df = df.filter(column("age") > literal(25)) - - # Add computed columns - df = df.with_column("full_name", column("first_name") + literal(" ") + column("last_name")) - - # Multiple column additions - df = df.with_columns( - (column("a") + column("b")).alias("sum"), - (column("a") * column("b")).alias("product") - ) - - # Sort data - df = df.sort(column("age").sort(ascending=False)) - - # Join DataFrames - df = df1.join(df2, on="user_id", how="inner") - - # Aggregate data - from datafusion import functions as f - df = df.aggregate( - [], # Group by columns (empty for global aggregation) - [f.sum(column("amount")).alias("total_amount")] - ) - - # Limit rows - df = df.limit(100) - - # Drop columns - df = df.drop("temporary_column") - -Terminal Operations -------------------- - -To materialize the results of your DataFrame operations: - -.. 
code-block:: python - - # Collect all data as PyArrow RecordBatches - result_batches = df.collect() - - # Convert to various formats - pandas_df = df.to_pandas() # Pandas DataFrame - polars_df = df.to_polars() # Polars DataFrame - arrow_table = df.to_arrow_table() # PyArrow Table - py_dict = df.to_pydict() # Python dictionary - py_list = df.to_pylist() # Python list of dictionaries - - # Display results - df.show() # Print tabular format to console - - # Count rows - count = df.count() - -HTML Rendering in Jupyter -------------------------- - -When working in Jupyter notebooks or other environments that support rich HTML display, -DataFusion DataFrames automatically render as nicely formatted HTML tables. This functionality -is provided by the ``_repr_html_`` method, which is automatically called by Jupyter. - -Basic HTML Rendering -~~~~~~~~~~~~~~~~~~~~ - -In a Jupyter environment, simply displaying a DataFrame object will trigger HTML rendering: - -.. code-block:: python - - # Will display as HTML table in Jupyter - df - - # Explicit display also uses HTML rendering - display(df) - -HTML Rendering Customization ----------------------------- - -DataFusion provides extensive customization options for HTML table rendering through the -``datafusion.html_formatter`` module. - -Configuring the HTML Formatter -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can customize how DataFrames are rendered by configuring the formatter: - -.. code-block:: python - - from datafusion.html_formatter import configure_formatter - - configure_formatter( - max_cell_length=30, # Maximum length of cell content before truncation - max_width=800, # Maximum width of table in pixels - max_height=400, # Maximum height of table in pixels - max_memory_bytes=2 * 1024 * 1024,# Maximum memory used for rendering (2MB) - min_rows_display=10, # Minimum rows to display - repr_rows=20, # Number of rows to display in representation - enable_cell_expansion=True, # Allow cells to be expandable on click - custom_css=None, # Custom CSS to apply - show_truncation_message=True, # Show message when data is truncated - style_provider=None, # Custom style provider class - use_shared_styles=True # Share styles across tables to reduce duplication - ) - -Custom Style Providers -~~~~~~~~~~~~~~~~~~~~~~ - -For advanced styling needs, you can create a custom style provider class: - -.. code-block:: python - - from datafusion.html_formatter import configure_formatter - - class CustomStyleProvider: - def get_cell_style(self) -> str: - return "background-color: #f5f5f5; color: #333; padding: 8px; border: 1px solid #ddd;" - - def get_header_style(self) -> str: - return "background-color: #4285f4; color: white; font-weight: bold; padding: 10px;" - - # Apply custom styling - configure_formatter(style_provider=CustomStyleProvider()) - -Custom Type Formatters -~~~~~~~~~~~~~~~~~~~~~~ - -You can register custom formatters for specific data types: - -.. code-block:: python - - from datafusion.html_formatter import get_formatter - - formatter = get_formatter() - - # Format integers with color based on value - def format_int(value): - return f' 100 else "blue"}">{value}' - - formatter.register_formatter(int, format_int) - - # Format date values - def format_date(value): - return f'{value.isoformat()}' - - formatter.register_formatter(datetime.date, format_date) - -Custom Cell Builders -~~~~~~~~~~~~~~~~~~~~ - -For complete control over cell rendering: - -.. 
code-block:: python - - formatter = get_formatter() - - def custom_cell_builder(value, row, col, table_id): - try: - num_value = float(value) - if num_value > 0: # Positive values get green - return f'{value}' - if num_value < 0: # Negative values get red - return f'{value}' - except (ValueError, TypeError): - pass - - # Default styling for non-numeric or zero values - return f'{value}' - - formatter.set_custom_cell_builder(custom_cell_builder) - -Custom Header Builders -~~~~~~~~~~~~~~~~~~~~~~ - -Similarly, you can customize the rendering of table headers: - -.. code-block:: python - - def custom_header_builder(field): - tooltip = f"Type: {field.type}" - return f'{field.name}' - - formatter.set_custom_header_builder(custom_header_builder) - -Managing Formatter State ------------------------~ - -The HTML formatter maintains global state that can be managed: - -.. code-block:: python - - from datafusion.html_formatter import reset_formatter, reset_styles_loaded_state, get_formatter - - # Reset the formatter to default settings - reset_formatter() - - # Reset only the styles loaded state (useful when styles were loaded but need reloading) - reset_styles_loaded_state() - - # Get the current formatter instance to make changes - formatter = get_formatter() - -Advanced Example: Dashboard-Style Formatting -------------------------------------------~~ - -This example shows how to create a dashboard-like styling for your DataFrames: - -.. code-block:: python - - from datafusion.html_formatter import configure_formatter, get_formatter - - # Define custom CSS - custom_css = """ - .datafusion-table { - font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; - border-collapse: collapse; - width: 100%; - box-shadow: 0 2px 3px rgba(0,0,0,0.1); - } - .datafusion-table th { - position: sticky; - top: 0; - z-index: 10; - } - .datafusion-table tr:hover td { - background-color: #f1f7fa !important; - } - .datafusion-table .numeric-positive { - color: #0a7c00; - } - .datafusion-table .numeric-negative { - color: #d13438; - } - """ - - class DashboardStyleProvider: - def get_cell_style(self) -> str: - return "padding: 8px 12px; border-bottom: 1px solid #e0e0e0;" - - def get_header_style(self) -> str: - return ("background-color: #0078d4; color: white; font-weight: 600; " - "padding: 12px; text-align: left; border-bottom: 2px solid #005a9e;") - - # Apply configuration - configure_formatter( - max_height=500, - enable_cell_expansion=True, - custom_css=custom_css, - style_provider=DashboardStyleProvider(), - max_cell_length=50 - ) - - # Add custom formatters for numbers - formatter = get_formatter() - - def format_number(value): - try: - num = float(value) - cls = "numeric-positive" if num > 0 else "numeric-negative" if num < 0 else "" - return f'{value:,}' if cls else f'{value:,}' - except (ValueError, TypeError): - return str(value) - - formatter.register_formatter(int, format_number) - formatter.register_formatter(float, format_number) - -Best Practices --------------- - -1. **Memory Management**: For large datasets, use ``max_memory_bytes`` to limit memory usage. - -2. **Responsive Design**: Set reasonable ``max_width`` and ``max_height`` values to ensure tables display well on different screens. - -3. **Style Optimization**: Use ``use_shared_styles=True`` to avoid duplicate style definitions when displaying multiple tables. - -4. **Reset When Needed**: Call ``reset_formatter()`` when you want to start fresh with default settings. - -5. 
**Cell Expansion**: Use ``enable_cell_expansion=True`` when cells might contain longer content that users may want to see in full. - -Additional Resources --------------------- - -* :doc:`../user-guide/dataframe` - Complete guide to using DataFrames -* :doc:`../user-guide/io/index` - I/O Guide for reading data from various sources -* :doc:`../user-guide/data-sources` - Comprehensive data sources guide -* :ref:`io_csv` - CSV file reading -* :ref:`io_parquet` - Parquet file reading -* :ref:`io_json` - JSON file reading -* :ref:`io_avro` - Avro file reading -* :ref:`io_custom_table_provider` - Custom table providers -* `API Reference `_ - Full API reference diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst deleted file mode 100644 index 7f58227ca..000000000 --- a/docs/source/api/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -============= -API Reference -============= - -This section provides detailed API documentation for the DataFusion Python library. - -.. toctree:: - :maxdepth: 2 - - dataframe diff --git a/docs/source/conf.py b/docs/source/conf.py index 28db17d35..01813b032 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -91,6 +91,13 @@ def autoapi_skip_member_fn(app, what, name, obj, skip, options) -> bool: # noqa ("method", "datafusion.context.SessionContext.tables"), ("method", "datafusion.dataframe.DataFrame.unnest_column"), ] + # Explicitly skip certain members listed above. These are either + # re-exports, duplicate module-level documentation, deprecated + # API surfaces, or private variables that would otherwise appear + # in the generated docs and cause confusing duplication. + # Keeping this explicit list avoids surprising entries in the + # AutoAPI output and gives us a single place to opt-out items + # when we intentionally hide them from the docs. if (what, name) in skip_contents: skip = True diff --git a/docs/source/contributor-guide/ffi.rst b/docs/source/contributor-guide/ffi.rst index c1f9806b3..5006b0ca4 100644 --- a/docs/source/contributor-guide/ffi.rst +++ b/docs/source/contributor-guide/ffi.rst @@ -15,6 +15,8 @@ .. specific language governing permissions and limitations .. under the License. +.. _ffi: + Python Extensions ================= @@ -34,7 +36,7 @@ as performant as possible and to utilize the features of DataFusion, you may dec your source in Rust and then expose it through `PyO3 `_ as a Python library. At first glance, it may appear the best way to do this is to add the ``datafusion-python`` -crate as a dependency, provide a ``PyTable``, and then to register it with the +crate as a dependency, provide a ``PyTable``, and then to register it with the ``SessionContext``. Unfortunately, this will not work. 
When you produce your code as a Python library and it needs to interact with the DataFusion @@ -137,6 +139,67 @@ and you want to create a sharable FFI counterpart, you could write: let my_provider = MyTableProvider::default(); let ffi_provider = FFI_TableProvider::new(Arc::new(my_provider), false, None); +.. _ffi_pyclass_mutability: + +PyO3 class mutability guidelines +-------------------------------- + +PyO3 bindings should present immutable wrappers whenever a struct stores shared or +interior-mutable state. In practice this means that any ``#[pyclass]`` containing an +``Arc>`` or similar synchronized primitive must opt into ``#[pyclass(frozen)]`` +unless there is a compelling reason not to. + +The :mod:`datafusion` configuration helpers illustrate the preferred pattern. The +``PyConfig`` class in :file:`src/config.rs` stores an ``Arc>`` and is +explicitly frozen so callers interact with configuration state through provided methods +instead of mutating the container directly: + +.. code-block:: rust + + #[pyclass(name = "Config", module = "datafusion", subclass, frozen)] + #[derive(Clone)] + pub(crate) struct PyConfig { + config: Arc>, + } + +The same approach applies to execution contexts. ``PySessionContext`` in +:file:`src/context.rs` stays frozen even though it shares mutable state internally via +``SessionContext``. This ensures PyO3 tracks borrows correctly while Python-facing APIs +clone the inner ``SessionContext`` or return new wrappers instead of mutating the +existing instance in place: + +.. code-block:: rust + + #[pyclass(frozen, name = "SessionContext", module = "datafusion", subclass)] + #[derive(Clone)] + pub struct PySessionContext { + pub ctx: SessionContext, + } + +Occasionally a type must remain mutable—for example when PyO3 attribute setters need to +update fields directly. In these rare cases add an inline justification so reviewers and +future contributors understand why ``frozen`` is unsafe to enable. ``DataTypeMap`` in +:file:`src/common/data_type.rs` includes such a comment because PyO3 still needs to track +field updates: + +.. code-block:: rust + + // TODO: This looks like this needs pyo3 tracking so leaving unfrozen for now + #[derive(Debug, Clone)] + #[pyclass(name = "DataTypeMap", module = "datafusion.common", subclass)] + pub struct DataTypeMap { + #[pyo3(get, set)] + pub arrow_type: PyDataType, + #[pyo3(get, set)] + pub python_type: PythonType, + #[pyo3(get, set)] + pub sql_type: SqlType, + } + +When reviewers encounter a mutable ``#[pyclass]`` without a comment, they should request +an explanation or ask that ``frozen`` be added. Keeping these wrappers frozen by default +helps avoid subtle bugs stemming from PyO3's interior mutability tracking. + If you were interfacing with a library that provided the above ``FFI_TableProvider`` and you needed to turn it back into an ``TableProvider``, you can turn it into a ``ForeignTableProvider`` with implements the ``TableProvider`` trait. @@ -176,7 +239,7 @@ By convention the ``datafusion-python`` library expects a Python object that has ``TableProvider`` PyCapsule to have this capsule accessible by calling a function named ``__datafusion_table_provider__``. You can see a complete working example of how to share a ``TableProvider`` from one python library to DataFusion Python in the -`repository examples folder `_. +`repository examples folder `_. This section has been written using ``TableProvider`` as an example. 
It is the first extension that has been written using this approach and the most thoroughly implemented.
@@ -195,7 +258,7 @@ optimization levels. If you wish to go down this route, there are two approaches
 have identified you can use.

 #. Re-export all of ``datafusion-python`` yourself with your extensions built in.
-#. Carefully synchonize your software releases with the ``datafusion-python`` CI build
+#. Carefully synchronize your software releases with the ``datafusion-python`` CI build
    system so that your libraries use the exact same compiler, features, and optimization
    level.
diff --git a/docs/source/contributor-guide/introduction.rst b/docs/source/contributor-guide/introduction.rst
index 2fba64111..33c2b274c 100644
--- a/docs/source/contributor-guide/introduction.rst
+++ b/docs/source/contributor-guide/introduction.rst
@@ -26,6 +26,10 @@ We welcome and encourage contributions of all kinds, such as:
 In addition to submitting new PRs, we have a healthy tradition of community members reviewing
 each other’s PRs. Doing so is a great way to help the community as well as get more familiar
 with Rust and the relevant codebases.

+Before opening a pull request that touches PyO3 bindings, please review the
+:ref:`PyO3 class mutability guidelines <ffi_pyclass_mutability>` so you can flag missing
+``#[pyclass(frozen)]`` annotations during development and review.
+
 How to develop
 --------------
@@ -43,7 +47,7 @@ Bootstrap:

     # fetch this repo
     git clone git@github.com:apache/datafusion-python.git
-    # create the virtual enviornment
+    # create the virtual environment
     uv sync --dev --no-install-package datafusion
     # activate the environment
     source .venv/bin/activate
diff --git a/docs/source/index.rst b/docs/source/index.rst
index ff1e47280..134d41cb6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -72,11 +72,12 @@ Example
    user-guide/introduction
    user-guide/basics
    user-guide/data-sources
-   user-guide/dataframe
+   user-guide/dataframe/index
    user-guide/common-operations/index
    user-guide/io/index
    user-guide/configuration
    user-guide/sql
+   user-guide/upgrade-guides

 .. _toc.contributor_guide:
@@ -93,5 +94,3 @@ Example
    :hidden:
    :maxdepth: 1
    :caption: API
-
-   api/index
diff --git a/docs/source/user-guide/basics.rst b/docs/source/user-guide/basics.rst
index 2975d9a6b..7c6820461 100644
--- a/docs/source/user-guide/basics.rst
+++ b/docs/source/user-guide/basics.rst
@@ -73,7 +73,7 @@ DataFrames are typically created by calling a method on :py:class:`~datafusion.c
 calling the transformation methods, such as :py:func:`~datafusion.dataframe.DataFrame.filter`,
 :py:func:`~datafusion.dataframe.DataFrame.select`, :py:func:`~datafusion.dataframe.DataFrame.aggregate`,
 and :py:func:`~datafusion.dataframe.DataFrame.limit` to build up a query definition.

-For more details on working with DataFrames, including visualization options and conversion to other formats, see :doc:`dataframe`.
+For more details on working with DataFrames, including visualization options and conversion to other formats, see :doc:`dataframe/index`.

 Expressions
 -----------
diff --git a/docs/source/user-guide/common-operations/expressions.rst b/docs/source/user-guide/common-operations/expressions.rst
index e94e1a6b5..7848b4ee7 100644
--- a/docs/source/user-guide/common-operations/expressions.rst
+++ b/docs/source/user-guide/common-operations/expressions.rst
@@ -64,7 +64,7 @@ Arrays
 ------

 For columns that contain arrays of values, you can access individual elements of the array by index
-using bracket indexing. This is similar to callling the function
+using bracket indexing. This is similar to calling the function
 :py:func:`datafusion.functions.array_element`, except that array indexing using brackets is 0 based,
 similar to Python arrays and ``array_element`` is 1 based indexing to be compatible with other SQL
 approaches.
@@ -82,6 +82,13 @@ approaches.
 Indexing an element of an array via ``[]`` starts at index 0 whereas
 :py:func:`~datafusion.functions.array_element` starts at index 1.

+Starting in DataFusion 49.0.0 you can also create slices of array elements using
+slice syntax from Python.
+
+.. ipython:: python
+
+    df.select(col("a")[1:3].alias("second_two_elements"))
+
 To check if an array is empty, you can use the function :py:func:`datafusion.functions.array_empty`
 or `datafusion.functions.empty`. This function returns a boolean indicating whether the array is empty.
diff --git a/docs/source/user-guide/common-operations/joins.rst b/docs/source/user-guide/common-operations/joins.rst
index 40d922150..1d9d70385 100644
--- a/docs/source/user-guide/common-operations/joins.rst
+++ b/docs/source/user-guide/common-operations/joins.rst
@@ -101,4 +101,36 @@ the right table.

 .. ipython:: python

-    left.join(right, left_on="customer_id", right_on="id", how="anti")
\ No newline at end of file
+    left.join(right, left_on="customer_id", right_on="id", how="anti")
+
+Duplicate Keys
+--------------
+
+It is common to join two DataFrames on a common column name. Starting in
+version 51.0.0, ``datafusion-python`` will coalesce columns with identical names by
+default. This reduces problems with ambiguous column selection after joins.
+You can disable this feature by setting the parameter ``coalesce_duplicate_keys``
+to ``False``.
+
+.. ipython:: python
+
+    left = ctx.from_pydict(
+        {
+            "id": [1, 2, 3],
+            "customer": ["Alice", "Bob", "Charlie"],
+        }
+    )
+
+    right = ctx.from_pylist([
+        {"id": 1, "name": "CityCabs"},
+        {"id": 2, "name": "MetroRide"},
+        {"id": 5, "name": "UrbanGo"},
+    ])
+
+    left.join(right, "id", how="inner")
+
+In contrast to the above example, if we wish to get both columns:
+
+.. ipython:: python
+
+    left.join(right, "id", how="inner", coalesce_duplicate_keys=False)
diff --git a/docs/source/user-guide/common-operations/udf-and-udfa.rst b/docs/source/user-guide/common-operations/udf-and-udfa.rst
index 0830fa81c..f669721a3 100644
--- a/docs/source/user-guide/common-operations/udf-and-udfa.rst
+++ b/docs/source/user-guide/common-operations/udf-and-udfa.rst
@@ -90,6 +90,17 @@ converting to Python objects to do the evaluation.

     df.select(col("a"), is_null_arr(col("a")).alias("is_null")).show()

+In this example we passed the PyArrow ``DataType`` when we defined the function
+by calling ``udf()``. If you need additional control, such as specifying
+metadata or nullability of the input or output, you can instead specify a
+PyArrow ``Field``.
+
+If you need to write a custom function but do not want to incur the performance
+cost of converting to Python objects and back, a more advanced approach is to
+write Rust-based UDFs and expose them to Python. There is an example in the
+`DataFusion blog `_
+describing how to do this.
+
 Aggregate Functions
 -------------------
@@ -112,7 +123,7 @@ also see how the inputs to ``update`` and ``merge`` differ.

 .. code-block:: python

-    import pyarrow
+    import pyarrow as pa
     import pyarrow.compute
     import datafusion
     from datafusion import col, udaf, Accumulator
@@ -125,16 +136,16 @@
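+    # How DataFusion drives an Accumulator: update() receives batches of the
+    # input arrays, state() exposes the partial result so it can be exchanged
+    # between partitions, merge() folds in states from other partitions, and
+    # evaluate() produces the final scalar result.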
    def __init__(self):
        self._sum = 0.0

-    def update(self, values_a: pyarrow.Array, values_b: pyarrow.Array) -> None:
+    def update(self, values_a: pa.Array, values_b: pa.Array) -> None:
        self._sum = self._sum + pyarrow.compute.sum(values_a).as_py() - pyarrow.compute.sum(values_b).as_py()

-    def merge(self, states: List[pyarrow.Array]) -> None:
+    def merge(self, states: list[pa.Array]) -> None:
        self._sum = self._sum + pyarrow.compute.sum(states[0]).as_py()

-    def state(self) -> pyarrow.Array:
-        return pyarrow.array([self._sum])
+    def state(self) -> list[pa.Scalar]:
+        return [pa.scalar(self._sum)]

-    def evaluate(self) -> pyarrow.Scalar:
+    def evaluate(self) -> pa.Scalar:
        return pyarrow.scalar(self._sum)

    ctx = datafusion.SessionContext()
@@ -145,10 +156,29 @@ also see how the inputs to ``update`` and ``merge`` differ.
        }
    )

-    my_udaf = udaf(MyAccumulator, [pyarrow.float64(), pyarrow.float64()], pyarrow.float64(), [pyarrow.float64()], 'stable')
+    my_udaf = udaf(MyAccumulator, [pa.float64(), pa.float64()], pa.float64(), [pa.float64()], 'stable')

    df.aggregate([], [my_udaf(col("a"), col("b")).alias("col_diff")])

+FAQ
+^^^
+
+**How do I return a list from a UDAF?**
+
+Both the ``evaluate`` and the ``state`` functions expect to return scalar values.
+If you wish to return a list array as a scalar value, the best practice is to
+wrap the values in a ``pyarrow.Scalar`` object. For example, you can return a
+timestamp list with ``pa.scalar([...], type=pa.list_(pa.timestamp("ms")))`` and
+register the appropriate return or state types as
+``return_type=pa.list_(pa.timestamp("ms"))`` and
+``state_type=[pa.list_(pa.timestamp("ms"))]``, respectively.
+
+As of DataFusion 52.0.0, you can return any Python object, including a
+PyArrow array, as the return value(s) for these functions, and DataFusion will
+attempt to create a scalar type from the value. DataFusion has been tested to
+convert PyArrow, nanoarrow, and arro3 objects as well as primitive data types
+like integers, strings, and so on.
+
 Window Functions
 ----------------
diff --git a/docs/source/user-guide/common-operations/windows.rst b/docs/source/user-guide/common-operations/windows.rst
index 8225d125a..c8fdea8f4 100644
--- a/docs/source/user-guide/common-operations/windows.rst
+++ b/docs/source/user-guide/common-operations/windows.rst
@@ -24,14 +24,14 @@ In this section you will learn about window functions. A window function utilize
 multiple rows to produce a result for each individual row, unlike an aggregate function that
 provides a single value for multiple rows.

-The window functions are availble in the :py:mod:`~datafusion.functions` module.
+The window functions are available in the :py:mod:`~datafusion.functions` module.

 We'll use the pokemon dataset (from Ritchie Vink) in the following examples.

 .. ipython:: python

     from datafusion import SessionContext
-    from datafusion import col
+    from datafusion import col, lit
     from datafusion import functions as f

     ctx = SessionContext()
@@ -99,8 +99,8 @@ If you do not specify a Window Frame, the frame will be set depending on the fol
 criteria.

 * If an ``order_by`` clause is set, the default window frame is defined as the rows between
-  unbounded preceeding and the current row.
-* If an ``order_by`` is not set, the default frame is defined as the rows betwene unbounded
+  unbounded preceding and the current row.
+* If an ``order_by`` is not set, the default frame is defined as the rows between unbounded preceding
Window Frames are defined by three parameters: unit type, starting bound, and ending bound. @@ -116,20 +116,18 @@ The unit types available are: ``order_by`` clause. In this example we perform a "rolling average" of the speed of the current Pokemon and the -two preceeding rows. +two preceding rows. .. ipython:: python - from datafusion.expr import WindowFrame + from datafusion.expr import Window, WindowFrame df.select( col('"Name"'), col('"Speed"'), - f.window("avg", - [col('"Speed"')], - order_by=[col('"Speed"')], - window_frame=WindowFrame("rows", 2, 0) - ).alias("Previous Speed") + f.avg(col('"Speed"')) + .over(Window(window_frame=WindowFrame("rows", 2, 0), order_by=[col('"Speed"')])) + .alias("Previous Speed"), ) Null Treatment @@ -151,21 +149,27 @@ it's ``Type 2`` column that are null. from datafusion.common import NullTreatment - df.filter(col('"Type 1"') == lit("Bug")).select( + df.filter(col('"Type 1"') == lit("Bug")).select( '"Name"', '"Type 2"', - f.window("last_value", [col('"Type 2"')]) - .window_frame(WindowFrame("rows", None, 0)) - .order_by(col('"Speed"')) - .null_treatment(NullTreatment.IGNORE_NULLS) - .build() - .alias("last_wo_null"), - f.window("last_value", [col('"Type 2"')]) - .window_frame(WindowFrame("rows", None, 0)) - .order_by(col('"Speed"')) - .null_treatment(NullTreatment.RESPECT_NULLS) - .build() - .alias("last_with_null") + f.last_value(col('"Type 2"')) + .over( + Window( + window_frame=WindowFrame("rows", None, 0), + order_by=[col('"Speed"')], + null_treatment=NullTreatment.IGNORE_NULLS, + ) + ) + .alias("last_wo_null"), + f.last_value(col('"Type 2"')) + .over( + Window( + window_frame=WindowFrame("rows", None, 0), + order_by=[col('"Speed"')], + null_treatment=NullTreatment.RESPECT_NULLS, + ) + ) + .alias("last_with_null"), ) Aggregate Functions diff --git a/docs/source/user-guide/configuration.rst b/docs/source/user-guide/configuration.rst index db200a46a..f8e613cd4 100644 --- a/docs/source/user-guide/configuration.rst +++ b/docs/source/user-guide/configuration.rst @@ -15,6 +15,8 @@ .. specific language governing permissions and limitations .. under the License. +.. _configuration: + Configuration ============= @@ -46,6 +48,141 @@ a :py:class:`~datafusion.context.SessionConfig` and :py:class:`~datafusion.conte ctx = SessionContext(config, runtime) print(ctx) +Maximizing CPU Usage +-------------------- + +DataFusion uses partitions to parallelize work. For small queries the +default configuration (number of CPU cores) is often sufficient, but to +fully utilize available hardware you can tune how many partitions are +created and when DataFusion will repartition data automatically. + +Configure a ``SessionContext`` with a higher partition count: + +.. code-block:: python + + from datafusion import SessionConfig, SessionContext + + # allow up to 16 concurrent partitions + config = SessionConfig().with_target_partitions(16) + ctx = SessionContext(config) + +Automatic repartitioning for joins, aggregations, window functions and +other operations can be enabled to increase parallelism: + +.. code-block:: python + + config = ( + SessionConfig() + .with_target_partitions(16) + .with_repartition_joins(True) + .with_repartition_aggregations(True) + .with_repartition_windows(True) + ) + +Manual repartitioning is available on DataFrames when you need precise +control: + +.. 
code-block:: python + + from datafusion import col + + df = ctx.read_parquet("data.parquet") + + # Evenly divide into 16 partitions + df = df.repartition(16) + + # Or partition by the hash of a column + df = df.repartition_by_hash(col("a"), num=16) + + result = df.collect() + + +Benchmark Example +^^^^^^^^^^^^^^^^^ + +The repository includes a benchmark script that demonstrates how to maximize CPU usage +with DataFusion. The :code:`benchmarks/max_cpu_usage.py` script shows a practical example +of configuring DataFusion for optimal parallelism. + +You can run the benchmark script to see the impact of different configuration settings: + +.. code-block:: bash + + # Run with default settings (uses all CPU cores) + python benchmarks/max_cpu_usage.py + + # Run with specific number of rows and partitions + python benchmarks/max_cpu_usage.py --rows 5000000 --partitions 16 + + # See all available options + python benchmarks/max_cpu_usage.py --help + +Here's an example showing the performance difference between single and multiple partitions: + +.. code-block:: bash + + # Single partition - slower processing + $ python benchmarks/max_cpu_usage.py --rows=10000000 --partitions 1 + Processed 10000000 rows using 1 partitions in 0.107s + + # Multiple partitions - faster processing + $ python benchmarks/max_cpu_usage.py --rows=10000000 --partitions 10 + Processed 10000000 rows using 10 partitions in 0.038s + +This example demonstrates nearly 3x performance improvement (0.107s vs 0.038s) when using +10 partitions instead of 1, showcasing how proper partitioning can significantly improve +CPU utilization and query performance. + +The script demonstrates several key optimization techniques: + +1. **Higher target partition count**: Uses :code:`with_target_partitions()` to set the number of concurrent partitions +2. **Automatic repartitioning**: Enables repartitioning for joins, aggregations, and window functions +3. **Manual repartitioning**: Uses :code:`repartition()` to ensure all partitions are utilized +4. **CPU-intensive operations**: Performs aggregations that can benefit from parallelization + +The benchmark creates synthetic data and measures the time taken to perform a sum aggregation +across the specified number of partitions. This helps you understand how partition configuration +affects performance on your specific hardware. + +Important Considerations +"""""""""""""""""""""""" + +The provided benchmark script demonstrates partitioning concepts using synthetic in-memory data +and simple aggregation operations. 
While useful for understanding basic configuration principles, +actual performance in production environments may vary significantly based on numerous factors: + +**Data Sources and I/O Characteristics:** + +- **Table providers**: Performance differs greatly between Parquet files, CSV files, databases, and cloud storage +- **Storage type**: Local SSD, network-attached storage, and cloud storage have vastly different characteristics +- **Network latency**: Remote data sources introduce additional latency considerations +- **File sizes and distribution**: Large files may benefit differently from partitioning than many small files + +**Query and Workload Characteristics:** + +- **Operation complexity**: Simple aggregations versus complex joins, window functions, or nested queries +- **Data distribution**: Skewed data may not partition evenly, affecting parallel efficiency +- **Memory usage**: Large datasets may require different memory management strategies +- **Concurrent workloads**: Multiple queries running simultaneously affect resource allocation + +**Hardware and Environment Factors:** + +- **CPU architecture**: Different processors have varying parallel processing capabilities +- **Available memory**: Limited RAM may require different optimization strategies +- **System load**: Other applications competing for resources affect DataFusion performance + +**Recommendations for Production Use:** + +To optimize DataFusion for your specific use case, it is strongly recommended to: + +1. **Create custom benchmarks** using your actual data sources, formats, and query patterns +2. **Test with representative data volumes** that match your production workloads +3. **Measure end-to-end performance** including data loading, processing, and result handling +4. **Evaluate different configuration combinations** for your specific hardware and workload +5. **Monitor resource utilization** (CPU, memory, I/O) to identify bottlenecks in your environment + +This approach will provide more accurate insights into how DataFusion configuration options +will impact your particular applications and infrastructure. -You can read more about available :py:class:`~datafusion.context.SessionConfig` options in the `rust DataFusion Configuration guide `_, +For more information about available :py:class:`~datafusion.context.SessionConfig` options, see the `rust DataFusion Configuration guide `_, and about :code:`RuntimeEnvBuilder` options in the rust `online API documentation `_. diff --git a/docs/source/user-guide/data-sources.rst b/docs/source/user-guide/data-sources.rst index ba5967c97..26f1303c4 100644 --- a/docs/source/user-guide/data-sources.rst +++ b/docs/source/user-guide/data-sources.rst @@ -25,7 +25,7 @@ DataFusion provides a wide variety of ways to get data into a DataFrame to perfo Local file ---------- -DataFusion has the abilty to read from a variety of popular file formats, such as :ref:`Parquet `, +DataFusion has the ability to read from a variety of popular file formats, such as :ref:`Parquet `, :ref:`CSV `, :ref:`JSON `, and :ref:`AVRO `. .. ipython:: python @@ -120,7 +120,7 @@ DataFusion can import DataFrames directly from other libraries, such as `Polars `_ and `Pandas `_. Since DataFusion version 42.0.0, any DataFrame library that supports the Arrow FFI PyCapsule interface can be imported to DataFusion using the -:py:func:`~datafusion.context.SessionContext.from_arrow` function. Older verions of Polars may +:py:func:`~datafusion.context.SessionContext.from_arrow` function. 
Older versions of Polars may
+not support the arrow interface. In those cases, you can still import via the
+:py:func:`~datafusion.context.SessionContext.from_polars` function.
 
@@ -154,11 +154,11 @@ as Delta Lake. This will require a recent version of
 
     from deltalake import DeltaTable
     delta_table = DeltaTable("path_to_table")
-    ctx.register_table_provider("my_delta_table", delta_table)
+    ctx.register_table("my_delta_table", delta_table)
     df = ctx.table("my_delta_table")
     df.show()
 
-On older versions of ``deltalake`` (prior to 0.22) you can use the
+On older versions of ``deltalake`` (prior to 0.22) you can use the
 `Arrow DataSet `_
 interface to import to DataFusion, but this does not support features such as filter push down
 which can lead to a significant performance difference.
 
@@ -172,10 +172,41 @@ which can lead to a significant performance difference.
 
     df = ctx.table("my_delta_table")
     df.show()
 
-Iceberg
--------
+Apache Iceberg
+--------------
 
-Coming soon!
+DataFusion 45.0.0 and later support the ability to register Apache Iceberg tables as table providers through the Custom Table Provider interface.
+
+This requires either the `pyiceberg `__ library (>=0.10.0) or the `pyiceberg-core `__ library (>=0.5.0).
+
+* The ``pyiceberg-core`` library exposes Iceberg Rust's implementation of the Custom Table Provider interface as Python bindings.
+* The ``pyiceberg`` library uses the ``pyiceberg-core`` Python bindings under the hood and provides a native way for Python users to interact with DataFusion.
+
+.. code-block:: python
+
+    from datafusion import SessionContext
+    from pyiceberg.catalog import load_catalog
+    import pyarrow as pa
+
+    # Load catalog and create/load a table
+    catalog = load_catalog("catalog", type="in-memory")
+    catalog.create_namespace_if_not_exists("default")
+
+    # Create some sample data
+    data = pa.table({"x": [1, 2, 3], "y": [4, 5, 6]})
+    iceberg_table = catalog.create_table("default.test", schema=data.schema)
+    iceberg_table.append(data)
+
+    # Register the table with DataFusion
+    ctx = SessionContext()
+    ctx.register_table_provider("test", iceberg_table)
+
+    # Query the table using DataFusion
+    ctx.table("test").show()
+
+
+Note that the DataFusion integration relies on features from the `Iceberg Rust `_ implementation rather than the `PyIceberg `_ implementation.
+Features that are available in PyIceberg but not yet in Iceberg Rust will not be available when using DataFusion.
 
 Custom Table Provider
 ---------------------
@@ -183,5 +214,61 @@ Custom Table Provider
 
 You can implement a custom Data Provider in Rust and expose it to DataFusion through
-the the interface as describe in the :ref:`Custom Table Provider ` section. This is an
+the interface as described in the :ref:`Custom Table Provider ` section. This is an
 advanced topic, but a
-`user example `_
+`user example `_
 is provided in the DataFusion repository.
+
+Catalog
+=======
+
+A common technique for organizing tables is using a three-level hierarchical approach. DataFusion
+supports this form of organization using the :py:class:`~datafusion.catalog.Catalog`,
+:py:class:`~datafusion.catalog.Schema`, and :py:class:`~datafusion.catalog.Table`. By default,
+a :py:class:`~datafusion.context.SessionContext` comes with a single Catalog and a single Schema
+with the names ``datafusion`` and ``default``, respectively.
+
+The default implementation uses an in-memory approach to the catalog and schema. We have support
+for adding additional in-memory catalogs and schemas. This can be done as in the following
+example:
+
+..
code-block:: python
+
+    from datafusion.catalog import Catalog, Schema
+
+    my_catalog = Catalog.memory_catalog()
+    my_schema = Schema.memory_schema()
+
+    my_catalog.register_schema("my_schema_name", my_schema)
+
+    ctx.register_catalog("my_catalog_name", my_catalog)
+
+You could then register tables in ``my_schema`` and access them either through the DataFrame
+API or via SQL commands such as ``"SELECT * from my_catalog_name.my_schema_name.my_table"``.
+
+User Defined Catalog and Schema
+-------------------------------
+
+If the in-memory catalogs are insufficient for your use case, there are two approaches you can
+take to implementing a custom catalog and/or schema. In the discussion below, we describe how
+to implement these for a Catalog, but the approach to implementing them for a Schema is nearly
+identical.
+
+DataFusion supports Catalogs written in either Rust or Python. If you write a Catalog in Rust,
+you will need to export it as a Python library via PyO3. There is a complete example of a
+catalog implemented this way in the
+`examples folder `_
+of our repository. Writing catalog providers in Rust typically leads to significant
+performance improvements over the Python-based approach.
+
+To implement a Catalog in Python, you will need to inherit from the abstract base class
+:py:class:`~datafusion.catalog.CatalogProvider`. There are examples in the
+`unit tests `_ of
+implementing a basic Catalog in Python where we simply keep a dictionary of the
+registered Schemas.
+
+One important note for developers is that when we have a Catalog defined in Python, we have
+two different ways of accessing this Catalog. First, we register the catalog with a Rust
+wrapper. This allows any Rust-based code to call the Python functions as necessary.
+Second, if the user accesses the Catalog via the Python API, we identify this and return
+the original Python object that implements the Catalog. This is an important distinction
+for developers because we do *not* return a Python wrapper around the Rust wrapper of the
+original Python object.
diff --git a/docs/source/user-guide/dataframe/index.rst b/docs/source/user-guide/dataframe/index.rst
new file mode 100644
index 000000000..510bcbc68
--- /dev/null
+++ b/docs/source/user-guide/dataframe/index.rst
@@ -0,0 +1,371 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements.  See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership.  The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License.  You may obtain a copy of the License at
+
+..   http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied.  See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+DataFrames
+==========
+
+Overview
+--------
+
+The ``DataFrame`` class is the core abstraction in DataFusion that represents tabular data and operations
+on that data. DataFrames provide a flexible API for transforming data through various operations such as
+filtering, projection, aggregation, joining, and more.
+
+A DataFrame represents a logical plan that is lazily evaluated. 
The actual execution occurs only when +terminal operations like ``collect()``, ``show()``, or ``to_pandas()`` are called. + +Creating DataFrames +------------------- + +DataFrames can be created in several ways: + +* From SQL queries via a ``SessionContext``: + + .. code-block:: python + + from datafusion import SessionContext + + ctx = SessionContext() + df = ctx.sql("SELECT * FROM your_table") + +* From registered tables: + + .. code-block:: python + + df = ctx.table("your_table") + +* From various data sources: + + .. code-block:: python + + # From CSV files (see :ref:`io_csv` for detailed options) + df = ctx.read_csv("path/to/data.csv") + + # From Parquet files (see :ref:`io_parquet` for detailed options) + df = ctx.read_parquet("path/to/data.parquet") + + # From JSON files (see :ref:`io_json` for detailed options) + df = ctx.read_json("path/to/data.json") + + # From Avro files (see :ref:`io_avro` for detailed options) + df = ctx.read_avro("path/to/data.avro") + + # From Pandas DataFrame + import pandas as pd + pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = ctx.from_pandas(pandas_df) + + # From Arrow data + import pyarrow as pa + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"] + ) + df = ctx.from_arrow(batch) + +For detailed information about reading from different data sources, see the :doc:`I/O Guide <../io/index>`. +For custom data sources, see :ref:`io_custom_table_provider`. + +Common DataFrame Operations +--------------------------- + +DataFusion's DataFrame API offers a wide range of operations: + +.. code-block:: python + + from datafusion import column, literal + + # Select specific columns + df = df.select("col1", "col2") + + # Select with expressions + df = df.select(column("a") + column("b"), column("a") - column("b")) + + # Filter rows (expressions or SQL strings) + df = df.filter(column("age") > literal(25)) + df = df.filter("age > 25") + + # Add computed columns + df = df.with_column("full_name", column("first_name") + literal(" ") + column("last_name")) + + # Multiple column additions + df = df.with_columns( + (column("a") + column("b")).alias("sum"), + (column("a") * column("b")).alias("product") + ) + + # Sort data + df = df.sort(column("age").sort(ascending=False)) + + # Join DataFrames + df = df1.join(df2, on="user_id", how="inner") + + # Aggregate data + from datafusion import functions as f + df = df.aggregate( + [], # Group by columns (empty for global aggregation) + [f.sum(column("amount")).alias("total_amount")] + ) + + # Limit rows + df = df.limit(100) + + # Drop columns + df = df.drop("temporary_column") + +Column Names as Function Arguments +---------------------------------- + +Some ``DataFrame`` methods accept column names when an argument refers to an +existing column. These include: + +* :py:meth:`~datafusion.DataFrame.select` +* :py:meth:`~datafusion.DataFrame.sort` +* :py:meth:`~datafusion.DataFrame.drop` +* :py:meth:`~datafusion.DataFrame.join` (``on`` argument) +* :py:meth:`~datafusion.DataFrame.aggregate` (grouping columns) + +See the full function documentation for details on any specific function. + +Note that :py:meth:`~datafusion.DataFrame.join_on` expects ``col()``/``column()`` expressions rather than plain strings. + +For such methods, you can pass column names directly: + +.. 
code-block:: python + + from datafusion import col, functions as f + + df.sort('id') + df.aggregate('id', [f.count(col('value'))]) + +The same operation can also be written with explicit column expressions, using either ``col()`` or ``column()``: + +.. code-block:: python + + from datafusion import col, column, functions as f + + df.sort(col('id')) + df.aggregate(column('id'), [f.count(col('value'))]) + +Note that ``column()`` is an alias of ``col()``, so you can use either name; the example above shows both in action. + +Whenever an argument represents an expression—such as in +:py:meth:`~datafusion.DataFrame.filter` or +:py:meth:`~datafusion.DataFrame.with_column`—use ``col()`` to reference +columns. The comparison and arithmetic operators on ``Expr`` will automatically +convert any non-``Expr`` value into a literal expression, so writing + +.. code-block:: python + + from datafusion import col + df.filter(col("age") > 21) + +is equivalent to using ``lit(21)`` explicitly. Use ``lit()`` (also available +as ``literal()``) when you need to construct a literal expression directly. + +Terminal Operations +------------------- + +To materialize the results of your DataFrame operations: + +.. code-block:: python + + # Collect all data as PyArrow RecordBatches + result_batches = df.collect() + + # Convert to various formats + pandas_df = df.to_pandas() # Pandas DataFrame + polars_df = df.to_polars() # Polars DataFrame + arrow_table = df.to_arrow_table() # PyArrow Table + py_dict = df.to_pydict() # Python dictionary + py_list = df.to_pylist() # Python list of dictionaries + + # Display results + df.show() # Print tabular format to console + + # Count rows + count = df.count() + + # Collect a single column of data as a PyArrow Array + arr = df.collect_column("age") + +Zero-copy streaming to Arrow-based Python libraries +--------------------------------------------------- + +DataFusion DataFrames implement the ``__arrow_c_stream__`` protocol, enabling +zero-copy, lazy streaming into Arrow-based Python libraries. With the streaming +protocol, batches are produced on demand. + +.. note:: + + The protocol is implementation-agnostic and works with any Python library + that understands the Arrow C streaming interface (for example, PyArrow + or other Arrow-compatible implementations). The sections below provide a + short PyArrow-specific example and general guidance for other + implementations. + +PyArrow +------- + +.. code-block:: python + + import pyarrow as pa + + # Create a PyArrow RecordBatchReader without materializing all batches + reader = pa.RecordBatchReader.from_stream(df) + for batch in reader: + ... # process each batch as it is produced + +DataFrames are also iterable, yielding :class:`datafusion.RecordBatch` +objects lazily so you can loop over results directly without importing +PyArrow: + +.. code-block:: python + + for batch in df: + ... # each batch is a ``datafusion.RecordBatch`` + +Each batch exposes ``to_pyarrow()``, allowing conversion to a PyArrow +table. ``pa.table(df)`` collects the entire DataFrame eagerly into a +PyArrow table: + +.. code-block:: python + + import pyarrow as pa + table = pa.table(df) + +Asynchronous iteration is supported as well, allowing integration with +``asyncio`` event loops: + +.. code-block:: python + + async for batch in df: + ... # process each batch as it is produced + +To work with the stream directly, use ``execute_stream()``, which returns a +:class:`~datafusion.RecordBatchStream`. + +.. 
code-block:: python
+
+    stream = df.execute_stream()
+    for batch in stream:
+        ...
+
+Execute as Stream
+^^^^^^^^^^^^^^^^^
+
+For finer control over streaming execution,
+:py:meth:`~datafusion.DataFrame.execute_stream`, shown above, returns a
+:py:class:`datafusion.RecordBatchStream` whose batches you can process as they
+are produced.
+
+.. tip::
+
+    To get a PyArrow reader instead, call
+
+    ``pa.RecordBatchReader.from_stream(df)``.
+
+When partition boundaries are important,
+:py:meth:`~datafusion.DataFrame.execute_stream_partitioned`
+returns an iterable of :py:class:`datafusion.RecordBatchStream` objects, one per
+partition:
+
+.. code-block:: python
+
+    for stream in df.execute_stream_partitioned():
+        for batch in stream:
+            ...  # each stream yields RecordBatches
+
+To process partitions concurrently, first collect the streams into a list
+and then poll each one in a separate ``asyncio`` task:
+
+.. code-block:: python
+
+    import asyncio
+
+    async def consume(stream):
+        async for batch in stream:
+            ...
+
+    streams = list(df.execute_stream_partitioned())
+    await asyncio.gather(*(consume(s) for s in streams))
+
+See :doc:`../io/arrow` for additional details on the Arrow interface.
+
+HTML Rendering
+--------------
+
+When working in Jupyter notebooks or other environments that support HTML rendering, DataFrames will
+automatically display as formatted HTML tables. For detailed information about customizing HTML
+rendering, formatting options, and advanced styling, see :doc:`rendering`.
+
+Core Classes
+------------
+
+**DataFrame**
+    The main DataFrame class for building and executing queries.
+
+    See: :py:class:`datafusion.DataFrame`
+
+**SessionContext**
+    The primary entry point for creating DataFrames from various data sources.
+
+    Key methods for DataFrame creation:
+
+    * :py:meth:`~datafusion.SessionContext.read_csv` - Read CSV files
+    * :py:meth:`~datafusion.SessionContext.read_parquet` - Read Parquet files
+    * :py:meth:`~datafusion.SessionContext.read_json` - Read JSON files
+    * :py:meth:`~datafusion.SessionContext.read_avro` - Read Avro files
+    * :py:meth:`~datafusion.SessionContext.table` - Access registered tables
+    * :py:meth:`~datafusion.SessionContext.sql` - Execute SQL queries
+    * :py:meth:`~datafusion.SessionContext.from_pandas` - Create from Pandas DataFrame
+    * :py:meth:`~datafusion.SessionContext.from_arrow` - Create from Arrow data
+
+    See: :py:class:`datafusion.SessionContext`
+
+Expression Classes
+------------------
+
+**Expr**
+    Represents expressions that can be used in DataFrame operations.
+
+    See: :py:class:`datafusion.Expr`
+
+**Functions for creating expressions:**
+
+* :py:func:`datafusion.column` - Reference a column by name
+* :py:func:`datafusion.literal` - Create a literal value expression
+
+Built-in Functions
+------------------
+
+DataFusion provides many built-in functions for data manipulation:
+
+* :py:mod:`datafusion.functions` - Mathematical, string, date/time, and aggregation functions
+
+For a complete list of available functions, see the :py:mod:`datafusion.functions` module documentation.
+
+
+..
toctree:: + :maxdepth: 1 + + rendering diff --git a/docs/source/user-guide/dataframe.rst b/docs/source/user-guide/dataframe/rendering.rst similarity index 69% rename from docs/source/user-guide/dataframe.rst rename to docs/source/user-guide/dataframe/rendering.rst index 23c65b5f6..9dea948bb 100644 --- a/docs/source/user-guide/dataframe.rst +++ b/docs/source/user-guide/dataframe/rendering.rst @@ -15,59 +15,37 @@ .. specific language governing permissions and limitations .. under the License. -DataFrames -========== +HTML Rendering in Jupyter +========================= -Overview --------- +When working in Jupyter notebooks or other environments that support rich HTML display, +DataFusion DataFrames automatically render as nicely formatted HTML tables. This functionality +is provided by the ``_repr_html_`` method, which is automatically called by Jupyter to provide +a richer visualization than plain text output. -DataFusion's DataFrame API provides a powerful interface for building and executing queries against data sources. -It offers a familiar API similar to pandas and other DataFrame libraries, but with the performance benefits of Rust -and Arrow. +Basic HTML Rendering +-------------------- -A DataFrame represents a logical plan that can be composed through operations like filtering, projection, and aggregation. -The actual execution happens when terminal operations like ``collect()`` or ``show()`` are called. - -Basic Usage ------------ +In a Jupyter environment, simply displaying a DataFrame object will trigger HTML rendering: .. code-block:: python - import datafusion - from datafusion import col, lit + # Will display as HTML table in Jupyter + df - # Create a context and register a data source - ctx = datafusion.SessionContext() - ctx.register_csv("my_table", "path/to/data.csv") - - # Create and manipulate a DataFrame - df = ctx.sql("SELECT * FROM my_table") - - # Or use the DataFrame API directly - df = (ctx.table("my_table") - .filter(col("age") > lit(25)) - .select([col("name"), col("age")])) - - # Execute and collect results - result = df.collect() - - # Display the first few rows - df.show() + # Explicit display also uses HTML rendering + display(df) -HTML Rendering --------------- - -When working in Jupyter notebooks or other environments that support HTML rendering, DataFrames will -automatically display as formatted HTML tables, making it easier to visualize your data. +Customizing HTML Rendering +--------------------------- -The ``_repr_html_`` method is called automatically by Jupyter to render a DataFrame. This method -controls how DataFrames appear in notebook environments, providing a richer visualization than -plain text output. +DataFusion provides extensive customization options for HTML table rendering through the +``datafusion.html_formatter`` module. -Customizing HTML Rendering --------------------------- +Configuring the HTML Formatter +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can customize how DataFrames are rendered in HTML by configuring the formatter: +You can customize how DataFrames are rendered by configuring the formatter: .. 
code-block:: python @@ -79,8 +57,8 @@ You can customize how DataFrames are rendered in HTML by configuring the formatt max_width=1000, # Maximum width in pixels max_height=300, # Maximum height in pixels max_memory_bytes=2097152, # Maximum memory for rendering (2MB) - min_rows_display=20, # Minimum number of rows to display - repr_rows=10, # Number of rows to display in __repr__ + min_rows=10, # Minimum number of rows to display + max_rows=10, # Maximum rows to display in __repr__ enable_cell_expansion=True,# Allow expanding truncated cells custom_css=None, # Additional custom CSS show_truncation_message=True, # Show message when data is truncated @@ -91,7 +69,7 @@ You can customize how DataFrames are rendered in HTML by configuring the formatt The formatter settings affect all DataFrames displayed after configuration. Custom Style Providers ----------------------- +----------------------- For advanced styling needs, you can create a custom style provider: @@ -118,7 +96,8 @@ For advanced styling needs, you can create a custom style provider: configure_formatter(style_provider=MyStyleProvider()) Performance Optimization with Shared Styles -------------------------------------------- +-------------------------------------------- + The ``use_shared_styles`` parameter (enabled by default) optimizes performance when displaying multiple DataFrames in notebook environments: @@ -138,7 +117,7 @@ When ``use_shared_styles=True``: - Applies consistent styling across all DataFrames Creating a Custom Formatter ---------------------------- +---------------------------- For complete control over rendering, you can implement a custom formatter: @@ -184,7 +163,7 @@ Get the current formatter settings: print(formatter.theme) Contextual Formatting ---------------------- +---------------------- You can also use a context manager to temporarily change formatting settings: @@ -207,12 +186,38 @@ Memory and Display Controls You can control how much data is displayed and how much memory is used for rendering: - .. code-block:: python - +.. code-block:: python + configure_formatter( max_memory_bytes=4 * 1024 * 1024, # 4MB maximum memory for display - min_rows_display=50, # Always show at least 50 rows - repr_rows=20 # Show 20 rows in __repr__ output + min_rows=20, # Always show at least 20 rows + max_rows=50 # Show up to 50 rows in output ) -These parameters help balance comprehensive data display against performance considerations. \ No newline at end of file +These parameters help balance comprehensive data display against performance considerations. + +Best Practices +-------------- + +1. **Global Configuration**: Use ``configure_formatter()`` at the beginning of your notebook to set up consistent formatting for all DataFrames. + +2. **Memory Management**: Set appropriate ``max_memory_bytes`` limits to prevent performance issues with large datasets. + +3. **Shared Styles**: Keep ``use_shared_styles=True`` (default) for better performance in notebooks with multiple DataFrames. + +4. **Reset When Needed**: Call ``reset_formatter()`` when you want to start fresh with default settings. + +5. **Cell Expansion**: Use ``enable_cell_expansion=True`` when cells might contain longer content that users may want to see in full. 
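+
+As a starting point, the short sketch below applies these practices together.
+It is a minimal example, assuming a notebook session in which ``df`` is an
+existing DataFrame:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import configure_formatter, reset_formatter
+
+    # Configure once, near the top of the notebook
+    configure_formatter(
+        max_memory_bytes=2 * 1024 * 1024,  # cap rendering memory at 2MB
+        use_shared_styles=True,            # reuse shared styles across DataFrames
+        enable_cell_expansion=True,        # let users expand truncated cells
+    )
+
+    df  # renders as an HTML table using the settings above
+
+    # Start fresh with default settings when finished
+    reset_formatter()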
+ +Additional Resources +-------------------- + +* :doc:`../dataframe/index` - Complete guide to using DataFrames +* :doc:`../io/index` - I/O Guide for reading data from various sources +* :doc:`../data-sources` - Comprehensive data sources guide +* :ref:`io_csv` - CSV file reading +* :ref:`io_parquet` - Parquet file reading +* :ref:`io_json` - JSON file reading +* :ref:`io_avro` - Avro file reading +* :ref:`io_custom_table_provider` - Custom table providers +* `API Reference `_ - Full API reference diff --git a/docs/source/user-guide/io/arrow.rst b/docs/source/user-guide/io/arrow.rst index d571aa99c..9196fcea7 100644 --- a/docs/source/user-guide/io/arrow.rst +++ b/docs/source/user-guide/io/arrow.rst @@ -60,14 +60,16 @@ Exporting from DataFusion DataFusion DataFrames implement ``__arrow_c_stream__`` PyCapsule interface, so any Python library that accepts these can import a DataFusion DataFrame directly. -.. warning:: - It is important to note that this will cause the DataFrame execution to happen, which may be - a time consuming task. That is, you will cause a - :py:func:`datafusion.dataframe.DataFrame.collect` operation call to occur. +Invoking ``__arrow_c_stream__`` triggers execution of the underlying query, but +batches are yielded incrementally rather than materialized all at once in memory. +Consumers can process the stream as it arrives. The stream executes lazily, +letting downstream readers pull batches on demand. .. ipython:: python + from datafusion import col, lit + df = df.select((col("a") * lit(1.5)).alias("c"), lit("df").alias("d")) pa.table(df) diff --git a/docs/source/user-guide/io/csv.rst b/docs/source/user-guide/io/csv.rst index 144b6615c..9c23c291b 100644 --- a/docs/source/user-guide/io/csv.rst +++ b/docs/source/user-guide/io/csv.rst @@ -36,3 +36,25 @@ An alternative is to use :py:func:`~datafusion.context.SessionContext.register_c ctx.register_csv("file", "file.csv") df = ctx.table("file") + +If you require additional control over how to read the CSV file, you can use +:py:class:`~datafusion.options.CsvReadOptions` to set a variety of options. + +.. code-block:: python + + from datafusion import CsvReadOptions + options = ( + CsvReadOptions() + .with_has_header(True) # File contains a header row + .with_delimiter(";") # Use ; as the delimiter instead of , + .with_comment("#") # Skip lines starting with # + .with_escape("\\") # Escape character + .with_null_regex(r"^(null|NULL|N/A)$") # Treat these as NULL + .with_truncated_rows(True) # Allow rows to have incomplete columns + .with_file_compression_type("gzip") # Read gzipped CSV + .with_file_extension(".gz") # File extension other than .csv + ) + df = ctx.read_csv("data.csv.gz", options=options) + +Details for all CSV reading options can be found on the +`DataFusion documentation site `_. 
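+
+The same options object can also be passed when registering a CSV file as a
+named table, as in the ``examples/csv-read-options.py`` script. A short
+sketch, reusing a hypothetical ``data.csv`` file:
+
+.. code-block:: python
+
+    from datafusion import CsvReadOptions, SessionContext
+
+    ctx = SessionContext()
+    options = CsvReadOptions().with_has_header(True).with_delimiter(";")
+
+    # register_csv accepts the same options object as read_csv
+    ctx.register_csv("file", "data.csv", options=options)
+    df = ctx.sql("SELECT * FROM file")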
diff --git a/docs/source/user-guide/io/table_provider.rst b/docs/source/user-guide/io/table_provider.rst
index bd1d6b80f..29e5d9880 100644
--- a/docs/source/user-guide/io/table_provider.rst
+++ b/docs/source/user-guide/io/table_provider.rst
@@ -37,22 +37,26 @@ A complete example can be found in the `examples folder 
         py: Python<'py>,
     ) -> PyResult<Bound<'py, PyCapsule>> {
-        let name = CString::new("datafusion_table_provider").unwrap();
+        let name = cr"datafusion_table_provider".into();
 
-        let provider = Arc::new(self.clone())
-            .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
-        let provider = FFI_TableProvider::new(Arc::new(provider), false);
+        let provider = Arc::new(self.clone());
+        let provider = FFI_TableProvider::new(provider, false, None);
 
         PyCapsule::new_bound(py, provider, Some(name.clone()))
     }
 }
 
-Once you have this library available, in python you can register your table provider
-to the ``SessionContext``.
+Once you have this library available, you can construct a
+:py:class:`~datafusion.Table` in Python and register it with the
+``SessionContext``.
 
 .. code-block:: python
 
+    from datafusion import SessionContext, Table
+
+    ctx = SessionContext()
     provider = MyTableProvider()
 
-    ctx.register_table_provider("my_table", provider)
-    ctx.table("my_table").show()
+    ctx.register_table("capsule_table", provider)
+
+    ctx.table("capsule_table").show()
diff --git a/docs/source/user-guide/sql.rst b/docs/source/user-guide/sql.rst
index 6fa7f0c6a..b4bfb9611 100644
--- a/docs/source/user-guide/sql.rst
+++ b/docs/source/user-guide/sql.rst
@@ -23,17 +23,100 @@ DataFusion also offers a SQL API, read the full reference `here `_
+Parameterized queries in DataFusion are similar to `f-strings `_,
+but allow passing named parameters into a SQL query. Consider this simple
+example.
+
+.. ipython:: python
+
+    def show_attacks(ctx: SessionContext, threshold: int) -> None:
+        ctx.sql(
+            'SELECT "Name", "Attack" FROM pokemon WHERE "Attack" > $val', val=threshold
+        ).show(num=5)
+    show_attacks(ctx, 75)
+
+When passing parameters as in the example above, we convert the Python objects
+into their string representation. We also have special-case handling
+for :py:class:`~datafusion.dataframe.DataFrame` objects, since they cannot simply
+be turned into string representations for an SQL query. In these cases we
+will register a temporary view in the :py:class:`~datafusion.context.SessionContext`
+using a generated table name.
+
+To mark a variable for string replacement, precede the
+variable name with a single ``$``. This works for all dialects in
+the SQL parser except ``hive`` and ``mysql``. Since these dialects do not
+support named placeholders, we are unable to do this type of replacement.
+We recommend either switching to another dialect or using Python
+f-string style replacement.
+
+.. warning::
+
+    To support DataFrame parameterized queries, your session must support
+    registration of temporary views. The default
+    :py:class:`~datafusion.catalog.CatalogProvider` and
+    :py:class:`~datafusion.catalog.SchemaProvider` do have this capability.
+    If you have implemented custom providers, it is important that temporary
+    views do not persist across :py:class:`~datafusion.context.SessionContext`
+    instances, or you may get unintended consequences.
+
+The following example shows passing in both a :py:class:`~datafusion.dataframe.DataFrame`
+object as well as a Python object to be used in parameterized replacement.
+
+..
ipython:: python
+
+    def show_column(
+        ctx: SessionContext, column: str, df: DataFrame, threshold: int
+    ) -> None:
+        ctx.sql(
+            'SELECT "Name", $col FROM $df WHERE $col > $val',
+            col=column,
+            df=df,
+            val=threshold,
+        ).show(num=5)
+    df = ctx.table("pokemon")
+    show_column(ctx, '"Defense"', df, 75)
+
+The approach implemented for conversion of variables into a SQL query
+relies on string conversion. This has the potential for data loss,
+specifically for cases like floating point numbers. If you need to pass
+variables into a parameterized query and it is important to maintain the
+original value without conversion to a string, then you can use the
+optional parameter ``param_values`` to specify these. This parameter
+expects a dictionary mapping from the parameter name to a Python
+object. Those objects will be cast into a
+`PyArrow Scalar Value `_.
+
+Using ``param_values`` relies on the SQL dialect you have configured
+for your session. This can be set using the :ref:`configuration options `
+of your :py:class:`~datafusion.context.SessionContext`. Similar to how
+`prepared statements `_
+work, these parameters are limited to places where you would pass in a
+scalar value, such as a comparison.
+
+.. ipython:: python
+
+    def param_attacks(ctx: SessionContext, threshold: int) -> None:
+        ctx.sql(
+            'SELECT "Name", "Attack" FROM pokemon WHERE "Attack" > $val',
+            param_values={"val": threshold},
+        ).show(num=5)
+    param_attacks(ctx, 75)
diff --git a/docs/source/user-guide/upgrade-guides.rst b/docs/source/user-guide/upgrade-guides.rst
new file mode 100644
index 000000000..a77f60776
--- /dev/null
+++ b/docs/source/user-guide/upgrade-guides.rst
@@ -0,0 +1,96 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements.  See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership.  The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License.  You may obtain a copy of the License at
+
+..   http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied.  See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+Upgrade Guides
+==============
+
+DataFusion 52.0.0
+-----------------
+
+This version includes a major update to the :ref:`ffi` due to upgrades
+to the `Foreign Function Interface `_.
+Users who contribute their own ``CatalogProvider``, ``SchemaProvider``,
+``TableProvider`` or ``TableFunction`` via FFI must now provide access to a
+``LogicalExtensionCodec`` and a ``TaskContextProvider``. The function signatures
+for the methods that return these ``PyCapsule`` objects now require an additional
+parameter, which is a Python object that can be used to extract the
+necessary ``FFI_LogicalExtensionCodec``.
+
+A complete example can be found in the `FFI example `_.
+Your methods need to be updated to take an additional parameter as in this
+example.
+
+..
code-block:: rust
+
+    #[pymethods]
+    impl MyCatalogProvider {
+        pub fn __datafusion_catalog_provider__<'py>(
+            &self,
+            py: Python<'py>,
+            session: Bound<'py, PyAny>,
+        ) -> PyResult<Bound<'py, PyCapsule>> {
+            let name = cr"datafusion_catalog_provider".into();
+
+            let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProvider>;
+
+            let codec = ffi_logical_codec_from_pycapsule(session)?;
+            let provider = FFI_CatalogProvider::new_with_ffi_codec(provider, None, codec);
+
+            PyCapsule::new(py, provider, Some(name))
+        }
+    }
+
+To extract the logical extension codec FFI object from the provided object you
+can implement a helper method such as:
+
+.. code-block:: rust
+
+    pub(crate) fn ffi_logical_codec_from_pycapsule(
+        obj: Bound<PyAny>,
+    ) -> PyResult<FFI_LogicalExtensionCodec> {
+        let attr_name = "__datafusion_logical_extension_codec__";
+        let capsule = if obj.hasattr(attr_name)? {
+            obj.getattr(attr_name)?.call0()?
+        } else {
+            obj
+        };
+
+        let capsule = capsule.downcast::<PyCapsule>()?;
+        validate_pycapsule(capsule, "datafusion_logical_extension_codec")?;
+
+        let codec = unsafe { capsule.reference::<FFI_LogicalExtensionCodec>() };
+
+        Ok(codec.clone())
+    }
+
+
+The updated DataFusion FFI interface no longer depends directly on the
+``datafusion`` core crate. You can improve your build times and potentially
+reduce your library binary size by removing this dependency and instead
+using the specific DataFusion project crates.
+
+For example, instead of importing:
+
+.. code-block:: rust
+
+    use datafusion::catalog::MemTable;
+
+you can now write:
+
+.. code-block:: rust
+
+    use datafusion_catalog::MemTable;
diff --git a/examples/csv-read-options.py b/examples/csv-read-options.py
new file mode 100644
index 000000000..a5952d950
--- /dev/null
+++ b/examples/csv-read-options.py
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +"""Example demonstrating CsvReadOptions usage.""" + +from datafusion import CsvReadOptions, SessionContext + +# Create a SessionContext +ctx = SessionContext() + +# Example 1: Using CsvReadOptions with default values +print("Example 1: Default CsvReadOptions") +options = CsvReadOptions() +df = ctx.read_csv("data.csv", options=options) + +# Example 2: Using CsvReadOptions with custom parameters +print("\nExample 2: Custom CsvReadOptions") +options = CsvReadOptions( + has_header=True, + delimiter=",", + quote='"', + schema_infer_max_records=1000, + file_extension=".csv", +) +df = ctx.read_csv("data.csv", options=options) + +# Example 3: Using the builder pattern (recommended for readability) +print("\nExample 3: Builder pattern") +options = ( + CsvReadOptions() + .with_has_header(True) # noqa: FBT003 + .with_delimiter("|") + .with_quote("'") + .with_schema_infer_max_records(500) + .with_truncated_rows(False) # noqa: FBT003 + .with_newlines_in_values(True) # noqa: FBT003 +) +df = ctx.read_csv("data.csv", options=options) + +# Example 4: Advanced options +print("\nExample 4: Advanced options") +options = ( + CsvReadOptions() + .with_has_header(True) # noqa: FBT003 + .with_delimiter(",") + .with_comment("#") # Skip lines starting with # + .with_escape("\\") # Escape character + .with_null_regex(r"^(null|NULL|N/A)$") # Treat these as NULL + .with_truncated_rows(True) # noqa: FBT003 + .with_file_compression_type("gzip") # Read gzipped CSV + .with_file_extension(".gz") +) +df = ctx.read_csv("data.csv.gz", options=options) + +# Example 5: Register CSV table with options +print("\nExample 5: Register CSV table") +options = CsvReadOptions().with_has_header(True).with_delimiter(",") # noqa: FBT003 +ctx.register_csv("my_table", "data.csv", options=options) +df = ctx.sql("SELECT * FROM my_table") + +# Example 6: Backward compatibility (without options) +print("\nExample 6: Backward compatibility") +# Still works the old way! 
+df = ctx.read_csv("data.csv", has_header=True, delimiter=",") + +print("\nAll examples completed!") +print("\nFor all available options, see the CsvReadOptions documentation:") +print(" - has_header: bool") +print(" - delimiter: str") +print(" - quote: str") +print(" - terminator: str | None") +print(" - escape: str | None") +print(" - comment: str | None") +print(" - newlines_in_values: bool") +print(" - schema: pa.Schema | None") +print(" - schema_infer_max_records: int") +print(" - file_extension: str") +print(" - table_partition_cols: list[tuple[str, pa.DataType]]") +print(" - file_compression_type: str") +print(" - file_sort_order: list[list[SortExpr]]") +print(" - null_regex: str | None") +print(" - truncated_rows: bool") diff --git a/examples/datafusion-ffi-example/.cargo/config.toml b/examples/datafusion-ffi-example/.cargo/config.toml index 91a099a61..af951327f 100644 --- a/examples/datafusion-ffi-example/.cargo/config.toml +++ b/examples/datafusion-ffi-example/.cargo/config.toml @@ -1,12 +1,5 @@ [target.x86_64-apple-darwin] -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] [target.aarch64-apple-darwin] -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] - +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] diff --git a/examples/datafusion-ffi-example/Cargo.lock b/examples/datafusion-ffi-example/Cargo.lock index 075ebd5a1..02aa7d9d9 100644 --- a/examples/datafusion-ffi-example/Cargo.lock +++ b/examples/datafusion-ffi-example/Cargo.lock @@ -50,20 +50,11 @@ dependencies = [ "core_extensions", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" @@ -73,7 +64,7 @@ checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "const-random", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "version_check", "zerocopy", @@ -81,9 +72,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -109,12 +100,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -126,27 +111,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - -[[package]] -name = "arrayref" -version = 
"0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" - -[[package]] -name = "arrayvec" -version = "0.7.6" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = "arrow" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bb018b6960c87fd9d025009820406f74e83281185a8bdcb44880d2aa5c9a87" +checksum = "e4754a624e5ae42081f464514be454b39711daae0458906dacde5f4c632f33a8" dependencies = [ "arrow-arith", "arrow-array", @@ -165,23 +138,23 @@ dependencies = [ [[package]] name = "arrow-arith" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44de76b51473aa888ecd6ad93ceb262fb8d40d1f1154a4df2f069b3590aa7575" +checksum = "f7b3141e0ec5145a22d8694ea8b6d6f69305971c4fa1c1a13ef0195aef2d678b" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", "chrono", - "num", + "num-traits", ] [[package]] name = "arrow-array" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ed77e22744475a9a53d00026cf8e166fe73cf42d89c4c4ae63607ee1cfcc3f" +checksum = "4c8955af33b25f3b175ee10af580577280b4bd01f7e823d94c7cdef7cf8c9aef" dependencies = [ "ahash", "arrow-buffer", @@ -190,30 +163,34 @@ dependencies = [ "chrono", "chrono-tz", "half", - "hashbrown 0.15.3", - "num", + "hashbrown 0.16.1", + "num-complex", + "num-integer", + "num-traits", ] [[package]] name = "arrow-buffer" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0391c96eb58bf7389171d1e103112d3fc3e5625ca6b372d606f2688f1ea4cce" +checksum = "c697ddca96183182f35b3a18e50b9110b11e916d7b7799cbfd4d34662f2c56c2" dependencies = [ "bytes", "half", - "num", + "num-bigint", + "num-traits", ] [[package]] name = "arrow-cast" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39e1d774ece9292697fcbe06b5584401b26bd34be1bec25c33edae65c2420ff" +checksum = "646bbb821e86fd57189c10b4fcdaa941deaf4181924917b0daa92735baa6ada5" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", + "arrow-ord", "arrow-schema", "arrow-select", "atoi", @@ -222,15 +199,15 @@ dependencies = [ "comfy-table", "half", "lexical-core", - "num", + "num-traits", "ryu", ] [[package]] name = "arrow-csv" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9055c972a07bf12c2a827debfd34f88d3b93da1941d36e1d9fee85eebe38a12a" +checksum = "8da746f4180004e3ce7b83c977daf6394d768332349d3d913998b10a120b790a" dependencies = [ "arrow-array", "arrow-cast", @@ -238,41 +215,42 @@ dependencies = [ "chrono", "csv", "csv-core", - "lazy_static", "regex", ] [[package]] name = "arrow-data" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf75ac27a08c7f48b88e5c923f267e980f27070147ab74615ad85b5c5f90473d" +checksum = "1fdd994a9d28e6365aa78e15da3f3950c0fdcea6b963a12fa1c391afb637b304" dependencies = [ "arrow-buffer", "arrow-schema", "half", - "num", + "num-integer", + "num-traits", ] [[package]] name = "arrow-ipc" -version = "55.1.0" +version = "57.3.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a222f0d93772bd058d1268f4c28ea421a603d66f7979479048c429292fac7b2e" +checksum = "abf7df950701ab528bf7c0cf7eeadc0445d03ef5d6ffc151eaae6b38a58feff1" dependencies = [ "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", + "arrow-select", "flatbuffers", "lz4_flex", ] [[package]] name = "arrow-json" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9085342bbca0f75e8cb70513c0807cc7351f1fbf5cb98192a67d5e3044acb033" +checksum = "0ff8357658bedc49792b13e2e862b80df908171275f8e6e075c460da5ee4bf86" dependencies = [ "arrow-array", "arrow-buffer", @@ -282,19 +260,21 @@ dependencies = [ "chrono", "half", "indexmap", + "itoa", "lexical-core", "memchr", - "num", - "serde", + "num-traits", + "ryu", + "serde_core", "serde_json", "simdutf8", ] [[package]] name = "arrow-ord" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2f1065a5cad7b9efa9e22ce5747ce826aa3855766755d4904535123ef431e7" +checksum = "f7d8f1870e03d4cbed632959498bcc84083b5a24bded52905ae1695bd29da45b" dependencies = [ "arrow-array", "arrow-buffer", @@ -305,9 +285,9 @@ dependencies = [ [[package]] name = "arrow-row" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3703a0e3e92d23c3f756df73d2dc9476873f873a76ae63ef9d3de17fda83b2d8" +checksum = "18228633bad92bff92a95746bbeb16e5fc318e8382b75619dec26db79e4de4c0" dependencies = [ "arrow-array", "arrow-buffer", @@ -318,32 +298,32 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73a47aa0c771b5381de2b7f16998d351a6f4eb839f1e13d48353e17e873d969b" +checksum = "8c872d36b7bf2a6a6a2b40de9156265f0242910791db366a2c17476ba8330d68" dependencies = [ "bitflags", ] [[package]] name = "arrow-select" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b7b85575702b23b85272b01bc1c25a01c9b9852305e5d0078c79ba25d995d4" +checksum = "68bf3e3efbd1278f770d67e5dc410257300b161b93baedb3aae836144edcaf4b" dependencies = [ "ahash", "arrow-array", "arrow-buffer", "arrow-data", "arrow-schema", - "num", + "num-traits", ] [[package]] name = "arrow-string" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9260fddf1cdf2799ace2b4c2fc0356a9789fa7551e0953e35435536fecefebbd" +checksum = "85e968097061b3c0e9fe3079cf2e703e487890700546b5b0647f60fca1b5a8d8" dependencies = [ "arrow-array", "arrow-buffer", @@ -351,7 +331,7 @@ dependencies = [ "arrow-schema", "arrow-select", "memchr", - "num", + "num-traits", "regex", "regex-syntax", ] @@ -368,23 +348,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "async-compression" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" -dependencies = [ - "bzip2", - "flate2", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", - "xz2", - "zstd", - "zstd-safe", -] - [[package]] name = "async-ffi" version = "0.5.0" @@ -396,13 +359,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -416,24 +379,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "backtrace" -version = "0.3.75" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets", -] +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "base64" @@ -441,61 +389,17 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" -[[package]] -name = "bigdecimal" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" -dependencies = [ - "autocfg", - "libm", - "num-bigint", - "num-integer", - "num-traits", -] - [[package]] name = "bitflags" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" - -[[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] - -[[package]] -name = "blake3" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" -dependencies = [ - "arrayref", - "arrayvec", - "cc", - "cfg-if", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "brotli" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -514,9 +418,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "c81d250916401487680ed13b8b675660281dcfc3ab0121fe44c94bcab9eae2fb" [[package]] name = "byteorder" @@ -526,35 +430,17 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "bzip2" -version = "0.5.2" +version = "1.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "49ecfb22d906f800d4fe833b6282cf4dc1c298f5057ca0b5445e5c209735ca47" -dependencies = [ - "bzip2-sys", -] - -[[package]] -name = "bzip2-sys" -version = "0.1.13+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" -dependencies = [ - "cc", - "pkg-config", -] +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "cc" -version = "1.2.23" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4ac86a9e5bc1e2b3449ab9d7d3a6a405e3d1bb28d7b9be8614f55846ae3766" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -562,17 +448,16 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "windows-link", @@ -580,30 +465,19 @@ dependencies = [ [[package]] name = "chrono-tz" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efdce149c370f133a071ca8ef6ea340b7b88748ab0810097a9e2976eaa34b4f3" +checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3" dependencies = [ "chrono", - "chrono-tz-build", "phf", ] -[[package]] -name = "chrono-tz-build" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402" -dependencies = [ - "parse-zoneinfo", - "phf_codegen", -] - [[package]] name = "comfy-table" -version = "7.1.4" +version = "7.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" dependencies = [ "unicode-segmentation", "unicode-width", @@ -624,22 +498,19 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "tiny-keccak", ] [[package]] name = "const_panic" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" - -[[package]] -name = "constant_time_eq" -version = "0.3.1" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "e262cdaac42494e3ae34c43969f9cdeb7da178bdb4b66fa6a1ea2edb4c8ae652" +dependencies = [ + "typewit", +] [[package]] name = "core-foundation-sys" @@ -649,36 +520,18 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_extensions" 
-version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c71dc07c9721607e7a16108336048ee978c3a8b129294534272e8bac96c0ee" +checksum = "42bb5e5d0269fd4f739ea6cedaf29c16d81c27a7ce7582008e90eb50dcd57003" dependencies = [ "core_extensions_proc_macros", ] [[package]] name = "core_extensions_proc_macros" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f3b219d28b6e3b4ac87bc1fc522e0803ab22e055da177bff0068c4150c61a6" - -[[package]] -name = "cpufeatures" -version = "0.2.17" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] +checksum = "533d38ecd2709b7608fb8e18e4504deb99e9a72879e6aa66373a76d8dc4259ea" [[package]] name = "crossbeam-channel" @@ -697,37 +550,27 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" - -[[package]] -name = "crypto-common" -version = "0.1.6" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "csv" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" dependencies = [ "csv-core", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "csv-core" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" dependencies = [ "memchr", ] @@ -746,66 +589,11 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "datafusion" -version = "47.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe060b978f74ab446be722adb8a274e052e005bf6dfd171caadc3abaad10080" -dependencies = [ - "arrow", - "arrow-ipc", - "arrow-schema", - "async-trait", - "bytes", - "bzip2", - "chrono", - "datafusion-catalog", - "datafusion-catalog-listing", - "datafusion-common", - "datafusion-common-runtime", - "datafusion-datasource", - "datafusion-datasource-csv", - "datafusion-datasource-json", - "datafusion-datasource-parquet", - "datafusion-execution", - "datafusion-expr", - "datafusion-expr-common", - "datafusion-functions", - "datafusion-functions-aggregate", - "datafusion-functions-nested", - "datafusion-functions-table", - "datafusion-functions-window", - "datafusion-macros", - "datafusion-optimizer", - "datafusion-physical-expr", - "datafusion-physical-expr-common", - "datafusion-physical-optimizer", - "datafusion-physical-plan", - "datafusion-session", - "datafusion-sql", - "flate2", - "futures", - 
"itertools", - "log", - "object_store", - "parking_lot", - "parquet", - "rand", - "regex", - "sqlparser", - "tempfile", - "tokio", - "url", - "uuid", - "xz2", - "zstd", -] - [[package]] name = "datafusion-catalog" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fe34f401bd03724a1f96d12108144f8cd495a3cdda2bf5e091822fb80b7e66" +checksum = "462dc9ef45e5d688aeaae49a7e310587e81b6016b9d03bace5626ad0043e5a9e" dependencies = [ "arrow", "async-trait", @@ -818,7 +606,6 @@ dependencies = [ "datafusion-physical-expr", "datafusion-physical-plan", "datafusion-session", - "datafusion-sql", "futures", "itertools", "log", @@ -829,9 +616,9 @@ dependencies = [ [[package]] name = "datafusion-catalog-listing" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4411b8e3bce5e0fc7521e44f201def2e2d5d1b5f176fb56e8cdc9942c890f00" +checksum = "1b96dbf1d728fc321817b744eb5080cdd75312faa6980b338817f68f3caa4208" dependencies = [ "arrow", "async-trait", @@ -841,44 +628,42 @@ dependencies = [ "datafusion-execution", "datafusion-expr", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", "datafusion-physical-plan", - "datafusion-session", "futures", + "itertools", "log", "object_store", - "tokio", ] [[package]] name = "datafusion-common" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0734015d81c8375eb5d4869b7f7ecccc2ee8d6cb81948ef737cd0e7b743bd69c" +checksum = "3237a6ff0d2149af4631290074289cae548c9863c885d821315d54c6673a074a" dependencies = [ "ahash", "arrow", "arrow-ipc", - "base64", + "chrono", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", "libc", "log", "object_store", "parquet", "paste", - "recursive", - "sqlparser", "tokio", "web-time", ] [[package]] name = "datafusion-common-runtime" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5167bb1d2ccbb87c6bc36c295274d7a0519b14afcfdaf401d53cbcaa4ef4968b" +checksum = "70b5e34026af55a1bfccb1ef0a763cf1f64e77c696ffcf5a128a278c31236528" dependencies = [ "futures", "log", @@ -887,56 +672,71 @@ dependencies = [ [[package]] name = "datafusion-datasource" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e602dcdf2f50c2abf297cc2203c73531e6f48b29516af7695d338cf2a778b1" +checksum = "1b2a6be734cc3785e18bbf2a7f2b22537f6b9fb960d79617775a51568c281842" dependencies = [ "arrow", - "async-compression", "async-trait", "bytes", - "bzip2", "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", - "flate2", "futures", "glob", "itertools", "log", "object_store", - "parquet", "rand", - "tempfile", "tokio", - "tokio-util", "url", - "xz2", - "zstd", +] + +[[package]] +name = "datafusion-datasource-arrow" +version = "52.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1739b9b07c9236389e09c74f770e88aff7055250774e9def7d3f4f56b3dcc7be" +dependencies = [ + "arrow", + "arrow-ipc", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + 
"datafusion-physical-plan", + "datafusion-session", + "futures", + "itertools", + "object_store", + "tokio", ] [[package]] name = "datafusion-datasource-csv" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bb2253952dc32296ed5b84077cb2e0257fea4be6373e1c376426e17ead4ef6" +checksum = "61c73bc54b518bbba7c7650299d07d58730293cfba4356f6f428cc94c20b7600" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", @@ -948,49 +748,46 @@ dependencies = [ [[package]] name = "datafusion-datasource-json" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8c7f47a5d2fe03bfa521ec9bafdb8a5c82de8377f60967c3663f00c8790352" +checksum = "37812c8494c698c4d889374ecfabbff780f1f26d9ec095dd1bddfc2a8ca12559" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", "datafusion-physical-expr-common", "datafusion-physical-plan", "datafusion-session", "futures", "object_store", - "serde_json", "tokio", ] [[package]] name = "datafusion-datasource-parquet" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27d15868ea39ed2dc266728b554f6304acd473de2142281ecfa1294bb7415923" +checksum = "2210937ecd9f0e824c397e73f4b5385c97cd1aff43ab2b5836fcfd2d321523fb" dependencies = [ "arrow", "async-trait", "bytes", - "datafusion-catalog", "datafusion-common", "datafusion-common-runtime", "datafusion-datasource", "datafusion-execution", "datafusion-expr", - "datafusion-functions-aggregate", + "datafusion-functions-aggregate-common", "datafusion-physical-expr", + "datafusion-physical-expr-adapter", "datafusion-physical-expr-common", - "datafusion-physical-optimizer", "datafusion-physical-plan", + "datafusion-pruning", "datafusion-session", "futures", "itertools", @@ -998,23 +795,24 @@ dependencies = [ "object_store", "parking_lot", "parquet", - "rand", "tokio", ] [[package]] name = "datafusion-doc" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91f8c2c5788ef32f48ff56c68e5b545527b744822a284373ac79bba1ba47292" +checksum = "2c825f969126bc2ef6a6a02d94b3c07abff871acf4d6dd759ce1255edb7923ce" [[package]] name = "datafusion-execution" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06f004d100f49a3658c9da6fb0c3a9b760062d96cd4ad82ccc3b7b69a9fb2f84" +checksum = "fa03ef05a2c2f90dd6c743e3e111078e322f4b395d20d4b4d431a245d79521ae" dependencies = [ "arrow", + "async-trait", + "chrono", "dashmap", "datafusion-common", "datafusion-expr", @@ -1029,11 +827,12 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a4e4ce3802609be38eeb607ee72f6fe86c3091460de9dbfae9e18db423b3964" +checksum = "ef33934c1f98ee695cc51192cc5f9ed3a8febee84fdbcd9131bf9d3a9a78276f" dependencies = [ "arrow", + "async-trait", "chrono", "datafusion-common", "datafusion-doc", @@ -1042,17 +841,17 @@ dependencies = [ 
"datafusion-functions-window-common", "datafusion-physical-expr-common", "indexmap", + "itertools", "paste", - "recursive", "serde_json", "sqlparser", ] [[package]] name = "datafusion-expr-common" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ac9cf3b22bbbae8cdf8ceb33039107fde1b5492693168f13bd566b1bcc839" +checksum = "000c98206e3dd47d2939a94b6c67af4bfa6732dd668ac4fafdbde408fd9134ea" dependencies = [ "arrow", "datafusion-common", @@ -1063,17 +862,27 @@ dependencies = [ [[package]] name = "datafusion-ffi" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cf3fe9ab492c56daeb7beed526690d33622d388b8870472e0b7b7f55490338c" +checksum = "30f57f7f63a25a0b78b3f2a5e18c0ecbd54851b64064ac0d5a9eb05efd5586d2" dependencies = [ "abi_stable", "arrow", "arrow-schema", "async-ffi", "async-trait", - "datafusion", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", "datafusion-proto", + "datafusion-proto-common", + "datafusion-session", "futures", "log", "prost", @@ -1081,18 +890,35 @@ dependencies = [ "tokio", ] +[[package]] +name = "datafusion-ffi-example" +version = "0.2.0" +dependencies = [ + "arrow", + "arrow-array", + "arrow-schema", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-expr", + "datafusion-ffi", + "datafusion-functions-aggregate", + "datafusion-functions-window", + "pyo3", + "pyo3-build-config", +] + [[package]] name = "datafusion-functions" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ddf0a0a2db5d2918349c978d42d80926c6aa2459cd8a3c533a84ec4bb63479e" +checksum = "379b01418ab95ca947014066248c22139fe9af9289354de10b445bd000d5d276" dependencies = [ "arrow", "arrow-buffer", "base64", - "blake2", - "blake3", "chrono", + "chrono-tz", "datafusion-common", "datafusion-doc", "datafusion-execution", @@ -1102,19 +928,18 @@ dependencies = [ "hex", "itertools", "log", - "md-5", + "num-traits", "rand", "regex", - "sha2", "unicode-segmentation", "uuid", ] [[package]] name = "datafusion-functions-aggregate" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "408a05dafdc70d05a38a29005b8b15e21b0238734dab1e98483fcb58038c5aba" +checksum = "fd00d5454ba4c3f8ebbd04bd6a6a9dc7ced7c56d883f70f2076c188be8459e4c" dependencies = [ "ahash", "arrow", @@ -1133,9 +958,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate-common" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "756d21da2dd6c9bef97af1504970ff56cbf35d03fbd4ffd62827f02f4d2279d4" +checksum = "aec06b380729a87210a4e11f555ec2d729a328142253f8d557b87593622ecc9f" dependencies = [ "ahash", "arrow", @@ -1144,32 +969,11 @@ dependencies = [ "datafusion-physical-expr-common", ] -[[package]] -name = "datafusion-functions-nested" -version = "47.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d8d50f6334b378930d992d801a10ac5b3e93b846b39e4a05085742572844537" -dependencies = [ - "arrow", - "arrow-ord", - "datafusion-common", - "datafusion-doc", - "datafusion-execution", - "datafusion-expr", - "datafusion-functions", - 
"datafusion-functions-aggregate", - "datafusion-macros", - "datafusion-physical-expr-common", - "itertools", - "log", - "paste", -] - [[package]] name = "datafusion-functions-table" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9a97220736c8fff1446e936be90d57216c06f28969f9ffd3b72ac93c958c8a" +checksum = "e9a0d20e2b887e11bee24f7734d780a2588b925796ac741c3118dd06d5aa77f0" dependencies = [ "arrow", "async-trait", @@ -1183,10 +987,11 @@ dependencies = [ [[package]] name = "datafusion-functions-window" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefc2d77646e1aadd1d6a9c40088937aedec04e68c5f0465939912e1291f8193" +checksum = "d3414b0a07e39b6979fe3a69c7aa79a9f1369f1d5c8e52146e66058be1b285ee" dependencies = [ + "arrow", "datafusion-common", "datafusion-doc", "datafusion-expr", @@ -1200,9 +1005,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window-common" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4aff082c42fa6da99ce0698c85addd5252928c908eb087ca3cfa64ff16b313" +checksum = "5bf2feae63cd4754e31add64ce75cae07d015bce4bb41cd09872f93add32523a" dependencies = [ "datafusion-common", "datafusion-physical-expr-common", @@ -1210,39 +1015,20 @@ dependencies = [ [[package]] name = "datafusion-macros" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df6f88d7ee27daf8b108ba910f9015176b36fbc72902b1ca5c2a5f1d1717e1a1" +checksum = "c4fe888aeb6a095c4bcbe8ac1874c4b9a4c7ffa2ba849db7922683ba20875aaf" dependencies = [ - "datafusion-expr", + "datafusion-doc", "quote", - "syn 2.0.101", -] - -[[package]] -name = "datafusion-optimizer" -version = "47.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084d9f979c4b155346d3c34b18f4256e6904ded508e9554d90fed416415c3515" -dependencies = [ - "arrow", - "chrono", - "datafusion-common", - "datafusion-expr", - "datafusion-physical-expr", - "indexmap", - "itertools", - "log", - "recursive", - "regex", - "regex-syntax", + "syn 2.0.116", ] [[package]] name = "datafusion-physical-expr" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c536062b0076f4e30084065d805f389f9fe38af0ca75bcbac86bc5e9fbab65" +checksum = "0bb028323dd4efd049dd8a78d78fe81b2b969447b39c51424167f973ac5811d9" dependencies = [ "ahash", "arrow", @@ -1252,69 +1038,70 @@ dependencies = [ "datafusion-functions-aggregate-common", "datafusion-physical-expr-common", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", "itertools", - "log", + "parking_lot", "paste", "petgraph", + "tokio", ] [[package]] -name = "datafusion-physical-expr-common" -version = "47.0.0" +name = "datafusion-physical-expr-adapter" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a92b53b3193fac1916a1c5b8e3f4347c526f6822e56b71faa5fb372327a863" +checksum = "78fe0826aef7eab6b4b61533d811234a7a9e5e458331ebbf94152a51fc8ab433" dependencies = [ - "ahash", "arrow", "datafusion-common", - "datafusion-expr-common", - "hashbrown 0.14.5", + "datafusion-expr", + "datafusion-functions", + "datafusion-physical-expr", + "datafusion-physical-expr-common", "itertools", ] [[package]] -name = "datafusion-physical-optimizer" -version = "47.0.0" +name = "datafusion-physical-expr-common" +version = "52.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa0a5ac94c7cf3da97bedabd69d6bbca12aef84b9b37e6e9e8c25286511b5e2" +checksum = "cfccd388620734c661bd8b7ca93c44cdd59fecc9b550eea416a78ffcbb29475f" dependencies = [ + "ahash", "arrow", + "chrono", "datafusion-common", - "datafusion-execution", - "datafusion-expr", "datafusion-expr-common", - "datafusion-physical-expr", - "datafusion-physical-expr-common", - "datafusion-physical-plan", + "hashbrown 0.16.1", + "indexmap", "itertools", - "log", - "recursive", + "parking_lot", ] [[package]] name = "datafusion-physical-plan" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "690c615db468c2e5fe5085b232d8b1c088299a6c63d87fd960a354a71f7acb55" +checksum = "0e1098760fb29127c24cc9ade3277051dc73c9ed0ac0131bd7bcd742e0ad7470" dependencies = [ "ahash", "arrow", "arrow-ord", "arrow-schema", "async-trait", - "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", + "datafusion-functions", + "datafusion-functions-aggregate-common", "datafusion-functions-window-common", "datafusion-physical-expr", "datafusion-physical-expr-common", "futures", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap", "itertools", "log", @@ -1325,15 +1112,26 @@ dependencies = [ [[package]] name = "datafusion-proto" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a1afb2bdb05de7ff65be6883ebfd4ec027bd9f1f21c46aa3afd01927160a83" +checksum = "0cf75daf56aa6b1c6867cc33ff0fb035d517d6d06737fd355a3e1ef67cba6e7a" dependencies = [ "arrow", "chrono", - "datafusion", + "datafusion-catalog", + "datafusion-catalog-listing", "datafusion-common", + "datafusion-datasource", + "datafusion-datasource-arrow", + "datafusion-datasource-csv", + "datafusion-datasource-json", + "datafusion-datasource-parquet", + "datafusion-execution", "datafusion-expr", + "datafusion-functions-table", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", "datafusion-proto-common", "object_store", "prost", @@ -1341,9 +1139,9 @@ dependencies = [ [[package]] name = "datafusion-proto-common" -version = "47.0.0" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b7a5876ebd6b564fb9a1fd2c3a2a9686b787071a256b47e4708f0916f9e46f" +checksum = "12a0cb3cce232a3de0d14ef44b58a6537aeb1362cfb6cf4d808691ddbb918956" dependencies = [ "arrow", "datafusion-common", @@ -1351,55 +1149,34 @@ dependencies = [ ] [[package]] -name = "datafusion-session" -version = "47.0.0" +name = "datafusion-pruning" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad229a134c7406c057ece00c8743c0c34b97f4e72f78b475fe17b66c5e14fa4f" +checksum = "64d0fef4201777b52951edec086c21a5b246f3c82621569ddb4a26f488bc38a9" dependencies = [ "arrow", - "async-trait", - "dashmap", "datafusion-common", - "datafusion-common-runtime", - "datafusion-execution", - "datafusion-expr", + "datafusion-datasource", + "datafusion-expr-common", "datafusion-physical-expr", + "datafusion-physical-expr-common", "datafusion-physical-plan", - "datafusion-sql", - "futures", "itertools", "log", - "object_store", - "parking_lot", - "tokio", ] [[package]] -name = "datafusion-sql" -version = "47.0.0" +name = "datafusion-session" +version = "52.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"64f6ab28b72b664c21a27b22a2ff815fd390ed224c26e89a93b5a8154a4e8607" +checksum = "f71f1e39e8f2acbf1c63b0e93756c2e970a64729dab70ac789587d6237c4fde0" dependencies = [ - "arrow", - "bigdecimal", + "async-trait", "datafusion-common", + "datafusion-execution", "datafusion-expr", - "indexmap", - "log", - "recursive", - "regex", - "sqlparser", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", - "subtle", + "datafusion-physical-plan", + "parking_lot", ] [[package]] @@ -1410,7 +1187,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -1427,9 +1204,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", "windows-sys", @@ -1442,17 +1219,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] -name = "ffi-table-provider" -version = "0.1.0" -dependencies = [ - "arrow", - "arrow-array", - "arrow-schema", - "datafusion", - "datafusion-ffi", - "pyo3", - "pyo3-build-config", -] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fixedbitset" @@ -1462,9 +1232,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flatbuffers" -version = "25.2.10" +version = "25.12.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1045398c1bfd89168b5fd3f1fc11f6e70b34f6f66300c87d44d3de849463abf1" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" dependencies = [ "bitflags", "rustc_version", @@ -1472,35 +1242,40 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ - "crc32fast", - "libz-rs-sys", "miniz_oxide", + "zlib-rs", ] [[package]] -name = "fnv" -version = "1.0.7" +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ 
"percent-encoding", ] [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1513,9 +1288,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1523,15 +1298,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1540,38 +1315,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -1581,7 +1356,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -1595,59 +1369,57 @@ dependencies = [ ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "getrandom" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = 
"ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ - "typenum", - "version_check", + "cfg-if", + "libc", + "wasi", ] [[package]] name = "getrandom" -version = "0.2.16" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "r-efi", + "wasip2", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", + "wasip3", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "half" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", "num-traits", + "zerocopy", ] [[package]] @@ -1655,16 +1427,26 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "ahash", - "allocator-api2", + "foldhash 0.1.5", ] [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash 0.2.0", +] [[package]] name = "heck" @@ -1680,26 +1462,25 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1721,9 +1502,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", @@ -1734,9 +1515,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -1747,11 +1528,10 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -1762,42 +1542,38 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ - "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", - "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -1805,11 +1581,17 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -1828,19 +1610,24 @@ dependencies = [ 
[[package]] name = "indexmap" -version = "2.9.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] name = "indoc" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" +checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706" +dependencies = [ + "rustversion", +] [[package]] name = "integer-encoding" @@ -1859,41 +1646,41 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] -name = "lazy_static" -version = "1.5.0" +name = "leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "lexical-core" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b765c31809609075565a70b4b71402281283aeda7ecaf4818ac14a7b2ade8958" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" dependencies = [ "lexical-parse-float", "lexical-parse-integer", @@ -1904,60 +1691,53 @@ dependencies = [ [[package]] name = "lexical-parse-float" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de6f9cb01fb0b08060209a057c048fcbab8717b4c1ecd2eac66ebfe39a65b0f2" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" dependencies = [ "lexical-parse-integer", "lexical-util", - "static_assertions", ] [[package]] name = "lexical-parse-integer" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72207aae22fc0a121ba7b6d479e42cbfea549af1479c3f3a4f12c70dd66df12e" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" dependencies = [ "lexical-util", - "static_assertions", ] [[package]] name = "lexical-util" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a82e24bf537fd24c177ffbbdc6ebcc8d54732c35b50a3f28cc3f4e4c949a0b3" -dependencies = [ - "static_assertions", -] +checksum = 
"2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" [[package]] name = "lexical-write-float" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5afc668a27f460fb45a81a757b6bf2f43c2d7e30cb5a2dcd3abf294c78d62bd" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" dependencies = [ "lexical-util", "lexical-write-integer", - "static_assertions", ] [[package]] name = "lexical-write-integer" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629ddff1a914a836fb245616a7888b62903aae58fa771e1d83943035efa0f978" +checksum = "409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" dependencies = [ "lexical-util", - "static_assertions", ] [[package]] name = "libc" -version = "0.2.172" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -1971,82 +1751,51 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" - -[[package]] -name = "libz-rs-sys" -version = "0.5.0" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6489ca9bd760fe9642d7644e827b0c9add07df89857b0416ee15c1cc1a3b8c5a" -dependencies = [ - "zlib-rs", -] +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.27" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "lz4_flex" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" -dependencies = [ - "twox-hash 1.6.3", -] - -[[package]] -name = "lzma-sys" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - -[[package]] -name = "md-5" -version = "0.10.6" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +checksum = "ab6473172471198271ff72e9379150e9dfd70d8e533e0752a27e515b48dd375e" dependencies = [ - "cfg-if", - "digest", + "twox-hash", ] [[package]] name = "memchr" -version = "2.7.4" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memoffset" @@ -2059,25 +1808,12 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", -] - -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", + "simd-adler32", ] [[package]] @@ -2092,41 +1828,19 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.45" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ - "autocfg", - "num-integer", "num-traits", ] [[package]] -name = "num-rational" -version = "0.4.2" +name = "num-integer" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "num-bigint", - "num-integer", "num-traits", ] @@ -2140,20 +1854,11 @@ dependencies = [ "libm", ] -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - [[package]] name = "object_store" -version = "0.12.1" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94ac16b433c0ccf75326388c893d2835ab7457ea35ab8ba5d745c053ef5fa16" +checksum = "fbfbfff40aeccab00ec8a910b57ca8ecf4319b335c542f2edcd19dd25a1e2a00" dependencies = [ "async-trait", "bytes", @@ -2190,9 +1895,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -2200,22 +1905,22 @@ dependencies = [ [[package]] name = 
"parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-link", ] [[package]] name = "parquet" -version = "55.1.0" +version = "57.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be7b2d778f6b841d37083ebdf32e33a524acde1266b5884a8ca29bf00dfa1231" +checksum = "6ee96b29972a257b855ff2341b37e61af5f12d6af1158b6dcdb5b31ea07bb3cb" dependencies = [ "ahash", "arrow-array", @@ -2232,10 +1937,11 @@ dependencies = [ "flate2", "futures", "half", - "hashbrown 0.15.3", + "hashbrown 0.16.1", "lz4_flex", - "num", "num-bigint", + "num-integer", + "num-traits", "object_store", "paste", "seq-macro", @@ -2243,19 +1949,10 @@ dependencies = [ "snap", "thrift", "tokio", - "twox-hash 2.1.0", + "twox-hash", "zstd", ] -[[package]] -name = "parse-zoneinfo" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" -dependencies = [ - "regex", -] - [[package]] name = "paste" version = "1.0.15" @@ -2264,54 +1961,36 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" -version = "0.7.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", + "hashbrown 0.15.5", "indexmap", + "serde", ] [[package]] name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" dependencies = [ "phf_shared", - "rand", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" dependencies = [ "siphasher", ] @@ -2322,12 +2001,6 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" -[[package]] -name = "pin-utils" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - [[package]] name = "pkg-config" version = "0.3.32" @@ -2336,15 +2009,15 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -2358,20 +2031,30 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.116", +] + [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -2379,33 +2062,23 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "psm" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" -dependencies = [ - "cc", + "syn 2.0.116", ] [[package]] name = "pyo3" -version = "0.23.5" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7778bffd85cf38175ac1f545509665d0b9b92a198ca7941f131f85f7a4f9a872" +checksum = "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" dependencies = [ - "cfg-if", "indoc", "libc", "memoffset", @@ -2419,19 +2092,18 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.23.5" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f6cbe86ef3bf18998d9df6e0f3fc1050a8c5efa409bf712e661a4366e010fb" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" dependencies = [ - "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.23.5" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f1b4c431c0bb1c8fb0a338709859eed0d030ff6daa34368d3b152a63dfdd8d" +checksum = 
"025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" dependencies = [ "libc", "pyo3-build-config", @@ -2439,60 +2111,59 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.23.5" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc2201328f63c4710f68abdf653c89d8dbc2858b88c5d88b0ff38a75288a9da" +checksum = "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "pyo3-macros-backend" -version = "0.23.5" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca6726ad0f3da9c9de093d6f116a93c1a38e417ed73bf138472cf4064f72028" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" dependencies = [ "heck", "proc-macro2", "pyo3-build-config", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" -version = "0.8.5" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" -version = "0.3.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core", @@ -2500,47 +2171,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.16", -] - -[[package]] -name = "recursive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0786a43debb760f491b1bc0269fe5e84155353c67482b9e60d0cfb596054b43e" -dependencies = [ - "recursive-proc-macro-impl", - "stacker", -] - -[[package]] -name = "recursive-proc-macro-impl" -version = "0.1.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ - "quote", - "syn 2.0.101", + "getrandom 0.3.4", ] [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.11.1" 
+version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -2550,9 +2201,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -2561,9 +2212,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "repr_offset" @@ -2574,12 +2225,6 @@ dependencies = [ "tstr", ] -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustc_version" version = "0.4.1" @@ -2591,9 +2236,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", @@ -2604,15 +2249,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -2631,9 +2276,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "seq-macro" @@ -2643,45 +2288,45 @@ checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", -] - -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", + "serde_core", + "zmij", ] [[package]] @@ -2690,6 +2335,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + [[package]] name = "simdutf8" version = "0.1.5" @@ -2698,24 +2349,21 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "snap" @@ -2725,12 +2373,11 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "sqlparser" -version = "0.55.0" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4521174166bac1ff04fe16ef4524c70144cd29682a45978978ca3d7f4e0be11" +checksum = "4591acadbcf52f0af60eafbb2c003232b2b4cd8de5f0e9437cb8b1b59046cc0f" dependencies = [ "log", - "recursive", "sqlparser_derive", ] @@ -2742,39 +2389,14 @@ checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "stacker" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cddb07e32ddb770749da91081d8d0ac3a16f1a569a18b20348cd371f5dead06b" -dependencies = [ - "cc", - "cfg-if", - "libc", - "psm", - "windows-sys", -] - -[[package]] -name = 
"static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "subtle" -version = "2.6.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "syn" @@ -2789,9 +2411,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb" dependencies = [ "proc-macro2", "quote", @@ -2806,23 +2428,23 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "target-lexicon" -version = "0.12.16" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" +checksum = "adb6935a6f5c20170eeceb1a3835a49e12e19d792f6dd344ccc76a985ca5a6ca" [[package]] name = "tempfile" -version = "3.20.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.4.1", "once_cell", "rustix", "windows-sys", @@ -2830,22 +2452,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -2870,9 +2492,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -2880,11 +2502,10 @@ dependencies = [ [[package]] name = "tokio" -version = "1.45.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ - "backtrace", "bytes", "pin-project-lite", "tokio-macros", @@ -2892,33 +2513,20 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" 
+checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "tokio-util" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", + "syn 2.0.116", ] [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -2927,20 +2535,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", ] @@ -2962,19 +2570,9 @@ checksum = "e78122066b0cb818b8afd08f7ed22f7fdbc3e90815035726f0840d0d26c0747a" [[package]] name = "twox-hash" -version = "1.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", - "static_assertions", -] - -[[package]] -name = "twox-hash" -version = "2.1.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7b17f197b3050ba473acf9181f7b1d3b66d1cf7356c6cc57886662276e65908" +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" [[package]] name = "typed-arena" @@ -2983,16 +2581,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" [[package]] -name = "typenum" -version = "1.18.0" +name = "typewit" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-segmentation" @@ -3002,9 +2600,15 @@ checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unindent" @@ -3014,13 +2618,14 @@ checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" [[package]] name = "url" -version = "2.5.4" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] @@ -3031,11 +2636,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.16.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.4.1", "js-sys", "wasm-bindgen", ] @@ -3058,52 +2663,49 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = "wasip2" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] -name = "wasm-bindgen" -version = "0.2.100" +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", + "wit-bindgen", ] [[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" +name = "wasm-bindgen" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.101", + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -3112,9 +2714,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = 
"008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3122,31 +2724,65 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.101", - "wasm-bindgen-backend", + "syn 2.0.116", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -3180,9 +2816,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys", ] @@ -3195,9 +2831,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", @@ -3208,154 +2844,159 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-targets", + "windows-link", ] [[package]] -name = "windows-targets" -version = "0.52.6" +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "wit-bindgen-rust-macro", ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" +name = "wit-bindgen-core" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = 
"ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] [[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" +name = "wit-bindgen-rust" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.116", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" +name = "wit-bindgen-rust-macro" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.116", + "wit-bindgen-core", + "wit-bindgen-rust", +] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-component" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ + "anyhow", "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", ] [[package]] -name = "writeable" -version = "0.6.1" +name = "wit-parser" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] -name = "xz2" -version = "0.1.7" +name = "writeable" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" -dependencies = [ - "lzma-sys", -] +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -3363,34 +3004,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name 
= "zerocopy-derive" -version = "0.8.25" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] @@ -3410,15 +3051,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", "synstructure", ] [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -3427,9 +3068,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -3438,20 +3079,26 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.116", ] [[package]] name = "zlib-rs" -version = "0.5.0" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c745c48e1007337ed136dc99df34128b9faa6ed542d80a1c673cf55a6d7236c8" + +[[package]] +name = "zmij" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868b928d7949e09af2f6086dfc1e01936064cc7a819253bce650d4e2a2d63ba8" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" @@ -3473,9 +3120,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/examples/datafusion-ffi-example/Cargo.toml b/examples/datafusion-ffi-example/Cargo.toml index 0e17567b9..15c11ea49 100644 --- a/examples/datafusion-ffi-example/Cargo.toml +++ b/examples/datafusion-ffi-example/Cargo.toml @@ -16,20 +16,30 @@ # under the License. 
[package] -name = "ffi-table-provider" -version = "0.1.0" -edition = "2021" +name = "datafusion-ffi-example" +version = "0.2.0" +edition = "2024" [dependencies] -datafusion = { version = "47.0.0" } -datafusion-ffi = { version = "47.0.0" } -pyo3 = { version = "0.23", features = ["extension-module", "abi3", "abi3-py39"] } -arrow = { version = "55.0.0" } -arrow-array = { version = "55.0.0" } -arrow-schema = { version = "55.0.0" } +datafusion-catalog = { version = "52", default-features = false } +datafusion-common = { version = "52", default-features = false } +datafusion-functions-aggregate = { version = "52" } +datafusion-functions-window = { version = "52" } +datafusion-expr = { version = "52" } +datafusion-ffi = { version = "52" } + +pyo3 = { version = "0.26", features = [ + "extension-module", + "abi3", + "abi3-py39", +] } +arrow = { version = "57" } +arrow-array = { version = "57" } +arrow-schema = { version = "57" } +async-trait = "0.1.89" [build-dependencies] -pyo3-build-config = "0.23" +pyo3-build-config = "0.26" [lib] name = "datafusion_ffi_example" diff --git a/examples/datafusion-ffi-example/pyproject.toml b/examples/datafusion-ffi-example/pyproject.toml index 0c54df95c..7f85e9487 100644 --- a/examples/datafusion-ffi-example/pyproject.toml +++ b/examples/datafusion-ffi-example/pyproject.toml @@ -23,9 +23,9 @@ build-backend = "maturin" name = "datafusion_ffi_example" requires-python = ">=3.9" classifiers = [ - "Programming Language :: Rust", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Rust", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", ] dynamic = ["version"] diff --git a/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py b/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py new file mode 100644 index 000000000..7ea6b295c --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
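[Editor's note] The new `_test_aggregate_udf.py` below exercises the FFI aggregate UDF both through SQL registration and by calling it directly on a DataFrame. As a minimal sketch of the pattern the tests verify (not part of the patch; assumes the `datafusion_ffi_example` wheel built from this example crate is installed):

```python
from datafusion import SessionContext, udaf
from datafusion_ffi_example import MySumUDF

ctx = SessionContext()
# Wrap the FFI-backed aggregate so DataFusion can plan it like any other
# UDAF; the SQL name ("my_custom_sum") comes from the Rust implementation.
my_sum = udaf(MySumUDF())
ctx.register_udaf(my_sum)
```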
+ +from __future__ import annotations + +import pyarrow as pa +from datafusion import SessionContext, col, udaf +from datafusion_ffi_example import MySumUDF + + +def setup_context_with_table(): + ctx = SessionContext() + + # Pick numbers here so we get the same value in both groups + # since we cannot be certain of the output order of batches + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, 2, 3, None], type=pa.int64()), + pa.array([1, 1, 2, 2], type=pa.int64()), + ], + names=["a", "b"], + ) + ctx.register_record_batches("test_table", [[batch]]) + return ctx + + +def test_ffi_aggregate_register(): + ctx = setup_context_with_table() + my_udaf = udaf(MySumUDF()) + ctx.register_udaf(my_udaf) + + result = ctx.sql("select my_custom_sum(a) from test_table group by b").collect() + + assert len(result) == 2 + assert result[0].num_columns == 1 + + result = [r.column(0) for r in result] + expected = [ + pa.array([3], type=pa.int64()), + pa.array([3], type=pa.int64()), + ] + + assert result == expected + + +def test_ffi_aggregate_call_directly(): + ctx = setup_context_with_table() + my_udaf = udaf(MySumUDF()) + + result = ( + ctx.table("test_table").aggregate([col("b")], [my_udaf(col("a"))]).collect() + ) + + assert len(result) == 2 + assert result[0].num_columns == 2 + + result = [r.column(1) for r in result] + expected = [ + pa.array([3], type=pa.int64()), + pa.array([3], type=pa.int64()), + ] + + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py b/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py new file mode 100644 index 000000000..a862b23ba --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
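[Editor's note] The catalog-provider tests below register a Rust-backed `CatalogProvider` (and a `CatalogProviderList`) through the PyCapsule FFI interface. The core flow they verify, sketched here under the same wheel-is-installed assumption:

```python
from datafusion import SessionContext
from datafusion_ffi_example import MyCatalogProvider

ctx = SessionContext()
# The provider object exposes __datafusion_catalog_provider__, which
# register_catalog_provider detects via the capsule protocol.
ctx.register_catalog_provider("ffi_catalog", MyCatalogProvider())
batches = ctx.sql("SELECT * FROM ffi_catalog.my_schema.my_table").collect()
```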
+ +from __future__ import annotations + +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import SessionContext, Table +from datafusion.catalog import Schema +from datafusion_ffi_example import MyCatalogProvider, MyCatalogProviderList + + +def create_test_dataset() -> Table: + """Create a simple test dataset.""" + batch = pa.RecordBatch.from_arrays( + [pa.array([100, 200, 300]), pa.array([1.1, 2.2, 3.3])], + names=["id", "value"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_ffi_catalog_provider_list(inner_capsule: bool) -> None: + """Test basic FFI CatalogProviderList functionality.""" + ctx = SessionContext() + + # Register FFI catalog + catalog_provider_list = MyCatalogProviderList() + if inner_capsule: + catalog_provider_list = ( + catalog_provider_list.__datafusion_catalog_provider_list__(ctx) + ) + + ctx.register_catalog_provider_list(catalog_provider_list) + + # Verify the catalog exists + catalog = ctx.catalog("auto_ffi_catalog") + schema_names = catalog.names() + assert "my_schema" in schema_names + + ctx.register_catalog_provider("second", MyCatalogProvider()) + + assert ctx.catalog_names() == {"auto_ffi_catalog", "second"} + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_ffi_catalog_provider_basic(inner_capsule: bool) -> None: + """Test basic FFI CatalogProvider functionality.""" + ctx = SessionContext() + + # Register FFI catalog + catalog_provider = MyCatalogProvider() + if inner_capsule: + catalog_provider = catalog_provider.__datafusion_catalog_provider__(ctx) + + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + # Verify the catalog exists + catalog = ctx.catalog("ffi_catalog") + schema_names = catalog.names() + assert "my_schema" in schema_names + + # Query the pre-populated table + result = ctx.sql("SELECT * FROM ffi_catalog.my_schema.my_table").collect() + assert len(result) == 2 + assert result[0].num_columns == 2 + + +def test_ffi_catalog_provider_register_schema(): + """Test registering additional schemas to FFI CatalogProvider.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + catalog = ctx.catalog("ffi_catalog") + + # Register a new memory schema + new_schema = Schema.memory_schema() + catalog.register_schema("additional_schema", new_schema) + + # Verify the schema was registered + assert "additional_schema" in catalog.names() + + # Add a table to the new schema + new_schema.register_table("new_table", create_test_dataset()) + + # Query the new table + result = ctx.sql("SELECT * FROM ffi_catalog.additional_schema.new_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_catalog_provider_deregister_schema(): + """Test deregistering schemas from FFI CatalogProvider.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + catalog = ctx.catalog("ffi_catalog") + + # Register two schemas + schema1 = Schema.memory_schema() + schema2 = Schema.memory_schema() + catalog.register_schema("temp_schema1", schema1) + catalog.register_schema("temp_schema2", schema2) + + # Verify both exist + names = catalog.names() + assert "temp_schema1" in names + assert "temp_schema2" in names + + # Deregister one schema + catalog.deregister_schema("temp_schema1") + + # Verify it's gone + names = 
catalog.names() + assert "temp_schema1" not in names + assert "temp_schema2" in names diff --git a/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py b/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py new file mode 100644 index 000000000..0c949c34a --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +import pyarrow as pa +from datafusion import SessionContext, col, udf +from datafusion_ffi_example import IsNullUDF + + +def setup_context_with_table(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3, None])], + names=["a"], + ) + ctx.register_record_batches("test_table", [[batch]]) + return ctx + + +def test_ffi_scalar_register(): + ctx = setup_context_with_table() + my_udf = udf(IsNullUDF()) + ctx.register_udf(my_udf) + + result = ctx.sql("select my_custom_is_null(a) from test_table").collect() + + assert len(result) == 1 + assert result[0].num_columns == 1 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([False, False, False, True], type=pa.bool_()), + ] + + assert result == expected + + +def test_ffi_scalar_call_directly(): + ctx = setup_context_with_table() + my_udf = udf(IsNullUDF()) + + result = ctx.table("test_table").select(my_udf(col("a"))).collect() + + assert len(result) == 1 + assert result[0].num_columns == 1 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([False, False, False, True], type=pa.bool_()), + ] + + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py b/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py new file mode 100644 index 000000000..93449c660 --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py @@ -0,0 +1,232 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
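[Editor's note] Similarly, the schema-provider tests below attach a Rust `SchemaProvider` to an existing catalog rather than registering a whole catalog. A minimal sketch of that flow (not part of the patch):

```python
from datafusion import SessionContext
from datafusion_ffi_example import FixedSchemaProvider

ctx = SessionContext()
# Attach the FFI-backed schema to the default catalog; it comes
# pre-populated with "my_table".
ctx.catalog().register_schema("ffi_schema", FixedSchemaProvider())
assert "my_table" in ctx.catalog().schema("ffi_schema").names()
```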
+ +from __future__ import annotations + +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import SessionContext, Table +from datafusion.catalog import Schema +from datafusion_ffi_example import FixedSchemaProvider, MyCatalogProvider + + +def create_test_dataset() -> Table: + """Create a simple test dataset.""" + batch = pa.RecordBatch.from_arrays( + [pa.array([100, 200, 300]), pa.array([1.1, 2.2, 3.3])], + names=["id", "value"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_schema_provider_extract_values(inner_capsule: bool) -> None: + ctx = SessionContext() + + my_schema_name = "my_schema" + + schema_provider = FixedSchemaProvider() + if inner_capsule: + schema_provider = schema_provider.__datafusion_schema_provider__(ctx) + + ctx.catalog().register_schema(my_schema_name, schema_provider) + + expected_schema_name = "my_schema" + expected_table_name = "my_table" + expected_table_columns = ["units", "price"] + + default_catalog = ctx.catalog() + + catalog_schemas = default_catalog.names() + assert expected_schema_name in catalog_schemas + my_schema = default_catalog.schema(expected_schema_name) + assert expected_table_name in my_schema.names() + my_table = my_schema.table(expected_table_name) + assert expected_table_columns == my_table.schema.names + + result = ctx.table(f"{expected_schema_name}.{expected_table_name}").collect() + assert len(result) == 2 + + col0_result = [r.column(0) for r in result] + col1_result = [r.column(1) for r in result] + expected_col0 = [ + pa.array([10, 20, 30], type=pa.int32()), + pa.array([5, 7], type=pa.int32()), + ] + expected_col1 = [ + pa.array([1, 2, 5], type=pa.float64()), + pa.array([1.5, 2.5], type=pa.float64()), + ] + assert col0_result == expected_col0 + assert col1_result == expected_col1 + + +def test_ffi_schema_provider_basic(): + """Test basic FFI SchemaProvider functionality.""" + ctx = SessionContext() + + # Register FFI schema + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + # Verify the schema exists + schema = ctx.catalog().schema("ffi_schema") + table_names = schema.names() + assert "my_table" in table_names + + # Query the pre-populated table + result = ctx.sql("SELECT * FROM ffi_schema.my_table").collect() + assert len(result) == 2 + assert result[0].num_columns == 2 + + +def test_ffi_schema_provider_register_table(): + """Test registering additional tables to FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Register a new table + schema.register_table("additional_table", create_test_dataset()) + + # Verify the table was registered + assert "additional_table" in schema.names() + + # Query the new table + result = ctx.sql("SELECT * FROM ffi_schema.additional_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + assert result[0].column(1) == pa.array([1.1, 2.2, 3.3]) + + +def test_ffi_schema_provider_deregister_table(): + """Test deregistering tables from FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Register two tables + schema.register_table("temp_table1", create_test_dataset()) + 
schema.register_table("temp_table2", create_test_dataset()) + + # Verify both exist + names = schema.names() + assert "temp_table1" in names + assert "temp_table2" in names + + # Deregister one table + schema.deregister_table("temp_table1") + + # Verify it's gone + names = schema.names() + assert "temp_table1" not in names + assert "temp_table2" in names + + +def test_mixed_ffi_and_python_providers(): + """Test mixing FFI and Python providers in the same catalog/schema.""" + ctx = SessionContext() + + # Register FFI catalog + ffi_catalog = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", ffi_catalog) + + # Register Python memory schema to FFI catalog + python_schema = Schema.memory_schema() + ctx.catalog("ffi_catalog").register_schema("python_schema", python_schema) + + # Add table to Python schema + python_schema.register_table("python_table", create_test_dataset()) + + # Query both FFI table and Python table + result_ffi = ctx.sql("SELECT * FROM ffi_catalog.my_schema.my_table").collect() + assert len(result_ffi) == 2 + + result_python = ctx.sql( + "SELECT * FROM ffi_catalog.python_schema.python_table" + ).collect() + assert len(result_python) == 1 + assert result_python[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_catalog_with_multiple_schemas(): + """Test FFI catalog with multiple schemas of different types.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("multi_catalog", catalog_provider) + + catalog = ctx.catalog("multi_catalog") + + # Register different types of schemas + ffi_schema = FixedSchemaProvider() + memory_schema = Schema.memory_schema() + + catalog.register_schema("ffi_schema", ffi_schema) + catalog.register_schema("memory_schema", memory_schema) + + # Add tables to memory schema + memory_schema.register_table("mem_table", create_test_dataset()) + + # Verify all schemas exist + names = catalog.names() + assert "my_schema" in names # Pre-populated + assert "ffi_schema" in names + assert "memory_schema" in names + + # Query tables from each schema + result = ctx.sql("SELECT * FROM multi_catalog.my_schema.my_table").collect() + assert len(result) == 2 + + result = ctx.sql("SELECT * FROM multi_catalog.ffi_schema.my_table").collect() + assert len(result) == 2 + + result = ctx.sql("SELECT * FROM multi_catalog.memory_schema.mem_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_schema_table_exist(): + """Test table_exist method on FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Check pre-populated table + assert schema.table_exist("my_table") + + # Check non-existent table + assert not schema.table_exist("nonexistent_table") + + # Register a new table and check + schema.register_table("new_table", create_test_dataset()) + assert schema.table_exist("new_table") + + # Deregister and check + schema.deregister_table("new_table") + assert not schema.table_exist("new_table") diff --git a/examples/datafusion-ffi-example/python/tests/_test_table_function.py b/examples/datafusion-ffi-example/python/tests/_test_table_function.py index f3c56a90a..bf5aae3bd 100644 --- a/examples/datafusion-ffi-example/python/tests/_test_table_function.py +++ b/examples/datafusion-ffi-example/python/tests/_test_table_function.py @@ -27,9 +27,10 @@ from datafusion.context import 
TableProviderExportable -def test_ffi_table_function_register(): +def test_ffi_table_function_register() -> None: ctx = SessionContext() table_func = MyTableFunction() + table_udtf = udtf(table_func, "my_table_func") ctx.register_udtf(table_udtf) result = ctx.sql("select * from my_table_func()").collect() @@ -53,7 +54,7 @@ def test_ffi_table_function_call_directly(): table_udtf = udtf(table_func, "my_table_func") my_table = table_udtf() - ctx.register_table_provider("t", my_table) + ctx.register_table("t", my_table) result = ctx.table("t").collect() assert len(result) == 2 diff --git a/examples/datafusion-ffi-example/python/tests/_test_table_provider.py b/examples/datafusion-ffi-example/python/tests/_test_table_provider.py index 6b24da06c..fc77d2d3b 100644 --- a/examples/datafusion-ffi-example/python/tests/_test_table_provider.py +++ b/examples/datafusion-ffi-example/python/tests/_test_table_provider.py @@ -18,14 +18,19 @@ from __future__ import annotations import pyarrow as pa +import pytest from datafusion import SessionContext from datafusion_ffi_example import MyTableProvider -def test_table_loading(): +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_table_provider_ffi(inner_capsule: bool) -> None: ctx = SessionContext() table = MyTableProvider(3, 2, 4) - ctx.register_table_provider("t", table) + if inner_capsule: + table = table.__datafusion_table_provider__(ctx) + + ctx.register_table("t", table) result = ctx.table("t").collect() assert len(result) == 4 @@ -40,3 +45,7 @@ def test_table_loading(): ] assert result == expected + + result = ctx.read_table(table).collect() + result = [r.column(0) for r in result] + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_window_udf.py b/examples/datafusion-ffi-example/python/tests/_test_window_udf.py new file mode 100644 index 000000000..7d96994b9 --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_window_udf.py @@ -0,0 +1,89 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
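[Editor's note] The new `_test_window_udf.py` below covers the FFI window UDF, again via both SQL and direct DataFrame use. The direct form is worth calling out because window UDFs go through a builder; a sketch using the names from the test file:

```python
from datafusion import SessionContext, col, udwf
from datafusion_ffi_example import MyRankUDF

ctx = SessionContext()
my_rank = udwf(MyRankUDF())
# Window UDFs are invoked through a builder: set the window ordering,
# then build() produces the final expression to select.
expr = my_rank().order_by(col("a")).build()
df = ctx.table("test_table").select(col("a"), expr)
```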
+
+from __future__ import annotations
+
+import pyarrow as pa
+from datafusion import SessionContext, col, udwf
+from datafusion_ffi_example import MyRankUDF
+
+
+def setup_context_with_table():
+    ctx = SessionContext()
+
+    # Insert the values deliberately out of order; the tests sort their
+    # results, so it is the rank ordering itself that gets exercised
+    batch = pa.RecordBatch.from_arrays(
+        [
+            pa.array([40, 10, 30, 20], type=pa.int64()),
+        ],
+        names=["a"],
+    )
+    ctx.register_record_batches("test_table", [[batch]])
+    return ctx
+
+
+def test_ffi_window_register():
+    ctx = setup_context_with_table()
+    my_udwf = udwf(MyRankUDF())
+    ctx.register_udwf(my_udwf)
+
+    result = ctx.sql(
+        "select a, my_custom_rank() over (order by a) from test_table"
+    ).collect()
+    assert len(result) == 1
+    assert result[0].num_columns == 2
+
+    results = [
+        (result[0][0][idx].as_py(), result[0][1][idx].as_py()) for idx in range(4)
+    ]
+    results.sort()
+
+    expected = [
+        (10, 1),
+        (20, 2),
+        (30, 3),
+        (40, 4),
+    ]
+    assert results == expected
+
+
+def test_ffi_window_call_directly():
+    ctx = setup_context_with_table()
+    my_udwf = udwf(MyRankUDF())
+
+    result = (
+        ctx.table("test_table")
+        .select(col("a"), my_udwf().order_by(col("a")).build())
+        .collect()
+    )
+
+    assert len(result) == 1
+    assert result[0].num_columns == 2
+
+    results = [
+        (result[0][0][idx].as_py(), result[0][1][idx].as_py()) for idx in range(4)
+    ]
+    results.sort()
+
+    expected = [
+        (10, 1),
+        (20, 2),
+        (30, 3),
+        (40, 4),
+    ]
+    assert results == expected
diff --git a/examples/datafusion-ffi-example/src/aggregate_udf.rs b/examples/datafusion-ffi-example/src/aggregate_udf.rs
new file mode 100644
index 000000000..276ad0275
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/aggregate_udf.rs
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_schema::DataType;
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::function::AccumulatorArgs;
+use datafusion_expr::{Accumulator, AggregateUDF, AggregateUDFImpl, Signature};
+use datafusion_ffi::udaf::FFI_AggregateUDF;
+use datafusion_functions_aggregate::sum::Sum;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(name = "MySumUDF", module = "datafusion_ffi_example", subclass)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub(crate) struct MySumUDF {
+    inner: Arc<Sum>,
+}
+
+#[pymethods]
+impl MySumUDF {
+    #[new]
+    fn new() -> PyResult<Self> {
+        Ok(Self {
+            inner: Arc::new(Sum::new()),
+        })
+    }
+
+    fn __datafusion_aggregate_udf__<'py>(
+        &self,
+        py: Python<'py>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_aggregate_udf".into();
+
+        let func = Arc::new(AggregateUDF::from(self.clone()));
+        let provider = FFI_AggregateUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl AggregateUDFImpl for MySumUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_sum"
+    }
+
+    fn signature(&self) -> &Signature {
+        self.inner.signature()
+    }
+
+    fn return_type(&self, arg_types: &[DataType]) -> DataFusionResult<DataType> {
+        self.inner.return_type(arg_types)
+    }
+
+    fn accumulator(&self, acc_args: AccumulatorArgs) -> DataFusionResult<Box<dyn Accumulator>> {
+        self.inner.accumulator(acc_args)
+    }
+
+    fn coerce_types(&self, arg_types: &[DataType]) -> DataFusionResult<Vec<DataType>> {
+        self.inner.coerce_types(arg_types)
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/catalog_provider.rs b/examples/datafusion-ffi-example/src/catalog_provider.rs
new file mode 100644
index 000000000..3dc111cbc
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/catalog_provider.rs
@@ -0,0 +1,270 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
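[Editor's note] A detail worth noting in the providers defined below: each exposes a double-underscore capsule method, so Python code can hand over either the provider object itself or the raw PyCapsule. The parametrized tests earlier in this diff cover both forms; schematically (not part of the patch):

```python
from datafusion import SessionContext
from datafusion_ffi_example import MyCatalogProvider

ctx = SessionContext()
# Either pass the provider object directly...
ctx.register_catalog_provider("a", MyCatalogProvider())
# ...or extract the PyCapsule yourself and register that instead.
capsule = MyCatalogProvider().__datafusion_catalog_provider__(ctx)
ctx.register_catalog_provider("b", capsule)
```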
+
+use std::any::Any;
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use arrow::datatypes::Schema;
+use async_trait::async_trait;
+use datafusion_catalog::{
+    CatalogProvider, CatalogProviderList, MemTable, MemoryCatalogProvider,
+    MemoryCatalogProviderList, MemorySchemaProvider, SchemaProvider, TableProvider,
+};
+use datafusion_common::error::{DataFusionError, Result};
+use datafusion_ffi::catalog_provider::FFI_CatalogProvider;
+use datafusion_ffi::catalog_provider_list::FFI_CatalogProviderList;
+use datafusion_ffi::schema_provider::FFI_SchemaProvider;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::utils::ffi_logical_codec_from_pycapsule;
+
+pub fn my_table() -> Arc<dyn TableProvider + 'static> {
+    use arrow::datatypes::{DataType, Field};
+    use datafusion_common::record_batch;
+
+    let schema = Arc::new(Schema::new(vec![
+        Field::new("units", DataType::Int32, true),
+        Field::new("price", DataType::Float64, true),
+    ]));
+
+    let partitions = vec![
+        record_batch!(
+            ("units", Int32, vec![10, 20, 30]),
+            ("price", Float64, vec![1.0, 2.0, 5.0])
+        )
+        .unwrap(),
+        record_batch!(
+            ("units", Int32, vec![5, 7]),
+            ("price", Float64, vec![1.5, 2.5])
+        )
+        .unwrap(),
+    ];
+
+    Arc::new(MemTable::try_new(schema, vec![partitions]).unwrap())
+}
+
+#[pyclass(
+    name = "FixedSchemaProvider",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug)]
+pub struct FixedSchemaProvider {
+    inner: Arc<MemorySchemaProvider>,
+}
+
+impl Default for FixedSchemaProvider {
+    fn default() -> Self {
+        let inner = Arc::new(MemorySchemaProvider::new());
+
+        let table = my_table();
+
+        let _ = inner.register_table("my_table".to_string(), table).unwrap();
+
+        Self { inner }
+    }
+}
+
+#[pymethods]
+impl FixedSchemaProvider {
+    #[new]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn __datafusion_schema_provider__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<'py, PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_schema_provider".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn SchemaProvider>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_SchemaProvider::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+#[async_trait]
+impl SchemaProvider for FixedSchemaProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn table_names(&self) -> Vec<String> {
+        self.inner.table_names()
+    }
+
+    async fn table(&self, name: &str) -> Result<Option<Arc<dyn TableProvider>>, DataFusionError> {
+        self.inner.table(name).await
+    }
+
+    fn register_table(
+        &self,
+        name: String,
+        table: Arc<dyn TableProvider>,
+    ) -> Result<Option<Arc<dyn TableProvider>>> {
+        self.inner.register_table(name, table)
+    }
+
+    fn deregister_table(&self, name: &str) -> Result<Option<Arc<dyn TableProvider>>> {
+        self.inner.deregister_table(name)
+    }
+
+    fn table_exist(&self, name: &str) -> bool {
+        self.inner.table_exist(name)
+    }
+}
+
+/// This catalog provider is intended only for unit tests. It is backed by an
+/// in-memory catalog and is pre-populated with a single schema.
+#[pyclass(
+    name = "MyCatalogProvider",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone)]
+pub(crate) struct MyCatalogProvider {
+    inner: Arc<MemoryCatalogProvider>,
+}
+
+impl CatalogProvider for MyCatalogProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn schema_names(&self) -> Vec<String> {
+        self.inner.schema_names()
+    }
+
+    fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
+        self.inner.schema(name)
+    }
+
+    fn register_schema(
+        &self,
+        name: &str,
+        schema: Arc<dyn SchemaProvider>,
+    ) -> Result<Option<Arc<dyn SchemaProvider>>> {
+        self.inner.register_schema(name, schema)
+    }
+
+    fn deregister_schema(
+        &self,
+        name: &str,
+        cascade: bool,
+    ) -> Result<Option<Arc<dyn SchemaProvider>>> {
+        self.inner.deregister_schema(name, cascade)
+    }
+}
+
+#[pymethods]
+impl MyCatalogProvider {
+    #[new]
+    pub fn new() -> PyResult<Self> {
+        let inner = Arc::new(MemoryCatalogProvider::new());
+
+        let schema_name: &str = "my_schema";
+        let _ = inner.register_schema(schema_name, Arc::new(FixedSchemaProvider::default()));
+
+        Ok(Self { inner })
+    }
+
+    pub fn __datafusion_catalog_provider__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<'py, PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_catalog_provider".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProvider>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_CatalogProvider::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+/// This catalog provider list is intended only for unit tests.
+/// It pre-populates with a single catalog.
+#[pyclass(
+    name = "MyCatalogProviderList",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone)]
+pub(crate) struct MyCatalogProviderList {
+    inner: Arc<MemoryCatalogProviderList>,
+}
+
+impl CatalogProviderList for MyCatalogProviderList {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn catalog_names(&self) -> Vec<String> {
+        self.inner.catalog_names()
+    }
+
+    fn catalog(&self, name: &str) -> Option<Arc<dyn CatalogProvider>> {
+        self.inner.catalog(name)
+    }
+
+    fn register_catalog(
+        &self,
+        name: String,
+        catalog: Arc<dyn CatalogProvider>,
+    ) -> Option<Arc<dyn CatalogProvider>> {
+        self.inner.register_catalog(name, catalog)
+    }
+}
+
+#[pymethods]
+impl MyCatalogProviderList {
+    #[new]
+    pub fn new() -> PyResult<Self> {
+        let inner = Arc::new(MemoryCatalogProviderList::new());
+
+        inner.register_catalog(
+            "auto_ffi_catalog".to_owned(),
+            Arc::new(MyCatalogProvider::new()?),
+        );
+
+        Ok(Self { inner })
+    }
+
+    pub fn __datafusion_catalog_provider_list__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<'py, PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_catalog_provider_list".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProviderList>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_CatalogProviderList::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/lib.rs b/examples/datafusion-ffi-example/src/lib.rs
index ae08c3b65..6c64c9fe5 100644
--- a/examples/datafusion-ffi-example/src/lib.rs
+++ b/examples/datafusion-ffi-example/src/lib.rs
@@ -15,16 +15,32 @@
 // specific language governing permissions and limitations
 // under the License.
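[Editor's note] The updated `lib.rs` below registers all of the example classes with the compiled extension module, so they become importable from Python once the crate is built with maturin (a sketch; the module name is set by the `[lib]` section of Cargo.toml):

```python
# All example classes live in the compiled extension module.
from datafusion_ffi_example import (
    FixedSchemaProvider,
    IsNullUDF,
    MyCatalogProvider,
    MyCatalogProviderList,
    MyRankUDF,
    MySumUDF,
    MyTableFunction,
    MyTableProvider,
)
```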
+use pyo3::prelude::*;
+
+use crate::aggregate_udf::MySumUDF;
+use crate::catalog_provider::{FixedSchemaProvider, MyCatalogProvider, MyCatalogProviderList};
+use crate::scalar_udf::IsNullUDF;
 use crate::table_function::MyTableFunction;
 use crate::table_provider::MyTableProvider;
-use pyo3::prelude::*;
+use crate::window_udf::MyRankUDF;
 
+pub(crate) mod aggregate_udf;
+pub(crate) mod catalog_provider;
+pub(crate) mod scalar_udf;
 pub(crate) mod table_function;
 pub(crate) mod table_provider;
+pub(crate) mod utils;
+pub(crate) mod window_udf;
 
 #[pymodule]
 fn datafusion_ffi_example(m: &Bound<'_, PyModule>) -> PyResult<()> {
     m.add_class::<MyTableProvider>()?;
     m.add_class::<MyTableFunction>()?;
+    m.add_class::<MySumUDF>()?;
+    m.add_class::<IsNullUDF>()?;
+    m.add_class::<MyRankUDF>()?;
+    m.add_class::<FixedSchemaProvider>()?;
+    m.add_class::<MyCatalogProvider>()?;
+    m.add_class::<MyCatalogProviderList>()?;
 
     Ok(())
 }
diff --git a/examples/datafusion-ffi-example/src/scalar_udf.rs b/examples/datafusion-ffi-example/src/scalar_udf.rs
new file mode 100644
index 000000000..089d32d93
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/scalar_udf.rs
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
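+//! Defines `IsNullUDF`, a scalar UDF handed to Python as a PyCapsule wrapping
+//! an `FFI_ScalarUDF`. The implementation below reads the Arrow validity
+//! (null) buffer directly: a set bit marks a valid value, so each row's
+//! answer is the negation of its validity bit, and an array without a null
+//! buffer short-circuits to a scalar `false`.
+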
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_array::{Array, BooleanArray};
+use arrow_schema::DataType;
+use datafusion_common::ScalarValue;
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::{
+    ColumnarValue, ScalarFunctionArgs, ScalarUDF, ScalarUDFImpl, Signature, TypeSignature,
+    Volatility,
+};
+use datafusion_ffi::udf::FFI_ScalarUDF;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(name = "IsNullUDF", module = "datafusion_ffi_example", subclass)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct IsNullUDF {
+    signature: Signature,
+}
+
+#[pymethods]
+impl IsNullUDF {
+    #[new]
+    fn new() -> Self {
+        Self {
+            signature: Signature::new(TypeSignature::Any(1), Volatility::Immutable),
+        }
+    }
+
+    fn __datafusion_scalar_udf__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_scalar_udf".into();
+
+        let func = Arc::new(ScalarUDF::from(self.clone()));
+        let provider = FFI_ScalarUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl ScalarUDFImpl for IsNullUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_is_null"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, _arg_types: &[DataType]) -> DataFusionResult<DataType> {
+        Ok(DataType::Boolean)
+    }
+
+    fn invoke_with_args(&self, args: ScalarFunctionArgs) -> DataFusionResult<ColumnarValue> {
+        let input = &args.args[0];
+
+        Ok(match input {
+            ColumnarValue::Array(arr) => match arr.is_nullable() {
+                true => {
+                    let nulls = arr.nulls().unwrap();
+                    let nulls = BooleanArray::from_iter(nulls.iter().map(|x| Some(!x)));
+                    ColumnarValue::Array(Arc::new(nulls))
+                }
+                false => ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
+            },
+            ColumnarValue::Scalar(sv) => {
+                ColumnarValue::Scalar(ScalarValue::Boolean(Some(sv == &ScalarValue::Null)))
+            }
+        })
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/table_function.rs b/examples/datafusion-ffi-example/src/table_function.rs
index 2d7b356e3..1cddb9e35 100644
--- a/examples/datafusion-ffi-example/src/table_function.rs
+++ b/examples/datafusion-ffi-example/src/table_function.rs
@@ -15,14 +15,17 @@
 // specific language governing permissions and limitations
 // under the License.
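+//! Note that `__datafusion_table_function__` now receives the calling session:
+//! the session exposes its logical extension codec through a PyCapsule (see
+//! `utils.rs`), and that codec is threaded into the FFI wrapper so both sides
+//! of the boundary agree on how plans are serialized.
+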
-use crate::table_provider::MyTableProvider;
-use datafusion::catalog::{TableFunctionImpl, TableProvider};
-use datafusion::error::Result as DataFusionResult;
-use datafusion::prelude::Expr;
+use std::sync::Arc;
+
+use datafusion_catalog::{TableFunctionImpl, TableProvider};
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::Expr;
 use datafusion_ffi::udtf::FFI_TableFunction;
 use pyo3::types::PyCapsule;
-use pyo3::{pyclass, pymethods, Bound, PyResult, Python};
-use std::sync::Arc;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::table_provider::MyTableProvider;
+use crate::utils::ffi_logical_codec_from_pycapsule;
 
 #[pyclass(name = "MyTableFunction", module = "datafusion_ffi_example", subclass)]
 #[derive(Debug, Clone)]
@@ -38,11 +41,13 @@ impl MyTableFunction {
     fn __datafusion_table_function__<'py>(
         &self,
         py: Python<'py>,
+        session: Bound<'_, PyAny>,
     ) -> PyResult<Bound<'py, PyCapsule>> {
         let name = cr"datafusion_table_function".into();
 
         let func = self.clone();
-        let provider = FFI_TableFunction::new(Arc::new(func), None);
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_TableFunction::new_with_ffi_codec(Arc::new(func), None, codec);
 
         PyCapsule::new(py, provider, Some(name))
     }
diff --git a/examples/datafusion-ffi-example/src/table_provider.rs b/examples/datafusion-ffi-example/src/table_provider.rs
index e884585b5..887b2c671 100644
--- a/examples/datafusion-ffi-example/src/table_provider.rs
+++ b/examples/datafusion-ffi-example/src/table_provider.rs
@@ -15,15 +15,18 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use std::sync::Arc;
+
 use arrow_array::{ArrayRef, RecordBatch};
 use arrow_schema::{DataType, Field, Schema};
-use datafusion::catalog::MemTable;
-use datafusion::error::{DataFusionError, Result as DataFusionResult};
+use datafusion_catalog::MemTable;
+use datafusion_common::error::{DataFusionError, Result as DataFusionResult};
 use datafusion_ffi::table_provider::FFI_TableProvider;
 use pyo3::exceptions::PyRuntimeError;
 use pyo3::types::PyCapsule;
-use pyo3::{pyclass, pymethods, Bound, PyResult, Python};
-use std::sync::Arc;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::utils::ffi_logical_codec_from_pycapsule;
 
 /// In order to provide a test that demonstrates different sized record batches,
 /// the first batch will have num_rows, the second batch num_rows+1, and so on.
@@ -90,13 +93,17 @@ impl MyTableProvider {
     pub fn __datafusion_table_provider__<'py>(
         &self,
         py: Python<'py>,
+        session: Bound<'_, PyAny>,
     ) -> PyResult<Bound<'py, PyCapsule>> {
         let name = cr"datafusion_table_provider".into();
 
         let provider = self
             .create_table()
-            .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
-        let provider = FFI_TableProvider::new(Arc::new(provider), false, None);
+            .map_err(|e: DataFusionError| PyRuntimeError::new_err(e.to_string()))?;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider =
+            FFI_TableProvider::new_with_ffi_codec(Arc::new(provider), false, None, codec);
 
         PyCapsule::new(py, provider, Some(name))
     }
diff --git a/examples/datafusion-ffi-example/src/utils.rs b/examples/datafusion-ffi-example/src/utils.rs
new file mode 100644
index 000000000..a01d3fe27
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/utils.rs
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use datafusion_ffi::proto::logical_extension_codec::FFI_LogicalExtensionCodec;
+use pyo3::exceptions::PyValueError;
+use pyo3::prelude::{PyAnyMethods, PyCapsuleMethods};
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult};
+
+pub(crate) fn ffi_logical_codec_from_pycapsule(
+    obj: Bound<'_, PyAny>,
+) -> PyResult<FFI_LogicalExtensionCodec> {
+    let attr_name = "__datafusion_logical_extension_codec__";
+    let capsule = if obj.hasattr(attr_name)? {
+        obj.getattr(attr_name)?.call0()?
+    } else {
+        obj
+    };
+
+    let capsule = capsule.downcast::<PyCapsule>()?;
+    validate_pycapsule(capsule, "datafusion_logical_extension_codec")?;
+
+    let codec = unsafe { capsule.reference::<FFI_LogicalExtensionCodec>() };
+
+    Ok(codec.clone())
+}
+
+pub(crate) fn validate_pycapsule(capsule: &Bound<'_, PyCapsule>, name: &str) -> PyResult<()> {
+    let capsule_name = capsule.name()?;
+    if capsule_name.is_none() {
+        return Err(PyValueError::new_err(format!(
+            "Expected {name} PyCapsule to have name set."
+        )));
+    }
+
+    let capsule_name = capsule_name.unwrap().to_str()?;
+    if capsule_name != name {
+        return Err(PyValueError::new_err(format!(
+            "Expected name '{name}' in PyCapsule, instead got '{capsule_name}'"
+        )));
+    }
+
+    Ok(())
+}
diff --git a/examples/datafusion-ffi-example/src/window_udf.rs b/examples/datafusion-ffi-example/src/window_udf.rs
new file mode 100644
index 000000000..f3f565234
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/window_udf.rs
@@ -0,0 +1,82 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
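+//! Defines `MyRankUDF`, a thin wrapper that re-exports DataFusion's built-in
+//! `rank` window function over the FFI boundary under the name
+//! `my_custom_rank`; every trait method simply delegates to the wrapped UDWF.
+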
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_schema::{DataType, FieldRef};
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::function::{PartitionEvaluatorArgs, WindowUDFFieldArgs};
+use datafusion_expr::{PartitionEvaluator, Signature, WindowUDF, WindowUDFImpl};
+use datafusion_ffi::udwf::FFI_WindowUDF;
+use datafusion_functions_window::rank::rank_udwf;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(name = "MyRankUDF", module = "datafusion_ffi_example", subclass)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub(crate) struct MyRankUDF {
+    inner: Arc<WindowUDF>,
+}
+
+#[pymethods]
+impl MyRankUDF {
+    #[new]
+    fn new() -> PyResult<Self> {
+        Ok(Self { inner: rank_udwf() })
+    }
+
+    fn __datafusion_window_udf__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_window_udf".into();
+
+        let func = Arc::new(WindowUDF::from(self.clone()));
+        let provider = FFI_WindowUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl WindowUDFImpl for MyRankUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_rank"
+    }
+
+    fn signature(&self) -> &Signature {
+        self.inner.signature()
+    }
+
+    fn partition_evaluator(
+        &self,
+        partition_evaluator_args: PartitionEvaluatorArgs,
+    ) -> DataFusionResult<Box<dyn PartitionEvaluator>> {
+        self.inner
+            .inner()
+            .partition_evaluator(partition_evaluator_args)
+    }
+
+    fn field(&self, field_args: WindowUDFFieldArgs) -> DataFusionResult<FieldRef> {
+        self.inner.inner().field(field_args)
+    }
+
+    fn coerce_types(&self, arg_types: &[DataType]) -> DataFusionResult<Vec<DataType>> {
+        self.inner.coerce_types(arg_types)
+    }
+}
diff --git a/examples/python-udf-comparisons.py b/examples/python-udf-comparisons.py
index eb0825011..b870645a3 100644
--- a/examples/python-udf-comparisons.py
+++ b/examples/python-udf-comparisons.py
@@ -15,16 +15,16 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import os
 import time
+from pathlib import Path
 
 import pyarrow as pa
 import pyarrow.compute as pc
 from datafusion import SessionContext, col, lit, udf
 from datafusion import functions as F
 
-path = os.path.dirname(os.path.abspath(__file__))
-filepath = os.path.join(path, "./tpch/data/lineitem.parquet")
+path = Path(__file__).parent.resolve()
+filepath = path / "./tpch/data/lineitem.parquet"
 
 # This example serves to demonstrate alternate approaches to answering the
 # question "return all of the rows that have a specific combination of these
diff --git a/examples/sql-using-python-udaf.py b/examples/sql-using-python-udaf.py
index 32ce38900..f42bbdc23 100644
--- a/examples/sql-using-python-udaf.py
+++ b/examples/sql-using-python-udaf.py
@@ -28,16 +28,16 @@ class MyAccumulator(Accumulator):
     def __init__(self) -> None:
         self._sum = pa.scalar(0.0)
 
-    def update(self, values: pa.Array) -> None:
+    def update(self, values: list[pa.Array]) -> None:
         # not nice since pyarrow scalars can't be summed yet. This breaks on `None`
         self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(values).as_py())
 
     def merge(self, states: pa.Array) -> None:
         # not nice since pyarrow scalars can't be summed yet.
This breaks on `None` - self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states).as_py()) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states[0]).as_py()) - def state(self) -> pa.Array: - return pa.array([self._sum.as_py()]) + def state(self) -> list[pa.Array]: + return [self._sum] def evaluate(self) -> pa.Scalar: return self._sum diff --git a/examples/tpch/_tests.py b/examples/tpch/_tests.py index 80ff80244..780fcf5e5 100644 --- a/examples/tpch/_tests.py +++ b/examples/tpch/_tests.py @@ -25,8 +25,10 @@ def df_selection(col_name, col_type): - if col_type == pa.float64() or isinstance(col_type, pa.Decimal128Type): + if col_type == pa.float64(): return F.round(col(col_name), lit(2)).alias(col_name) + if isinstance(col_type, pa.Decimal128Type): + return F.round(col(col_name).cast(pa.float64()), lit(2)).alias(col_name) if col_type == pa.string() or col_type == pa.string_view(): return F.trim(col(col_name)).alias(col_name) return col(col_name) diff --git a/examples/tpch/convert_data_to_parquet.py b/examples/tpch/convert_data_to_parquet.py index fd0fcca49..af554c39e 100644 --- a/examples/tpch/convert_data_to_parquet.py +++ b/examples/tpch/convert_data_to_parquet.py @@ -22,7 +22,7 @@ as will be generated by the script provided in this repository. """ -import os +from pathlib import Path import datafusion import pyarrow as pa @@ -116,7 +116,7 @@ ("S_COMMENT", pa.string()), ] -curr_dir = os.path.dirname(os.path.abspath(__file__)) +curr_dir = Path(__file__).resolve().parent for filename, curr_schema_val in all_schemas.items(): # For convenience, go ahead and convert the schema column names to lowercase curr_schema = [(s[0].lower(), s[1]) for s in curr_schema_val] @@ -132,10 +132,8 @@ schema = pa.schema(curr_schema) - source_file = os.path.abspath( - os.path.join(curr_dir, f"../../benchmarks/tpch/data/{filename}.csv") - ) - dest_file = os.path.abspath(os.path.join(curr_dir, f"./data/{filename}.parquet")) + source_file = (curr_dir / f"../../benchmarks/tpch/data/{filename}.csv").resolve() + dest_file = (curr_dir / f"./data/{filename}.parquet").resolve() df = ctx.read_csv(source_file, schema=schema, has_header=False, delimiter="|") diff --git a/examples/tpch/q07_volume_shipping.py b/examples/tpch/q07_volume_shipping.py index a84cf728a..ff2f891f1 100644 --- a/examples/tpch/q07_volume_shipping.py +++ b/examples/tpch/q07_volume_shipping.py @@ -80,7 +80,7 @@ # not match these will result in a null value and then get filtered out. # # To do the same using a simple filter would be: -# df_nation = df_nation.filter((F.col("n_name") == nation_1) | (F.col("n_name") == nation_2)) +# df_nation = df_nation.filter((F.col("n_name") == nation_1) | (F.col("n_name") == nation_2)) # noqa: ERA001 df_nation = df_nation.with_column( "n_name", F.case(col("n_name")) diff --git a/examples/tpch/q12_ship_mode_order_priority.py b/examples/tpch/q12_ship_mode_order_priority.py index f1d894940..9071597f0 100644 --- a/examples/tpch/q12_ship_mode_order_priority.py +++ b/examples/tpch/q12_ship_mode_order_priority.py @@ -73,7 +73,7 @@ # matches either of the two values, but we want to show doing some array operations in this # example. If you want to see this done with filters, comment out the above line and uncomment # this one. 
-# df = df.filter((col("l_shipmode") == lit(SHIP_MODE_1)) | (col("l_shipmode") == lit(SHIP_MODE_2))) +# df = df.filter((col("l_shipmode") == lit(SHIP_MODE_1)) | (col("l_shipmode") == lit(SHIP_MODE_2))) # noqa: ERA001 # We need order priority, so join order df to line item diff --git a/examples/tpch/util.py b/examples/tpch/util.py index 7e3d659dd..ec53bcd15 100644 --- a/examples/tpch/util.py +++ b/examples/tpch/util.py @@ -19,18 +19,16 @@ Common utilities for running TPC-H examples. """ -import os +from pathlib import Path -def get_data_path(filename: str) -> str: - path = os.path.dirname(os.path.abspath(__file__)) +def get_data_path(filename: str) -> Path: + path = Path(__file__).resolve().parent - return os.path.join(path, "data", filename) + return path / "data" / filename -def get_answer_file(answer_file: str) -> str: - path = os.path.dirname(os.path.abspath(__file__)) +def get_answer_file(answer_file: str) -> Path: + path = Path(__file__).resolve().parent - return os.path.join( - path, "../../benchmarks/tpch/data/answers", f"{answer_file}.out" - ) + return path / "../../benchmarks/tpch/data/answers" / f"{answer_file}.out" diff --git a/pyproject.toml b/pyproject.toml index 728cedb2d..5a5128a2f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,26 +24,30 @@ name = "datafusion" description = "Build and run queries against data" readme = "README.md" license = { file = "LICENSE.txt" } -requires-python = ">=3.9" +requires-python = ">=3.10" keywords = ["datafusion", "dataframe", "rust", "query-engine"] classifiers = [ - "Development Status :: 2 - Pre-Alpha", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "License :: OSI Approved", - "Operating System :: MacOS", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX :: Linux", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python", - "Programming Language :: Rust", + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "License :: OSI Approved", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Programming Language :: Python", + "Programming Language :: Rust", +] +dependencies = [ + "pyarrow>=16.0.0;python_version<'3.14'", + "pyarrow>=22.0.0;python_version>='3.14'", + "typing-extensions;python_version<'3.13'", ] -dependencies = ["pyarrow>=11.0.0", "typing-extensions;python_version<'3.13'"] dynamic = ["version"] [project.urls] @@ -69,36 +73,23 @@ asyncio_default_fixture_loop_scope = "function" # Enable docstring linting using the google style guide [tool.ruff.lint] -select = ["ALL" ] +select = ["ALL"] ignore = [ - "A001", # Allow using words like min as variable names - "A002", # Allow using words like filter as variable names - "ANN401", # Allow Any for wrapper classes - "COM812", # Recommended to ignore these rules when using with ruff-format - "FIX002", # Allow TODO lines - consider removing at some point - "FBT001", # Allow 
boolean positional args - "FBT002", # Allow boolean positional args - "ISC001", # Recommended to ignore these rules when using with ruff-format - "SLF001", # Allow accessing private members - "TD002", - "TD003", # Allow TODO lines - "UP007", # Disallowing Union is pedantic - # TODO: Enable all of the following, but this PR is getting too large already - "PLR0913", - "TRY003", - "PLR2004", - "PD901", - "ERA001", - "ANN001", - "ANN202", - "PTH", - "N812", - "INP001", - "DTZ007", - "RUF015", - "A005", - "TC001", - "UP035", + "A001", # Allow using words like min as variable names + "A002", # Allow using words like filter as variable names + "ANN401", # Allow Any for wrapper classes + "COM812", # Recommended to ignore these rules when using with ruff-format + "FIX002", # Allow TODO lines - consider removing at some point + "FBT001", # Allow boolean positional args + "FBT002", # Allow boolean positional args + "ISC001", # Recommended to ignore these rules when using with ruff-format + "SLF001", # Allow accessing private members + "TD002", # Do not require author names in TODO statements + "TD003", # Allow TODO lines + "PLR0913", # Allow many arguments in function definition + "PD901", # Allow variable name df + "N812", # Allow importing functions as `F` + "A005", # Allow module named io ] [tool.ruff.lint.pydocstyle] @@ -107,46 +98,104 @@ convention = "google" [tool.ruff.lint.pycodestyle] max-doc-length = 88 +[tool.ruff.lint.flake8-boolean-trap] +extend-allowed-calls = ["lit", "datafusion.lit"] + # Disable docstring checking for these directories [tool.ruff.lint.per-file-ignores] "python/tests/*" = [ - "ANN", - "ARG", - "BLE001", - "D", - "S101", - "SLF", - "PD", - "PLR2004", - "PT011", - "RUF015", - "S608", - "PLR0913", - "PT004", + "ANN", + "ARG", + "BLE001", + "D", + "S101", + "SLF", + "PD", + "PLR2004", + "PT011", + "RUF015", + "S608", + "PLR0913", + "PT004", +] +"examples/*" = [ + "D", + "W505", + "E501", + "T201", + "S101", + "PLR2004", + "ANN001", + "ANN202", + "INP001", + "DTZ007", + "RUF015", +] +"dev/*" = [ + "D", + "E", + "T", + "S", + "PLR", + "C", + "SIM", + "UP", + "EXE", + "N817", + "ERA001", + "ANN001", +] +"benchmarks/*" = [ + "D", + "F", + "T", + "BLE", + "FURB", + "PLR", + "E", + "TD", + "TRY", + "S", + "SIM", + "EXE", + "UP", + "ERA001", + "ANN001", + "INP001", ] -"examples/*" = ["D", "W505", "E501", "T201", "S101"] -"dev/*" = ["D", "E", "T", "S", "PLR", "C", "SIM", "UP", "EXE", "N817"] -"benchmarks/*" = ["D", "F", "T", "BLE", "FURB", "PLR", "E", "TD", "TRY", "S", "SIM", "EXE", "UP"] "docs/*" = ["D"] +"docs/source/conf.py" = ["ERA001", "ANN001", "INP001"] + +[tool.codespell] +skip = ["./target", "uv.lock", "./python/tests/test_functions.py"] +count = true +ignore-words-list = ["ans", "IST"] [dependency-groups] dev = [ - "maturin>=1.8.1", - "numpy>1.25.0", - "pytest>=7.4.4", - "pytest-asyncio>=0.23.3", - "ruff>=0.9.1", - "toml>=0.10.2", - "pygithub==2.5.0", + "arro3-core==0.6.5", + "codespell==2.4.1", + "maturin>=1.8.1", + "nanoarrow==0.8.0", + "numpy>1.25.0;python_version<'3.14'", + "numpy>=2.3.2;python_version>='3.14'", + "pre-commit>=4.3.0", + "pyarrow>=19.0.0", + "pygithub==2.5.0", + "pytest>=7.4.4", + "pytest-asyncio>=0.23.3", + "pyyaml>=6.0.3", + "ruff>=0.9.1", + "toml>=0.10.2", ] docs = [ - "sphinx>=7.1.2", - "pydata-sphinx-theme==0.8.0", - "myst-parser>=3.0.1", - "jinja2>=3.1.5", - "ipython>=8.12.3", - "pandas>=2.0.3", - "pickleshare>=0.7.5", - "sphinx-autoapi>=3.4.0", - "setuptools>=75.3.0", + "ipython>=8.12.3", + "jinja2>=3.1.5", + "myst-parser>=3.0.1", + 
"pandas>=2.0.3", + "pickleshare>=0.7.5", + "pydata-sphinx-theme==0.8.0", + "setuptools>=75.3.0", + "sphinx>=7.1.2", + "sphinx-autoapi>=3.4.0", ] diff --git a/python/datafusion/__init__.py b/python/datafusion/__init__.py index 4f7700251..2e6f81166 100644 --- a/python/datafusion/__init__.py +++ b/python/datafusion/__init__.py @@ -28,31 +28,33 @@ try: import importlib.metadata as importlib_metadata except ImportError: - import importlib_metadata - -from datafusion.col import col, column + import importlib_metadata # type: ignore[import] +# Public submodules from . import functions, object_store, substrait, unparser # The following imports are okay to remain as opaque to the user. from ._internal import Config from .catalog import Catalog, Database, Table -from .common import ( - DFSchema, -) +from .col import col, column +from .common import DFSchema from .context import ( RuntimeEnvBuilder, SessionConfig, SessionContext, SQLOptions, ) -from .dataframe import DataFrame -from .expr import ( - Expr, - WindowFrame, +from .dataframe import ( + DataFrame, + DataFrameWriteOptions, + InsertOp, + ParquetColumnOptions, + ParquetWriterOptions, ) -from .html_formatter import configure_formatter +from .dataframe_formatter import configure_formatter +from .expr import Expr, WindowFrame from .io import read_avro, read_csv, read_json, read_parquet +from .options import CsvReadOptions from .plan import ExecutionPlan, LogicalPlan from .record_batch import RecordBatch, RecordBatchStream from .user_defined import ( @@ -74,12 +76,17 @@ "AggregateUDF", "Catalog", "Config", + "CsvReadOptions", "DFSchema", "DataFrame", + "DataFrameWriteOptions", "Database", "ExecutionPlan", "Expr", + "InsertOp", "LogicalPlan", + "ParquetColumnOptions", + "ParquetWriterOptions", "RecordBatch", "RecordBatchStream", "RuntimeEnvBuilder", @@ -91,6 +98,7 @@ "TableFunction", "WindowFrame", "WindowUDF", + "catalog", "col", "column", "common", @@ -100,6 +108,7 @@ "lit", "literal", "object_store", + "options", "read_avro", "read_csv", "read_json", @@ -113,12 +122,12 @@ ] -def literal(value) -> Expr: +def literal(value: Any) -> Expr: """Create a literal expression.""" return Expr.literal(value) -def string_literal(value): +def string_literal(value: str) -> Expr: """Create a UTF8 literal expression. It differs from `literal` which creates a UTF8view literal. 
@@ -126,12 +135,12 @@ def string_literal(value):
     return Expr.string_literal(value)
 
 
-def str_lit(value):
+def str_lit(value: str) -> Expr:
     """Alias for `string_literal`."""
     return string_literal(value)
 
 
-def lit(value) -> Expr:
+def lit(value: Any) -> Expr:
     """Create a literal expression."""
     return Expr.literal(value)
 
diff --git a/python/datafusion/catalog.py b/python/datafusion/catalog.py
index 67ab3ead2..bc43cf349 100644
--- a/python/datafusion/catalog.py
+++ b/python/datafusion/catalog.py
@@ -19,18 +19,84 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Protocol
 
 import datafusion._internal as df_internal
 
 if TYPE_CHECKING:
     import pyarrow as pa
 
+    from datafusion import DataFrame, SessionContext
+    from datafusion.context import TableProviderExportable
+
+try:
+    from warnings import deprecated  # Python 3.13+
+except ImportError:
+    from typing_extensions import deprecated  # Python 3.12
+
+
+__all__ = [
+    "Catalog",
+    "CatalogList",
+    "CatalogProvider",
+    "CatalogProviderList",
+    "Schema",
+    "SchemaProvider",
+    "Table",
+]
+
+
+class CatalogList:
+    """DataFusion data catalog list."""
+
+    def __init__(self, catalog_list: df_internal.catalog.RawCatalogList) -> None:
+        """This constructor is not typically called by the end user."""
+        self.catalog_list = catalog_list
+
+    def __repr__(self) -> str:
+        """Print a string representation of the catalog list."""
+        return self.catalog_list.__repr__()
+
+    def names(self) -> set[str]:
+        """This is an alias for `catalog_names`."""
+        return self.catalog_names()
+
+    def catalog_names(self) -> set[str]:
+        """Returns the set of catalog names in this catalog list."""
+        return self.catalog_list.catalog_names()
+
+    @staticmethod
+    def memory_catalog(ctx: SessionContext | None = None) -> CatalogList:
+        """Create an in-memory catalog provider list."""
+        catalog_list = df_internal.catalog.RawCatalogList.memory_catalog(ctx)
+        return CatalogList(catalog_list)
+
+    def catalog(self, name: str = "datafusion") -> Catalog:
+        """Returns the catalog with the given ``name`` from this catalog list."""
+        catalog = self.catalog_list.catalog(name)
+
+        return (
+            Catalog(catalog)
+            if isinstance(catalog, df_internal.catalog.RawCatalog)
+            else catalog
+        )
+
+    def register_catalog(
+        self,
+        name: str,
+        catalog: Catalog | CatalogProvider | CatalogProviderExportable,
+    ) -> Catalog | None:
+        """Register a catalog with this catalog list."""
+        if isinstance(catalog, Catalog):
+            return self.catalog_list.register_catalog(name, catalog.catalog)
+        return self.catalog_list.register_catalog(name, catalog)
+
 
 class Catalog:
     """DataFusion data catalog."""
 
-    def __init__(self, catalog: df_internal.Catalog) -> None:
+    def __init__(self, catalog: df_internal.catalog.RawCatalog) -> None:
         """This constructor is not typically called by the end user."""
         self.catalog = catalog
 
@@ -38,52 +104,268 @@ def __repr__(self) -> str:
         """Print a string representation of the catalog."""
         return self.catalog.__repr__()
 
-    def names(self) -> list[str]:
-        """Returns the list of databases in this catalog."""
-        return self.catalog.names()
+    def names(self) -> set[str]:
+        """This is an alias for `schema_names`."""
+        return self.schema_names()
+
+    def schema_names(self) -> set[str]:
+        """Returns the set of schema names in this catalog."""
+        return self.catalog.schema_names()
 
-    def database(self, name: str = "public") -> Database:
+    @staticmethod
+    def memory_catalog(ctx: SessionContext | None = None) -> Catalog:
+        """Create an in-memory catalog
provider.""" + catalog = df_internal.catalog.RawCatalog.memory_catalog(ctx) + return Catalog(catalog) + + def schema(self, name: str = "public") -> Schema: """Returns the database with the given ``name`` from this catalog.""" - return Database(self.catalog.database(name)) + schema = self.catalog.schema(name) + + return ( + Schema(schema) + if isinstance(schema, df_internal.catalog.RawSchema) + else schema + ) + + @deprecated("Use `schema` instead.") + def database(self, name: str = "public") -> Schema: + """Returns the database with the given ``name`` from this catalog.""" + return self.schema(name) + + def register_schema( + self, + name: str, + schema: Schema | SchemaProvider | SchemaProviderExportable, + ) -> Schema | None: + """Register a schema with this catalog.""" + if isinstance(schema, Schema): + return self.catalog.register_schema(name, schema._raw_schema) + return self.catalog.register_schema(name, schema) + def deregister_schema(self, name: str, cascade: bool = True) -> Schema | None: + """Deregister a schema from this catalog.""" + return self.catalog.deregister_schema(name, cascade) -class Database: - """DataFusion Database.""" - def __init__(self, db: df_internal.Database) -> None: +class Schema: + """DataFusion Schema.""" + + def __init__(self, schema: df_internal.catalog.RawSchema) -> None: """This constructor is not typically called by the end user.""" - self.db = db + self._raw_schema = schema def __repr__(self) -> str: - """Print a string representation of the database.""" - return self.db.__repr__() + """Print a string representation of the schema.""" + return self._raw_schema.__repr__() + + @staticmethod + def memory_schema(ctx: SessionContext | None = None) -> Schema: + """Create an in-memory schema provider.""" + schema = df_internal.catalog.RawSchema.memory_schema(ctx) + return Schema(schema) def names(self) -> set[str]: - """Returns the list of all tables in this database.""" - return self.db.names() + """This is an alias for `table_names`.""" + return self.table_names() + + def table_names(self) -> set[str]: + """Returns the list of all tables in this schema.""" + return self._raw_schema.table_names def table(self, name: str) -> Table: - """Return the table with the given ``name`` from this database.""" - return Table(self.db.table(name)) + """Return the table with the given ``name`` from this schema.""" + return Table(self._raw_schema.table(name)) + + def register_table( + self, + name: str, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ) -> None: + """Register a table in this schema.""" + return self._raw_schema.register_table(name, table) + + def deregister_table(self, name: str) -> None: + """Deregister a table provider from this schema.""" + return self._raw_schema.deregister_table(name) + + def table_exist(self, name: str) -> bool: + """Determines if a table exists in this schema.""" + return self._raw_schema.table_exist(name) + + +@deprecated("Use `Schema` instead.") +class Database(Schema): + """See `Schema`.""" class Table: - """DataFusion table.""" + """A DataFusion table. 
- def __init__(self, table: df_internal.Table) -> None: - """This constructor is not typically called by the end user.""" - self.table = table + Internally we currently support the following types of tables: + + - Tables created using built-in DataFusion methods, such as + reading from CSV or Parquet + - pyarrow datasets + - DataFusion DataFrames, which will be converted into a view + - Externally provided tables implemented with the FFI PyCapsule + interface (advanced) + """ + + __slots__ = ("_inner",) + + def __init__( + self, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ctx: SessionContext | None = None, + ) -> None: + """Constructor.""" + self._inner = df_internal.catalog.RawTable(table, ctx) def __repr__(self) -> str: """Print a string representation of the table.""" - return self.table.__repr__() + return repr(self._inner) + + @staticmethod + @deprecated("Use Table() constructor instead.") + def from_dataset(dataset: pa.dataset.Dataset) -> Table: + """Turn a :mod:`pyarrow.dataset` ``Dataset`` into a :class:`Table`.""" + return Table(dataset) @property def schema(self) -> pa.Schema: """Returns the schema associated with this table.""" - return self.table.schema + return self._inner.schema @property def kind(self) -> str: """Returns the kind of table.""" - return self.table.kind + return self._inner.kind + + +class CatalogProviderList(ABC): + """Abstract class for defining a Python based Catalog Provider List.""" + + @abstractmethod + def catalog_names(self) -> set[str]: + """Set of the names of all catalogs in this catalog list.""" + ... + + @abstractmethod + def catalog( + self, name: str + ) -> CatalogProviderExportable | CatalogProvider | Catalog | None: + """Retrieve a specific catalog from this catalog list.""" + ... + + def register_catalog( # noqa: B027 + self, name: str, catalog: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + """Add a catalog to this catalog list. + + This method is optional. If your catalog provides a fixed list of catalogs, you + do not need to implement this method. + """ + + +class CatalogProviderListExportable(Protocol): + """Type hint for object that has __datafusion_catalog_provider_list__ PyCapsule. + + https://docs.rs/datafusion/latest/datafusion/catalog/trait.CatalogProviderList.html + """ + + def __datafusion_catalog_provider_list__(self, session: Any) -> object: ... + + +class CatalogProvider(ABC): + """Abstract class for defining a Python based Catalog Provider.""" + + @abstractmethod + def schema_names(self) -> set[str]: + """Set of the names of all schemas in this catalog.""" + ... + + @abstractmethod + def schema(self, name: str) -> Schema | None: + """Retrieve a specific schema from this catalog.""" + ... + + def register_schema( # noqa: B027 + self, name: str, schema: SchemaProviderExportable | SchemaProvider | Schema + ) -> None: + """Add a schema to this catalog. + + This method is optional. If your catalog provides a fixed list of schemas, you + do not need to implement this method. + """ + + def deregister_schema(self, name: str, cascade: bool) -> None: # noqa: B027 + """Remove a schema from this catalog. + + This method is optional. If your catalog provides a fixed list of schemas, you + do not need to implement this method. + + Args: + name: The name of the schema to remove. + cascade: If true, deregister the tables within the schema. + """ + + +class CatalogProviderExportable(Protocol): + """Type hint for object that has __datafusion_catalog_provider__ PyCapsule. 
+ + https://docs.rs/datafusion/latest/datafusion/catalog/trait.CatalogProvider.html + """ + + def __datafusion_catalog_provider__(self, session: Any) -> object: ... + + +class SchemaProvider(ABC): + """Abstract class for defining a Python based Schema Provider.""" + + def owner_name(self) -> str | None: + """Returns the owner of the schema. + + This is an optional method. The default return is None. + """ + return None + + @abstractmethod + def table_names(self) -> set[str]: + """Set of the names of all tables in this schema.""" + ... + + @abstractmethod + def table(self, name: str) -> Table | None: + """Retrieve a specific table from this schema.""" + ... + + def register_table( # noqa: B027 + self, name: str, table: Table | TableProviderExportable | Any + ) -> None: + """Add a table to this schema. + + This method is optional. If your schema provides a fixed list of tables, you do + not need to implement this method. + """ + + def deregister_table(self, name: str, cascade: bool) -> None: # noqa: B027 + """Remove a table from this schema. + + This method is optional. If your schema provides a fixed list of tables, you do + not need to implement this method. + """ + + @abstractmethod + def table_exist(self, name: str) -> bool: + """Returns true if the table exists in this schema.""" + ... + + +class SchemaProviderExportable(Protocol): + """Type hint for object that has __datafusion_schema_provider__ PyCapsule. + + https://docs.rs/datafusion/latest/datafusion/catalog/trait.SchemaProvider.html + """ + + def __datafusion_schema_provider__(self, session: Any) -> object: ... diff --git a/python/datafusion/context.py b/python/datafusion/context.py index 5b99b0d26..0d8259774 100644 --- a/python/datafusion/context.py +++ b/python/datafusion/context.py @@ -19,34 +19,56 @@ from __future__ import annotations +import uuid import warnings from typing import TYPE_CHECKING, Any, Protocol -import pyarrow as pa - try: from warnings import deprecated # Python 3.13+ except ImportError: from typing_extensions import deprecated # Python 3.12 -from datafusion.catalog import Catalog, Table + +import pyarrow as pa + +from datafusion.catalog import ( + Catalog, + CatalogList, + CatalogProviderExportable, + CatalogProviderList, + CatalogProviderListExportable, +) from datafusion.dataframe import DataFrame -from datafusion.expr import Expr, SortExpr, sort_list_to_raw_sort_list +from datafusion.expr import sort_list_to_raw_sort_list +from datafusion.options import ( + DEFAULT_MAX_INFER_SCHEMA, + CsvReadOptions, + _convert_table_partition_cols, +) from datafusion.record_batch import RecordBatchStream -from datafusion.user_defined import AggregateUDF, ScalarUDF, TableFunction, WindowUDF from ._internal import RuntimeEnvBuilder as RuntimeEnvBuilderInternal from ._internal import SessionConfig as SessionConfigInternal from ._internal import SessionContext as SessionContextInternal from ._internal import SQLOptions as SQLOptionsInternal +from ._internal import expr as expr_internal if TYPE_CHECKING: import pathlib + from collections.abc import Sequence import pandas as pd - import polars as pl + import polars as pl # type: ignore[import] + from datafusion.catalog import CatalogProvider, Table + from datafusion.expr import SortKey from datafusion.plan import ExecutionPlan, LogicalPlan + from datafusion.user_defined import ( + AggregateUDF, + ScalarUDF, + TableFunction, + WindowUDF, + ) class ArrowStreamExportable(Protocol): @@ -77,7 +99,7 @@ class TableProviderExportable(Protocol): 
     https://datafusion.apache.org/python/user-guide/io/table_provider.html
     """
 
-    def __datafusion_table_provider__(self) -> object: ...  # noqa: D105
+    def __datafusion_table_provider__(self, session: Any) -> object: ...  # noqa: D105
 
 
 class SessionConfig:
@@ -544,7 +566,7 @@ def register_listing_table(
         table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
         file_extension: str = ".parquet",
         schema: pa.Schema | None = None,
-        file_sort_order: list[list[Expr | SortExpr]] | None = None,
+        file_sort_order: Sequence[Sequence[SortKey]] | None = None,
     ) -> None:
         """Register multiple files as a single table.
@@ -558,28 +580,35 @@ def register_listing_table(
             table_partition_cols: Partition columns.
             file_extension: File extension of the provided table.
             schema: The data source schema.
-            file_sort_order: Sort order for the file.
+            file_sort_order: Sort order for the file. Each sort key can be
+                specified as a column name (``str``), an expression
+                (``Expr``), or a ``SortExpr``.
         """
         if table_partition_cols is None:
             table_partition_cols = []
-        table_partition_cols = self._convert_table_partition_cols(table_partition_cols)
-        file_sort_order_raw = (
-            [sort_list_to_raw_sort_list(f) for f in file_sort_order]
-            if file_sort_order is not None
-            else None
-        )
+        table_partition_cols = _convert_table_partition_cols(table_partition_cols)
         self.ctx.register_listing_table(
             name,
             str(path),
             table_partition_cols,
             file_extension,
             schema,
-            file_sort_order_raw,
+            self._convert_file_sort_order(file_sort_order),
         )
 
-    def sql(self, query: str, options: SQLOptions | None = None) -> DataFrame:
+    def sql(
+        self,
+        query: str,
+        options: SQLOptions | None = None,
+        param_values: dict[str, Any] | None = None,
+        **named_params: Any,
+    ) -> DataFrame:
         """Create a :py:class:`~datafusion.DataFrame` from SQL query text.
 
+        See the online documentation for a description of how to perform
+        parameterized substitution via either the ``param_values`` option
+        or passing in ``named_params``.
+
         Note: This API implements DDL statements such as ``CREATE TABLE`` and
         ``CREATE VIEW`` and DML statements such as ``INSERT INTO`` with in-memory
         default implementation. See
@@ -588,15 +617,57 @@ def sql(self, query: str, options: SQLOptions | None = None) -> DataFrame:
         Args:
             query: SQL query text.
             options: If provided, the query will be validated against these options.
+            param_values: Provides substitution of scalar values in the query
+                after parsing.
+            named_params: Provides string or DataFrame substitution in the query string.
 
         Returns:
             DataFrame representation of the SQL query.
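+
+        Example (a minimal sketch; the table name is a placeholder and the
+        ``$``-prefixed placeholder follows DataFusion's parameter syntax)::
+
+            df = ctx.sql(
+                "SELECT * FROM my_table WHERE price > $min_price",
+                param_values={"min_price": 2.0},
+            )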
""" - if options is None: - return DataFrame(self.ctx.sql(query)) - return DataFrame(self.ctx.sql_with_options(query, options.options_internal)) - def sql_with_options(self, query: str, options: SQLOptions) -> DataFrame: + def value_to_scalar(value: Any) -> pa.Scalar: + if isinstance(value, pa.Scalar): + return value + return pa.scalar(value) + + def value_to_string(value: Any) -> str: + if isinstance(value, DataFrame): + view_name = str(uuid.uuid4()).replace("-", "_") + view_name = f"view_{view_name}" + view = value.df.into_view(temporary=True) + self.ctx.register_table(view_name, view) + return view_name + return str(value) + + param_values = ( + {name: value_to_scalar(value) for (name, value) in param_values.items()} + if param_values is not None + else {} + ) + param_strings = ( + {name: value_to_string(value) for (name, value) in named_params.items()} + if named_params is not None + else {} + ) + + options_raw = options.options_internal if options is not None else None + + return DataFrame( + self.ctx.sql_with_options( + query, + options=options_raw, + param_values=param_values, + param_strings=param_strings, + ) + ) + + def sql_with_options( + self, + query: str, + options: SQLOptions, + param_values: dict[str, Any] | None = None, + **named_params: Any, + ) -> DataFrame: """Create a :py:class:`~datafusion.dataframe.DataFrame` from SQL query text. This function will first validate that the query is allowed by the @@ -605,11 +676,16 @@ def sql_with_options(self, query: str, options: SQLOptions) -> DataFrame: Args: query: SQL query text. options: SQL options. + param_values: Provides substitution of scalar values in the query + after parsing. + named_params: Provides string or DataFrame substitution in the query string. Returns: DataFrame representation of the SQL query. """ - return self.sql(query, options) + return self.sql( + query, options=options, param_values=param_values, **named_params + ) def create_dataframe( self, @@ -725,7 +801,7 @@ def from_polars(self, data: pl.DataFrame, name: str | None = None) -> DataFrame: # https://github.com/apache/datafusion-python/pull/1016#discussion_r1983239116 # is the discussion on how we arrived at adding register_view def register_view(self, name: str, df: DataFrame) -> None: - """Register a :py:class: `~datafusion.detaframe.DataFrame` as a view. + """Register a :py:class:`~datafusion.dataframe.DataFrame` as a view. Args: name (str): The name to register the view under. @@ -734,30 +810,60 @@ def register_view(self, name: str, df: DataFrame) -> None: view = df.into_view() self.ctx.register_table(name, view) - def register_table(self, name: str, table: Table) -> None: - """Register a :py:class: `~datafusion.catalog.Table` as a table. + def register_table( + self, + name: str, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ) -> None: + """Register a :py:class:`~datafusion.Table` with this context. - The registered table can be referenced from SQL statement executed against. + The registered table can be referenced from SQL statements executed against + this context. Args: name: Name of the resultant table. - table: DataFusion table to add to the session context. + table: Any object that can be converted into a :class:`Table`. 
""" - self.ctx.register_table(name, table.table) + self.ctx.register_table(name, table) def deregister_table(self, name: str) -> None: """Remove a table from the session.""" self.ctx.deregister_table(name) + def catalog_names(self) -> set[str]: + """Returns the list of catalogs in this context.""" + return self.ctx.catalog_names() + + def register_catalog_provider_list( + self, + provider: CatalogProviderListExportable | CatalogProviderList | CatalogList, + ) -> None: + """Register a catalog provider list.""" + if isinstance(provider, CatalogList): + self.ctx.register_catalog_provider_list(provider.catalog) + else: + self.ctx.register_catalog_provider_list(provider) + + def register_catalog_provider( + self, name: str, provider: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + """Register a catalog provider.""" + if isinstance(provider, Catalog): + self.ctx.register_catalog_provider(name, provider.catalog) + else: + self.ctx.register_catalog_provider(name, provider) + + @deprecated("Use register_table() instead.") def register_table_provider( - self, name: str, provider: TableProviderExportable + self, + name: str, + provider: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, ) -> None: """Register a table provider. - This table provider must have a method called ``__datafusion_table_provider__`` - which returns a PyCapsule that exposes a ``FFI_TableProvider``. + Deprecated: use :meth:`register_table` instead. """ - self.ctx.register_table_provider(name, provider) + self.register_table(name, provider) def register_udtf(self, func: TableFunction) -> None: """Register a user defined table function.""" @@ -786,7 +892,7 @@ def register_parquet( file_extension: str = ".parquet", skip_metadata: bool = True, schema: pa.Schema | None = None, - file_sort_order: list[list[SortExpr]] | None = None, + file_sort_order: Sequence[Sequence[SortKey]] | None = None, ) -> None: """Register a Parquet file as a table. @@ -805,11 +911,13 @@ def register_parquet( that may be in the file schema. This can help avoid schema conflicts due to metadata. schema: The data source schema. - file_sort_order: Sort order for the file. + file_sort_order: Sort order for the file. Each sort key can be + specified as a column name (``str``), an expression + (``Expr``), or a ``SortExpr``. """ if table_partition_cols is None: table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) + table_partition_cols = _convert_table_partition_cols(table_partition_cols) self.ctx.register_parquet( name, str(path), @@ -818,9 +926,7 @@ def register_parquet( file_extension, skip_metadata, schema, - [sort_list_to_raw_sort_list(exprs) for exprs in file_sort_order] - if file_sort_order is not None - else None, + self._convert_file_sort_order(file_sort_order), ) def register_csv( @@ -830,9 +936,10 @@ def register_csv( schema: pa.Schema | None = None, has_header: bool = True, delimiter: str = ",", - schema_infer_max_records: int = 1000, + schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA, file_extension: str = ".csv", file_compression_type: str | None = None, + options: CsvReadOptions | None = None, ) -> None: """Register a CSV file as a table. @@ -852,18 +959,46 @@ def register_csv( file_extension: File extension; only files with this extension are selected for data input. file_compression_type: File compression type. + options: Set advanced options for CSV reading. This cannot be + combined with any of the other options in this method. 
""" - path = [str(p) for p in path] if isinstance(path, list) else str(path) + path_arg = [str(p) for p in path] if isinstance(path, list) else str(path) + + if options is not None and ( + schema is not None + or not has_header + or delimiter != "," + or schema_infer_max_records != DEFAULT_MAX_INFER_SCHEMA + or file_extension != ".csv" + or file_compression_type is not None + ): + message = ( + "Combining CsvReadOptions parameter with additional options " + "is not supported. Use CsvReadOptions to set parameters." + ) + warnings.warn( + message, + category=UserWarning, + stacklevel=2, + ) + + options = ( + options + if options is not None + else CsvReadOptions( + schema=schema, + has_header=has_header, + delimiter=delimiter, + schema_infer_max_records=schema_infer_max_records, + file_extension=file_extension, + file_compression_type=file_compression_type, + ) + ) self.ctx.register_csv( name, - path, - schema, - has_header, - delimiter, - schema_infer_max_records, - file_extension, - file_compression_type, + path_arg, + options.to_inner(), ) def register_json( @@ -894,7 +1029,7 @@ def register_json( """ if table_partition_cols is None: table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) + table_partition_cols = _convert_table_partition_cols(table_partition_cols) self.ctx.register_json( name, str(path), @@ -927,7 +1062,7 @@ def register_avro( """ if table_partition_cols is None: table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) + table_partition_cols = _convert_table_partition_cols(table_partition_cols) self.ctx.register_avro( name, str(path), schema, file_extension, table_partition_cols ) @@ -1007,7 +1142,7 @@ def read_json( """ if table_partition_cols is None: table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) + table_partition_cols = _convert_table_partition_cols(table_partition_cols) return DataFrame( self.ctx.read_json( str(path), @@ -1025,10 +1160,11 @@ def read_csv( schema: pa.Schema | None = None, has_header: bool = True, delimiter: str = ",", - schema_infer_max_records: int = 1000, + schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA, file_extension: str = ".csv", table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None, file_compression_type: str | None = None, + options: CsvReadOptions | None = None, ) -> DataFrame: """Read a CSV data source. @@ -1046,26 +1182,51 @@ def read_csv( selected for data input. table_partition_cols: Partition columns. file_compression_type: File compression type. + options: Set advanced options for CSV reading. This cannot be + combined with any of the other options in this method. Returns: DataFrame representation of the read CSV files """ - if table_partition_cols is None: - table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) + path_arg = [str(p) for p in path] if isinstance(path, list) else str(path) + + if options is not None and ( + schema is not None + or not has_header + or delimiter != "," + or schema_infer_max_records != DEFAULT_MAX_INFER_SCHEMA + or file_extension != ".csv" + or table_partition_cols is not None + or file_compression_type is not None + ): + message = ( + "Combining CsvReadOptions parameter with additional options " + "is not supported. Use CsvReadOptions to set parameters." 
+ ) + warnings.warn( + message, + category=UserWarning, + stacklevel=2, + ) - path = [str(p) for p in path] if isinstance(path, list) else str(path) + options = ( + options + if options is not None + else CsvReadOptions( + schema=schema, + has_header=has_header, + delimiter=delimiter, + schema_infer_max_records=schema_infer_max_records, + file_extension=file_extension, + table_partition_cols=table_partition_cols, + file_compression_type=file_compression_type, + ) + ) return DataFrame( self.ctx.read_csv( - path, - schema, - has_header, - delimiter, - schema_infer_max_records, - file_extension, - table_partition_cols, - file_compression_type, + path_arg, + options.to_inner(), ) ) @@ -1077,7 +1238,7 @@ def read_parquet( file_extension: str = ".parquet", skip_metadata: bool = True, schema: pa.Schema | None = None, - file_sort_order: list[list[Expr | SortExpr]] | None = None, + file_sort_order: Sequence[Sequence[SortKey]] | None = None, ) -> DataFrame: """Read a Parquet source into a :py:class:`~datafusion.dataframe.Dataframe`. @@ -1094,19 +1255,17 @@ def read_parquet( schema: An optional schema representing the parquet files. If None, the parquet reader will try to infer it based on data in the file. - file_sort_order: Sort order for the file. + file_sort_order: Sort order for the file. Each sort key can be + specified as a column name (``str``), an expression + (``Expr``), or a ``SortExpr``. Returns: DataFrame representation of the read Parquet files """ if table_partition_cols is None: table_partition_cols = [] - table_partition_cols = self._convert_table_partition_cols(table_partition_cols) - file_sort_order = ( - [sort_list_to_raw_sort_list(f) for f in file_sort_order] - if file_sort_order is not None - else None - ) + table_partition_cols = _convert_table_partition_cols(table_partition_cols) + file_sort_order = self._convert_file_sort_order(file_sort_order) return DataFrame( self.ctx.read_parquet( str(path), @@ -1139,24 +1298,39 @@ def read_avro( """ if file_partition_cols is None: file_partition_cols = [] - file_partition_cols = self._convert_table_partition_cols(file_partition_cols) + file_partition_cols = _convert_table_partition_cols(file_partition_cols) return DataFrame( self.ctx.read_avro(str(path), schema, file_partition_cols, file_extension) ) - def read_table(self, table: Table) -> DataFrame: - """Creates a :py:class:`~datafusion.dataframe.DataFrame` from a table. - - For a :py:class:`~datafusion.catalog.Table` such as a - :py:class:`~datafusion.catalog.ListingTable`, create a - :py:class:`~datafusion.dataframe.DataFrame`. - """ - return DataFrame(self.ctx.read_table(table.table)) + def read_table( + self, table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset + ) -> DataFrame: + """Creates a :py:class:`~datafusion.dataframe.DataFrame` from a table.""" + return DataFrame(self.ctx.read_table(table)) def execute(self, plan: ExecutionPlan, partitions: int) -> RecordBatchStream: """Execute the ``plan`` and return the results.""" return RecordBatchStream(self.ctx.execute(plan._raw_plan, partitions)) + @staticmethod + def _convert_file_sort_order( + file_sort_order: Sequence[Sequence[SortKey]] | None, + ) -> list[list[expr_internal.SortExpr]] | None: + """Convert nested ``SortKey`` sequences into raw sort expressions. + + Each ``SortKey`` can be a column name string, an ``Expr``, or a + ``SortExpr`` and will be converted using + :func:`datafusion.expr.sort_list_to_raw_sort_list`. 
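+
+        For example, ``[["a", col("b").sort(ascending=False)]]`` mixes a plain
+        column name with an explicit ``SortExpr``.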
+ """ + # Convert each ``SortKey`` in the provided sort order to the low-level + # representation expected by the Rust bindings. + return ( + [sort_list_to_raw_sort_list(f) for f in file_sort_order] + if file_sort_order is not None + else None + ) + @staticmethod def _convert_table_partition_cols( table_partition_cols: list[tuple[str, str | pa.DataType]], @@ -1194,3 +1368,19 @@ def _convert_table_partition_cols( ) return converted_table_partition_cols + + def __datafusion_task_context_provider__(self) -> Any: + """Access the PyCapsule FFI_TaskContextProvider.""" + return self.ctx.__datafusion_task_context_provider__() + + def __datafusion_logical_extension_codec__(self) -> Any: + """Access the PyCapsule FFI_LogicalExtensionCodec.""" + return self.ctx.__datafusion_logical_extension_codec__() + + def with_logical_extension_codec(self, codec: Any) -> SessionContext: + """Create a new session context with specified codec. + + This only supports codecs that have been implemented using the + FFI interface. + """ + return self.ctx.with_logical_extension_codec(codec) diff --git a/python/datafusion/dataframe.py b/python/datafusion/dataframe.py index a1df7e080..d302c12a5 100644 --- a/python/datafusion/dataframe.py +++ b/python/datafusion/dataframe.py @@ -22,13 +22,11 @@ from __future__ import annotations import warnings +from collections.abc import AsyncIterator, Iterable, Iterator, Sequence from typing import ( TYPE_CHECKING, Any, - Iterable, Literal, - Optional, - Union, overload, ) @@ -38,20 +36,31 @@ from typing_extensions import deprecated # Python 3.12 from datafusion._internal import DataFrame as DataFrameInternal -from datafusion.expr import Expr, SortExpr, sort_or_default +from datafusion._internal import DataFrameWriteOptions as DataFrameWriteOptionsInternal +from datafusion._internal import InsertOp as InsertOpInternal +from datafusion._internal import ParquetColumnOptions as ParquetColumnOptionsInternal +from datafusion._internal import ParquetWriterOptions as ParquetWriterOptionsInternal +from datafusion.expr import ( + Expr, + SortExpr, + SortKey, + ensure_expr, + ensure_expr_list, + expr_list_to_raw_expr_list, + sort_list_to_raw_sort_list, +) from datafusion.plan import ExecutionPlan, LogicalPlan -from datafusion.record_batch import RecordBatchStream +from datafusion.record_batch import RecordBatch, RecordBatchStream if TYPE_CHECKING: import pathlib - from typing import Callable, Sequence + from collections.abc import Callable import pandas as pd import polars as pl import pyarrow as pa - from datafusion._internal import DataFrame as DataFrameInternal - from datafusion._internal import expr as expr_internal + from datafusion.catalog import Table from enum import Enum @@ -68,7 +77,7 @@ class Compression(Enum): LZ4 = "lz4" # lzo is not implemented yet # https://github.com/apache/arrow-rs/issues/6970 - # LZO = "lzo" + # LZO = "lzo" # noqa: ERA001 ZSTD = "zstd" LZ4_RAW = "lz4_raw" @@ -95,7 +104,7 @@ def from_str(cls: type[Compression], value: str) -> Compression: """ raise ValueError(error_msg) from err - def get_default_level(self) -> Optional[int]: + def get_default_level(self) -> int | None: """Get the default compression level for the compression type. Returns: @@ -114,9 +123,190 @@ def get_default_level(self) -> Optional[int]: return None +class ParquetWriterOptions: + """Advanced parquet writer options. + + Allows settings the writer options that apply to the entire file. 
Some options can
+    also be set on a column by column basis, with the field ``column_specific_options``
+    (see ``ParquetColumnOptions``).
+    """
+
+    def __init__(
+        self,
+        data_pagesize_limit: int = 1024 * 1024,
+        write_batch_size: int = 1024,
+        writer_version: str = "1.0",
+        skip_arrow_metadata: bool = False,
+        compression: str | None = "zstd(3)",
+        compression_level: int | None = None,
+        dictionary_enabled: bool | None = True,
+        dictionary_page_size_limit: int = 1024 * 1024,
+        statistics_enabled: str | None = "page",
+        max_row_group_size: int = 1024 * 1024,
+        created_by: str = "datafusion-python",
+        column_index_truncate_length: int | None = 64,
+        statistics_truncate_length: int | None = None,
+        data_page_row_count_limit: int = 20_000,
+        encoding: str | None = None,
+        bloom_filter_on_write: bool = False,
+        bloom_filter_fpp: float | None = None,
+        bloom_filter_ndv: int | None = None,
+        allow_single_file_parallelism: bool = True,
+        maximum_parallel_row_group_writers: int = 1,
+        maximum_buffered_record_batches_per_stream: int = 2,
+        column_specific_options: dict[str, ParquetColumnOptions] | None = None,
+    ) -> None:
+        """Initialize the ParquetWriterOptions.
+
+        Args:
+            data_pagesize_limit: Sets best effort maximum size of data page in bytes.
+            write_batch_size: Sets write_batch_size in bytes.
+            writer_version: Sets parquet writer version. Valid values are ``1.0`` and
+                ``2.0``.
+            skip_arrow_metadata: Skip encoding the embedded arrow metadata in the
+                KV_meta.
+            compression: Compression type to use. Default is ``zstd(3)``.
+                Available compression types are
+
+                - ``uncompressed``: No compression.
+                - ``snappy``: Snappy compression.
+                - ``gzip(n)``: Gzip compression with level n.
+                - ``brotli(n)``: Brotli compression with level n.
+                - ``lz4``: LZ4 compression.
+                - ``lz4_raw``: LZ4_RAW compression.
+                - ``zstd(n)``: Zstandard compression with level n.
+            compression_level: Compression level to set.
+            dictionary_enabled: Sets if dictionary encoding is enabled. If ``None``,
+                uses the default parquet writer setting.
+            dictionary_page_size_limit: Sets best effort maximum dictionary page size,
+                in bytes.
+            statistics_enabled: Sets if statistics are enabled for any column. Valid
+                values are ``none``, ``chunk``, and ``page``. If ``None``, uses the
+                default parquet writer setting.
+            max_row_group_size: Target maximum number of rows in each row group
+                (defaults to 1M rows). Writing larger row groups requires more memory
+                to write, but can get better compression and be faster to read.
+            created_by: Sets "created by" property.
+            column_index_truncate_length: Sets column index truncate length.
+            statistics_truncate_length: Sets statistics truncate length. If ``None``,
+                uses the default parquet writer setting.
+            data_page_row_count_limit: Sets best effort maximum number of rows in a data
+                page.
+            encoding: Sets default encoding for any column. Valid values are ``plain``,
+                ``plain_dictionary``, ``rle``, ``bit_packed``, ``delta_binary_packed``,
+                ``delta_length_byte_array``, ``delta_byte_array``, ``rle_dictionary``,
+                and ``byte_stream_split``. If ``None``, uses the default parquet writer
+                setting.
+            bloom_filter_on_write: Write bloom filters for all columns when creating
+                parquet files.
+            bloom_filter_fpp: Sets bloom filter false positive probability. If ``None``,
+                uses the default parquet writer setting.
+            bloom_filter_ndv: Sets bloom filter number of distinct values. If ``None``,
+                uses the default parquet writer setting.
+            allow_single_file_parallelism: Controls whether DataFusion will attempt to
+                speed up writing parquet files by serializing them in parallel. Each
+                column in each row group in each output file is serialized in parallel
+                leveraging a maximum possible core count of
+                ``n_files * n_row_groups * n_columns``.
+            maximum_parallel_row_group_writers: By default parallel parquet writer is
+                tuned for minimum memory usage in a streaming execution plan. You may
+                see a performance benefit when writing large parquet files by increasing
+                ``maximum_parallel_row_group_writers`` and
+                ``maximum_buffered_record_batches_per_stream`` if your system has idle
+                cores and can tolerate additional memory usage. Boosting these values is
+                likely worthwhile when writing out already in-memory data, such as from
+                a cached data frame.
+            maximum_buffered_record_batches_per_stream: See
+                ``maximum_parallel_row_group_writers``.
+            column_specific_options: Overrides options for specific columns. If a column
+                is not a part of this dictionary, it will use the parameters provided
+                here.
+        """
+        self.data_pagesize_limit = data_pagesize_limit
+        self.write_batch_size = write_batch_size
+        self.writer_version = writer_version
+        self.skip_arrow_metadata = skip_arrow_metadata
+        if compression_level is not None:
+            self.compression = f"{compression}({compression_level})"
+        else:
+            self.compression = compression
+        self.dictionary_enabled = dictionary_enabled
+        self.dictionary_page_size_limit = dictionary_page_size_limit
+        self.statistics_enabled = statistics_enabled
+        self.max_row_group_size = max_row_group_size
+        self.created_by = created_by
+        self.column_index_truncate_length = column_index_truncate_length
+        self.statistics_truncate_length = statistics_truncate_length
+        self.data_page_row_count_limit = data_page_row_count_limit
+        self.encoding = encoding
+        self.bloom_filter_on_write = bloom_filter_on_write
+        self.bloom_filter_fpp = bloom_filter_fpp
+        self.bloom_filter_ndv = bloom_filter_ndv
+        self.allow_single_file_parallelism = allow_single_file_parallelism
+        self.maximum_parallel_row_group_writers = maximum_parallel_row_group_writers
+        self.maximum_buffered_record_batches_per_stream = (
+            maximum_buffered_record_batches_per_stream
+        )
+        self.column_specific_options = column_specific_options
+
+
+class ParquetColumnOptions:
+    """Parquet options for individual columns.
+
+    Contains the available options that can be applied for an individual Parquet column,
+    replacing the global options in ``ParquetWriterOptions``.
+    """
+
+    def __init__(
+        self,
+        encoding: str | None = None,
+        dictionary_enabled: bool | None = None,
+        compression: str | None = None,
+        statistics_enabled: str | None = None,
+        bloom_filter_enabled: bool | None = None,
+        bloom_filter_fpp: float | None = None,
+        bloom_filter_ndv: int | None = None,
+    ) -> None:
+        """Initialize the ParquetColumnOptions.
+
+        Args:
+            encoding: Sets encoding for the column path. Valid values are: ``plain``,
+                ``plain_dictionary``, ``rle``, ``bit_packed``, ``delta_binary_packed``,
+                ``delta_length_byte_array``, ``delta_byte_array``, ``rle_dictionary``,
+                and ``byte_stream_split``. These values are not case-sensitive. If
+                ``None``, uses the default parquet options.
+            dictionary_enabled: Sets if dictionary encoding is enabled for the column
+                path. If ``None``, uses the default parquet options.
+            compression: Sets default parquet compression codec for the column path.
+                Valid values are ``uncompressed``, ``snappy``, ``gzip(level)``, ``lzo``,
+                ``brotli(level)``, ``lz4``, ``zstd(level)``, and ``lz4_raw``. These
+                values are not case-sensitive. If ``None``, uses the default parquet
+                options.
+            statistics_enabled: Sets if statistics are enabled for the column. Valid
+                values are ``none``, ``chunk``, and ``page``. These values are not
+                case-sensitive. If ``None``, uses the default parquet options.
+            bloom_filter_enabled: Sets if bloom filter is enabled for the column path.
+                If ``None``, uses the default parquet options.
+            bloom_filter_fpp: Sets bloom filter false positive probability for the
+                column path. If ``None``, uses the default parquet options.
+            bloom_filter_ndv: Sets bloom filter number of distinct values. If ``None``,
+                uses the default parquet options.
+        """
+        self.encoding = encoding
+        self.dictionary_enabled = dictionary_enabled
+        self.compression = compression
+        self.statistics_enabled = statistics_enabled
+        self.bloom_filter_enabled = bloom_filter_enabled
+        self.bloom_filter_fpp = bloom_filter_fpp
+        self.bloom_filter_ndv = bloom_filter_ndv
+
+
 class DataFrame:
     """Two dimensional table representation of data.
 
+    DataFrame objects are iterable; iterating over a DataFrame yields
+    :class:`datafusion.RecordBatch` instances lazily.
+
     See :ref:`user_guide_concepts` in the online documentation for more information.
     """
 
@@ -128,12 +318,24 @@ def __init__(self, df: DataFrameInternal) -> None:
         """
         self.df = df
 
-    def into_view(self) -> pa.Table:
-        """Convert DataFrame as a ViewTable which can be used in register_table."""
-        return self.df.into_view()
+    def into_view(self, temporary: bool = False) -> Table:
+        """Convert ``DataFrame`` into a :class:`~datafusion.Table`.
+
+        Examples:
+            >>> from datafusion import SessionContext
+            >>> ctx = SessionContext()
+            >>> df = ctx.sql("SELECT 1 AS value")
+            >>> view = df.into_view()
+            >>> ctx.register_table("values_view", view)
+            >>> df.collect()  # The DataFrame is still usable
+            >>> ctx.sql("SELECT value FROM values_view").collect()
+        """
+        from datafusion.catalog import Table as _Table
+
+        return _Table(self.df.into_view(temporary))
 
     def __getitem__(self, key: str | list[str]) -> DataFrame:
-        """Return a new :py:class`DataFrame` with the specified column or columns.
+        """Return a new :py:class:`DataFrame` with the specified column or columns.
 
         Args:
             key: Column name or list of column names to select.
@@ -154,6 +356,20 @@ def __repr__(self) -> str:
     def _repr_html_(self) -> str:
         return self.df._repr_html_()
 
+    @staticmethod
+    def default_str_repr(
+        batches: list[pa.RecordBatch],
+        schema: pa.Schema,
+        has_more: bool,
+        table_uuid: str | None = None,
+    ) -> str:
+        """Return the default string representation of a DataFrame.
+
+        This method is used by the default formatter and implemented in Rust for
+        performance reasons.
+        """
+        return DataFrameInternal.default_str_repr(batches, schema, has_more, table_uuid)
+
     def describe(self) -> DataFrame:
         """Return the statistics for this DataFrame.
 
@@ -189,6 +405,17 @@ def select_columns(self, *args: str) -> DataFrame:
         """
         return self.select(*args)
 
+    def select_exprs(self, *args: str) -> DataFrame:
+        """Project an arbitrary list of expression strings into a new DataFrame.
+
+        This method will parse string expressions into logical plan expressions.
+        The output DataFrame has one column for each expression.
+
+        Returns:
+            DataFrame containing only the selected expressions as columns.
+ """ + return self.df.select_exprs(*args) + def select(self, *exprs: Expr | str) -> DataFrame: """Project arbitrary expressions into a new :py:class:`DataFrame`. @@ -208,44 +435,104 @@ def select(self, *exprs: Expr | str) -> DataFrame: df = df.select("a", col("b"), col("a").alias("alternate_a")) """ - exprs_internal = [ - Expr.column(arg).expr if isinstance(arg, str) else arg.expr for arg in exprs - ] + exprs_internal = expr_list_to_raw_expr_list(exprs) return DataFrame(self.df.select(*exprs_internal)) def drop(self, *columns: str) -> DataFrame: """Drop arbitrary amount of columns. + Column names are case-sensitive and do not require double quotes like + other operations such as `select`. Leading and trailing double quotes + are allowed and will be automatically stripped if present. + Args: - columns: Column names to drop from the dataframe. + columns: Column names to drop from the dataframe. Both ``column_name`` + and ``"column_name"`` are accepted. Returns: DataFrame with those columns removed in the projection. + + Example Usage:: + + df.drop('ID_For_Students') # Works + df.drop('"ID_For_Students"') # Also works (quotes stripped) """ - return DataFrame(self.df.drop(*columns)) + normalized_columns = [] + for col in columns: + if col.startswith('"') and col.endswith('"'): + normalized_columns.append(col.strip('"')) # Strip double quotes + else: + normalized_columns.append(col) + + return DataFrame(self.df.drop(*normalized_columns)) - def filter(self, *predicates: Expr) -> DataFrame: + def filter(self, *predicates: Expr | str) -> DataFrame: """Return a DataFrame for which ``predicate`` evaluates to ``True``. Rows for which ``predicate`` evaluates to ``False`` or ``None`` are filtered - out. If more than one predicate is provided, these predicates will be - combined as a logical AND. If more complex logic is required, see the - logical operations in :py:mod:`~datafusion.functions`. + out. If more than one predicate is provided, these predicates will be + combined as a logical AND. Each ``predicate`` can be an + :class:`~datafusion.expr.Expr` created using helper functions such as + :func:`datafusion.col` or :func:`datafusion.lit`, or a SQL expression string + that will be parsed against the DataFrame schema. If more complex logic is + required, see the logical operations in :py:mod:`~datafusion.functions`. + + Example:: + + from datafusion import col, lit + df.filter(col("a") > lit(1)) + df.filter("a > 1") Args: - predicates: Predicate expression(s) to filter the DataFrame. + predicates: Predicate expression(s) or SQL strings to filter the DataFrame. Returns: DataFrame after filtering. """ df = self.df - for p in predicates: - df = df.filter(p.expr) + for predicate in predicates: + expr = ( + self.parse_sql_expr(predicate) + if isinstance(predicate, str) + else predicate + ) + df = df.filter(ensure_expr(expr)) return DataFrame(df) - def with_column(self, name: str, expr: Expr) -> DataFrame: + def parse_sql_expr(self, expr: str) -> Expr: + """Creates logical expression from a SQL query text. + + The expression is created and processed against the current schema. + + Example:: + + from datafusion import col, lit + df.parse_sql_expr("a > 1") + + should produce: + + col("a") > lit(1) + + Args: + expr: Expression string to be converted to datafusion expression + + Returns: + Logical expression . + """ + return Expr(self.df.parse_sql_expr(expr)) + + def with_column(self, name: str, expr: Expr | str) -> DataFrame: """Add an additional column to the DataFrame. 
+        The ``expr`` must be an :class:`~datafusion.expr.Expr` constructed with
+        :func:`datafusion.col` or :func:`datafusion.lit`, or a SQL expression
+        string that will be parsed against the DataFrame schema.
+
+        Example::
+
+            from datafusion import col, lit
+            df.with_column("b", col("a") + lit(1))
+
         Args:
             name: Name of the column to add.
             expr: Expression to compute the column.
@@ -253,49 +540,69 @@ def with_column(self, name: str, expr: Expr) -> DataFrame:
         Returns:
             DataFrame with the new column.
         """
-        return DataFrame(self.df.with_column(name, expr.expr))
+        expr = self.parse_sql_expr(expr) if isinstance(expr, str) else expr
+
+        return DataFrame(self.df.with_column(name, ensure_expr(expr)))
 
     def with_columns(
-        self, *exprs: Expr | Iterable[Expr], **named_exprs: Expr
+        self, *exprs: Expr | str | Iterable[Expr | str], **named_exprs: Expr | str
     ) -> DataFrame:
         """Add columns to the DataFrame.
 
-        By passing expressions, iteratables of expressions, or named expressions. To
-        pass named expressions use the form name=Expr.
+        Columns can be passed as expressions, iterables of expressions, string SQL
+        expressions, or named expressions.
+        All expressions must be :class:`~datafusion.expr.Expr` objects created via
+        :func:`datafusion.col` or :func:`datafusion.lit`, or SQL expression strings.
+        To pass named expressions use the form ``name=Expr``.
 
-        Example usage: The following will add 4 columns labeled a, b, c, and d::
+        Example usage: The following will add 4 columns labeled ``a``, ``b``, ``c``,
+        and ``d``::
 
+            from datafusion import col, lit
             df = df.with_columns(
-                lit(0).alias('a'),
-                [lit(1).alias('b'), lit(2).alias('c')],
+                col("x").alias("a"),
+                [lit(1).alias("b"), col("y").alias("c")],
                 d=lit(3)
-                )
+            )
+
+        Equivalent example using just SQL strings::
+
+            df = df.with_columns(
+                "x as a",
+                ["1 as b", "y as c"],
+                d="3"
+            )
 
         Args:
-            exprs: Either a single expression or an iterable of expressions to add.
+            exprs: A single expression, an iterable of expressions, or SQL
+                expression strings to add.
             named_exprs: Named expressions in the form of ``name=expr``
 
         Returns:
             DataFrame with the new columns added.
""" + expressions = [] + for expr in exprs: + if isinstance(expr, str): + expressions.append(self.parse_sql_expr(expr).expr) + elif isinstance(expr, Iterable) and not isinstance( + expr, Expr | str | bytes | bytearray + ): + expressions.extend( + [ + self.parse_sql_expr(e).expr + if isinstance(e, str) + else ensure_expr(e) + for e in expr + ] + ) + else: + expressions.append(ensure_expr(expr)) - def _simplify_expression( - *exprs: Expr | Iterable[Expr], **named_exprs: Expr - ) -> list[expr_internal.Expr]: - expr_list = [] - for expr in exprs: - if isinstance(expr, Expr): - expr_list.append(expr.expr) - elif isinstance(expr, Iterable): - expr_list.extend(inner_expr.expr for inner_expr in expr) - else: - raise NotImplementedError - if named_exprs: - for alias, expr in named_exprs.items(): - expr_list.append(expr.alias(alias).expr) - return expr_list - - expressions = _simplify_expression(*exprs, **named_exprs) + for alias, expr in named_exprs.items(): + e = self.parse_sql_expr(expr) if isinstance(expr, str) else expr + ensure_expr(e) + expressions.append(e.alias(alias).expr) return DataFrame(self.df.with_columns(expressions)) @@ -317,37 +624,47 @@ def with_column_renamed(self, old_name: str, new_name: str) -> DataFrame: return DataFrame(self.df.with_column_renamed(old_name, new_name)) def aggregate( - self, group_by: list[Expr] | Expr, aggs: list[Expr] | Expr + self, + group_by: Sequence[Expr | str] | Expr | str, + aggs: Sequence[Expr] | Expr, ) -> DataFrame: """Aggregates the rows of the current DataFrame. Args: - group_by: List of expressions to group by. - aggs: List of expressions to aggregate. + group_by: Sequence of expressions or column names to group by. + aggs: Sequence of expressions to aggregate. Returns: DataFrame after aggregation. """ - group_by = group_by if isinstance(group_by, list) else [group_by] - aggs = aggs if isinstance(aggs, list) else [aggs] - - group_by = [e.expr for e in group_by] - aggs = [e.expr for e in aggs] - return DataFrame(self.df.aggregate(group_by, aggs)) - - def sort(self, *exprs: Expr | SortExpr) -> DataFrame: - """Sort the DataFrame by the specified sorting expressions. + group_by_list = ( + list(group_by) + if isinstance(group_by, Sequence) and not isinstance(group_by, Expr | str) + else [group_by] + ) + aggs_list = ( + list(aggs) + if isinstance(aggs, Sequence) and not isinstance(aggs, Expr) + else [aggs] + ) + + group_by_exprs = expr_list_to_raw_expr_list(group_by_list) + aggs_exprs = ensure_expr_list(aggs_list) + return DataFrame(self.df.aggregate(group_by_exprs, aggs_exprs)) + + def sort(self, *exprs: SortKey) -> DataFrame: + """Sort the DataFrame by the specified sorting expressions or column names. Note that any expression can be turned into a sort expression by - calling its` ``sort`` method. + calling its ``sort`` method. Args: - exprs: Sort expressions, applied in order. + exprs: Sort expressions or column names, applied in order. Returns: DataFrame after sorting. """ - exprs_raw = [sort_or_default(expr) for expr in exprs] + exprs_raw = sort_list_to_raw_sort_list(exprs) return DataFrame(self.df.sort(*exprs_raw)) def cast(self, mapping: dict[str, pa.DataType[Any]]) -> DataFrame: @@ -402,7 +719,7 @@ def tail(self, n: int = 5) -> DataFrame: def collect(self) -> list[pa.RecordBatch]: """Execute this :py:class:`DataFrame` and collect results into memory. - Prior to calling ``collect``, modifying a DataFrme simply updates a plan + Prior to calling ``collect``, modifying a DataFrame simply updates a plan (no actual computation is performed). 
Calling ``collect`` triggers the computation. @@ -411,6 +728,10 @@ def collect(self) -> list[pa.RecordBatch]: """ return self.df.collect() + def collect_column(self, column_name: str) -> pa.Array | pa.ChunkedArray: + """Executes this :py:class:`DataFrame` for a single column.""" + return self.df.collect_column(column_name) + def cache(self) -> DataFrame: """Cache the DataFrame as a memory table. @@ -457,6 +778,7 @@ def join( left_on: None = None, right_on: None = None, join_keys: None = None, + coalesce_duplicate_keys: bool = True, ) -> DataFrame: ... @overload @@ -469,6 +791,7 @@ def join( left_on: str | Sequence[str], right_on: str | Sequence[str], join_keys: tuple[list[str], list[str]] | None = None, + coalesce_duplicate_keys: bool = True, ) -> DataFrame: ... @overload @@ -481,6 +804,7 @@ def join( join_keys: tuple[list[str], list[str]], left_on: None = None, right_on: None = None, + coalesce_duplicate_keys: bool = True, ) -> DataFrame: ... def join( @@ -492,6 +816,7 @@ def join( left_on: str | Sequence[str] | None = None, right_on: str | Sequence[str] | None = None, join_keys: tuple[list[str], list[str]] | None = None, + coalesce_duplicate_keys: bool = True, ) -> DataFrame: """Join this :py:class:`DataFrame` with another :py:class:`DataFrame`. @@ -504,33 +829,37 @@ def join( "right", "full", "semi", "anti". left_on: Join column of the left dataframe. right_on: Join column of the right dataframe. + coalesce_duplicate_keys: When True, coalesce the columns + from the right DataFrame and left DataFrame + that have identical names in the ``on`` fields. join_keys: Tuple of two lists of column names to join on. [Deprecated] Returns: DataFrame after join. """ + if join_keys is not None: + warnings.warn( + "`join_keys` is deprecated, use `on` or `left_on` with `right_on`", + category=DeprecationWarning, + stacklevel=2, + ) + left_on = join_keys[0] + right_on = join_keys[1] + # This check is to prevent breaking API changes where users prior to # DF 43.0.0 would pass the join_keys as a positional argument instead # of a keyword argument. if ( isinstance(on, tuple) - and len(on) == 2 + and len(on) == 2 # noqa: PLR2004 and isinstance(on[0], list) and isinstance(on[1], list) ): # We know this is safe because we've checked the types - join_keys = on # type: ignore[assignment] + left_on = on[0] + right_on = on[1] on = None - if join_keys is not None: - warnings.warn( - "`join_keys` is deprecated, use `on` or `left_on` with `right_on`", - category=DeprecationWarning, - stacklevel=2, - ) - left_on = join_keys[0] - right_on = join_keys[1] - if on is not None: if left_on is not None or right_on is not None: error_msg = "`left_on` or `right_on` should not provided with `on`" @@ -549,7 +878,9 @@ def join( if isinstance(right_on, str): right_on = [right_on] - return DataFrame(self.df.join(right.df, how, left_on, right_on)) + return DataFrame( + self.df.join(right.df, how, left_on, right_on, coalesce_duplicate_keys) + ) def join_on( self, @@ -559,8 +890,14 @@ def join_on( ) -> DataFrame: """Join two :py:class:`DataFrame` using the specified expressions. - On expressions are used to support in-equality predicates. Equality - predicates are correctly optimized + Join predicates must be :class:`~datafusion.expr.Expr` objects, typically + built with :func:`datafusion.col`. On expressions are used to support + in-equality predicates. Equality predicates are correctly optimized. 
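As a sketch of the new ``coalesce_duplicate_keys`` flag handled above (``left_df``/``right_df`` are hypothetical): with the default ``True`` the shared key column appears once in the result, and with ``False`` both sides' key columns are preserved:

    joined = left_df.join(right_df, on="id", how="inner")  # one "id" column
    both_keys = left_df.join(
        right_df, on="id", how="inner", coalesce_duplicate_keys=False
    )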
+ + Example:: + + from datafusion import col + df.join_on(other_df, col("id") == col("other_id")) Args: right: Other DataFrame to join with. @@ -571,22 +908,19 @@ def join_on( Returns: DataFrame after join. """ - exprs = [expr.expr for expr in on_exprs] + exprs = [ensure_expr(expr) for expr in on_exprs] return DataFrame(self.df.join_on(right.df, exprs, how)) - def explain(self, verbose: bool = False, analyze: bool = False) -> DataFrame: - """Return a DataFrame with the explanation of its plan so far. + def explain(self, verbose: bool = False, analyze: bool = False) -> None: + """Print an explanation of the DataFrame's plan so far. If ``analyze`` is specified, runs the plan and reports metrics. Args: verbose: If ``True``, more details will be included. - analyze: If ``Tru`e``, the plan will run and metrics reported. - - Returns: - DataFrame with the explanation of its plan. + analyze: If ``True``, the plan will run and metrics reported. """ - return DataFrame(self.df.explain(verbose, analyze)) + self.df.explain(verbose, analyze) def logical_plan(self) -> LogicalPlan: """Return the unoptimized ``LogicalPlan``. @@ -625,17 +959,20 @@ def repartition(self, num: int) -> DataFrame: """ return DataFrame(self.df.repartition(num)) - def repartition_by_hash(self, *exprs: Expr, num: int) -> DataFrame: + def repartition_by_hash(self, *exprs: Expr | str, num: int) -> DataFrame: """Repartition a DataFrame using a hash partitioning scheme. Args: - exprs: Expressions to evaluate and perform hashing on. + exprs: Expressions or a SQL expression string to evaluate + and perform hashing on. num: Number of partitions to repartition the DataFrame into. Returns: Repartitioned DataFrame. """ - exprs = [expr.expr for expr in exprs] + exprs = [self.parse_sql_expr(e) if isinstance(e, str) else e for e in exprs] + exprs = expr_list_to_raw_expr_list(exprs) + return DataFrame(self.df.repartition_by_hash(*exprs, num=num)) def union(self, other: DataFrame, distinct: bool = False) -> DataFrame: @@ -692,40 +1029,88 @@ def except_all(self, other: DataFrame) -> DataFrame: """ return DataFrame(self.df.except_all(other.df)) - def write_csv(self, path: str | pathlib.Path, with_header: bool = False) -> None: + def write_csv( + self, + path: str | pathlib.Path, + with_header: bool = False, + write_options: DataFrameWriteOptions | None = None, + ) -> None: """Execute the :py:class:`DataFrame` and write the results to a CSV file. Args: path: Path of the CSV file to write. with_header: If true, output the CSV header row. + write_options: Options that impact how the DataFrame is written. """ - self.df.write_csv(str(path), with_header) + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_csv(str(path), with_header, raw_write_options) + + @overload + def write_parquet( + self, + path: str | pathlib.Path, + compression: str, + compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... + @overload def write_parquet( self, path: str | pathlib.Path, - compression: Union[str, Compression] = Compression.ZSTD, + compression: Compression = Compression.ZSTD, compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... + + @overload + def write_parquet( + self, + path: str | pathlib.Path, + compression: ParquetWriterOptions, + compression_level: None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... 
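A sketch of the three ``write_parquet`` overloads declared above (the output path is hypothetical):

    from datafusion.dataframe import Compression, ParquetWriterOptions

    df.write_parquet("out.parquet", compression="zstd", compression_level=10)
    df.write_parquet("out.parquet", compression=Compression.SNAPPY)
    # Passing ParquetWriterOptions dispatches to write_parquet_with_options;
    # compression_level must be left as None in that case.
    df.write_parquet(
        "out.parquet", compression=ParquetWriterOptions(compression="snappy")
    )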
+ + def write_parquet( + self, + path: str | pathlib.Path, + compression: str | Compression | ParquetWriterOptions = Compression.ZSTD, + compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, ) -> None: """Execute the :py:class:`DataFrame` and write the results to a Parquet file. + Available compression types are: + + - "uncompressed": No compression. + - "snappy": Snappy compression. + - "gzip": Gzip compression. + - "brotli": Brotli compression. + - "lz4": LZ4 compression. + - "lz4_raw": LZ4_RAW compression. + - "zstd": Zstandard compression. + + LZO compression is not yet implemented in arrow-rs and is therefore + excluded. + Args: path: Path of the Parquet file to write. compression: Compression type to use. Default is "ZSTD". - Available compression types are: - - "uncompressed": No compression. - - "snappy": Snappy compression. - - "gzip": Gzip compression. - - "brotli": Brotli compression. - - "lz4": LZ4 compression. - - "lz4_raw": LZ4_RAW compression. - - "zstd": Zstandard compression. - Note: LZO is not yet implemented in arrow-rs and is therefore excluded. compression_level: Compression level to use. For ZSTD, the recommended range is 1 to 22, with the default being 4. Higher levels provide better compression but slower speed. + write_options: Options that impact how the DataFrame is written. """ - # Convert string to Compression enum if necessary + if isinstance(compression, ParquetWriterOptions): + if compression_level is not None: + msg = "compression_level should be None when using ParquetWriterOptions" + raise ValueError(msg) + self.write_parquet_with_options(path, compression) + return + if isinstance(compression, str): compression = Compression.from_str(compression) @@ -735,15 +1120,105 @@ def write_parquet( ): compression_level = compression.get_default_level() - self.df.write_parquet(str(path), compression.value, compression_level) + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_parquet( + str(path), + compression.value, + compression_level, + raw_write_options, + ) + + def write_parquet_with_options( + self, + path: str | pathlib.Path, + options: ParquetWriterOptions, + write_options: DataFrameWriteOptions | None = None, + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a Parquet file. + + Allows advanced writer options to be set with `ParquetWriterOptions`. - def write_json(self, path: str | pathlib.Path) -> None: + Args: + path: Path of the Parquet file to write. + options: Sets the writer parquet options (see `ParquetWriterOptions`). + write_options: Options that impact how the DataFrame is written. 
+ """ + options_internal = ParquetWriterOptionsInternal( + options.data_pagesize_limit, + options.write_batch_size, + options.writer_version, + options.skip_arrow_metadata, + options.compression, + options.dictionary_enabled, + options.dictionary_page_size_limit, + options.statistics_enabled, + options.max_row_group_size, + options.created_by, + options.column_index_truncate_length, + options.statistics_truncate_length, + options.data_page_row_count_limit, + options.encoding, + options.bloom_filter_on_write, + options.bloom_filter_fpp, + options.bloom_filter_ndv, + options.allow_single_file_parallelism, + options.maximum_parallel_row_group_writers, + options.maximum_buffered_record_batches_per_stream, + ) + + column_specific_options_internal = {} + for column, opts in (options.column_specific_options or {}).items(): + column_specific_options_internal[column] = ParquetColumnOptionsInternal( + bloom_filter_enabled=opts.bloom_filter_enabled, + encoding=opts.encoding, + dictionary_enabled=opts.dictionary_enabled, + compression=opts.compression, + statistics_enabled=opts.statistics_enabled, + bloom_filter_fpp=opts.bloom_filter_fpp, + bloom_filter_ndv=opts.bloom_filter_ndv, + ) + + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_parquet_with_options( + str(path), + options_internal, + column_specific_options_internal, + raw_write_options, + ) + + def write_json( + self, + path: str | pathlib.Path, + write_options: DataFrameWriteOptions | None = None, + ) -> None: """Execute the :py:class:`DataFrame` and write the results to a JSON file. Args: path: Path of the JSON file to write. + write_options: Options that impact how the DataFrame is written. + """ + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_json(str(path), write_options=raw_write_options) + + def write_table( + self, table_name: str, write_options: DataFrameWriteOptions | None = None + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a table. + + The table must be registered with the session to perform this operation. + Not all table providers support writing operations. See the individual + implementations for details. """ - self.df.write_json(str(path)) + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_table(table_name, raw_write_options) def to_arrow_table(self) -> pa.Table: """Execute the :py:class:`DataFrame` and convert it into an Arrow Table. @@ -832,22 +1307,55 @@ def unnest_columns(self, *columns: str, preserve_nulls: bool = True) -> DataFram columns = list(columns) return DataFrame(self.df.unnest_columns(columns, preserve_nulls=preserve_nulls)) - def __arrow_c_stream__(self, requested_schema: pa.Schema) -> Any: - """Export an Arrow PyCapsule Stream. + def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: + """Export the DataFrame as an Arrow C Stream. + + The DataFrame is executed using DataFusion's streaming APIs and exposed via + Arrow's C Stream interface. Record batches are produced incrementally, so the + full result set is never materialized in memory. - This will execute and collect the DataFrame. We will attempt to respect the - requested schema, but only trivial transformations will be applied such as only - returning the fields listed in the requested schema if their data types match - those in the DataFrame. 
+ When ``requested_schema`` is provided, DataFusion applies only simple + projections such as selecting a subset of existing columns or reordering + them. Column renaming, computed expressions, or type coercion are not + supported through this interface. Args: - requested_schema: Attempt to provide the DataFrame using this schema. + requested_schema: Either a :py:class:`pyarrow.Schema` or an Arrow C + Schema capsule (``PyCapsule``) produced by + ``schema._export_to_c_capsule()``. The DataFrame will attempt to + align its output with the fields and order specified by this schema. Returns: - Arrow PyCapsule object. + Arrow ``PyCapsule`` object representing an ``ArrowArrayStream``. + + For practical usage patterns, see the Apache Arrow streaming + documentation: https://arrow.apache.org/docs/python/ipc.html#streaming. + + For details on DataFusion's Arrow integration and DataFrame streaming, + see the user guide (user-guide/io/arrow and user-guide/dataframe/index). + + Notes: + The Arrow C Data Interface PyCapsule details are documented by Apache + Arrow and can be found at: + https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html """ + # ``DataFrame.__arrow_c_stream__`` in the Rust extension leverages + # ``execute_stream_partitioned`` under the hood to stream batches while + # preserving the original partition order. return self.df.__arrow_c_stream__(requested_schema) + def __iter__(self) -> Iterator[RecordBatch]: + """Return an iterator over this DataFrame's record batches.""" + return iter(self.execute_stream()) + + def __aiter__(self) -> AsyncIterator[RecordBatch]: + """Return an async iterator over this DataFrame's record batches. + + We're using __aiter__ because we support Python < 3.10 where aiter() is not + available. + """ + return self.execute_stream().__aiter__() + def transform(self, func: Callable[..., DataFrame], *args: Any) -> DataFrame: """Apply a function to the current DataFrame which returns another DataFrame. @@ -891,3 +1399,49 @@ def fill_null(self, value: Any, subset: list[str] | None = None) -> DataFrame: - For columns not in subset, the original column is kept unchanged """ return DataFrame(self.df.fill_null(value, subset)) + + +class InsertOp(Enum): + """Insert operation mode. + + These modes are used by the table writing feature to define how record + batches should be written to a table. + """ + + APPEND = InsertOpInternal.APPEND + """Appends new rows to the existing table without modifying any existing rows.""" + + REPLACE = InsertOpInternal.REPLACE + """Replace existing rows that collide with the inserted rows. + + Replacement is typically based on a unique key or primary key. + """ + + OVERWRITE = InsertOpInternal.OVERWRITE + """Overwrites all existing rows in the table with the new rows.""" + + +class DataFrameWriteOptions: + """Writer options for DataFrame. + + There is no guarantee the table provider supports all writer options. + See the individual implementation and documentation for details. 
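Two short sketches of the behavior described around here. First, the new iteration protocols: each step yields a ``datafusion.RecordBatch`` lazily rather than collecting the whole result. Second, write options driving ``write_table`` (table and column names are hypothetical; the target table must already be registered with a provider that supports inserts):

    # Lazy, batch-at-a-time consumption of a DataFrame:
    for batch in df:
        print(batch.to_pyarrow().num_rows)

    # Async variant via __aiter__:
    async def consume(df):
        async for batch in df:
            ...

    from datafusion import col
    from datafusion.dataframe import DataFrameWriteOptions, InsertOp

    opts = DataFrameWriteOptions(
        insert_operation=InsertOp.APPEND,
        partition_by=["year", "month"],
        sort_by=col("ts").sort(),
    )
    df.write_table("events", write_options=opts)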
+ """ + + def __init__( + self, + insert_operation: InsertOp | None = None, + single_file_output: bool = False, + partition_by: str | Sequence[str] | None = None, + sort_by: Expr | SortExpr | Sequence[Expr] | Sequence[SortExpr] | None = None, + ) -> None: + """Instantiate writer options for DataFrame.""" + if isinstance(partition_by, str): + partition_by = [partition_by] + + sort_by_raw = sort_list_to_raw_sort_list(sort_by) + insert_op = insert_operation.value if insert_operation is not None else None + + self._raw_write_options = DataFrameWriteOptionsInternal( + insert_op, single_file_output, partition_by, sort_by_raw + ) diff --git a/python/datafusion/dataframe_formatter.py b/python/datafusion/dataframe_formatter.py new file mode 100644 index 000000000..b8af45a1b --- /dev/null +++ b/python/datafusion/dataframe_formatter.py @@ -0,0 +1,843 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""HTML formatting utilities for DataFusion DataFrames.""" + +from __future__ import annotations + +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Protocol, + runtime_checkable, +) + +from datafusion._internal import DataFrame as DataFrameInternal + +if TYPE_CHECKING: + from collections.abc import Callable + + +def _validate_positive_int(value: Any, param_name: str) -> None: + """Validate that a parameter is a positive integer. + + Args: + value: The value to validate + param_name: Name of the parameter (used in error message) + + Raises: + ValueError: If the value is not a positive integer + """ + if not isinstance(value, int) or value <= 0: + msg = f"{param_name} must be a positive integer" + raise ValueError(msg) + + +def _validate_bool(value: Any, param_name: str) -> None: + """Validate that a parameter is a boolean. + + Args: + value: The value to validate + param_name: Name of the parameter (used in error message) + + Raises: + TypeError: If the value is not a boolean + """ + if not isinstance(value, bool): + msg = f"{param_name} must be a boolean" + raise TypeError(msg) + + +def _validate_formatter_parameters( + max_cell_length: int, + max_width: int, + max_height: int, + max_memory_bytes: int, + min_rows: int, + max_rows: int | None, + repr_rows: int | None, + enable_cell_expansion: bool, + show_truncation_message: bool, + use_shared_styles: bool, + custom_css: str | None, + style_provider: Any, +) -> int: + """Validate all formatter parameters and return resolved max_rows value. 
+ + Args: + max_cell_length: Maximum cell length value to validate + max_width: Maximum width value to validate + max_height: Maximum height value to validate + max_memory_bytes: Maximum memory bytes value to validate + min_rows: Minimum rows to display value to validate + max_rows: Maximum rows value to validate (None means use default) + repr_rows: Deprecated repr_rows value to validate + enable_cell_expansion: Boolean expansion flag to validate + show_truncation_message: Boolean message flag to validate + use_shared_styles: Boolean styles flag to validate + custom_css: Custom CSS string to validate + style_provider: Style provider object to validate + + Returns: + The resolved max_rows value after handling repr_rows deprecation + + Raises: + ValueError: If any numeric parameter is invalid or constraints are violated + TypeError: If any parameter has invalid type + DeprecationWarning: If repr_rows parameter is used + """ + # Validate numeric parameters + _validate_positive_int(max_cell_length, "max_cell_length") + _validate_positive_int(max_width, "max_width") + _validate_positive_int(max_height, "max_height") + _validate_positive_int(max_memory_bytes, "max_memory_bytes") + _validate_positive_int(min_rows, "min_rows") + + # Handle deprecated repr_rows parameter + if repr_rows is not None: + warnings.warn( + "repr_rows parameter is deprecated, use max_rows instead", + DeprecationWarning, + stacklevel=4, + ) + _validate_positive_int(repr_rows, "repr_rows") + if max_rows is not None and repr_rows != max_rows: + msg = "Cannot specify both repr_rows and max_rows; use max_rows only" + raise ValueError(msg) + max_rows = repr_rows + + # Use default if max_rows was not provided + if max_rows is None: + max_rows = 10 + + _validate_positive_int(max_rows, "max_rows") + + # Validate constraint: min_rows <= max_rows + if min_rows > max_rows: + msg = "min_rows must be less than or equal to max_rows" + raise ValueError(msg) + + # Validate boolean parameters + _validate_bool(enable_cell_expansion, "enable_cell_expansion") + _validate_bool(show_truncation_message, "show_truncation_message") + _validate_bool(use_shared_styles, "use_shared_styles") + + # Validate custom_css + if custom_css is not None and not isinstance(custom_css, str): + msg = "custom_css must be None or a string" + raise TypeError(msg) + + # Validate style_provider + if style_provider is not None and not isinstance(style_provider, StyleProvider): + msg = "style_provider must implement the StyleProvider protocol" + raise TypeError(msg) + + return max_rows + + +@runtime_checkable +class CellFormatter(Protocol): + """Protocol for cell value formatters.""" + + def __call__(self, value: Any) -> str: + """Format a cell value to string representation.""" + ... + + +@runtime_checkable +class StyleProvider(Protocol): + """Protocol for HTML style providers.""" + + def get_cell_style(self) -> str: + """Get the CSS style for table cells.""" + ... + + def get_header_style(self) -> str: + """Get the CSS style for header cells.""" + ... + + +class DefaultStyleProvider: + """Default implementation of StyleProvider.""" + + def get_cell_style(self) -> str: + """Get the CSS style for table cells. + + Returns: + CSS style string + """ + return ( + "border: 1px solid black; padding: 8px; text-align: left; " + "white-space: nowrap;" + ) + + def get_header_style(self) -> str: + """Get the CSS style for header cells. 
+ + Returns: + CSS style string + """ + return ( + "border: 1px solid black; padding: 8px; text-align: left; " + "background-color: #f2f2f2; white-space: nowrap; min-width: fit-content; " + "max-width: fit-content;" + ) + + +class DataFrameHtmlFormatter: + """Configurable HTML formatter for DataFusion DataFrames. + + This class handles the HTML rendering of DataFrames for display in + Jupyter notebooks and other rich display contexts. + + This class supports extension through composition. Key extension points: + - Provide a custom StyleProvider for styling cells and headers + - Register custom formatters for specific types + - Provide custom cell builders for specialized cell rendering + + Args: + max_cell_length: Maximum characters to display in a cell before truncation + max_width: Maximum width of the HTML table in pixels + max_height: Maximum height of the HTML table in pixels + max_memory_bytes: Maximum memory in bytes for rendered data (default: 2MB) + min_rows: Minimum number of rows to display (must be <= max_rows) + max_rows: Maximum number of rows to display in repr output + repr_rows: Deprecated alias for max_rows + enable_cell_expansion: Whether to add expand/collapse buttons for long cell + values + custom_css: Additional CSS to include in the HTML output + show_truncation_message: Whether to display a message when data is truncated + style_provider: Custom provider for cell and header styles + use_shared_styles: Whether to load styles and scripts only once per notebook + session + """ + + def __init__( + self, + max_cell_length: int = 25, + max_width: int = 1000, + max_height: int = 300, + max_memory_bytes: int = 2 * 1024 * 1024, # 2 MB + min_rows: int = 10, + max_rows: int | None = None, + repr_rows: int | None = None, + enable_cell_expansion: bool = True, + custom_css: str | None = None, + show_truncation_message: bool = True, + style_provider: StyleProvider | None = None, + use_shared_styles: bool = True, + ) -> None: + """Initialize the HTML formatter. + + Parameters + ---------- + max_cell_length + Maximum length of cell content before truncation. + max_width + Maximum width of the displayed table in pixels. + max_height + Maximum height of the displayed table in pixels. + max_memory_bytes + Maximum memory in bytes for rendered data. Helps prevent performance + issues with large datasets. + min_rows + Minimum number of rows to display even if memory limit is reached. + Must not exceed ``max_rows``. + max_rows + Maximum number of rows to display. Takes precedence over memory limits + when fewer rows are requested. + repr_rows + Deprecated alias for ``max_rows``. Use ``max_rows`` instead. + enable_cell_expansion + Whether to allow cells to expand when clicked. + custom_css + Custom CSS to apply to the HTML table. + show_truncation_message + Whether to show a message indicating that content has been truncated. + style_provider + Provider of CSS styles for the HTML table. If None, DefaultStyleProvider + is used. + use_shared_styles + Whether to use shared styles across multiple tables. This improves + performance when displaying many DataFrames in a single notebook. + + Raises: + ------ + ValueError + If max_cell_length, max_width, max_height, max_memory_bytes, + min_rows or max_rows is not a positive integer, or if min_rows + exceeds max_rows. 
+ TypeError + If enable_cell_expansion, show_truncation_message, or use_shared_styles is + not a boolean, or if custom_css is provided but is not a string, or if + style_provider is provided but does not implement the StyleProvider + protocol. + """ + # Validate all parameters and get resolved max_rows + resolved_max_rows = _validate_formatter_parameters( + max_cell_length, + max_width, + max_height, + max_memory_bytes, + min_rows, + max_rows, + repr_rows, + enable_cell_expansion, + show_truncation_message, + use_shared_styles, + custom_css, + style_provider, + ) + + self.max_cell_length = max_cell_length + self.max_width = max_width + self.max_height = max_height + self.max_memory_bytes = max_memory_bytes + self.min_rows = min_rows + self._max_rows = resolved_max_rows + self.enable_cell_expansion = enable_cell_expansion + self.custom_css = custom_css + self.show_truncation_message = show_truncation_message + self.style_provider = style_provider or DefaultStyleProvider() + self.use_shared_styles = use_shared_styles + # Registry for custom type formatters + self._type_formatters: dict[type, CellFormatter] = {} + # Custom cell builders + self._custom_cell_builder: Callable[[Any, int, int, str], str] | None = None + self._custom_header_builder: Callable[[Any], str] | None = None + + @property + def max_rows(self) -> int: + """Get the maximum number of rows to display. + + Returns: + The maximum number of rows to display in repr output + """ + return self._max_rows + + @max_rows.setter + def max_rows(self, value: int) -> None: + """Set the maximum number of rows to display. + + Args: + value: The maximum number of rows + """ + self._max_rows = value + + @property + def repr_rows(self) -> int: + """Get the maximum number of rows (deprecated name). + + .. deprecated:: + Use :attr:`max_rows` instead. This property is provided for + backward compatibility. + + Returns: + The maximum number of rows to display + """ + return self._max_rows + + @repr_rows.setter + def repr_rows(self, value: int) -> None: + """Set the maximum number of rows using deprecated name. + + .. deprecated:: + Use :attr:`max_rows` setter instead. This property is provided for + backward compatibility. + + Args: + value: The maximum number of rows + """ + warnings.warn( + "repr_rows is deprecated, use max_rows instead", + DeprecationWarning, + stacklevel=2, + ) + self._max_rows = value + + def register_formatter(self, type_class: type, formatter: CellFormatter) -> None: + """Register a custom formatter for a specific data type. + + Args: + type_class: The type to register a formatter for + formatter: Function that takes a value of the given type and returns + a formatted string + """ + self._type_formatters[type_class] = formatter + + def set_custom_cell_builder( + self, builder: Callable[[Any, int, int, str], str] + ) -> None: + """Set a custom cell builder function. + + Args: + builder: Function that takes (value, row, col, table_id) and returns HTML + """ + self._custom_cell_builder = builder + + def set_custom_header_builder(self, builder: Callable[[Any], str]) -> None: + """Set a custom header builder function. + + Args: + builder: Function that takes a field and returns HTML + """ + self._custom_header_builder = builder + + def format_html( + self, + batches: list, + schema: Any, + has_more: bool = False, + table_uuid: str | None = None, + ) -> str: + """Format record batches as HTML. + + This method is used by DataFrame's _repr_html_ implementation and can be + called directly when custom HTML rendering is needed. 
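A sketch of the extension hooks just defined (the float formatter and header builder are arbitrary examples):

    from datafusion.dataframe_formatter import DataFrameHtmlFormatter

    formatter = DataFrameHtmlFormatter()
    # Render every float cell with two decimal places.
    formatter.register_formatter(float, lambda v: f"{v:.2f}")
    # Emit a custom <th> for each schema field.
    formatter.set_custom_header_builder(lambda field: f"<th>{field.name}</th>")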
+
+        Args:
+            batches: List of Arrow RecordBatch objects
+            schema: Arrow Schema object
+            has_more: Whether there are more batches not shown
+            table_uuid: Unique ID for the table, used for JavaScript interactions
+
+        Returns:
+            HTML string representation of the data
+
+        Raises:
+            TypeError: If schema is invalid and no batches are provided
+        """
+        if not batches:
+            return "No data to display"
+
+        # Validate schema
+        if schema is None or not hasattr(schema, "__iter__"):
+            msg = "Schema must be provided"
+            raise TypeError(msg)
+
+        # Generate a unique ID if none provided
+        table_uuid = table_uuid or f"df-{id(batches)}"
+
+        # Build HTML components
+        html = []
+
+        html.extend(self._build_html_header())
+
+        html.extend(self._build_table_container_start())
+
+        # Add table header and body
+        html.extend(self._build_table_header(schema))
+        html.extend(self._build_table_body(batches, table_uuid))
+
+        html.append("</table>")
+        html.append("</div>")
+
+        # Add footer (JavaScript and messages)
+        if self.enable_cell_expansion:
+            html.append(self._get_javascript())
+
+        # Always add truncation message if needed (independent of styles)
+        if has_more and self.show_truncation_message:
+            html.append("<div>Data truncated due to size.</div>")
+
+        return "\n".join(html)
+
+    def format_str(
+        self,
+        batches: list,
+        schema: Any,
+        has_more: bool = False,
+        table_uuid: str | None = None,
+    ) -> str:
+        """Format record batches as a string.
+
+        This method is used by DataFrame's __repr__ implementation and can be
+        called directly when string rendering is needed.
+
+        Args:
+            batches: List of Arrow RecordBatch objects
+            schema: Arrow Schema object
+            has_more: Whether there are more batches not shown
+            table_uuid: Unique ID for the table, used for JavaScript interactions
+
+        Returns:
+            String representation of the data
+
+        Raises:
+            TypeError: If schema is invalid and no batches are provided
+        """
+        return DataFrameInternal.default_str_repr(batches, schema, has_more, table_uuid)
+
+    def _build_html_header(self) -> list[str]:
+        """Build the HTML header with CSS styles."""
+        default_css = self._get_default_css() if self.enable_cell_expansion else ""
+        script = f"""
+<style>
+{default_css}
+</style>
+"""
+        html = [script]
+        if self.custom_css:
+            html.append(f"<style>{self.custom_css}</style>")
+        return html
+
+    def _build_table_container_start(self) -> list[str]:
+        """Build the opening tags for the table container."""
+        html = []
+        html.append(
+            f'<div style="width: 100%; max-width: {self.max_width}px; '
+            f'max-height: {self.max_height}px; overflow: auto; border: 1px solid #ccc;">'
+        )
+        html.append('<table style="border-collapse: collapse; min-width: 100%">')
+        return html
+
+    def _build_table_header(self, schema: Any) -> list[str]:
+        """Build the HTML table header with column names."""
+        html = []
+        html.append("<thead>")
+        html.append("<tr>")
+        for field in schema:
+            if self._custom_header_builder:
+                html.append(self._custom_header_builder(field))
+            else:
+                html.append(
+                    f"<th style='{self.style_provider.get_header_style()}'>"
+                    f"{field.name}</th>"
+                )
+        html.append("</tr>")
+        html.append("</thead>")
+        return html
+
+    def _build_table_body(self, batches: list, table_uuid: str) -> list[str]:
+        """Build the HTML table body with data rows."""
+        html = []
+        html.append("<tbody>")
+
+        row_count = 0
+        for batch in batches:
+            for row_idx in range(batch.num_rows):
+                row_count += 1
+                html.append("<tr>")
+
+                for col_idx, column in enumerate(batch.columns):
+                    # Get the raw value from the column
+                    raw_value = self._get_cell_value(column, row_idx)
+
+                    # Always check for type formatters first to format the value
+                    formatted_value = self._format_cell_value(raw_value)
+
+                    # Then apply either custom cell builder or standard cell formatting
+                    if self._custom_cell_builder:
+                        # Pass both the raw value and formatted value to let the
+                        # builder decide
+                        cell_html = self._custom_cell_builder(
+                            raw_value, row_count, col_idx, table_uuid
+                        )
+                        html.append(cell_html)
+                    else:
+                        # Standard cell formatting with formatted value
+                        if (
+                            len(str(raw_value)) > self.max_cell_length
+                            and self.enable_cell_expansion
+                        ):
+                            cell_html = self._build_expandable_cell(
+                                formatted_value, row_count, col_idx, table_uuid
+                            )
+                        else:
+                            cell_html = self._build_regular_cell(formatted_value)
+                        html.append(cell_html)
+
+                html.append("</tr>")
+
+        html.append("</tbody>")
+        return html
+
+    def _get_cell_value(self, column: Any, row_idx: int) -> Any:
+        """Extract a cell value from a column.
+
+        Args:
+            column: Arrow array
+            row_idx: Row index
+
+        Returns:
+            The raw cell value
+        """
+        try:
+            value = column[row_idx]
+
+            if hasattr(value, "as_py"):
+                return value.as_py()
+        except (AttributeError, TypeError):
+            pass
+        else:
+            return value
+
+    def _format_cell_value(self, value: Any) -> str:
+        """Format a cell value for display.
+
+        Uses registered type formatters if available.
+
+        Args:
+            value: The cell value to format
+
+        Returns:
+            Formatted cell value as string
+        """
+        # Check for custom type formatters
+        for type_cls, formatter in self._type_formatters.items():
+            if isinstance(value, type_cls):
+                return formatter(value)
+
+        # If no formatter matched, return string representation
+        return str(value)
+
+    def _build_expandable_cell(
+        self, formatted_value: str, row_count: int, col_idx: int, table_uuid: str
+    ) -> str:
+        """Build an expandable cell for long content."""
+        short_value = str(formatted_value)[: self.max_cell_length]
+        return (
+            f"<td style='{self.style_provider.get_cell_style()}'>"
+            f"<div class='expandable-container'>"
+            f"<span class='expandable' id='{table_uuid}-min-text-{row_count}-{col_idx}'>"
+            f"{short_value}</span>"
+            f"<span class='full-text' id='{table_uuid}-full-text-{row_count}-{col_idx}'>"
+            f"{formatted_value}</span>"
+            f"<button class='expand-btn' "
+            f"onclick=\"toggleDataFrameCellText('{table_uuid}',{row_count},{col_idx})\">"
+            f"...</button>"
+            f"</div>"
+            f"</td>"
+        )
+
+    def _build_regular_cell(self, formatted_value: str) -> str:
+        """Build a regular table cell."""
+        return (
+            f"<td style='{self.style_provider.get_cell_style()}'>{formatted_value}</td>"
+        )
+
+    def _build_html_footer(self, has_more: bool) -> list[str]:
+        """Build the HTML footer with JavaScript and messages."""
+        html = []
+
+        # Add JavaScript for interactivity only if cell expansion is enabled
+        # and we're not using the shared styles approach
+        if self.enable_cell_expansion and not self.use_shared_styles:
+            html.append(self._get_javascript())
+
+        # Add truncation message if needed
+        if has_more and self.show_truncation_message:
+            html.append("<div>Data truncated due to size.</div>")
+
+        return html
+
+    def _get_default_css(self) -> str:
+        """Get default CSS styles for the HTML table."""
+        return """
+        .expandable-container {
+            display: inline-block;
+            max-width: 200px;
+        }
+        .expandable {
+            white-space: nowrap;
+            overflow: hidden;
+            text-overflow: ellipsis;
+            display: block;
+        }
+        .full-text {
+            display: none;
+            white-space: normal;
+        }
+        .expand-btn {
+            cursor: pointer;
+            color: blue;
+            text-decoration: underline;
+            border: none;
+            background: none;
+            font-size: inherit;
+            display: block;
+            margin-top: 5px;
+        }
+        """
+
+    def _get_javascript(self) -> str:
+        """Get JavaScript code for interactive elements."""
+        return """
+<script>
+function toggleDataFrameCellText(tableUuid, row, col) {
+    var shortText = document.getElementById(tableUuid + "-min-text-" + row + "-" + col);
+    var fullText = document.getElementById(tableUuid + "-full-text-" + row + "-" + col);
+    var button = event.target;
+
+    if (fullText.style.display === "none" || fullText.style.display === "") {
+        fullText.style.display = "block";
+        shortText.style.display = "none";
+        button.textContent = "(less)";
+    } else {
+        fullText.style.display = "none";
+        shortText.style.display = "block";
+        button.textContent = "...";
+    }
+}
+</script>
+"""
+
+
+class FormatterManager:
+    """Manager class for the global DataFrame HTML formatter instance."""
+
+    _default_formatter: DataFrameHtmlFormatter = DataFrameHtmlFormatter()
+
+    @classmethod
+    def set_formatter(cls, formatter: DataFrameHtmlFormatter) -> None:
+        """Set the global DataFrame HTML formatter.
+
+        Args:
+            formatter: The formatter instance to use globally
+        """
+        cls._default_formatter = formatter
+        _refresh_formatter_reference()
+
+    @classmethod
+    def get_formatter(cls) -> DataFrameHtmlFormatter:
+        """Get the current global DataFrame HTML formatter.
+
+        Returns:
+            The global HTML formatter instance
+        """
+        return cls._default_formatter
+
+
+def get_formatter() -> DataFrameHtmlFormatter:
+    """Get the current global DataFrame HTML formatter.
+
+    This function is used by the DataFrame._repr_html_ implementation to access
+    the shared formatter instance. It can also be used directly when custom
+    HTML rendering is needed.
+
+    Returns:
+        The global HTML formatter instance
+
+    Example:
+        >>> from datafusion.html_formatter import get_formatter
+        >>> formatter = get_formatter()
+        >>> formatter.max_cell_length = 50  # Increase cell length
+    """
+    return FormatterManager.get_formatter()
+
+
+def set_formatter(formatter: DataFrameHtmlFormatter) -> None:
+    """Set the global DataFrame HTML formatter.
+
+    Args:
+        formatter: The formatter instance to use globally
+
+    Example:
+        >>> from datafusion.html_formatter import get_formatter, set_formatter
+        >>> custom_formatter = DataFrameHtmlFormatter(max_cell_length=100)
+        >>> set_formatter(custom_formatter)
+    """
+    FormatterManager.set_formatter(formatter)
+
+
+def configure_formatter(**kwargs: Any) -> None:
+    """Configure the global DataFrame HTML formatter.
+
+    This function creates a new formatter with the provided configuration
+    and sets it as the global formatter for all DataFrames.
+
+    Args:
+        **kwargs: Formatter configuration parameters like max_cell_length,
+            max_width, max_height, enable_cell_expansion, etc.
+
+    Raises:
+        ValueError: If any invalid parameters are provided
+
+    Example:
+        >>> from datafusion.html_formatter import configure_formatter
+        >>> configure_formatter(
+        ...     max_cell_length=50,
+        ...     max_height=500,
+        ...     enable_cell_expansion=True,
+        ...     use_shared_styles=True
+        ... )
+    """
+    # Valid parameters accepted by DataFrameHtmlFormatter
+    valid_params = {
+        "max_cell_length",
+        "max_width",
+        "max_height",
+        "max_memory_bytes",
+        "min_rows",
+        "max_rows",
+        "repr_rows",
+        "enable_cell_expansion",
+        "custom_css",
+        "show_truncation_message",
+        "style_provider",
+        "use_shared_styles",
+    }
+
+    # Check for invalid parameters
+    invalid_params = set(kwargs) - valid_params
+    if invalid_params:
+        msg = (
+            f"Invalid formatter parameters: {', '.join(invalid_params)}. 
" + f"Valid parameters are: {', '.join(valid_params)}" + ) + raise ValueError(msg) + + # Create and set formatter with validated parameters + set_formatter(DataFrameHtmlFormatter(**kwargs)) + + +def reset_formatter() -> None: + """Reset the global DataFrame HTML formatter to default settings. + + This function creates a new formatter with default configuration + and sets it as the global formatter for all DataFrames. + + Example: + >>> from datafusion.html_formatter import reset_formatter + >>> reset_formatter() # Reset formatter to default settings + """ + formatter = DataFrameHtmlFormatter() + set_formatter(formatter) + + +def _refresh_formatter_reference() -> None: + """Refresh formatter reference in any modules using it. + + This helps ensure that changes to the formatter are reflected in existing + DataFrames that might be caching the formatter reference. + """ + # This is a no-op but signals modules to refresh their reference diff --git a/python/datafusion/expr.py b/python/datafusion/expr.py index e785cab06..9df58f52a 100644 --- a/python/datafusion/expr.py +++ b/python/datafusion/expr.py @@ -22,23 +22,34 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, ClassVar, Optional - -import pyarrow as pa +from collections.abc import Iterable, Sequence +from typing import TYPE_CHECKING, Any, ClassVar try: from warnings import deprecated # Python 3.13+ except ImportError: from typing_extensions import deprecated # Python 3.12 -from datafusion.common import DataTypeMap, NullTreatment, RexType +import pyarrow as pa from ._internal import expr as expr_internal from ._internal import functions as functions_internal if TYPE_CHECKING: + from collections.abc import Sequence + + from datafusion.common import ( # type: ignore[import] + DataTypeMap, + NullTreatment, + RexType, + ) from datafusion.plan import LogicalPlan + +# Standard error message for invalid expression types +# Mention both alias forms of column and literal helpers +EXPR_TYPE_ERROR = "Use col()/column() or lit()/literal() to construct expressions" + # The following are imported from the internal representation. We may choose to # give these all proper wrappers, or to simply leave as is. These were added # in order to support passing the `test_imports` unit test. @@ -126,6 +137,7 @@ WindowExpr = expr_internal.WindowExpr __all__ = [ + "EXPR_TYPE_ERROR", "Aggregate", "AggregateFunction", "Alias", @@ -195,6 +207,7 @@ "SimilarTo", "Sort", "SortExpr", + "SortKey", "Subquery", "SubqueryAlias", "TableScan", @@ -212,14 +225,94 @@ "WindowExpr", "WindowFrame", "WindowFrameBound", + "ensure_expr", + "ensure_expr_list", ] +def ensure_expr(value: Expr | Any) -> expr_internal.Expr: + """Return the internal expression from ``Expr`` or raise ``TypeError``. + + This helper rejects plain strings and other non-:class:`Expr` values so + higher level APIs consistently require explicit :func:`~datafusion.col` or + :func:`~datafusion.lit` expressions. + + Args: + value: Candidate expression or other object. + + Returns: + The internal expression representation. + + Raises: + TypeError: If ``value`` is not an instance of :class:`Expr`. + """ + if not isinstance(value, Expr): + raise TypeError(EXPR_TYPE_ERROR) + return value.expr + + +def ensure_expr_list( + exprs: Iterable[Expr | Iterable[Expr]], +) -> list[expr_internal.Expr]: + """Flatten an iterable of expressions, validating each via ``ensure_expr``. + + Args: + exprs: Possibly nested iterable containing expressions. + + Returns: + A flat list of raw expressions. 
+ + Raises: + TypeError: If any item is not an instance of :class:`Expr`. + """ + + def _iter( + items: Iterable[Expr | Iterable[Expr]], + ) -> Iterable[expr_internal.Expr]: + for expr in items: + if isinstance(expr, Iterable) and not isinstance( + expr, Expr | str | bytes | bytearray + ): + # Treat string-like objects as atomic to surface standard errors + yield from _iter(expr) + else: + yield ensure_expr(expr) + + return list(_iter(exprs)) + + +def _to_raw_expr(value: Expr | str) -> expr_internal.Expr: + """Convert a Python expression or column name to its raw variant. + + Args: + value: Candidate expression or column name. + + Returns: + The internal :class:`~datafusion._internal.expr.Expr` representation. + + Raises: + TypeError: If ``value`` is neither an :class:`Expr` nor ``str``. + """ + if isinstance(value, str): + return Expr.column(value).expr + if isinstance(value, Expr): + return value.expr + error = ( + "Expected Expr or column name, found:" + f" {type(value).__name__}. {EXPR_TYPE_ERROR}." + ) + raise TypeError(error) + + def expr_list_to_raw_expr_list( - expr_list: Optional[list[Expr]], -) -> Optional[list[expr_internal.Expr]]: - """Helper function to convert an optional list to raw expressions.""" - return [e.expr for e in expr_list] if expr_list is not None else None + expr_list: list[Expr] | Expr | None, +) -> list[expr_internal.Expr] | None: + """Convert a sequence of expressions or column names to raw expressions.""" + if isinstance(expr_list, Expr | str): + expr_list = [expr_list] + if expr_list is None: + return None + return [_to_raw_expr(e) for e in expr_list] def sort_or_default(e: Expr | SortExpr) -> expr_internal.SortExpr: @@ -230,10 +323,21 @@ def sort_or_default(e: Expr | SortExpr) -> expr_internal.SortExpr: def sort_list_to_raw_sort_list( - sort_list: Optional[list[Expr | SortExpr]], -) -> Optional[list[expr_internal.SortExpr]]: + sort_list: Sequence[SortKey] | SortKey | None, +) -> list[expr_internal.SortExpr] | None: """Helper function to return an optional sort list to raw variant.""" - return [sort_or_default(e) for e in sort_list] if sort_list is not None else None + if isinstance(sort_list, Expr | SortExpr | str): + sort_list = [sort_list] + if sort_list is None: + return None + raw_sort_list = [] + for item in sort_list: + if isinstance(item, SortExpr): + raw_sort_list.append(sort_or_default(item)) + else: + raw_expr = _to_raw_expr(item) # may raise ``TypeError`` + raw_sort_list.append(sort_or_default(Expr(raw_expr))) + return raw_sort_list class Expr: @@ -353,12 +457,39 @@ def __getitem__(self, key: str | int) -> Expr: If ``key`` is a string, returns the subfield of the struct. If ``key`` is an integer, retrieves the element in the array. Note that the - element index begins at ``0``, unlike `array_element` which begins at ``1``. + element index begins at ``0``, unlike + :py:func:`~datafusion.functions.array_element` which begins at ``1``. + If ``key`` is a slice, returns an array that contains a slice of the + original array. Similar to integer indexing, this follows Python convention + where the index begins at ``0`` unlike + :py:func:`~datafusion.functions.array_slice` which begins at ``1``. 
""" if isinstance(key, int): return Expr( functions_internal.array_element(self.expr, Expr.literal(key + 1).expr) ) + if isinstance(key, slice): + if isinstance(key.start, int): + start = Expr.literal(key.start + 1).expr + elif isinstance(key.start, Expr): + start = (key.start + Expr.literal(1)).expr + else: + # Default start at the first element, index 1 + start = Expr.literal(1).expr + + if isinstance(key.stop, int): + stop = Expr.literal(key.stop).expr + else: + stop = key.stop.expr + + if isinstance(key.step, int): + step = Expr.literal(key.step).expr + elif isinstance(key.step, Expr): + step = key.step.expr + else: + step = key.step + + return Expr(functions_internal.array_slice(self.expr, start, stop, step)) return Expr(self.expr.__getitem__(key)) def __eq__(self, rhs: object) -> Expr: @@ -431,8 +562,6 @@ def literal(value: Any) -> Expr: """ if isinstance(value, str): value = pa.scalar(value, type=pa.string_view()) - if not isinstance(value, pa.Scalar): - value = pa.scalar(value) return Expr(expr_internal.RawExpr.literal(value)) @staticmethod @@ -445,7 +574,6 @@ def literal_with_metadata(value: Any, metadata: dict[str, str]) -> Expr: """ if isinstance(value, str): value = pa.scalar(value, type=pa.string_view()) - value = value if isinstance(value, pa.Scalar) else pa.scalar(value) return Expr(expr_internal.RawExpr.literal_with_metadata(value, metadata)) @@ -470,7 +598,7 @@ def column(value: str) -> Expr: """Creates a new expression representing a column.""" return Expr(expr_internal.RawExpr.column(value)) - def alias(self, name: str, metadata: Optional[dict[str, str]] = None) -> Expr: + def alias(self, name: str, metadata: dict[str, str] | None = None) -> Expr: """Assign a name to the expression. Args: @@ -518,7 +646,7 @@ def fill_null(self, value: Any | Expr | None = None) -> Expr: bool: pa.bool_(), } - def cast(self, to: pa.DataType[Any] | type[float | int | str | bool]) -> Expr: + def cast(self, to: pa.DataType[Any] | type) -> Expr: """Cast to a new data type.""" if not isinstance(to, pa.DataType): try: @@ -564,7 +692,7 @@ def types(self) -> DataTypeMap: return self.expr.types() def python_value(self) -> Any: - """Extracts the Expr value into a PyObject. + """Extracts the Expr value into `Any`. This is only valid for literal expressions. @@ -656,7 +784,7 @@ def over(self, window: Window) -> Expr: window: Window definition """ partition_by_raw = expr_list_to_raw_expr_list(window._partition_by) - order_by_raw = sort_list_to_raw_sort_list(window._order_by) + order_by_raw = window._order_by window_frame_raw = ( window._window_frame.window_frame if window._window_frame is not None @@ -1140,10 +1268,10 @@ class Window: def __init__( self, - partition_by: Optional[list[Expr]] = None, - window_frame: Optional[WindowFrame] = None, - order_by: Optional[list[SortExpr | Expr]] = None, - null_treatment: Optional[NullTreatment] = None, + partition_by: list[Expr] | Expr | None = None, + window_frame: WindowFrame | None = None, + order_by: list[SortExpr | Expr | str] | Expr | SortExpr | str | None = None, + null_treatment: NullTreatment | None = None, ) -> None: """Construct a window definition. 
@@ -1155,7 +1283,7 @@ def __init__( """ self._partition_by = partition_by self._window_frame = window_frame - self._order_by = order_by + self._order_by = sort_list_to_raw_sort_list(order_by) self._null_treatment = null_treatment @@ -1163,7 +1291,7 @@ class WindowFrame: """Defines a window frame for performing window operations.""" def __init__( - self, units: str, start_bound: Optional[Any], end_bound: Optional[Any] + self, units: str, start_bound: Any | None, end_bound: Any | None ) -> None: """Construct a window frame using the given parameters. @@ -1242,7 +1370,7 @@ class CaseBuilder: import datafusion.functions as f from datafusion import lit, col df.select( - f.case(col("column_a") + f.case(col("column_a")) .when(lit(1), lit("One")) .when(lit(2), lit("Two")) .otherwise(lit("Unknown")) @@ -1295,3 +1423,6 @@ def nulls_first(self) -> bool: def __repr__(self) -> str: """Generate a string representation of this expression.""" return self.raw_sort.__repr__() + + +SortKey = Expr | SortExpr | str diff --git a/python/datafusion/functions.py b/python/datafusion/functions.py index f430cdf4b..431afcc30 100644 --- a/python/datafusion/functions.py +++ b/python/datafusion/functions.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any import pyarrow as pa @@ -28,11 +28,18 @@ CaseBuilder, Expr, SortExpr, + SortKey, WindowFrame, expr_list_to_raw_expr_list, sort_list_to_raw_sort_list, + sort_or_default, ) +try: + from warnings import deprecated # Python 3.13+ +except ImportError: + from typing_extensions import deprecated # Python 3.12 + if TYPE_CHECKING: from datafusion.context import SessionContext @@ -218,6 +225,7 @@ "range", "rank", "regexp_count", + "regexp_instr", "regexp_like", "regexp_match", "regexp_replace", @@ -372,7 +380,7 @@ def order_by(expr: Expr, ascending: bool = True, nulls_first: bool = True) -> So return SortExpr(expr, ascending=ascending, nulls_first=nulls_first) -def alias(expr: Expr, name: str, metadata: Optional[dict[str, str]] = None) -> Expr: +def alias(expr: Expr, name: str, metadata: dict[str, str] | None = None) -> Expr: """Creates an alias expression with an optional metadata dictionary. Args: @@ -391,7 +399,7 @@ def col(name: str) -> Expr: return Expr(f.col(name)) -def count_star(filter: Optional[Expr] = None) -> Expr: +def count_star(filter: Expr | None = None) -> Expr: """Create a COUNT(1) aggregate expression. This aggregate function will count all of the rows in the partition. @@ -425,12 +433,15 @@ def when(when: Expr, then: Expr) -> CaseBuilder: return CaseBuilder(f.when(when.expr, then.expr)) +@deprecated("Prefer to call Expr.over() instead") def window( name: str, args: list[Expr], - partition_by: list[Expr] | None = None, - order_by: list[Expr | SortExpr] | None = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, window_frame: WindowFrame | None = None, + filter: Expr | None = None, + distinct: bool = False, ctx: SessionContext | None = None, ) -> Expr: """Creates a new Window function expression. 
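Since ``window()`` is now deprecated in favour of ``Expr.over()``, here is a hedged sketch of the recommended pattern using the updated ``Window`` class, which accepts a bare column name for ``order_by``; the DataFrame contents and column names are assumptions for illustration:

from datafusion import SessionContext, col, functions as f
from datafusion.expr import Window

ctx = SessionContext()
df = ctx.from_pydict({"grp": ["a", "a", "b"], "ts": [2, 1, 1], "v": [10, 20, 30]})

# order_by="ts" is shorthand for an ascending sort on col("ts")
win = Window(partition_by=col("grp"), order_by="ts")
df.select(f.first_value(col("v")).over(win).alias("first_v")).show()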
@@ -440,13 +451,29 @@ def window( lag use:: df.select(functions.lag(col("a")).partition_by(col("b")).build()) + + The ``order_by`` parameter accepts column names or expressions, e.g.:: + + window("lag", [col("a")], order_by="ts") """ args = [a.expr for a in args] - partition_by = expr_list_to_raw_expr_list(partition_by) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) window_frame = window_frame.window_frame if window_frame is not None else None ctx = ctx.ctx if ctx is not None else None - return Expr(f.window(name, args, partition_by, order_by_raw, window_frame, ctx)) + filter_raw = filter.expr if filter is not None else None + return Expr( + f.window( + name, + args, + partition_by=partition_by_raw, + order_by=order_by_raw, + window_frame=window_frame, + ctx=ctx, + filter=filter_raw, + distinct=distinct, + ) + ) # scalar functions @@ -790,7 +817,7 @@ def regexp_replace( def regexp_count( - string: Expr, pattern: Expr, start: Expr, flags: Expr | None = None + string: Expr, pattern: Expr, start: Expr | None = None, flags: Expr | None = None ) -> Expr: """Returns the number of matches in a string. @@ -799,10 +826,42 @@ def regexp_count( """ if flags is not None: flags = flags.expr - start = start.expr if start is not None else Expr.expr + start = start.expr if start is not None else start return Expr(f.regexp_count(string.expr, pattern.expr, start, flags)) +def regexp_instr( + values: Expr, + regex: Expr, + start: Expr | None = None, + n: Expr | None = None, + flags: Expr | None = None, + sub_expr: Expr | None = None, +) -> Expr: + """Returns the position of a regular expression match in a string. + + Searches ``values`` for the ``n``-th occurrence of ``regex``, starting at position + ``start`` (the first position is 1). Returns the starting or ending position based + on ``end_position``. Use ``flags`` to control regex behavior and ``sub_expr`` to + return the position of a specific capture group instead of the entire match. + """ + start = start.expr if start is not None else None + n = n.expr if n is not None else None + flags = flags.expr if flags is not None else None + sub_expr = sub_expr.expr if sub_expr is not None else None + + return Expr( + f.regexp_instr( + values.expr, + regex.expr, + start, + n, + flags, + sub_expr, + ) + ) + + def repeat(string: Expr, n: Expr) -> Expr: """Repeats the ``string`` to ``n`` times.""" return Expr(f.repeat(string.expr, n.expr)) @@ -1621,7 +1680,7 @@ def empty(array: Expr) -> Expr: # aggregate functions def approx_distinct( expression: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Returns the approximate number of distinct values. @@ -1641,7 +1700,7 @@ def approx_distinct( return Expr(f.approx_distinct(expression.expr, filter=filter_raw)) -def approx_median(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def approx_median(expression: Expr, filter: Expr | None = None) -> Expr: """Returns the approximate median value. This aggregate function is similar to :py:func:`median`, but it will only @@ -1659,10 +1718,10 @@ def approx_median(expression: Expr, filter: Optional[Expr] = None) -> Expr: def approx_percentile_cont( - expression: Expr, + sort_expression: Expr | SortExpr, percentile: float, - num_centroids: Optional[int] = None, - filter: Optional[Expr] = None, + num_centroids: int | None = None, + filter: Expr | None = None, ) -> Expr: """Returns the value that is approximately at a given percentile of ``expr``. 
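An illustrative use of the newly added ``regexp_instr`` wrapper; the sample data below is an assumption made for the sketch:

from datafusion import SessionContext, col, lit, functions as f

ctx = SessionContext()
df = ctx.from_pydict({"s": ["abc-123", "no digits", "x9y"]})

# 1-based position of the first run of digits in each string
df.select(f.regexp_instr(col("s"), lit(r"[0-9]+")).alias("pos")).show()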
@@ -1673,28 +1732,33 @@ def approx_percentile_cont( between two of the values. This function uses the [t-digest](https://arxiv.org/abs/1902.04023) algorithm to - compute the percentil. You can limit the number of bins used in this algorithm by + compute the percentile. You can limit the number of bins used in this algorithm by setting the ``num_centroids`` parameter. If using the builder functions described in ref:`_aggregation` this function ignores the options ``order_by``, ``null_treatment``, and ``distinct``. Args: - expression: Values for which to find the approximate percentile + sort_expression: Values for which to find the approximate percentile percentile: This must be between 0.0 and 1.0, inclusive num_centroids: Max bin size for the t-digest algorithm filter: If provided, only compute against rows for which the filter is True """ + sort_expr_raw = sort_or_default(sort_expression) filter_raw = filter.expr if filter is not None else None return Expr( f.approx_percentile_cont( - expression.expr, percentile, num_centroids=num_centroids, filter=filter_raw + sort_expr_raw, percentile, num_centroids=num_centroids, filter=filter_raw ) ) def approx_percentile_cont_with_weight( - expression: Expr, weight: Expr, percentile: float, filter: Optional[Expr] = None + sort_expression: Expr | SortExpr, + weight: Expr, + percentile: float, + num_centroids: int | None = None, + filter: Expr | None = None, ) -> Expr: """Returns the value of the weighted approximate percentile. @@ -1705,16 +1769,22 @@ def approx_percentile_cont_with_weight( the options ``order_by``, ``null_treatment``, and ``distinct``. Args: - expression: Values for which to find the approximate percentile + sort_expression: Values for which to find the approximate percentile weight: Relative weight for each of the values in ``expression`` percentile: This must be between 0.0 and 1.0, inclusive + num_centroids: Max bin size for the t-digest algorithm filter: If provided, only compute against rows for which the filter is True """ + sort_expr_raw = sort_or_default(sort_expression) filter_raw = filter.expr if filter is not None else None return Expr( f.approx_percentile_cont_with_weight( - expression.expr, weight.expr, percentile, filter=filter_raw + sort_expr_raw, + weight.expr, + percentile, + num_centroids=num_centroids, + filter=filter_raw, ) ) @@ -1722,8 +1792,8 @@ def approx_percentile_cont_with_weight( def array_agg( expression: Expr, distinct: bool = False, - filter: Optional[Expr] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + filter: Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Aggregate values into an array. @@ -1738,7 +1808,11 @@ def array_agg( expression: Values to combine into an array distinct: If True, a single entry for each distinct value will be in the result filter: If provided, only compute against rows for which the filter is True - order_by: Order the resultant array values + order_by: Order the resultant array values. Accepts column names or expressions. + + For example:: + + df.aggregate([], array_agg(col("a"), order_by="b")) """ order_by_raw = sort_list_to_raw_sort_list(order_by) filter_raw = filter.expr if filter is not None else None @@ -1752,7 +1826,7 @@ def array_agg( def avg( expression: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Returns the average value. 
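Because ``approx_percentile_cont`` now takes a ``sort_expression`` that may be either an ``Expr`` or a ``SortExpr``, a small usage sketch may help; the data is assumed:

from datafusion import SessionContext, col, functions as f

ctx = SessionContext()
df = ctx.from_pydict({"v": [1.0, 2.0, 3.0, 4.0, 100.0]})

# A plain Expr works, as does an explicit sort such as col("v").sort()
df.aggregate(
    [],
    [f.approx_percentile_cont(col("v").sort(), 0.5).alias("approx_median")],
).show()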
@@ -1769,7 +1843,7 @@ def avg( return Expr(f.avg(expression.expr, filter=filter_raw)) -def corr(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: +def corr(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr: """Returns the correlation coefficient between ``value1`` and ``value2``. This aggregate function expects both values to be numeric and will return a float. @@ -1789,7 +1863,7 @@ def corr(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: def count( expressions: Expr | list[Expr] | None = None, distinct: bool = False, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Returns the number of rows that match the given arguments. @@ -1815,7 +1889,7 @@ def count( return Expr(f.count(*args, distinct=distinct, filter=filter_raw)) -def covar_pop(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: +def covar_pop(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr: """Computes the population covariance. This aggregate function expects both values to be numeric and will return a float. @@ -1832,7 +1906,7 @@ def covar_pop(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Ex return Expr(f.covar_pop(value_y.expr, value_x.expr, filter=filter_raw)) -def covar_samp(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: +def covar_samp(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr: """Computes the sample covariance. This aggregate function expects both values to be numeric and will return a float. @@ -1849,7 +1923,7 @@ def covar_samp(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> E return Expr(f.covar_samp(value_y.expr, value_x.expr, filter=filter_raw)) -def covar(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: +def covar(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr: """Computes the sample covariance. This is an alias for :py:func:`covar_samp`. @@ -1857,7 +1931,7 @@ def covar(value_y: Expr, value_x: Expr, filter: Optional[Expr] = None) -> Expr: return covar_samp(value_y, value_x, filter) -def max(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def max(expression: Expr, filter: Expr | None = None) -> Expr: """Aggregate function that returns the maximum value of the argument. If using the builder functions described in ref:`_aggregation` this function ignores @@ -1871,7 +1945,7 @@ def max(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.max(expression.expr, filter=filter_raw)) -def mean(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def mean(expression: Expr, filter: Expr | None = None) -> Expr: """Returns the average (mean) value of the argument. This is an alias for :py:func:`avg`. @@ -1880,7 +1954,7 @@ def mean(expression: Expr, filter: Optional[Expr] = None) -> Expr: def median( - expression: Expr, distinct: bool = False, filter: Optional[Expr] = None + expression: Expr, distinct: bool = False, filter: Expr | None = None ) -> Expr: """Computes the median of a set of numbers. @@ -1899,8 +1973,8 @@ def median( return Expr(f.median(expression.expr, distinct=distinct, filter=filter_raw)) -def min(expression: Expr, filter: Optional[Expr] = None) -> Expr: - """Returns the minimum value of the argument. +def min(expression: Expr, filter: Expr | None = None) -> Expr: + """Aggregate function that returns the minimum value of the argument. 
If using the builder functions described in ref:`_aggregation` this function ignores the options ``order_by``, ``null_treatment``, and ``distinct``. @@ -1915,7 +1989,7 @@ def min(expression: Expr, filter: Optional[Expr] = None) -> Expr: def sum( expression: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the sum of a set of numbers. @@ -1932,7 +2006,7 @@ def sum( return Expr(f.sum(expression.expr, filter=filter_raw)) -def stddev(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def stddev(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the standard deviation of the argument. If using the builder functions described in ref:`_aggregation` this function ignores @@ -1946,7 +2020,7 @@ def stddev(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.stddev(expression.expr, filter=filter_raw)) -def stddev_pop(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def stddev_pop(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the population standard deviation of the argument. If using the builder functions described in ref:`_aggregation` this function ignores @@ -1960,7 +2034,7 @@ def stddev_pop(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.stddev_pop(expression.expr, filter=filter_raw)) -def stddev_samp(arg: Expr, filter: Optional[Expr] = None) -> Expr: +def stddev_samp(arg: Expr, filter: Expr | None = None) -> Expr: """Computes the sample standard deviation of the argument. This is an alias for :py:func:`stddev`. @@ -1968,7 +2042,7 @@ def stddev_samp(arg: Expr, filter: Optional[Expr] = None) -> Expr: return stddev(arg, filter=filter) -def var(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def var(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the sample variance of the argument. This is an alias for :py:func:`var_samp`. @@ -1976,7 +2050,7 @@ def var(expression: Expr, filter: Optional[Expr] = None) -> Expr: return var_samp(expression, filter) -def var_pop(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def var_pop(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the population variance of the argument. If using the builder functions described in ref:`_aggregation` this function ignores @@ -1990,7 +2064,7 @@ def var_pop(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.var_pop(expression.expr, filter=filter_raw)) -def var_samp(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def var_samp(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the sample variance of the argument. If using the builder functions described in ref:`_aggregation` this function ignores @@ -2004,7 +2078,7 @@ def var_samp(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.var_sample(expression.expr, filter=filter_raw)) -def var_sample(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def var_sample(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the sample variance of the argument. This is an alias for :py:func:`var_samp`. @@ -2015,7 +2089,7 @@ def var_sample(expression: Expr, filter: Optional[Expr] = None) -> Expr: def regr_avgx( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the average of the independent variable ``x``. 
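Most of the aggregate signatures above only swap ``Optional[Expr]`` for the ``Expr | None`` spelling, but the shared ``filter`` argument they all carry deserves a quick example; the data here is assumed:

from datafusion import SessionContext, col, functions as f

ctx = SessionContext()
df = ctx.from_pydict({"v": [1, 2, 3, 4], "flag": [True, False, True, False]})

# Only rows where flag is True contribute to the aggregate
df.aggregate([], [f.sum(col("v"), filter=col("flag")).alias("sum_flagged")]).show()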
@@ -2038,7 +2112,7 @@ def regr_avgx( def regr_avgy( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the average of the dependent variable ``y``. @@ -2061,7 +2135,7 @@ def regr_avgy( def regr_count( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Counts the number of rows in which both expressions are not null. @@ -2084,7 +2158,7 @@ def regr_count( def regr_intercept( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the intercept from the linear regression. @@ -2107,7 +2181,7 @@ def regr_intercept( def regr_r2( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the R-squared value from linear regression. @@ -2130,7 +2204,7 @@ def regr_r2( def regr_slope( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the slope from linear regression. @@ -2153,7 +2227,7 @@ def regr_slope( def regr_sxx( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the sum of squares of the independent variable ``x``. @@ -2176,7 +2250,7 @@ def regr_sxx( def regr_sxy( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the sum of products of pairs of numbers. @@ -2199,7 +2273,7 @@ def regr_sxy( def regr_syy( y: Expr, x: Expr, - filter: Optional[Expr] = None, + filter: Expr | None = None, ) -> Expr: """Computes the sum of squares of the dependent variable ``y``. @@ -2221,8 +2295,8 @@ def regr_syy( def first_value( expression: Expr, - filter: Optional[Expr] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + filter: Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS, ) -> Expr: """Returns the first value in a group of values. @@ -2235,8 +2309,13 @@ def first_value( Args: expression: Argument to perform bitwise calculation on filter: If provided, only compute against rows for which the filter is True - order_by: Set the ordering of the expression to evaluate + order_by: Set the ordering of the expression to evaluate. Accepts + column names or expressions. null_treatment: Assign whether to respect or ignore null values. + + For example:: + + df.aggregate([], first_value(col("a"), order_by="ts")) """ order_by_raw = sort_list_to_raw_sort_list(order_by) filter_raw = filter.expr if filter is not None else None @@ -2253,8 +2332,8 @@ def first_value( def last_value( expression: Expr, - filter: Optional[Expr] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + filter: Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS, ) -> Expr: """Returns the last value in a group of values. @@ -2267,8 +2346,13 @@ def last_value( Args: expression: Argument to perform bitwise calculation on filter: If provided, only compute against rows for which the filter is True - order_by: Set the ordering of the expression to evaluate + order_by: Set the ordering of the expression to evaluate. Accepts + column names or expressions. null_treatment: Assign whether to respect or ignore null values. 
+ + For example:: + + df.aggregate([], last_value(col("a"), order_by="ts")) """ order_by_raw = sort_list_to_raw_sort_list(order_by) filter_raw = filter.expr if filter is not None else None @@ -2286,8 +2370,8 @@ def last_value( def nth_value( expression: Expr, n: int, - filter: Optional[Expr] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + filter: Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS, ) -> Expr: """Returns the n-th value in a group of values. @@ -2301,8 +2385,13 @@ def nth_value( expression: Argument to perform bitwise calculation on n: Index of value to return. Starts at 1. filter: If provided, only compute against rows for which the filter is True - order_by: Set the ordering of the expression to evaluate + order_by: Set the ordering of the expression to evaluate. Accepts + column names or expressions. null_treatment: Assign whether to respect or ignore null values. + + For example:: + + df.aggregate([], nth_value(col("a"), 2, order_by="ts")) """ order_by_raw = sort_list_to_raw_sort_list(order_by) filter_raw = filter.expr if filter is not None else None @@ -2318,7 +2407,7 @@ def nth_value( ) -def bit_and(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def bit_and(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the bitwise AND of the argument. This aggregate function will bitwise compare every value in the input partition. @@ -2334,7 +2423,7 @@ def bit_and(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.bit_and(expression.expr, filter=filter_raw)) -def bit_or(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def bit_or(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the bitwise OR of the argument. This aggregate function will bitwise compare every value in the input partition. @@ -2351,7 +2440,7 @@ def bit_or(expression: Expr, filter: Optional[Expr] = None) -> Expr: def bit_xor( - expression: Expr, distinct: bool = False, filter: Optional[Expr] = None + expression: Expr, distinct: bool = False, filter: Expr | None = None ) -> Expr: """Computes the bitwise XOR of the argument. @@ -2369,7 +2458,7 @@ def bit_xor( return Expr(f.bit_xor(expression.expr, distinct=distinct, filter=filter_raw)) -def bool_and(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def bool_and(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the boolean AND of the argument. This aggregate function will compare every value in the input partition. These are @@ -2386,7 +2475,7 @@ def bool_and(expression: Expr, filter: Optional[Expr] = None) -> Expr: return Expr(f.bool_and(expression.expr, filter=filter_raw)) -def bool_or(expression: Expr, filter: Optional[Expr] = None) -> Expr: +def bool_or(expression: Expr, filter: Expr | None = None) -> Expr: """Computes the boolean OR of the argument. This aggregate function will compare every value in the input partition. These are @@ -2406,16 +2495,16 @@ def bool_or(expression: Expr, filter: Optional[Expr] = None) -> Expr: def lead( arg: Expr, shift_offset: int = 1, - default_value: Optional[Any] = None, - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + default_value: Any | None = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a lead window function. Lead operation will return the argument that is in the next shift_offset-th row in the partition. 
For example ``lead(col("b"), shift_offset=3, default_value=5)`` will return the 3rd following value in column ``b``. At the end of the partition, where - no futher values can be returned it will return the default value of 5. + no further values can be returned it will return the default value of 5. Here is an example of both the ``lead`` and :py:func:`datafusion.functions.lag` functions on a simple DataFrame:: @@ -2437,14 +2526,17 @@ def lead( shift_offset: Number of rows following the current row. default_value: Value to return if shift_offet row does not exist. partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + lead(col("b"), order_by="ts") """ if not isinstance(default_value, pa.Scalar) and default_value is not None: default_value = pa.scalar(default_value) - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( @@ -2452,7 +2544,7 @@ def lead( arg.expr, shift_offset, default_value, - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) @@ -2461,15 +2553,15 @@ def lead( def lag( arg: Expr, shift_offset: int = 1, - default_value: Optional[Any] = None, - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + default_value: Any | None = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a lag window function. Lag operation will return the argument that is in the previous shift_offset-th row in the partition. For example ``lag(col("b"), shift_offset=3, default_value=5)`` - will return the 3rd previous value in column ``b``. At the beginnig of the + will return the 3rd previous value in column ``b``. At the beginning of the partition, where no values can be returned it will return the default value of 5. Here is an example of both the ``lag`` and :py:func:`datafusion.functions.lead` @@ -2489,14 +2581,17 @@ def lag( shift_offset: Number of rows before the current row. default_value: Value to return if shift_offet row does not exist. partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + lag(col("b"), order_by="ts") """ if not isinstance(default_value, pa.Scalar): default_value = pa.scalar(default_value) - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( @@ -2504,15 +2599,15 @@ def lag( arg.expr, shift_offset, default_value, - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) def row_number( - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a row number window function. @@ -2531,30 +2626,33 @@ def row_number( Args: partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. 
+ order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + row_number(order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.row_number( - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) def rank( - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a rank window function. Returns the rank based upon the window order. Consecutive equal values will receive the same rank, but the next different value will not be consecutive but rather the - number of rows that preceed it plus one. This is similar to Olympic medals. If two + number of rows that precede it plus one. This is similar to Olympic medals. If two people tie for gold, the next place is bronze. There would be no silver medal. Here is an example of a dataframe with a window ordered by descending ``points`` and the associated rank. @@ -2572,24 +2670,27 @@ def rank( Args: partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + rank(order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.rank( - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) def dense_rank( - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a dense_rank window function. @@ -2608,24 +2709,27 @@ def dense_rank( Args: partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + dense_rank(order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.dense_rank( - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) def percent_rank( - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a percent_rank window function. @@ -2645,29 +2749,32 @@ def percent_rank( Args: partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. 
+ + For example:: + + percent_rank(order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.percent_rank( - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) def cume_dist( - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a cumulative distribution window function. This window function is similar to :py:func:`rank` except that the returned values - are the ratio of the row number to the total numebr of rows. Here is an example of a + are the ratio of the row number to the total number of rows. Here is an example of a dataframe with a window ordered by descending ``points`` and the associated cumulative distribution:: @@ -2682,16 +2789,19 @@ def cume_dist( Args: partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + cume_dist(order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.cume_dist( - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) @@ -2699,8 +2809,8 @@ def cume_dist( def ntile( groups: int, - partition_by: Optional[list[Expr]] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Create a n-tile window function. @@ -2723,17 +2833,20 @@ def ntile( Args: groups: Number of groups for the n-tile to be divided into. partition_by: Expressions to partition the window frame on. - order_by: Set ordering within the window frame. + order_by: Set ordering within the window frame. Accepts + column names or expressions. + + For example:: + + ntile(3, order_by="points") """ - partition_cols = ( - [col.expr for col in partition_by] if partition_by is not None else None - ) + partition_by_raw = expr_list_to_raw_expr_list(partition_by) order_by_raw = sort_list_to_raw_sort_list(order_by) return Expr( f.ntile( Expr.literal(groups).expr, - partition_by=partition_cols, + partition_by=partition_by_raw, order_by=order_by_raw, ) ) @@ -2742,13 +2855,13 @@ def ntile( def string_agg( expression: Expr, delimiter: str, - filter: Optional[Expr] = None, - order_by: Optional[list[Expr | SortExpr]] = None, + filter: Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, ) -> Expr: """Concatenates the input strings. This aggregate function will concatenate input strings, ignoring null values, and - seperating them with the specified delimiter. Non-string values will be converted to + separating them with the specified delimiter. Non-string values will be converted to their string equivalents. 
If using the builder functions described in ref:`_aggregation` this function ignores @@ -2758,7 +2871,12 @@ def string_agg( expression: Argument to perform bitwise calculation on delimiter: Text to place between each value of expression filter: If provided, only compute against rows for which the filter is True - order_by: Set the ordering of the expression to evaluate + order_by: Set the ordering of the expression to evaluate. Accepts + column names or expressions. + + For example:: + + df.aggregate([], string_agg(col("a"), ",", order_by="b")) """ order_by_raw = sort_list_to_raw_sort_list(order_by) filter_raw = filter.expr if filter is not None else None diff --git a/python/datafusion/html_formatter.py b/python/datafusion/html_formatter.py index 12a7e4553..65eb1f042 100644 --- a/python/datafusion/html_formatter.py +++ b/python/datafusion/html_formatter.py @@ -14,698 +14,16 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -"""HTML formatting utilities for DataFusion DataFrames.""" -from __future__ import annotations +"""Deprecated module for dataframe formatting.""" -from typing import ( - Any, - Callable, - Optional, - Protocol, - runtime_checkable, -) - - -def _validate_positive_int(value: Any, param_name: str) -> None: - """Validate that a parameter is a positive integer. - - Args: - value: The value to validate - param_name: Name of the parameter (used in error message) - - Raises: - ValueError: If the value is not a positive integer - """ - if not isinstance(value, int) or value <= 0: - msg = f"{param_name} must be a positive integer" - raise ValueError(msg) - - -def _validate_bool(value: Any, param_name: str) -> None: - """Validate that a parameter is a boolean. - - Args: - value: The value to validate - param_name: Name of the parameter (used in error message) - - Raises: - TypeError: If the value is not a boolean - """ - if not isinstance(value, bool): - msg = f"{param_name} must be a boolean" - raise TypeError(msg) - - -@runtime_checkable -class CellFormatter(Protocol): - """Protocol for cell value formatters.""" - - def __call__(self, value: Any) -> str: - """Format a cell value to string representation.""" - ... - - -@runtime_checkable -class StyleProvider(Protocol): - """Protocol for HTML style providers.""" - - def get_cell_style(self) -> str: - """Get the CSS style for table cells.""" - ... - - def get_header_style(self) -> str: - """Get the CSS style for header cells.""" - ... - - -class DefaultStyleProvider: - """Default implementation of StyleProvider.""" - - def get_cell_style(self) -> str: - """Get the CSS style for table cells. - - Returns: - CSS style string - """ - return ( - "border: 1px solid black; padding: 8px; text-align: left; " - "white-space: nowrap;" - ) - - def get_header_style(self) -> str: - """Get the CSS style for header cells. - - Returns: - CSS style string - """ - return ( - "border: 1px solid black; padding: 8px; text-align: left; " - "background-color: #f2f2f2; white-space: nowrap; min-width: fit-content; " - "max-width: fit-content;" - ) - - -class DataFrameHtmlFormatter: - """Configurable HTML formatter for DataFusion DataFrames. - - This class handles the HTML rendering of DataFrames for display in - Jupyter notebooks and other rich display contexts. - - This class supports extension through composition. 
Key extension points: - - Provide a custom StyleProvider for styling cells and headers - - Register custom formatters for specific types - - Provide custom cell builders for specialized cell rendering - - Args: - max_cell_length: Maximum characters to display in a cell before truncation - max_width: Maximum width of the HTML table in pixels - max_height: Maximum height of the HTML table in pixels - max_memory_bytes: Maximum memory in bytes for rendered data (default: 2MB) - min_rows_display: Minimum number of rows to display - repr_rows: Default number of rows to display in repr output - enable_cell_expansion: Whether to add expand/collapse buttons for long cell - values - custom_css: Additional CSS to include in the HTML output - show_truncation_message: Whether to display a message when data is truncated - style_provider: Custom provider for cell and header styles - use_shared_styles: Whether to load styles and scripts only once per notebook - session - """ - - # Class variable to track if styles have been loaded in the notebook - _styles_loaded = False - - def __init__( - self, - max_cell_length: int = 25, - max_width: int = 1000, - max_height: int = 300, - max_memory_bytes: int = 2 * 1024 * 1024, # 2 MB - min_rows_display: int = 20, - repr_rows: int = 10, - enable_cell_expansion: bool = True, - custom_css: Optional[str] = None, - show_truncation_message: bool = True, - style_provider: Optional[StyleProvider] = None, - use_shared_styles: bool = True, - ) -> None: - """Initialize the HTML formatter. - - Parameters - ---------- - max_cell_length : int, default 25 - Maximum length of cell content before truncation. - max_width : int, default 1000 - Maximum width of the displayed table in pixels. - max_height : int, default 300 - Maximum height of the displayed table in pixels. - max_memory_bytes : int, default 2097152 (2MB) - Maximum memory in bytes for rendered data. - min_rows_display : int, default 20 - Minimum number of rows to display. - repr_rows : int, default 10 - Default number of rows to display in repr output. - enable_cell_expansion : bool, default True - Whether to allow cells to expand when clicked. - custom_css : str, optional - Custom CSS to apply to the HTML table. - show_truncation_message : bool, default True - Whether to show a message indicating that content has been truncated. - style_provider : StyleProvider, optional - Provider of CSS styles for the HTML table. If None, DefaultStyleProvider - is used. - use_shared_styles : bool, default True - Whether to use shared styles across multiple tables. - - Raises: - ------ - ValueError - If max_cell_length, max_width, max_height, max_memory_bytes, - min_rows_display, or repr_rows is not a positive integer. - TypeError - If enable_cell_expansion, show_truncation_message, or use_shared_styles is - not a boolean, - or if custom_css is provided but is not a string, - or if style_provider is provided but does not implement the StyleProvider - protocol. 
- """ - # Validate numeric parameters - _validate_positive_int(max_cell_length, "max_cell_length") - _validate_positive_int(max_width, "max_width") - _validate_positive_int(max_height, "max_height") - _validate_positive_int(max_memory_bytes, "max_memory_bytes") - _validate_positive_int(min_rows_display, "min_rows_display") - _validate_positive_int(repr_rows, "repr_rows") - - # Validate boolean parameters - _validate_bool(enable_cell_expansion, "enable_cell_expansion") - _validate_bool(show_truncation_message, "show_truncation_message") - _validate_bool(use_shared_styles, "use_shared_styles") - - # Validate custom_css - if custom_css is not None and not isinstance(custom_css, str): - msg = "custom_css must be None or a string" - raise TypeError(msg) - - # Validate style_provider - if style_provider is not None and not isinstance(style_provider, StyleProvider): - msg = "style_provider must implement the StyleProvider protocol" - raise TypeError(msg) - - self.max_cell_length = max_cell_length - self.max_width = max_width - self.max_height = max_height - self.max_memory_bytes = max_memory_bytes - self.min_rows_display = min_rows_display - self.repr_rows = repr_rows - self.enable_cell_expansion = enable_cell_expansion - self.custom_css = custom_css - self.show_truncation_message = show_truncation_message - self.style_provider = style_provider or DefaultStyleProvider() - self.use_shared_styles = use_shared_styles - # Registry for custom type formatters - self._type_formatters: dict[type, CellFormatter] = {} - # Custom cell builders - self._custom_cell_builder: Optional[Callable[[Any, int, int, str], str]] = None - self._custom_header_builder: Optional[Callable[[Any], str]] = None - - def register_formatter(self, type_class: type, formatter: CellFormatter) -> None: - """Register a custom formatter for a specific data type. - - Args: - type_class: The type to register a formatter for - formatter: Function that takes a value of the given type and returns - a formatted string - """ - self._type_formatters[type_class] = formatter - - def set_custom_cell_builder( - self, builder: Callable[[Any, int, int, str], str] - ) -> None: - """Set a custom cell builder function. - - Args: - builder: Function that takes (value, row, col, table_id) and returns HTML - """ - self._custom_cell_builder = builder - - def set_custom_header_builder(self, builder: Callable[[Any], str]) -> None: - """Set a custom header builder function. - - Args: - builder: Function that takes a field and returns HTML - """ - self._custom_header_builder = builder - - @classmethod - def is_styles_loaded(cls) -> bool: - """Check if HTML styles have been loaded in the current session. - - This method is primarily intended for debugging UI rendering issues - related to style loading. - - Returns: - True if styles have been loaded, False otherwise - - Example: - >>> from datafusion.html_formatter import DataFrameHtmlFormatter - >>> DataFrameHtmlFormatter.is_styles_loaded() - False - """ - return cls._styles_loaded - - def format_html( - self, - batches: list, - schema: Any, - has_more: bool = False, - table_uuid: str | None = None, - ) -> str: - """Format record batches as HTML. - - This method is used by DataFrame's _repr_html_ implementation and can be - called directly when custom HTML rendering is needed. 
- - Args: - batches: List of Arrow RecordBatch objects - schema: Arrow Schema object - has_more: Whether there are more batches not shown - table_uuid: Unique ID for the table, used for JavaScript interactions - - Returns: - HTML string representation of the data - - Raises: - TypeError: If schema is invalid and no batches are provided - """ - if not batches: - return "No data to display" - - # Validate schema - if schema is None or not hasattr(schema, "__iter__"): - msg = "Schema must be provided" - raise TypeError(msg) - - # Generate a unique ID if none provided - table_uuid = table_uuid or f"df-{id(batches)}" - - # Build HTML components - html = [] - - # Only include styles and scripts if: - # 1. Not using shared styles, OR - # 2. Using shared styles but they haven't been loaded yet - include_styles = ( - not self.use_shared_styles or not DataFrameHtmlFormatter._styles_loaded - ) - - if include_styles: - html.extend(self._build_html_header()) - # If we're using shared styles, mark them as loaded - if self.use_shared_styles: - DataFrameHtmlFormatter._styles_loaded = True - - html.extend(self._build_table_container_start()) - - # Add table header and body - html.extend(self._build_table_header(schema)) - html.extend(self._build_table_body(batches, table_uuid)) - - html.append("
" + f"{field.name}
" + f"
" + "" + "" + f"{formatted_value}" + f"" + f"
" + f"
{formatted_value}
") - html.append("
") - - # Add footer (JavaScript and messages) - if include_styles and self.enable_cell_expansion: - html.append(self._get_javascript()) - - # Always add truncation message if needed (independent of styles) - if has_more and self.show_truncation_message: - html.append("
<div>Data truncated due to size.</div>
") - - return "\n".join(html) - - def _build_html_header(self) -> list[str]: - """Build the HTML header with CSS styles.""" - html = [] - html.append("") - return html +import warnings - def _build_table_container_start(self) -> list[str]: - """Build the opening tags for the table container.""" - html = [] - html.append( - f'
' - ) - html.append('') - return html +from datafusion.dataframe_formatter import * # noqa: F403 - def _build_table_header(self, schema: Any) -> list[str]: - """Build the HTML table header with column names.""" - html = [] - html.append("") - html.append("") - for field in schema: - if self._custom_header_builder: - html.append(self._custom_header_builder(field)) - else: - html.append( - f"" - ) - html.append("") - html.append("") - return html - - def _build_table_body(self, batches: list, table_uuid: str) -> list[str]: - """Build the HTML table body with data rows.""" - html = [] - html.append("") - - row_count = 0 - for batch in batches: - for row_idx in range(batch.num_rows): - row_count += 1 - html.append("") - - for col_idx, column in enumerate(batch.columns): - # Get the raw value from the column - raw_value = self._get_cell_value(column, row_idx) - - # Always check for type formatters first to format the value - formatted_value = self._format_cell_value(raw_value) - - # Then apply either custom cell builder or standard cell formatting - if self._custom_cell_builder: - # Pass both the raw value and formatted value to let the - # builder decide - cell_html = self._custom_cell_builder( - raw_value, row_count, col_idx, table_uuid - ) - html.append(cell_html) - else: - # Standard cell formatting with formatted value - if ( - len(str(raw_value)) > self.max_cell_length - and self.enable_cell_expansion - ): - cell_html = self._build_expandable_cell( - formatted_value, row_count, col_idx, table_uuid - ) - else: - cell_html = self._build_regular_cell(formatted_value) - html.append(cell_html) - - html.append("") - - html.append("") - return html - - def _get_cell_value(self, column: Any, row_idx: int) -> Any: - """Extract a cell value from a column. - - Args: - column: Arrow array - row_idx: Row index - - Returns: - The raw cell value - """ - try: - value = column[row_idx] - - if hasattr(value, "as_py"): - return value.as_py() - except (AttributeError, TypeError): - pass - else: - return value - - def _format_cell_value(self, value: Any) -> str: - """Format a cell value for display. - - Uses registered type formatters if available. - - Args: - value: The cell value to format - - Returns: - Formatted cell value as string - """ - # Check for custom type formatters - for type_cls, formatter in self._type_formatters.items(): - if isinstance(value, type_cls): - return formatter(value) - - # If no formatter matched, return string representation - return str(value) - - def _build_expandable_cell( - self, formatted_value: str, row_count: int, col_idx: int, table_uuid: str - ) -> str: - """Build an expandable cell for long content.""" - short_value = str(formatted_value)[: self.max_cell_length] - return ( - f"" - ) - - def _build_regular_cell(self, formatted_value: str) -> str: - """Build a regular table cell.""" - return ( - f"" - ) - - def _build_html_footer(self, has_more: bool) -> list[str]: - """Build the HTML footer with JavaScript and messages.""" - html = [] - - # Add JavaScript for interactivity only if cell expansion is enabled - # and we're not using the shared styles approach - if self.enable_cell_expansion and not self.use_shared_styles: - html.append(self._get_javascript()) - - # Add truncation message if needed - if has_more and self.show_truncation_message: - html.append("
Data truncated due to size.
") - - return html - - def _get_default_css(self) -> str: - """Get default CSS styles for the HTML table.""" - return """ - .expandable-container { - display: inline-block; - max-width: 200px; - } - .expandable { - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - display: block; - } - .full-text { - display: none; - white-space: normal; - } - .expand-btn { - cursor: pointer; - color: blue; - text-decoration: underline; - border: none; - background: none; - font-size: inherit; - display: block; - margin-top: 5px; - } - """ - - def _get_javascript(self) -> str: - """Get JavaScript code for interactive elements.""" - return """ - - """ - - -class FormatterManager: - """Manager class for the global DataFrame HTML formatter instance.""" - - _default_formatter: DataFrameHtmlFormatter = DataFrameHtmlFormatter() - - @classmethod - def set_formatter(cls, formatter: DataFrameHtmlFormatter) -> None: - """Set the global DataFrame HTML formatter. - - Args: - formatter: The formatter instance to use globally - """ - cls._default_formatter = formatter - _refresh_formatter_reference() - - @classmethod - def get_formatter(cls) -> DataFrameHtmlFormatter: - """Get the current global DataFrame HTML formatter. - - Returns: - The global HTML formatter instance - """ - return cls._default_formatter - - -def get_formatter() -> DataFrameHtmlFormatter: - """Get the current global DataFrame HTML formatter. - - This function is used by the DataFrame._repr_html_ implementation to access - the shared formatter instance. It can also be used directly when custom - HTML rendering is needed. - - Returns: - The global HTML formatter instance - - Example: - >>> from datafusion.html_formatter import get_formatter - >>> formatter = get_formatter() - >>> formatter.max_cell_length = 50 # Increase cell length - """ - return FormatterManager.get_formatter() - - -def set_formatter(formatter: DataFrameHtmlFormatter) -> None: - """Set the global DataFrame HTML formatter. - - Args: - formatter: The formatter instance to use globally - - Example: - >>> from datafusion.html_formatter import get_formatter, set_formatter - >>> custom_formatter = DataFrameHtmlFormatter(max_cell_length=100) - >>> set_formatter(custom_formatter) - """ - FormatterManager.set_formatter(formatter) - - -def configure_formatter(**kwargs: Any) -> None: - """Configure the global DataFrame HTML formatter. - - This function creates a new formatter with the provided configuration - and sets it as the global formatter for all DataFrames. - - Args: - **kwargs: Formatter configuration parameters like max_cell_length, - max_width, max_height, enable_cell_expansion, etc. - - Raises: - ValueError: If any invalid parameters are provided - - Example: - >>> from datafusion.html_formatter import configure_formatter - >>> configure_formatter( - ... max_cell_length=50, - ... max_height=500, - ... enable_cell_expansion=True, - ... use_shared_styles=True - ... ) - """ - # Valid parameters accepted by DataFrameHtmlFormatter - valid_params = { - "max_cell_length", - "max_width", - "max_height", - "max_memory_bytes", - "min_rows_display", - "repr_rows", - "enable_cell_expansion", - "custom_css", - "show_truncation_message", - "style_provider", - "use_shared_styles", - } - - # Check for invalid parameters - invalid_params = set(kwargs) - valid_params - if invalid_params: - msg = ( - f"Invalid formatter parameters: {', '.join(invalid_params)}. 
" - f"Valid parameters are: {', '.join(valid_params)}" - ) - raise ValueError(msg) - - # Create and set formatter with validated parameters - set_formatter(DataFrameHtmlFormatter(**kwargs)) - - -def reset_formatter() -> None: - """Reset the global DataFrame HTML formatter to default settings. - - This function creates a new formatter with default configuration - and sets it as the global formatter for all DataFrames. - - Example: - >>> from datafusion.html_formatter import reset_formatter - >>> reset_formatter() # Reset formatter to default settings - """ - formatter = DataFrameHtmlFormatter() - # Reset the styles_loaded flag to ensure styles will be reloaded - DataFrameHtmlFormatter._styles_loaded = False - set_formatter(formatter) - - -def reset_styles_loaded_state() -> None: - """Reset the styles loaded state to force reloading of styles. - - This can be useful when switching between notebook sessions or - when styles need to be refreshed. - - Example: - >>> from datafusion.html_formatter import reset_styles_loaded_state - >>> reset_styles_loaded_state() # Force styles to reload in next render - """ - DataFrameHtmlFormatter._styles_loaded = False - - -def _refresh_formatter_reference() -> None: - """Refresh formatter reference in any modules using it. - - This helps ensure that changes to the formatter are reflected in existing - DataFrames that might be caching the formatter reference. - """ - # This is a no-op but signals modules to refresh their reference +warnings.warn( + "The module 'html_formatter' is deprecated and will be removed in the next release." + "Please use 'dataframe_formatter' instead.", + DeprecationWarning, + stacklevel=3, +) diff --git a/python/datafusion/input/location.py b/python/datafusion/input/location.py index 08d98d115..b804ac18b 100644 --- a/python/datafusion/input/location.py +++ b/python/datafusion/input/location.py @@ -17,7 +17,6 @@ """The default input source for DataFusion.""" -import glob from pathlib import Path from typing import Any @@ -84,6 +83,7 @@ def build_table( raise RuntimeError(msg) # Input could possibly be multiple files. Create a list if so - input_files = glob.glob(input_item) + input_path = Path(input_item) + input_files = [str(p) for p in input_path.parent.glob(input_path.name)] return SqlTable(table_name, columns, num_rows, input_files) diff --git a/python/datafusion/io.py b/python/datafusion/io.py index 551e20a6f..4f9c3c516 100644 --- a/python/datafusion/io.py +++ b/python/datafusion/io.py @@ -22,15 +22,17 @@ from typing import TYPE_CHECKING from datafusion.context import SessionContext -from datafusion.dataframe import DataFrame if TYPE_CHECKING: import pathlib import pyarrow as pa + from datafusion.dataframe import DataFrame from datafusion.expr import Expr + from .options import CsvReadOptions + def read_parquet( path: str | pathlib.Path, @@ -126,6 +128,7 @@ def read_csv( file_extension: str = ".csv", table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None, file_compression_type: str | None = None, + options: CsvReadOptions | None = None, ) -> DataFrame: """Read a CSV data source. @@ -147,15 +150,12 @@ def read_csv( selected for data input. table_partition_cols: Partition columns. file_compression_type: File compression type. + options: Set advanced options for CSV reading. This cannot be + combined with any of the other options in this method. 
Returns: DataFrame representation of the read CSV files """ - if table_partition_cols is None: - table_partition_cols = [] - - path = [str(p) for p in path] if isinstance(path, list) else str(path) - return SessionContext.global_ctx().read_csv( path, schema, @@ -165,6 +165,7 @@ def read_csv( file_extension, table_partition_cols, file_compression_type, + options, ) diff --git a/python/datafusion/options.py b/python/datafusion/options.py new file mode 100644 index 000000000..ec19f37d0 --- /dev/null +++ b/python/datafusion/options.py @@ -0,0 +1,284 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Options for reading various file formats.""" + +from __future__ import annotations + +import warnings +from typing import TYPE_CHECKING + +import pyarrow as pa + +from datafusion.expr import sort_list_to_raw_sort_list + +if TYPE_CHECKING: + from datafusion.expr import SortExpr + +from ._internal import options + +__all__ = ["CsvReadOptions"] + +DEFAULT_MAX_INFER_SCHEMA = 1000 + + +class CsvReadOptions: + """Options for reading CSV files. + + This class provides a builder pattern for configuring CSV reading options. + All methods starting with ``with_`` return ``self`` to allow method chaining. + """ + + def __init__( + self, + *, + has_header: bool = True, + delimiter: str = ",", + quote: str = '"', + terminator: str | None = None, + escape: str | None = None, + comment: str | None = None, + newlines_in_values: bool = False, + schema: pa.Schema | None = None, + schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA, + file_extension: str = ".csv", + table_partition_cols: list[tuple[str, pa.DataType]] | None = None, + file_compression_type: str = "", + file_sort_order: list[list[SortExpr]] | None = None, + null_regex: str | None = None, + truncated_rows: bool = False, + ) -> None: + """Initialize CsvReadOptions. + + Args: + has_header: Does the CSV file have a header row? If schema inference + is run on a file with no headers, default column names are created. + delimiter: Column delimiter character. Must be a single ASCII character. + quote: Quote character for fields containing delimiters or newlines. + Must be a single ASCII character. + terminator: Optional line terminator character. If ``None``, uses CRLF. + Must be a single ASCII character. + escape: Optional escape character for quotes. Must be a single ASCII + character. + comment: If specified, lines beginning with this character are ignored. + Must be a single ASCII character. + newlines_in_values: Whether newlines in quoted values are supported. + Parsing newlines in quoted values may be affected by execution + behavior such as parallel file scanning. Setting this to ``True`` + ensures that newlines in values are parsed successfully, which may + reduce performance. 
+ schema: Optional PyArrow schema representing the CSV files. If ``None``, + the CSV reader will try to infer it based on data in the file. + schema_infer_max_records: Maximum number of rows to read from CSV files + for schema inference if needed. + file_extension: File extension; only files with this extension are + selected for data input. + table_partition_cols: Partition columns as a list of tuples of + (column_name, data_type). + file_compression_type: File compression type. Supported values are + ``"gzip"``, ``"bz2"``, ``"xz"``, ``"zstd"``, or empty string for + uncompressed. + file_sort_order: Optional sort order of the files as a list of sort + expressions per file. + null_regex: Optional regex pattern to match null values in the CSV. + truncated_rows: Whether to allow truncated rows when parsing. By default + this is ``False`` and will error if the CSV rows have different + lengths. When set to ``True``, it will allow records with less than + the expected number of columns and fill the missing columns with + nulls. If the record's schema is not nullable, it will still return + an error. + """ + validate_single_character("delimiter", delimiter) + validate_single_character("quote", quote) + validate_single_character("terminator", terminator) + validate_single_character("escape", escape) + validate_single_character("comment", comment) + + self.has_header = has_header + self.delimiter = delimiter + self.quote = quote + self.terminator = terminator + self.escape = escape + self.comment = comment + self.newlines_in_values = newlines_in_values + self.schema = schema + self.schema_infer_max_records = schema_infer_max_records + self.file_extension = file_extension + self.table_partition_cols = table_partition_cols or [] + self.file_compression_type = file_compression_type + self.file_sort_order = file_sort_order or [] + self.null_regex = null_regex + self.truncated_rows = truncated_rows + + def with_has_header(self, has_header: bool) -> CsvReadOptions: + """Configure whether the CSV has a header row.""" + self.has_header = has_header + return self + + def with_delimiter(self, delimiter: str) -> CsvReadOptions: + """Configure the column delimiter.""" + self.delimiter = delimiter + return self + + def with_quote(self, quote: str) -> CsvReadOptions: + """Configure the quote character.""" + self.quote = quote + return self + + def with_terminator(self, terminator: str | None) -> CsvReadOptions: + """Configure the line terminator character.""" + self.terminator = terminator + return self + + def with_escape(self, escape: str | None) -> CsvReadOptions: + """Configure the escape character.""" + self.escape = escape + return self + + def with_comment(self, comment: str | None) -> CsvReadOptions: + """Configure the comment character.""" + self.comment = comment + return self + + def with_newlines_in_values(self, newlines_in_values: bool) -> CsvReadOptions: + """Configure whether newlines in values are supported.""" + self.newlines_in_values = newlines_in_values + return self + + def with_schema(self, schema: pa.Schema | None) -> CsvReadOptions: + """Configure the schema.""" + self.schema = schema + return self + + def with_schema_infer_max_records( + self, schema_infer_max_records: int + ) -> CsvReadOptions: + """Configure maximum records for schema inference.""" + self.schema_infer_max_records = schema_infer_max_records + return self + + def with_file_extension(self, file_extension: str) -> CsvReadOptions: + """Configure the file extension filter.""" + self.file_extension = file_extension + return 
self + + def with_table_partition_cols( + self, table_partition_cols: list[tuple[str, pa.DataType]] + ) -> CsvReadOptions: + """Configure table partition columns.""" + self.table_partition_cols = table_partition_cols + return self + + def with_file_compression_type(self, file_compression_type: str) -> CsvReadOptions: + """Configure file compression type.""" + self.file_compression_type = file_compression_type + return self + + def with_file_sort_order( + self, file_sort_order: list[list[SortExpr]] + ) -> CsvReadOptions: + """Configure file sort order.""" + self.file_sort_order = file_sort_order + return self + + def with_null_regex(self, null_regex: str | None) -> CsvReadOptions: + """Configure null value regex pattern.""" + self.null_regex = null_regex + return self + + def with_truncated_rows(self, truncated_rows: bool) -> CsvReadOptions: + """Configure whether to allow truncated rows.""" + self.truncated_rows = truncated_rows + return self + + def to_inner(self) -> options.CsvReadOptions: + """Convert this object into the underlying Rust structure. + + This is intended for internal use only. + """ + file_sort_order = ( + [] + if self.file_sort_order is None + else [ + sort_list_to_raw_sort_list(sort_list) + for sort_list in self.file_sort_order + ] + ) + + return options.CsvReadOptions( + has_header=self.has_header, + delimiter=ord(self.delimiter[0]) if self.delimiter else ord(","), + quote=ord(self.quote[0]) if self.quote else ord('"'), + terminator=ord(self.terminator[0]) if self.terminator else None, + escape=ord(self.escape[0]) if self.escape else None, + comment=ord(self.comment[0]) if self.comment else None, + newlines_in_values=self.newlines_in_values, + schema=self.schema, + schema_infer_max_records=self.schema_infer_max_records, + file_extension=self.file_extension, + table_partition_cols=_convert_table_partition_cols( + self.table_partition_cols + ), + file_compression_type=self.file_compression_type or "", + file_sort_order=file_sort_order, + null_regex=self.null_regex, + truncated_rows=self.truncated_rows, + ) + + +def validate_single_character(name: str, value: str | None) -> None: + if value is not None and len(value) != 1: + message = f"{name} must be a single character" + raise ValueError(message) + + +def _convert_table_partition_cols( + table_partition_cols: list[tuple[str, str | pa.DataType]], +) -> list[tuple[str, pa.DataType]]: + warn = False + converted_table_partition_cols = [] + + for col, data_type in table_partition_cols: + if isinstance(data_type, str): + warn = True + if data_type == "string": + converted_data_type = pa.string() + elif data_type == "int": + converted_data_type = pa.int32() + else: + message = ( + f"Unsupported literal data type '{data_type}' for partition " + "column. 
Supported types are 'string' and 'int'"
+                )
+                raise ValueError(message)
+        else:
+            converted_data_type = data_type
+
+        converted_table_partition_cols.append((col, converted_data_type))
+
+    if warn:
+        message = (
+            "using literals for table_partition_cols data types is deprecated, "
+            "use pyarrow types instead"
+        )
+        warnings.warn(
+            message,
+            category=DeprecationWarning,
+            stacklevel=2,
+        )
+
+    return converted_table_partition_cols
diff --git a/python/datafusion/plan.py b/python/datafusion/plan.py
index 0b7bebcb3..fb54fd624 100644
--- a/python/datafusion/plan.py
+++ b/python/datafusion/plan.py
@@ -98,6 +98,12 @@ def to_proto(self) -> bytes:
         """
         return self._raw_plan.to_proto()
 
+    def __eq__(self, other: object) -> bool:
+        """Test equality."""
+        if not isinstance(other, LogicalPlan):
+            return False
+        return self._raw_plan.__eq__(other._raw_plan)
+
 
 class ExecutionPlan:
     """Represent nodes in the DataFusion Physical Plan."""
diff --git a/python/datafusion/record_batch.py b/python/datafusion/record_batch.py
index 556eaa786..c24cde0ac 100644
--- a/python/datafusion/record_batch.py
+++ b/python/datafusion/record_batch.py
@@ -46,6 +46,26 @@ def to_pyarrow(self) -> pa.RecordBatch:
         """Convert to :py:class:`pa.RecordBatch`."""
         return self.record_batch.to_pyarrow()
 
+    def __arrow_c_array__(
+        self, requested_schema: object | None = None
+    ) -> tuple[object, object]:
+        """Export the record batch via the Arrow C Data Interface.
+
+        This allows zero-copy interchange with libraries that support the
+        `Arrow PyCapsule interface
+        <https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html>`_.
+
+        Args:
+            requested_schema: Attempt to provide the record batch using this
+                schema. Only straightforward projections such as column
+                selection or reordering are applied.
+
+        Returns:
+            Two Arrow PyCapsule objects representing the ``ArrowArray`` and
+            ``ArrowSchema``.
+        """
+        return self.record_batch.__arrow_c_array__(requested_schema)
+
 
 class RecordBatchStream:
     """This class represents a stream of record batches.
@@ -63,19 +83,19 @@ def next(self) -> RecordBatch:
         return next(self)
 
     async def __anext__(self) -> RecordBatch:
-        """Async iterator function."""
+        """Return the next :py:class:`RecordBatch` in the stream asynchronously."""
         next_batch = await self.rbs.__anext__()
         return RecordBatch(next_batch)
 
     def __next__(self) -> RecordBatch:
-        """Iterator function."""
+        """Return the next :py:class:`RecordBatch` in the stream."""
         next_batch = next(self.rbs)
         return RecordBatch(next_batch)
 
     def __aiter__(self) -> typing_extensions.Self:
-        """Async iterator function."""
+        """Return an asynchronous iterator over record batches."""
         return self
 
     def __iter__(self) -> typing_extensions.Self:
-        """Iterator function."""
+        """Return an iterator over record batches."""
         return self
diff --git a/python/datafusion/substrait.py b/python/datafusion/substrait.py
index f10adfb0c..3115238fa 100644
--- a/python/datafusion/substrait.py
+++ b/python/datafusion/substrait.py
@@ -67,6 +67,26 @@ def encode(self) -> bytes:
         """
         return self.plan_internal.encode()
 
+    def to_json(self) -> str:
+        """Get the JSON representation of the Substrait plan.
+
+        Returns:
+            A JSON representation of the Substrait plan.
+        """
+        return self.plan_internal.to_json()
+
+    @staticmethod
+    def from_json(json: str) -> Plan:
+        """Parse a plan from a JSON string representation.
+
+        Args:
+            json: JSON representation of a Substrait plan.
+
+        Returns:
+            Plan object representing the Substrait plan.
+ """ + return Plan(substrait_internal.Plan.from_json(json)) + @deprecated("Use `Plan` instead.") class plan(Plan): # noqa: N801 diff --git a/python/datafusion/user_defined.py b/python/datafusion/user_defined.py index dd634c7fb..d4e5302b5 100644 --- a/python/datafusion/user_defined.py +++ b/python/datafusion/user_defined.py @@ -22,15 +22,19 @@ import functools from abc import ABCMeta, abstractmethod from enum import Enum -from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, overload +from typing import TYPE_CHECKING, Any, Protocol, TypeGuard, TypeVar, cast, overload import pyarrow as pa import datafusion._internal as df_internal +from datafusion import SessionContext from datafusion.expr import Expr if TYPE_CHECKING: + from _typeshed import CapsuleType as _PyCapsule + _R = TypeVar("_R", bound=pa.DataType) + from collections.abc import Callable, Sequence class Volatility(Enum): @@ -77,6 +81,38 @@ def __str__(self) -> str: return self.name.lower() +def data_type_or_field_to_field(value: pa.DataType | pa.Field, name: str) -> pa.Field: + """Helper function to return a Field from either a Field or DataType.""" + if isinstance(value, pa.Field): + return value + return pa.field(name, type=value) + + +def data_types_or_fields_to_field_list( + inputs: Sequence[pa.Field | pa.DataType] | pa.Field | pa.DataType, +) -> list[pa.Field]: + """Helper function to return a list of Fields.""" + if isinstance(inputs, pa.DataType): + return [pa.field("value", type=inputs)] + if isinstance(inputs, pa.Field): + return [inputs] + + return [ + data_type_or_field_to_field(v, f"value_{idx}") for (idx, v) in enumerate(inputs) + ] + + +class ScalarUDFExportable(Protocol): + """Type hint for object that has __datafusion_scalar_udf__ PyCapsule.""" + + def __datafusion_scalar_udf__(self) -> object: ... # noqa: D105 + + +def _is_pycapsule(value: object) -> TypeGuard[_PyCapsule]: + """Return ``True`` when ``value`` is a CPython ``PyCapsule``.""" + return value.__class__.__name__ == "PyCapsule" + + class ScalarUDF: """Class for performing scalar user-defined functions (UDF). @@ -88,18 +124,21 @@ def __init__( self, name: str, func: Callable[..., _R], - input_types: pa.DataType | list[pa.DataType], - return_type: _R, + input_fields: list[pa.Field], + return_field: _R, volatility: Volatility | str, ) -> None: """Instantiate a scalar user-defined function (UDF). See helper method :py:func:`udf` for argument details. """ - if isinstance(input_types, pa.DataType): - input_types = [input_types] + if hasattr(func, "__datafusion_scalar_udf__"): + self._udf = df_internal.ScalarUDF.from_pycapsule(func) + return + if isinstance(input_fields, pa.DataType): + input_fields = [input_fields] self._udf = df_internal.ScalarUDF( - name, func, input_types, return_type, str(volatility) + name, func, input_fields, return_field, str(volatility) ) def __repr__(self) -> str: @@ -118,22 +157,26 @@ def __call__(self, *args: Expr) -> Expr: @overload @staticmethod def udf( - input_types: list[pa.DataType], - return_type: _R, + input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field, + return_field: pa.DataType | pa.Field, volatility: Volatility | str, - name: Optional[str] = None, + name: str | None = None, ) -> Callable[..., ScalarUDF]: ... 
@overload
     @staticmethod
     def udf(
         func: Callable[..., _R],
-        input_types: list[pa.DataType],
-        return_type: _R,
+        input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+        return_field: pa.DataType | pa.Field,
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> ScalarUDF: ...
 
+    @overload
+    @staticmethod
+    def udf(func: ScalarUDFExportable) -> ScalarUDF: ...
+
     @staticmethod
     def udf(*args: Any, **kwargs: Any):  # noqa: D417
         """Create a new User-Defined Function (UDF).
 
         This class can be used both as either a function or a decorator.
 
         Usage:
-        - As a function: ``udf(func, input_types, return_type, volatility, name)``.
-        - As a decorator: ``@udf(input_types, return_type, volatility, name)``.
+        - As a function: ``udf(func, input_fields, return_field, volatility, name)``.
+        - As a decorator: ``@udf(input_fields, return_field, volatility, name)``.
 
         When used a decorator, do **not** pass ``func`` explicitly.
 
+        In lieu of passing a PyArrow Field, you can pass a DataType for simplicity.
+        When you do so, the inputs and output are assumed to be nullable and to
+        have no metadata.
+
         Args:
             func (Callable, optional): Only needed when calling as a function.
-                Skip this argument when using ``udf`` as a decorator.
-            input_types (list[pa.DataType]): The data types of the arguments
-                to ``func``. This list must be of the same length as the number of
-                arguments.
-            return_type (_R): The data type of the return value from the function.
+                Skip this argument when using ``udf`` as a decorator. If you have
+                a Rust-backed ScalarUDF within a PyCapsule, you can pass this
+                parameter and ignore the rest. They will be determined directly
+                from the underlying function. See the online documentation for
+                more information.
+            input_fields (list[pa.Field | pa.DataType]): The data types or Fields
+                of the arguments to ``func``. This list must be of the same length
+                as the number of arguments.
+            return_field (_R): The field of the return value from the function.
             volatility (Volatility | str): See `Volatility` for allowed values.
             name (Optional[str]): A descriptive name for the function.
 
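
For illustration, a hedged sketch of calling the Field-based `udf` API from this hunk. It assumes `udf` and `column` remain importable from the top-level `datafusion` package and that a bare DataType is still accepted for `return_field` (the helpers above wrap it into a nullable Field); the function name, column names, and values are illustrative.

```python
import pyarrow as pa
import pyarrow.compute as pc
from datafusion import SessionContext, column, udf

# Function form: pass the callable plus Field (or DataType) information.
double_it = udf(
    lambda arr: pc.multiply(arr, 2),  # receives one pa.Array per batch
    [pa.field("value", pa.int64())],  # input_fields; a bare pa.int64() also works
    pa.int64(),  # return_field given as a plain DataType
    "immutable",
    name="double_it",
)

ctx = SessionContext()
df = ctx.from_pydict({"a": [1, 2, 3]})
df.select(double_it(column("a"))).show()
```
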
@@ -171,14 +221,14 @@ def double_func(x):
         @udf([pa.int32()], pa.int32(), "volatile", "double_it")
         def double_udf(x):
             return x * 2
-        """
+        """  # noqa: W505 E501
 
         def _function(
             func: Callable[..., _R],
-            input_types: list[pa.DataType],
-            return_type: _R,
+            input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+            return_field: pa.DataType | pa.Field,
             volatility: Volatility | str,
-            name: Optional[str] = None,
+            name: str | None = None,
         ) -> ScalarUDF:
             if not callable(func):
                 msg = "`func` argument must be callable"
@@ -188,46 +238,76 @@ def _function(
                 name = func.__qualname__.lower()
             else:
                 name = func.__class__.__name__.lower()
+            input_fields = data_types_or_fields_to_field_list(input_fields)
+            return_field = data_type_or_field_to_field(return_field, "value")
             return ScalarUDF(
                 name=name,
                 func=func,
-                input_types=input_types,
-                return_type=return_type,
+                input_fields=input_fields,
+                return_field=return_field,
                 volatility=volatility,
             )
 
         def _decorator(
-            input_types: list[pa.DataType],
-            return_type: _R,
+            input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+            return_field: _R,
             volatility: Volatility | str,
-            name: Optional[str] = None,
+            name: str | None = None,
         ) -> Callable:
-            def decorator(func: Callable):
+            def decorator(func: Callable) -> Callable:
                 udf_caller = ScalarUDF.udf(
-                    func, input_types, return_type, volatility, name
+                    func, input_fields, return_field, volatility, name
                 )
 
                 @functools.wraps(func)
-                def wrapper(*args: Any, **kwargs: Any):
+                def wrapper(*args: Any, **kwargs: Any) -> Expr:
                     return udf_caller(*args, **kwargs)
 
                 return wrapper
 
             return decorator
 
+        if args and hasattr(args[0], "__datafusion_scalar_udf__"):
+            return ScalarUDF.from_pycapsule(args[0])
+
         if args and callable(args[0]):
             # Case 1: Used as a function, require the first parameter to be callable
             return _function(*args, **kwargs)
 
         # Case 2: Used as a decorator with parameters
         return _decorator(*args, **kwargs)
 
+    @staticmethod
+    def from_pycapsule(func: ScalarUDFExportable) -> ScalarUDF:
+        """Create a Scalar UDF from a ScalarUDF PyCapsule object.
+
+        This function will instantiate a Scalar UDF backed by a DataFusion
+        ScalarUDF that is exported via the FFI bindings.
+        """
+        name = str(func.__class__)
+        return ScalarUDF(
+            name=name,
+            func=func,
+            input_fields=None,
+            return_field=None,
+            volatility=None,
+        )
+
 
 class Accumulator(metaclass=ABCMeta):
     """Defines how an :py:class:`AggregateUDF` accumulates values."""
 
     @abstractmethod
     def state(self) -> list[pa.Scalar]:
-        """Return the current state."""
+        """Return the current state.
+
+        While this function template expects a list of PyArrow Scalar values
+        as the return type, you can return any value that can be converted
+        into a Scalar. This includes basic Python data types such as integers
+        and strings. Beyond these primitives, PyArrow, nanoarrow, and arro3
+        objects are currently supported. Other objects that support the Arrow
+        FFI standard will be given a "best attempt" conversion to scalar
+        objects.
+        """
 
     @abstractmethod
     def update(self, *values: pa.Array) -> None:
@@ -239,7 +319,22 @@ def merge(self, states: list[pa.Array]) -> None:
 
     @abstractmethod
     def evaluate(self) -> pa.Scalar:
-        """Return the resultant value."""
+        """Return the resultant value.
+
+        While this function template expects a PyArrow Scalar value as the
+        return type, you can return any value that can be converted into a
+        Scalar. This includes basic Python data types such as integers and
+        strings. Beyond these primitives, PyArrow, nanoarrow, and arro3
+        objects are currently supported. Other objects that support the Arrow
+        FFI standard will be given a "best attempt" conversion to scalar
+        objects.
+        """
+
+
+class AggregateUDFExportable(Protocol):
+    """Type hint for object that has __datafusion_aggregate_udf__ PyCapsule."""
+
+    def __datafusion_aggregate_udf__(self) -> object: ...  # noqa: D105
 
 
 class AggregateUDF:
@@ -249,6 +344,7 @@ class AggregateUDF:
     also :py:class:`ScalarUDF` for operating on a row by row basis.
     """
 
+    @overload
     def __init__(
         self,
         name: str,
@@ -257,12 +353,48 @@ def __init__(
         return_type: pa.DataType,
         state_type: list[pa.DataType],
         volatility: Volatility | str,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        name: str,
+        accumulator: AggregateUDFExportable,
+        input_types: None = ...,
+        return_type: None = ...,
+        state_type: None = ...,
+        volatility: None = ...,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        name: str,
+        accumulator: Callable[[], Accumulator] | AggregateUDFExportable,
+        input_types: list[pa.DataType] | None,
+        return_type: pa.DataType | None,
+        state_type: list[pa.DataType] | None,
+        volatility: Volatility | str | None,
     ) -> None:
         """Instantiate a user-defined aggregate function (UDAF).
 
         See :py:func:`udaf` for a convenience function and argument descriptions.
         """
+        if hasattr(accumulator, "__datafusion_aggregate_udf__"):
+            self._udaf = df_internal.AggregateUDF.from_pycapsule(accumulator)
+            return
+        if (
+            input_types is None
+            or return_type is None
+            or state_type is None
+            or volatility is None
+        ):
+            msg = (
+                "`input_types`, `return_type`, `state_type`, and `volatility` "
+                "must be provided when `accumulator` is callable."
+            )
+            raise TypeError(msg)
+
         self._udaf = df_internal.AggregateUDF(
             name,
             accumulator,
@@ -292,7 +424,7 @@ def udaf(
         return_type: pa.DataType,
         state_type: list[pa.DataType],
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> Callable[..., AggregateUDF]: ...
 
     @overload
@@ -303,11 +435,19 @@ def udaf(
         return_type: pa.DataType,
         state_type: list[pa.DataType],
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> AggregateUDF: ...
 
+    @overload
+    @staticmethod
+    def udaf(accum: AggregateUDFExportable) -> AggregateUDF: ...
+
+    @overload
+    @staticmethod
+    def udaf(accum: _PyCapsule) -> AggregateUDF: ...
+
     @staticmethod
-    def udaf(*args: Any, **kwargs: Any):  # noqa: D417
+    def udaf(*args: Any, **kwargs: Any):  # noqa: D417, C901
         """Create a new User-Defined Aggregate Function (UDAF).
 
         This class allows you to define an aggregate function that can be used in
@@ -364,6 +504,10 @@ def udf4() -> Summarize:
         Args:
             accum: The accumulator python function. Only needed when calling as
                 a function. Skip this argument when using ``udaf`` as a decorator.
+                If you have a Rust-backed AggregateUDF within a PyCapsule, you can
+                pass this parameter and ignore the rest. They will be determined
+                directly from the underlying function. See the online documentation
+                for more information.
             input_types: The data types of the arguments to ``accum``.
             return_type: The data type of the return value.
             state_type: The data types of the intermediate accumulation.
@@ -381,7 +525,7 @@ def _function(
             return_type: pa.DataType,
             state_type: list[pa.DataType],
             volatility: Volatility | str,
-            name: Optional[str] = None,
+            name: str | None = None,
         ) -> AggregateUDF:
             if not callable(accum):
                 msg = "`func` must be callable."
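
As a companion to the Accumulator path above, a sketch of a summation UDAF in the spirit of the `Summarize` example the docstring refers to; it assumes `Accumulator` and `udaf` are importable from the top-level `datafusion` package, and the class and variable names are illustrative.

```python
import pyarrow as pa
import pyarrow.compute as pc
from datafusion import Accumulator, udaf

class Summarize(Accumulator):
    """Accumulates a running sum of float64 values across batches."""

    def __init__(self) -> None:
        self._sum = 0.0

    def state(self) -> list[pa.Scalar]:
        # Intermediate state shipped between partitions.
        return [pa.scalar(self._sum)]

    def update(self, values: pa.Array) -> None:
        # pc.sum of an all-null/empty array yields None; treat it as 0.
        self._sum += pc.sum(values).as_py() or 0.0

    def merge(self, states: list[pa.Array]) -> None:
        self._sum += pc.sum(states[0]).as_py() or 0.0

    def evaluate(self) -> pa.Scalar:
        return pa.scalar(self._sum)

# accum, input_types, return_type, state_type, volatility
my_sum = udaf(Summarize, pa.float64(), pa.float64(), [pa.float64()], "stable")
```
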
@@ -407,7 +551,7 @@ def _decorator(
             return_type: pa.DataType,
             state_type: list[pa.DataType],
             volatility: Volatility | str,
-            name: Optional[str] = None,
+            name: str | None = None,
         ) -> Callable[..., Callable[..., Expr]]:
             def decorator(accum: Callable[[], Accumulator]) -> Callable[..., Expr]:
                 udaf_caller = AggregateUDF.udaf(
@@ -422,12 +566,38 @@ def wrapper(*args: Any, **kwargs: Any) -> Expr:
 
             return decorator
 
+        if args and (
+            hasattr(args[0], "__datafusion_aggregate_udf__") or _is_pycapsule(args[0])
+        ):
+            return AggregateUDF.from_pycapsule(args[0])
+
         if args and callable(args[0]):
             # Case 1: Used as a function, require the first parameter to be callable
             return _function(*args, **kwargs)
 
         # Case 2: Used as a decorator with parameters
         return _decorator(*args, **kwargs)
 
+    @staticmethod
+    def from_pycapsule(func: AggregateUDFExportable | _PyCapsule) -> AggregateUDF:
+        """Create an Aggregate UDF from an AggregateUDF PyCapsule object.
+
+        This function will instantiate an Aggregate UDF backed by a DataFusion
+        AggregateUDF that is exported via the FFI bindings.
+        """
+        if _is_pycapsule(func):
+            aggregate = cast(AggregateUDF, object.__new__(AggregateUDF))
+            aggregate._udaf = df_internal.AggregateUDF.from_pycapsule(func)
+            return aggregate
+
+        capsule = cast(AggregateUDFExportable, func)
+        name = str(capsule.__class__)
+        return AggregateUDF(
+            name=name,
+            accumulator=capsule,
+            input_types=None,
+            return_type=None,
+            state_type=None,
+            volatility=None,
+        )
+
 
 class WindowEvaluator:
     """Evaluator class for user-defined window functions (UDWF).
@@ -460,7 +630,7 @@ def memoize(self) -> None:
         """
 
     def get_range(self, idx: int, num_rows: int) -> tuple[int, int]:  # noqa: ARG002
-        """Return the range for the window fuction.
+        """Return the range for the window function.
 
         If `uses_window_frame` flag is `false`. This method is used to
         calculate required range for the window function during
@@ -588,6 +758,12 @@ def include_rank(self) -> bool:
         return False
 
 
+class WindowUDFExportable(Protocol):
+    """Type hint for object that has __datafusion_window_udf__ PyCapsule."""
+
+    def __datafusion_window_udf__(self) -> object: ...  # noqa: D105
+
+
 class WindowUDF:
     """Class for performing window user-defined functions (UDF).
 
@@ -608,6 +784,9 @@ def __init__(
 
         See :py:func:`udwf` for a convenience function and argument descriptions.
         """
+        if hasattr(func, "__datafusion_window_udf__"):
+            self._udwf = df_internal.WindowUDF.from_pycapsule(func)
+            return
         self._udwf = df_internal.WindowUDF(
             name, func, input_types, return_type, str(volatility)
         )
@@ -631,7 +810,7 @@ def udwf(
         input_types: pa.DataType | list[pa.DataType],
         return_type: pa.DataType,
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> Callable[..., WindowUDF]: ...
 
     @overload
@@ -641,7 +820,7 @@ def udwf(
         input_types: pa.DataType | list[pa.DataType],
         return_type: pa.DataType,
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> WindowUDF: ...
 
     @staticmethod
@@ -683,7 +862,10 @@ def biased_numbers() -> BiasedNumbers:
 
         Args:
             func: Only needed when calling as a function. Skip this argument when
-                using ``udwf`` as a decorator.
+                using ``udwf`` as a decorator. If you have a Rust-backed WindowUDF
+                within a PyCapsule, you can pass this parameter and ignore the rest.
+                They will be determined directly from the underlying function. See
+                the online documentation for more information.
             input_types: The data types of the arguments.
             return_type: The data type of the return value.
             volatility: See :py:class:`Volatility` for allowed values.
@@ -692,6 +874,9 @@ def biased_numbers() -> BiasedNumbers:
 
         Returns:
             A user-defined window function that can be used in window function calls.
         """
+        if args and hasattr(args[0], "__datafusion_window_udf__"):
+            return WindowUDF.from_pycapsule(args[0])
+
         if args and callable(args[0]):
             # Case 1: Used as a function, require the first parameter to be callable
             return WindowUDF._create_window_udf(*args, **kwargs)
@@ -704,7 +889,7 @@ def _create_window_udf(
         input_types: pa.DataType | list[pa.DataType],
         return_type: pa.DataType,
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> WindowUDF:
         """Create a WindowUDF instance from function arguments."""
         if not callable(func):
@@ -742,7 +927,7 @@ def _create_window_udf_decorator(
         input_types: pa.DataType | list[pa.DataType],
         return_type: pa.DataType,
         volatility: Volatility | str,
-        name: Optional[str] = None,
+        name: str | None = None,
     ) -> Callable[[Callable[[], WindowEvaluator]], Callable[..., Expr]]:
         """Create a decorator for a WindowUDF."""
 
@@ -759,6 +944,22 @@ def wrapper(*args: Any, **kwargs: Any) -> Expr:
 
         return decorator
 
+    @staticmethod
+    def from_pycapsule(func: WindowUDFExportable) -> WindowUDF:
+        """Create a Window UDF from a WindowUDF PyCapsule object.
+
+        This function will instantiate a Window UDF backed by a DataFusion
+        WindowUDF that is exported via the FFI bindings.
+        """
+        name = str(func.__class__)
+        return WindowUDF(
+            name=name,
+            func=func,
+            input_types=None,
+            return_type=None,
+            volatility=None,
+        )
+
 
 class TableFunction:
     """Class for performing user-defined table functions (UDTF).
@@ -768,16 +969,14 @@ class TableFunction:
     """
 
     def __init__(
-        self,
-        name: str,
-        func: Callable[[], any],
+        self, name: str, func: Callable[[], Any], ctx: SessionContext | None = None
     ) -> None:
         """Instantiate a user-defined table function (UDTF).
 
         See :py:func:`udtf` for a convenience function and argument descriptions.
""" - self._udtf = df_internal.TableFunction(name, func) + self._udtf = df_internal.TableFunction(name, func, ctx) def __call__(self, *args: Expr) -> Any: """Execute the UDTF and return a table provider.""" @@ -823,7 +1022,7 @@ def _create_table_udf( @staticmethod def _create_table_udf_decorator( - name: Optional[str] = None, + name: str | None = None, ) -> Callable[[Callable[[], WindowEvaluator]], Callable[..., Expr]]: """Create a decorator for a WindowUDF.""" diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 9548fbfe4..26ed7281d 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -17,7 +17,7 @@ import pyarrow as pa import pytest -from datafusion import SessionContext +from datafusion import DataFrame, SessionContext from pyarrow.csv import write_csv @@ -49,3 +49,12 @@ def database(ctx, tmp_path): delimiter=",", schema_infer_max_records=10, ) + + +@pytest.fixture +def fail_collect(monkeypatch): + def _fail_collect(self, *args, **kwargs): # pragma: no cover - failure path + msg = "collect should not be called" + raise AssertionError(msg) + + monkeypatch.setattr(DataFrame, "collect", _fail_collect) diff --git a/python/tests/test_aggregation.py b/python/tests/test_aggregation.py index 49dfb38cf..f595127fa 100644 --- a/python/tests/test_aggregation.py +++ b/python/tests/test_aggregation.py @@ -88,7 +88,7 @@ def df_aggregate_100(): f.covar_samp(column("b"), column("c")), lambda a, b, c, d: np.array(np.cov(b, c, ddof=1)[0][1]), ), - # f.grouping(col_a), # No physical plan implemented yet + # f.grouping(col_a), # noqa: ERA001 No physical plan implemented yet (f.max(column("a")), lambda a, b, c, d: np.array(np.max(a))), (f.mean(column("b")), lambda a, b, c, d: np.array(np.mean(b))), (f.median(column("b")), lambda a, b, c, d: np.array(np.median(b))), @@ -130,11 +130,27 @@ def test_aggregation_stats(df, agg_expr, calc_expected): (f.median(column("b"), filter=column("a") != 2), pa.array([5]), False), (f.approx_median(column("b"), filter=column("a") != 2), pa.array([5]), False), (f.approx_percentile_cont(column("b"), 0.5), pa.array([4]), False), + ( + f.approx_percentile_cont( + column("b").sort(ascending=True, nulls_first=False), + 0.5, + num_centroids=2, + ), + pa.array([4]), + False, + ), ( f.approx_percentile_cont_with_weight(column("b"), lit(0.6), 0.5), pa.array([6], type=pa.float64()), False, ), + ( + f.approx_percentile_cont_with_weight( + column("b").sort(ascending=False, nulls_first=False), lit(0.6), 0.5 + ), + pa.array([6], type=pa.float64()), + False, + ), ( f.approx_percentile_cont_with_weight( column("b"), lit(0.6), 0.5, filter=column("a") != lit(3) @@ -154,6 +170,11 @@ def test_aggregation_stats(df, agg_expr, calc_expected): pa.array([[6, 4, 4]]), False, ), + ( + f.array_agg(column("b"), order_by=column("c")), + pa.array([[6, 4, 4]]), + False, + ), (f.avg(column("b"), filter=column("a") != lit(1)), pa.array([5.0]), False), (f.sum(column("b"), filter=column("a") != lit(1)), pa.array([10]), False), (f.count(column("b"), distinct=True), pa.array([2]), False), @@ -329,6 +350,15 @@ def test_bit_and_bool_fns(df, name, expr, result): ), [None, None], ), + ( + "first_value_no_list_order_by", + f.first_value( + column("b"), + order_by=column("b"), + null_treatment=NullTreatment.RESPECT_NULLS, + ), + [None, None], + ), ( "first_value_ignore_null", f.first_value( @@ -343,6 +373,11 @@ def test_bit_and_bool_fns(df, name, expr, result): f.last_value(column("a"), order_by=[column("a").sort(ascending=False)]), [0, 4], ), + ( + "last_value_no_list_ordered", + 
f.last_value(column("a"), order_by=column("a")), + [3, 6], + ), ( "last_value_with_null", f.last_value( @@ -366,6 +401,11 @@ def test_bit_and_bool_fns(df, name, expr, result): f.nth_value(column("a"), 2, order_by=[column("a").sort(ascending=False)]), [2, 5], ), + ( + "nth_value_no_list_ordered", + f.nth_value(column("a"), 2, order_by=column("a").sort(ascending=False)), + [2, 5], + ), ( "nth_value_with_null", f.nth_value( @@ -414,6 +454,11 @@ def test_first_last_value(df_partitioned, name, expr, result) -> None: f.string_agg(column("a"), ",", order_by=[column("b")]), "one,three,two,two", ), + ( + "string_agg", + f.string_agg(column("a"), ",", order_by=column("b")), + "one,three,two,two", + ), ], ) def test_string_agg(name, expr, result) -> None: diff --git a/python/tests/test_catalog.py b/python/tests/test_catalog.py index 23b328458..71c08da26 100644 --- a/python/tests/test_catalog.py +++ b/python/tests/test_catalog.py @@ -14,9 +14,18 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +from __future__ import annotations +from typing import TYPE_CHECKING + +import datafusion as dfn import pyarrow as pa +import pyarrow.dataset as ds import pytest +from datafusion import Catalog, SessionContext, Table, udtf + +if TYPE_CHECKING: + from datafusion.catalog import CatalogProvider, CatalogProviderExportable # Note we take in `database` as a variable even though we don't use @@ -27,9 +36,9 @@ def test_basic(ctx, database): ctx.catalog("non-existent") default = ctx.catalog() - assert default.names() == ["public"] + assert default.names() == {"public"} - for db in [default.database("public"), default.database()]: + for db in [default.schema("public"), default.schema()]: assert db.names() == {"csv1", "csv", "csv2"} table = db.table("csv") @@ -41,3 +50,267 @@ def test_basic(ctx, database): pa.field("float", pa.float64(), nullable=True), ] ) + + +def create_dataset() -> Table: + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +class CustomSchemaProvider(dfn.catalog.SchemaProvider): + def __init__(self): + self.tables = {"table1": create_dataset()} + + def table_names(self) -> set[str]: + return set(self.tables.keys()) + + def register_table(self, name: str, table: Table): + self.tables[name] = table + + def deregister_table(self, name, cascade: bool = True): + del self.tables[name] + + def table(self, name: str) -> Table | None: + return self.tables[name] + + def table_exist(self, name: str) -> bool: + return name in self.tables + + +class CustomErrorSchemaProvider(CustomSchemaProvider): + def table(self, name: str) -> Table | None: + message = f"{name} is not an acceptable name" + raise ValueError(message) + + +class CustomCatalogProvider(dfn.catalog.CatalogProvider): + def __init__(self): + self.schemas = {"my_schema": CustomSchemaProvider()} + + def schema_names(self) -> set[str]: + return set(self.schemas.keys()) + + def schema(self, name: str): + return self.schemas[name] + + def register_schema(self, name: str, schema: dfn.catalog.Schema): + self.schemas[name] = schema + + def deregister_schema(self, name, cascade: bool): + del self.schemas[name] + + +class CustomCatalogProviderList(dfn.catalog.CatalogProviderList): + def __init__(self): + self.catalogs = {"my_catalog": CustomCatalogProvider()} + + def catalog_names(self) -> set[str]: + return set(self.catalogs.keys()) + + def catalog(self, 
name: str) -> Catalog | None: + return self.catalogs[name] + + def register_catalog( + self, name: str, catalog: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + self.catalogs[name] = catalog + + +def test_python_catalog_provider_list(ctx: SessionContext): + ctx.register_catalog_provider_list(CustomCatalogProviderList()) + + # Ensure `datafusion` catalog does not exist since + # we replaced the catalog list + assert ctx.catalog_names() == {"my_catalog"} + + # Ensure registering works + ctx.register_catalog_provider("second_catalog", Catalog.memory_catalog()) + assert ctx.catalog_names() == {"my_catalog", "second_catalog"} + + +def test_python_catalog_provider(ctx: SessionContext): + ctx.register_catalog_provider("my_catalog", CustomCatalogProvider()) + + # Check the default catalog provider + assert ctx.catalog("datafusion").names() == {"public"} + + my_catalog = ctx.catalog("my_catalog") + assert my_catalog.names() == {"my_schema"} + + my_catalog.register_schema("second_schema", CustomSchemaProvider()) + assert my_catalog.schema_names() == {"my_schema", "second_schema"} + + my_catalog.deregister_schema("my_schema") + assert my_catalog.schema_names() == {"second_schema"} + + +def test_in_memory_providers(ctx: SessionContext): + catalog = dfn.catalog.Catalog.memory_catalog() + ctx.register_catalog_provider("in_mem_catalog", catalog) + + assert ctx.catalog_names() == {"datafusion", "in_mem_catalog"} + + schema = dfn.catalog.Schema.memory_schema() + catalog.register_schema("in_mem_schema", schema) + + schema.register_table("my_table", create_dataset()) + + batches = ctx.sql("select * from in_mem_catalog.in_mem_schema.my_table").collect() + + assert len(batches) == 1 + assert batches[0].column(0) == pa.array([1, 2, 3]) + assert batches[0].column(1) == pa.array([4, 5, 6]) + + +def test_python_schema_provider(ctx: SessionContext): + catalog = ctx.catalog() + + catalog.deregister_schema("public") + + catalog.register_schema("test_schema1", CustomSchemaProvider()) + assert catalog.names() == {"test_schema1"} + + catalog.register_schema("test_schema2", CustomSchemaProvider()) + catalog.deregister_schema("test_schema1") + assert catalog.names() == {"test_schema2"} + + +def test_python_table_provider(ctx: SessionContext): + catalog = ctx.catalog() + + catalog.register_schema("custom_schema", CustomSchemaProvider()) + schema = catalog.schema("custom_schema") + + assert schema.table_names() == {"table1"} + + schema.deregister_table("table1") + schema.register_table("table2", create_dataset()) + assert schema.table_names() == {"table2"} + + # Use the default schema instead of our custom schema + + schema = catalog.schema() + + schema.register_table("table3", create_dataset()) + assert schema.table_names() == {"table3"} + + schema.deregister_table("table3") + schema.register_table("table4", create_dataset()) + assert schema.table_names() == {"table4"} + + +def test_schema_register_table_with_pyarrow_dataset(ctx: SessionContext): + schema = ctx.catalog().schema() + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + table_name = "pa_dataset" + + try: + schema.register_table(table_name, dataset) + assert table_name in schema.table_names() + + result = ctx.sql(f"SELECT a, b FROM {table_name}").collect() + + assert len(result) == 1 + assert result[0].column(0) == pa.array([1, 2, 3]) + assert result[0].column(1) == pa.array([4, 5, 6]) + finally: + schema.deregister_table(table_name) + + +def 
test_exception_not_mangled(ctx: SessionContext):
+    """Ensure exceptions raised by a Python provider are surfaced unmangled."""
+
+    catalog_name = "custom_catalog"
+    schema_name = "custom_schema"
+
+    ctx.register_catalog_provider(catalog_name, CustomCatalogProvider())
+
+    catalog = ctx.catalog(catalog_name)
+
+    # Clean out previous schemas if they exist so we can start clean
+    for schema_name in catalog.schema_names():
+        catalog.deregister_schema(schema_name, cascade=False)
+
+    catalog.register_schema(schema_name, CustomErrorSchemaProvider())
+
+    schema = catalog.schema(schema_name)
+
+    for table_name in schema.table_names():
+        schema.deregister_table(table_name)
+
+    schema.register_table("test_table", create_dataset())
+
+    with pytest.raises(ValueError, match="^test_table is not an acceptable name$"):
+        ctx.sql(f"select * from {catalog_name}.{schema_name}.test_table")
+
+
+def test_in_end_to_end_python_providers(ctx: SessionContext):
+    """Test registering all python providers and running a query against them."""
+
+    all_catalog_names = [
+        "datafusion",
+        "custom_catalog",
+        "in_mem_catalog",
+    ]
+
+    all_schema_names = [
+        "custom_schema",
+        "in_mem_schema",
+    ]
+
+    ctx.register_catalog_provider(all_catalog_names[1], CustomCatalogProvider())
+    ctx.register_catalog_provider(
+        all_catalog_names[2], dfn.catalog.Catalog.memory_catalog()
+    )
+
+    for catalog_name in all_catalog_names:
+        catalog = ctx.catalog(catalog_name)
+
+        # Clean out previous schemas if they exist so we can start clean
+        for schema_name in catalog.schema_names():
+            catalog.deregister_schema(schema_name, cascade=False)
+
+        catalog.register_schema(all_schema_names[0], CustomSchemaProvider())
+        catalog.register_schema(all_schema_names[1], dfn.catalog.Schema.memory_schema())
+
+        for schema_name in all_schema_names:
+            schema = catalog.schema(schema_name)
+
+            for table_name in schema.table_names():
+                schema.deregister_table(table_name)
+
+            schema.register_table("test_table", create_dataset())
+
+    for catalog_name in all_catalog_names:
+        for schema_name in all_schema_names:
+            table_full_name = f"{catalog_name}.{schema_name}.test_table"
+
+            batches = ctx.sql(f"select * from {table_full_name}").collect()
+
+            assert len(batches) == 1
+            assert batches[0].column(0) == pa.array([1, 2, 3])
+            assert batches[0].column(1) == pa.array([4, 5, 6])
+
+
+def test_register_python_function_as_udtf(ctx: SessionContext):
+    basic_table = Table(ctx.sql("SELECT 3 AS value"))
+
+    @udtf("my_table_function")
+    def my_table_function_udtf() -> Table:
+        return basic_table
+
+    ctx.register_udtf(my_table_function_udtf)
+
+    result = ctx.sql("SELECT * FROM my_table_function()").collect()
+    assert len(result) == 1
+    assert len(result[0]) == 1
+    assert len(result[0][0]) == 1
+    assert result[0][0][0].as_py() == 3
diff --git a/python/tests/test_concurrency.py b/python/tests/test_concurrency.py
new file mode 100644
index 000000000..f790f9473
--- /dev/null
+++ b/python/tests/test_concurrency.py
@@ -0,0 +1,126 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from concurrent.futures import ThreadPoolExecutor + +import pyarrow as pa +from datafusion import Config, SessionContext, col, lit +from datafusion import functions as f +from datafusion.common import SqlSchema + + +def _run_in_threads(fn, count: int = 8) -> None: + with ThreadPoolExecutor(max_workers=count) as executor: + futures = [executor.submit(fn, i) for i in range(count)] + for future in futures: + # Propagate any exception raised in the worker thread. + future.result() + + +def test_concurrent_access_to_shared_structures() -> None: + """Exercise SqlSchema, Config, and DataFrame concurrently.""" + + schema = SqlSchema("concurrency") + config = Config() + ctx = SessionContext() + + batch = pa.record_batch([pa.array([1, 2, 3], type=pa.int32())], names=["value"]) + df = ctx.create_dataframe([[batch]]) + + config_key = "datafusion.execution.batch_size" + expected_rows = batch.num_rows + + def worker(index: int) -> None: + schema.name = f"concurrency-{index}" + assert schema.name.startswith("concurrency-") + # Exercise getters that use internal locks. + assert isinstance(schema.tables, list) + assert isinstance(schema.views, list) + assert isinstance(schema.functions, list) + + config.set(config_key, str(1024 + index)) + assert config.get(config_key) is not None + # Access the full config map to stress lock usage. 
+ assert config_key in config.get_all() + + batches = df.collect() + assert sum(batch.num_rows for batch in batches) == expected_rows + + _run_in_threads(worker, count=12) + + +def test_config_set_during_get_all() -> None: + """Ensure config writes proceed while another thread reads all entries.""" + + config = Config() + key = "datafusion.execution.batch_size" + + def reader() -> None: + for _ in range(200): + # get_all should not hold the lock while converting to Python objects + config.get_all() + + def writer() -> None: + for index in range(200): + config.set(key, str(1024 + index)) + + with ThreadPoolExecutor(max_workers=2) as executor: + reader_future = executor.submit(reader) + writer_future = executor.submit(writer) + reader_future.result(timeout=10) + writer_future.result(timeout=10) + + assert config.get(key) is not None + + +def test_case_builder_reuse_from_multiple_threads() -> None: + """Ensure the case builder can be safely reused across threads.""" + + ctx = SessionContext() + values = pa.array([0, 1, 2, 3, 4], type=pa.int32()) + df = ctx.create_dataframe([[pa.record_batch([values], names=["value"])]]) + + base_builder = f.case(col("value")) + + def add_case(i: int) -> None: + nonlocal base_builder + base_builder = base_builder.when(lit(i), lit(f"value-{i}")) + + _run_in_threads(add_case, count=8) + + with ThreadPoolExecutor(max_workers=2) as executor: + otherwise_future = executor.submit(base_builder.otherwise, lit("default")) + case_expr = otherwise_future.result() + + result = df.select(case_expr.alias("label")).collect() + assert sum(batch.num_rows for batch in result) == len(values) + + predicate_builder = f.when(col("value") == lit(0), lit("zero")) + + def add_predicate(i: int) -> None: + predicate_builder.when(col("value") == lit(i + 1), lit(f"value-{i + 1}")) + + _run_in_threads(add_predicate, count=4) + + with ThreadPoolExecutor(max_workers=2) as executor: + end_future = executor.submit(predicate_builder.end) + predicate_expr = end_future.result() + + result = df.select(predicate_expr.alias("label")).collect() + assert sum(batch.num_rows for batch in result) == len(values) diff --git a/python/tests/test_context.py b/python/tests/test_context.py index 4a15ac9cf..5853f9feb 100644 --- a/python/tests/test_context.py +++ b/python/tests/test_context.py @@ -22,11 +22,13 @@ import pyarrow.dataset as ds import pytest from datafusion import ( + CsvReadOptions, DataFrame, RuntimeEnvBuilder, SessionConfig, SessionContext, SQLOptions, + Table, column, literal, ) @@ -57,7 +59,7 @@ def test_runtime_configs(tmp_path, path_to_str): ctx = SessionContext(config, runtime) assert ctx is not None - db = ctx.catalog("foo").database("bar") + db = ctx.catalog("foo").schema("bar") assert db is not None @@ -70,7 +72,7 @@ def test_temporary_files(tmp_path, path_to_str): ctx = SessionContext(config, runtime) assert ctx is not None - db = ctx.catalog("foo").database("bar") + db = ctx.catalog("foo").schema("bar") assert db is not None @@ -91,7 +93,7 @@ def test_create_context_with_all_valid_args(): ctx = SessionContext(config, runtime) # verify that at least some of the arguments worked - ctx.catalog("foo").database("bar") + ctx.catalog("foo").schema("bar") with pytest.raises(KeyError): ctx.catalog("datafusion") @@ -105,7 +107,7 @@ def test_register_record_batches(ctx): ctx.register_record_batches("t", [[batch]]) - assert ctx.catalog().database().names() == {"t"} + assert ctx.catalog().schema().names() == {"t"} result = ctx.sql("SELECT a+b, a-b FROM t").collect() @@ -121,7 +123,7 @@ def 
test_create_dataframe_registers_unique_table_name(ctx): ) df = ctx.create_dataframe([[batch]]) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -141,7 +143,7 @@ def test_create_dataframe_registers_with_defined_table_name(ctx): ) df = ctx.create_dataframe([[batch]], name="tbl") - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -155,7 +157,7 @@ def test_from_arrow_table(ctx): # convert to DataFrame df = ctx.from_arrow(table) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -200,7 +202,7 @@ def test_from_arrow_table_with_name(ctx): # convert to DataFrame with optional name df = ctx.from_arrow(table, name="tbl") - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert tables[0] == "tbl" @@ -213,7 +215,7 @@ def test_from_arrow_table_empty(ctx): # convert to DataFrame df = ctx.from_arrow(table) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -228,7 +230,7 @@ def test_from_arrow_table_empty_no_schema(ctx): # convert to DataFrame df = ctx.from_arrow(table) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -246,7 +248,7 @@ def test_from_pylist(ctx): ] df = ctx.from_pylist(data) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -260,7 +262,7 @@ def test_from_pydict(ctx): data = {"a": [1, 2, 3], "b": [4, 5, 6]} df = ctx.from_pydict(data) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -276,7 +278,7 @@ def test_from_pandas(ctx): pandas_df = pd.DataFrame(data) df = ctx.from_pandas(pandas_df) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -292,7 +294,7 @@ def test_from_polars(ctx): polars_df = pd.DataFrame(data) df = ctx.from_polars(polars_df) - tables = list(ctx.catalog().database().names()) + tables = list(ctx.catalog().schema().names()) assert df assert len(tables) == 1 @@ -303,7 +305,7 @@ def test_from_polars(ctx): def test_register_table(ctx, database): default = ctx.catalog() - public = default.database("public") + public = default.schema("public") assert public.names() == {"csv", "csv1", "csv2"} table = public.table("csv") @@ -311,9 +313,9 @@ def test_register_table(ctx, database): assert public.names() == {"csv", "csv1", "csv2", "csv3"} -def test_read_table(ctx, database): +def test_read_table_from_catalog(ctx, database): default = ctx.catalog() - public = default.database("public") + public = default.schema("public") assert public.names() == {"csv", "csv1", "csv2"} table = public.table("csv") @@ -321,15 +323,74 @@ def test_read_table(ctx, database): table_df.show() +def test_read_table_from_df(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + result = ctx.read_table(df).collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_read_table_from_dataset(ctx): + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + + result = ctx.read_table(dataset).collect() 
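+    # collect() materializes the scan into a list of pyarrow RecordBatches.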
+ + assert result[0].column(0) == pa.array([1, 2, 3]) + assert result[0].column(1) == pa.array([4, 5, 6]) + + def test_deregister_table(ctx, database): default = ctx.catalog() - public = default.database("public") + public = default.schema("public") assert public.names() == {"csv", "csv1", "csv2"} ctx.deregister_table("csv") assert public.names() == {"csv1", "csv2"} +def test_register_table_from_dataframe(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + ctx.register_table("df_tbl", df) + result = ctx.sql("SELECT * FROM df_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +@pytest.mark.parametrize("temporary", [True, False]) +def test_register_table_from_dataframe_into_view(ctx, temporary): + df = ctx.from_pydict({"a": [1, 2]}) + table = df.into_view(temporary=temporary) + assert isinstance(table, Table) + if temporary: + assert table.kind == "temporary" + else: + assert table.kind == "view" + + ctx.register_table("view_tbl", table) + result = ctx.sql("SELECT * FROM view_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_table_from_dataframe(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + table = Table(df) + assert isinstance(table, Table) + ctx.register_table("from_dataframe_tbl", table) + result = ctx.sql("SELECT * FROM from_dataframe_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_table_from_dataframe_internal(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + table = Table(df.df) + assert isinstance(table, Table) + ctx.register_table("from_internal_dataframe_tbl", table) + result = ctx.sql("SELECT * FROM from_internal_dataframe_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + def test_register_dataset(ctx): # create a RecordBatch and register it as a pyarrow.dataset.Dataset batch = pa.RecordBatch.from_arrays( @@ -339,7 +400,7 @@ def test_register_dataset(ctx): dataset = ds.dataset([batch]) ctx.register_dataset("t", dataset) - assert ctx.catalog().database().names() == {"t"} + assert ctx.catalog().schema().names() == {"t"} result = ctx.sql("SELECT a+b, a-b FROM t").collect() @@ -356,7 +417,7 @@ def test_dataset_filter(ctx, capfd): dataset = ds.dataset([batch]) ctx.register_dataset("t", dataset) - assert ctx.catalog().database().names() == {"t"} + assert ctx.catalog().schema().names() == {"t"} df = ctx.sql("SELECT a+b, a-b FROM t WHERE a BETWEEN 2 and 3 AND b > 5") # Make sure the filter was pushed down in Physical Plan @@ -455,7 +516,7 @@ def test_dataset_filter_nested_data(ctx): dataset = ds.dataset([batch]) ctx.register_dataset("t", dataset) - assert ctx.catalog().database().names() == {"t"} + assert ctx.catalog().schema().names() == {"t"} df = ctx.table("t") @@ -566,6 +627,8 @@ def test_read_csv_list(ctx): def test_read_csv_compressed(ctx, tmp_path): test_data_path = pathlib.Path("testing/data/csv/aggregate_test_100.csv") + expected = ctx.read_csv(test_data_path).collect() + # File compression type gzip_path = tmp_path / "aggregate_test_100.csv.gz" @@ -576,7 +639,13 @@ def test_read_csv_compressed(ctx, tmp_path): gzipped_file.writelines(csv_file) csv_df = ctx.read_csv(gzip_path, file_extension=".gz", file_compression_type="gz") - csv_df.select(column("c1")).show() + assert csv_df.collect() == expected + + csv_df = ctx.read_csv( + gzip_path, + options=CsvReadOptions(file_extension=".gz", file_compression_type="gz"), + ) + assert csv_df.collect() == expected def test_read_parquet(ctx): @@ -650,3 +719,154 @@ def test_create_dataframe_with_global_ctx(batch): 
result = df.collect()[0].column(0) assert result == pa.array([4, 5, 6]) + + +def test_csv_read_options_builder_pattern(): + """Test CsvReadOptions builder pattern.""" + from datafusion import CsvReadOptions + + options = ( + CsvReadOptions() + .with_has_header(False) # noqa: FBT003 + .with_delimiter("|") + .with_quote("'") + .with_schema_infer_max_records(2000) + .with_truncated_rows(True) # noqa: FBT003 + .with_newlines_in_values(True) # noqa: FBT003 + .with_file_extension(".tsv") + ) + assert options.has_header is False + assert options.delimiter == "|" + assert options.quote == "'" + assert options.schema_infer_max_records == 2000 + assert options.truncated_rows is True + assert options.newlines_in_values is True + assert options.file_extension == ".tsv" + + +def read_csv_with_options_inner( + tmp_path: pathlib.Path, + csv_content: str, + options: CsvReadOptions, + expected: pa.RecordBatch, + as_read: bool, + global_ctx: bool, +) -> None: + from datafusion import SessionContext + + # Create a test CSV file + group_dir = tmp_path / "group=a" + group_dir.mkdir(exist_ok=True) + + csv_path = group_dir / "test.csv" + csv_path.write_text(csv_content) + + ctx = SessionContext() + + if as_read: + if global_ctx: + from datafusion.io import read_csv + + df = read_csv(str(tmp_path), options=options) + else: + df = ctx.read_csv(str(tmp_path), options=options) + else: + ctx.register_csv("test_table", str(tmp_path), options=options) + df = ctx.sql("SELECT * FROM test_table") + df.show() + + # Verify the data + result = df.collect() + assert len(result) == 1 + assert result[0] == expected + + +@pytest.mark.parametrize( + ("as_read", "global_ctx"), + [ + (True, True), + (True, False), + (False, False), + ], +) +def test_read_csv_with_options(tmp_path, as_read, global_ctx): + """Test reading CSV with CsvReadOptions.""" + + csv_content = "Alice;30;|New York; NY|\nBob;25\n#Charlie;35;Paris\nPhil;75;Detroit' MI\nKarin;50;|Stockholm\nSweden|" # noqa: E501 + + # Some of the read options are difficult to test in combination + # such as schema and schema_infer_max_records so run multiple tests + # file_sort_order doesn't impact reading, but included here to ensure + # all options parse correctly + options = CsvReadOptions( + has_header=False, + delimiter=";", + quote="|", + terminator="\n", + escape="\\", + comment="#", + newlines_in_values=True, + schema_infer_max_records=1, + null_regex="[pP]+aris", + truncated_rows=True, + file_sort_order=[[column("column_1").sort(), column("column_2")], ["column_3"]], + ) + + expected = pa.RecordBatch.from_arrays( + [ + pa.array(["Alice", "Bob", "Phil", "Karin"]), + pa.array([30, 25, 75, 50]), + pa.array(["New York; NY", None, "Detroit' MI", "Stockholm\nSweden"]), + ], + names=["column_1", "column_2", "column_3"], + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) + + schema = pa.schema( + [ + pa.field("name", pa.string(), nullable=False), + pa.field("age", pa.float32(), nullable=False), + pa.field("location", pa.string(), nullable=True), + ] + ) + options.with_schema(schema) + + expected = pa.RecordBatch.from_arrays( + [ + pa.array(["Alice", "Bob", "Phil", "Karin"]), + pa.array([30.0, 25.0, 75.0, 50.0]), + pa.array(["New York; NY", None, "Detroit' MI", "Stockholm\nSweden"]), + ], + schema=schema, + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) + + csv_content = "name,age\nAlice,30\nBob,25\nCharlie,35\nDiego,40\nEmily,15" + + expected = pa.RecordBatch.from_arrays( 
+ [ + pa.array(["Alice", "Bob", "Charlie", "Diego", "Emily"]), + pa.array([30, 25, 35, 40, 15]), + pa.array(["a", "a", "a", "a", "a"]), + ], + schema=pa.schema( + [ + pa.field("name", pa.string(), nullable=True), + pa.field("age", pa.int64(), nullable=True), + pa.field("group", pa.string(), nullable=False), + ] + ), + ) + options = CsvReadOptions( + table_partition_cols=[("group", pa.string())], + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) diff --git a/python/tests/test_dataframe.py b/python/tests/test_dataframe.py index 64220ce9c..71abe2925 100644 --- a/python/tests/test_dataframe.py +++ b/python/tests/test_dataframe.py @@ -16,10 +16,12 @@ # under the License. import ctypes import datetime +import itertools import os import re import threading import time +from pathlib import Path from typing import Any import pyarrow as pa @@ -27,24 +29,34 @@ import pytest from datafusion import ( DataFrame, + InsertOp, + ParquetColumnOptions, + ParquetWriterOptions, + RecordBatch, SessionContext, WindowFrame, column, literal, + udf, +) +from datafusion import ( + col as df_col, ) from datafusion import ( functions as f, ) -from datafusion.expr import Window -from datafusion.html_formatter import ( +from datafusion.dataframe import DataFrameWriteOptions +from datafusion.dataframe_formatter import ( DataFrameHtmlFormatter, configure_formatter, get_formatter, reset_formatter, - reset_styles_loaded_state, ) +from datafusion.expr import EXPR_TYPE_ERROR, Window from pyarrow.csv import write_csv +pa_cffi = pytest.importorskip("pyarrow.cffi") + MB = 1024 * 1024 @@ -54,9 +66,7 @@ def ctx(): @pytest.fixture -def df(): - ctx = SessionContext() - +def df(ctx): # create a RecordBatch and a new DataFrame from it batch = pa.RecordBatch.from_arrays( [pa.array([1, 2, 3]), pa.array([4, 5, 6]), pa.array([8, 5, 8])], @@ -66,6 +76,54 @@ def df(): return ctx.from_arrow(batch) +@pytest.fixture +def large_df(): + ctx = SessionContext() + + rows = 100000 + data = { + "a": list(range(rows)), + "b": [f"s-{i}" for i in range(rows)], + "c": [float(i + 0.1) for i in range(rows)], + } + batch = pa.record_batch(data) + + return ctx.from_arrow(batch) + + +@pytest.fixture +def large_multi_batch_df(): + """Create a DataFrame with multiple record batches for testing stream behavior. + + This fixture creates 10 batches of 10,000 rows each (100,000 rows total), + ensuring the DataFrame spans multiple batches. This is essential for testing + that memory limits actually cause early stream termination rather than + truncating all collected data. 
+ """ + ctx = SessionContext() + + # Create multiple batches, each with 10,000 rows + batches = [] + rows_per_batch = 10000 + num_batches = 10 + + for batch_idx in range(num_batches): + start_row = batch_idx * rows_per_batch + end_row = start_row + rows_per_batch + data = { + "a": list(range(start_row, end_row)), + "b": [f"s-{i}" for i in range(start_row, end_row)], + "c": [float(i + 0.1) for i in range(start_row, end_row)], + } + batch = pa.record_batch(data) + batches.append(batch) + + # Register as record batches to maintain multi-batch structure + # Using [batches] wraps list in another list as required by register_record_batches + ctx.register_record_batches("large_multi_batch_data", [batches]) + return ctx.table("large_multi_batch_data") + + @pytest.fixture def struct_df(): ctx = SessionContext() @@ -201,6 +259,48 @@ def test_select(df): assert result.column(1) == pa.array([1, 2, 3]) +def test_select_exprs(df): + df_1 = df.select_exprs( + "a + b", + "a - b", + ) + + # execute and collect the first (and only) batch + result = df_1.collect()[0] + + assert result.column(0) == pa.array([5, 7, 9]) + assert result.column(1) == pa.array([-3, -3, -3]) + + df_2 = df.select_exprs("b", "a") + + # execute and collect the first (and only) batch + result = df_2.collect()[0] + + assert result.column(0) == pa.array([4, 5, 6]) + assert result.column(1) == pa.array([1, 2, 3]) + + df_3 = df.select_exprs( + "abs(a + b)", + "abs(a - b)", + ) + + # execute and collect the first (and only) batch + result = df_3.collect()[0] + + assert result.column(0) == pa.array([5, 7, 9]) + assert result.column(1) == pa.array([3, 3, 3]) + + +def test_drop_quoted_columns(): + ctx = SessionContext() + batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], names=["ID_For_Students"]) + df = ctx.create_dataframe([[batch]]) + + # Both should work + assert df.drop('"ID_For_Students"').schema().names == [] + assert df.drop("ID_For_Students").schema().names == [] + + def test_select_mixed_expr_string(df): df = df.select(column("b"), "a") @@ -211,6 +311,14 @@ def test_select_mixed_expr_string(df): assert result.column(1) == pa.array([1, 2, 3]) +def test_select_unsupported(df): + with pytest.raises( + TypeError, + match=f"Expected Expr or column name.*{re.escape(EXPR_TYPE_ERROR)}", + ): + df.select(1) + + def test_filter(df): df1 = df.filter(column("a") > literal(2)).select( column("a") + column("b"), @@ -236,6 +344,66 @@ def test_filter(df): assert result.column(2) == pa.array([5]) +def test_filter_string_predicates(df): + df_str = df.filter("a > 2") + result = df_str.collect()[0] + + assert result.column(0) == pa.array([3]) + assert result.column(1) == pa.array([6]) + assert result.column(2) == pa.array([8]) + + df_mixed = df.filter("a > 1", column("b") != literal(6)) + result_mixed = df_mixed.collect()[0] + + assert result_mixed.column(0) == pa.array([2]) + assert result_mixed.column(1) == pa.array([5]) + assert result_mixed.column(2) == pa.array([5]) + + df_strings = df.filter("a > 1", "b < 6") + result_strings = df_strings.collect()[0] + + assert result_strings.column(0) == pa.array([2]) + assert result_strings.column(1) == pa.array([5]) + assert result_strings.column(2) == pa.array([5]) + + +def test_parse_sql_expr(df): + plan1 = df.filter(df.parse_sql_expr("a > 2")).logical_plan() + plan2 = df.filter(column("a") > literal(2)).logical_plan() + # object equality not implemented but string representation should match + assert str(plan1) == str(plan2) + + df1 = df.filter(df.parse_sql_expr("a > 2")).select( + column("a") + 
column("b"), + column("a") - column("b"), + ) + + # execute and collect the first (and only) batch + result = df1.collect()[0] + + assert result.column(0) == pa.array([9]) + assert result.column(1) == pa.array([-3]) + + df.show() + # verify that if there is no filter applied, internal dataframe is unchanged + df2 = df.filter() + assert df.df == df2.df + + df3 = df.filter(df.parse_sql_expr("a > 1"), df.parse_sql_expr("b != 6")) + result = df3.collect()[0] + + assert result.column(0) == pa.array([2]) + assert result.column(1) == pa.array([5]) + assert result.column(2) == pa.array([5]) + + +def test_show_empty(df, capsys): + df_empty = df.filter(column("a") > literal(3)) + df_empty.show() + captured = capsys.readouterr() + assert "DataFrame has no rows" in captured.out + + def test_sort(df): df = df.sort(column("b").sort(ascending=False)) @@ -245,6 +413,54 @@ def test_sort(df): assert table.to_pydict() == expected +def test_sort_string_and_expression_equivalent(df): + from datafusion import col + + result_str = df.sort("a").to_pydict() + result_expr = df.sort(col("a")).to_pydict() + assert result_str == result_expr + + +def test_sort_unsupported(df): + with pytest.raises( + TypeError, + match=f"Expected Expr or column name.*{re.escape(EXPR_TYPE_ERROR)}", + ): + df.sort(1) + + +def test_aggregate_string_and_expression_equivalent(df): + from datafusion import col + + result_str = df.aggregate("a", [f.count()]).sort("a").to_pydict() + result_expr = df.aggregate(col("a"), [f.count()]).sort("a").to_pydict() + assert result_str == result_expr + + +def test_aggregate_tuple_group_by(df): + result_list = df.aggregate(["a"], [f.count()]).sort("a").to_pydict() + result_tuple = df.aggregate(("a",), [f.count()]).sort("a").to_pydict() + assert result_tuple == result_list + + +def test_aggregate_tuple_aggs(df): + result_list = df.aggregate("a", [f.count()]).sort("a").to_pydict() + result_tuple = df.aggregate("a", (f.count(),)).sort("a").to_pydict() + assert result_tuple == result_list + + +def test_filter_string_equivalent(df): + df1 = df.filter("a > 1").to_pydict() + df2 = df.filter(column("a") > literal(1)).to_pydict() + assert df1 == df2 + + +def test_filter_string_invalid(df): + with pytest.raises(Exception) as excinfo: + df.filter("this is not valid sql").collect() + assert "Expected Expr" not in str(excinfo.value) + + def test_drop(df): df = df.drop("c") @@ -299,6 +515,21 @@ def test_tail(df): assert result.column(2) == pa.array([8]) +def test_with_column_sql_expression(df): + df = df.with_column("c", "a + b") + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + + def test_with_column(df): df = df.with_column("c", column("a") + column("b")) @@ -345,6 +576,37 @@ def test_with_columns(df): assert result.column(6) == pa.array([5, 7, 9]) +def test_with_columns_str(df): + df = df.with_columns( + "a + b as c", + "a + b as d", + [ + "a + b as e", + "a + b as f", + ], + g="a + b", + ) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + assert result.schema.field(3).name == "d" + assert result.schema.field(4).name == "e" + assert 
result.schema.field(5).name == "f" + assert result.schema.field(6).name == "g" + + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + assert result.column(3) == pa.array([5, 7, 9]) + assert result.column(4) == pa.array([5, 7, 9]) + assert result.column(5) == pa.array([5, 7, 9]) + assert result.column(6) == pa.array([5, 7, 9]) + + def test_cast(df): df = df.cast({"a": pa.float16(), "b": pa.list_(pa.uint32())}) expected = pa.schema( @@ -354,6 +616,41 @@ assert df.schema() == expected +def test_iter_batches(df): + batches = [] + for batch in df: + batches.append(batch) # noqa: PERF402 + + # Delete DataFrame to ensure RecordBatches remain valid + del df + + assert len(batches) == 1 + + batch = batches[0] + assert isinstance(batch, RecordBatch) + pa_batch = batch.to_pyarrow() + assert pa_batch.column(0).to_pylist() == [1, 2, 3] + assert pa_batch.column(1).to_pylist() == [4, 5, 6] + assert pa_batch.column(2).to_pylist() == [8, 5, 8] + + +def test_iter_returns_datafusion_recordbatch(df): + for batch in df: + assert isinstance(batch, RecordBatch) + + +def test_execute_stream_basic(df): + stream = df.execute_stream() + batches = list(stream) + + assert len(batches) == 1 + assert isinstance(batches[0], RecordBatch) + pa_batch = batches[0].to_pyarrow() + assert pa_batch.column(0).to_pylist() == [1, 2, 3] + assert pa_batch.column(1).to_pylist() == [4, 5, 6] + assert pa_batch.column(2).to_pylist() == [8, 5, 8] + + def test_with_column_renamed(df): df = df.with_column("c", column("a") + column("b")).with_column_renamed("c", "sum") @@ -384,7 +681,6 @@ def test_unnest_without_nulls(nested_df): assert result.column(1) == pa.array([7, 8, 8, 9, 9, 9]) -@pytest.mark.filterwarnings("ignore:`join_keys`:DeprecationWarning") def test_join(): ctx = SessionContext() @@ -401,26 +697,41 @@ df1 = ctx.create_dataframe([[batch]], "r") df2 = df.join(df1, on="a", how="inner") - df2.show() - df2 = df2.sort(column("l.a")) + df2 = df2.sort(column("a")) table = pa.Table.from_batches(df2.collect()) expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} assert table.to_pydict() == expected - df2 = df.join(df1, left_on="a", right_on="a", how="inner") - df2.show() + # Test the default behavior for dropping duplicate keys. + # Since we may have duplicate column names, and pa.Table() + # would hide that fact, we explicitly check the + # resultant arrays instead.
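+    # With coalesce_duplicate_keys=True the shared join key appears once in
+    # the output (three columns below); with False both "a" columns are kept.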
+ df2 = df.join( + df1, left_on="a", right_on="a", how="inner", coalesce_duplicate_keys=True + ) + df2 = df2.sort(column("a")) + result = df2.collect()[0] + assert result.num_columns == 3 + assert result.column(0) == pa.array([1, 2], pa.int64()) + assert result.column(1) == pa.array([4, 5], pa.int64()) + assert result.column(2) == pa.array([8, 10], pa.int64()) + + df2 = df.join( + df1, left_on="a", right_on="a", how="inner", coalesce_duplicate_keys=False + ) df2 = df2.sort(column("l.a")) - table = pa.Table.from_batches(df2.collect()) - - expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} - assert table.to_pydict() == expected + result = df2.collect()[0] + assert result.num_columns == 4 + assert result.column(0) == pa.array([1, 2], pa.int64()) + assert result.column(1) == pa.array([4, 5], pa.int64()) + assert result.column(2) == pa.array([1, 2], pa.int64()) + assert result.column(3) == pa.array([8, 10], pa.int64()) # Verify we don't make a breaking change to pre-43.0.0 # where users would pass join_keys as a positional argument df2 = df.join(df1, (["a"], ["a"]), how="inner") - df2.show() - df2 = df2.sort(column("l.a")) + df2 = df2.sort(column("a")) table = pa.Table.from_batches(df2.collect()) expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} @@ -445,7 +756,7 @@ def test_join_invalid_params(): with pytest.deprecated_call(): df2 = df.join(df1, join_keys=(["a"], ["a"]), how="inner") df2.show() - df2 = df2.sort(column("l.a")) + df2 = df2.sort(column("a")) table = pa.Table.from_batches(df2.collect()) expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} @@ -503,6 +814,58 @@ def test_join_on(): assert table.to_pydict() == expected +def test_join_full_with_drop_duplicate_keys(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 3, 5, 7, 9]), pa.array([True, True, True, True, True])], + names=["log_time", "key_frame"], + ) + key_frame = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([2, 4, 6, 8, 10])], + names=["log_time"], + ) + query_times = ctx.create_dataframe([[batch]]) + + merged = query_times.join( + key_frame, + left_on="log_time", + right_on="log_time", + how="full", + coalesce_duplicate_keys=True, + ) + merged = merged.sort(column("log_time")) + result = merged.collect()[0] + + assert result.num_columns == 2 + assert result.column(0).to_pylist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + + +def test_join_on_invalid_expr(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([4, 5])], + names=["a", "b"], + ) + df = ctx.create_dataframe([[batch]], "l") + df1 = ctx.create_dataframe([[batch]], "r") + + with pytest.raises( + TypeError, match=r"Use col\(\)/column\(\) or lit\(\)/literal\(\)" + ): + df.join_on(df1, "a") + + +def test_aggregate_invalid_aggs(df): + with pytest.raises( + TypeError, match=r"Use col\(\)/column\(\) or lit\(\)/literal\(\)" + ): + df.aggregate([], "a") + + def test_distinct(): ctx = SessionContext() @@ -535,12 +898,25 @@ def test_distinct(): ), [2, 1, 3, 4, 2, 1, 3], ), + ( + "row_w_params_no_lists", + f.row_number( + order_by=column("b"), + partition_by=column("c"), + ), + [2, 1, 3, 4, 2, 1, 3], + ), ("rank", f.rank(order_by=[column("b")]), [3, 1, 3, 5, 6, 1, 6]), ( "rank_w_params", f.rank(order_by=[column("b"), column("a")], partition_by=[column("c")]), [2, 1, 3, 4, 2, 1, 3], ), + ( + "rank_w_params_no_lists", + f.rank(order_by=column("a"), partition_by=column("c")), + [1, 2, 3, 4, 1, 2, 3], + ), ( "dense_rank", f.dense_rank(order_by=[column("b")]), @@ -551,6 
+927,11 @@ def test_distinct(): f.dense_rank(order_by=[column("b"), column("a")], partition_by=[column("c")]), [2, 1, 3, 4, 2, 1, 3], ), + ( + "dense_rank_w_params_no_lists", + f.dense_rank(order_by=column("a"), partition_by=column("c")), + [1, 2, 3, 4, 1, 2, 3], + ), ( "percent_rank", f.round(f.percent_rank(order_by=[column("b")]), literal(3)), @@ -566,6 +947,14 @@ def test_distinct(): ), [0.333, 0.0, 0.667, 1.0, 0.5, 0.0, 1.0], ), + ( + "percent_rank_w_params_no_lists", + f.round( + f.percent_rank(order_by=column("a"), partition_by=column("c")), + literal(3), + ), + [0.0, 0.333, 0.667, 1.0, 0.0, 0.5, 1.0], + ), ( "cume_dist", f.round(f.cume_dist(order_by=[column("b")]), literal(3)), @@ -581,6 +970,14 @@ def test_distinct(): ), [0.5, 0.25, 0.75, 1.0, 0.667, 0.333, 1.0], ), + ( + "cume_dist_w_params_no_lists", + f.round( + f.cume_dist(order_by=column("a"), partition_by=column("c")), + literal(3), + ), + [0.25, 0.5, 0.75, 1.0, 0.333, 0.667, 1.0], + ), ( "ntile", f.ntile(2, order_by=[column("b")]), @@ -591,6 +988,11 @@ def test_distinct(): f.ntile(2, order_by=[column("b"), column("a")], partition_by=[column("c")]), [1, 1, 2, 2, 1, 1, 2], ), + ( + "ntile_w_params_no_lists", + f.ntile(2, order_by=column("b"), partition_by=column("c")), + [1, 1, 2, 2, 1, 1, 2], + ), ("lead", f.lead(column("b"), order_by=[column("b")]), [7, None, 8, 9, 9, 7, None]), ( "lead_w_params", @@ -603,6 +1005,17 @@ def test_distinct(): ), [8, 7, -1, -1, -1, 9, -1], ), + ( + "lead_w_params_no_lists", + f.lead( + column("b"), + shift_offset=2, + default_value=-1, + order_by=column("b"), + partition_by=column("c"), + ), + [8, 7, -1, -1, -1, 9, -1], + ), ("lag", f.lag(column("b"), order_by=[column("b")]), [None, None, 7, 7, 8, None, 9]), ( "lag_w_params", @@ -615,6 +1028,17 @@ def test_distinct(): ), [-1, -1, None, 7, -1, -1, None], ), + ( + "lag_w_params_no_lists", + f.lag( + column("b"), + shift_offset=2, + default_value=-1, + order_by=column("b"), + partition_by=column("c"), + ), + [-1, -1, None, 7, -1, -1, None], + ), ( "first_value", f.first_value(column("a")).over( @@ -622,6 +1046,20 @@ def test_distinct(): ), [1, 1, 1, 1, 5, 5, 5], ), + ( + "first_value_without_list_args", + f.first_value(column("a")).over( + Window(partition_by=column("c"), order_by=column("b")) + ), + [1, 1, 1, 1, 5, 5, 5], + ), + ( + "first_value_order_by_string", + f.first_value(column("a")).over( + Window(partition_by=[column("c")], order_by="b") + ), + [1, 1, 1, 1, 5, 5, 5], + ), ( "last_value", f.last_value(column("a")).over( @@ -664,6 +1102,27 @@ def test_window_functions(partitioned_df, name, expr, result): assert table.sort_by("a").to_pydict() == expected +@pytest.mark.parametrize("partition", ["c", df_col("c")]) +def test_rank_partition_by_accepts_string(partitioned_df, partition): + """Passing a string to partition_by should match using col().""" + df = partitioned_df.select( + f.rank(order_by=column("a"), partition_by=partition).alias("r") + ) + table = pa.Table.from_batches(df.sort(column("a")).collect()) + assert table.column("r").to_pylist() == [1, 2, 3, 4, 1, 2, 3] + + +@pytest.mark.parametrize("partition", ["c", df_col("c")]) +def test_window_partition_by_accepts_string(partitioned_df, partition): + """Window.partition_by accepts string identifiers.""" + expr = f.first_value(column("a")).over( + Window(partition_by=partition, order_by=column("b")) + ) + df = partitioned_df.select(expr.alias("fv")) + table = pa.Table.from_batches(df.sort(column("a")).collect()) + assert table.column("fv").to_pylist() == [1, 1, 1, 1, 5, 5, 5] + + 
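A minimal standalone sketch of the string/expression equivalence the two tests above rely on, assuming only the public datafusion API these tests already import (string arguments are promoted to column expressions):

    import pyarrow as pa
    from datafusion import SessionContext, col
    from datafusion import functions as f

    ctx = SessionContext()
    batch = pa.record_batch({"c": ["x", "x", "y"], "a": [1, 2, 3]})
    df = ctx.from_arrow(batch)

    # A bare column name and an explicit col() expression are expected to
    # produce identical window results.
    by_string = df.select(
        col("a"), f.rank(order_by="a", partition_by="c").alias("r")
    ).sort("a")
    by_expr = df.select(
        col("a"), f.rank(order_by=col("a"), partition_by=col("c")).alias("r")
    ).sort(col("a"))
    assert by_string.to_pydict() == by_expr.to_pydict()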
@pytest.mark.parametrize( ("units", "start_bound", "end_bound"), [ @@ -692,46 +1151,90 @@ def test_valid_window_frame(units, start_bound, end_bound): ], ) def test_invalid_window_frame(units, start_bound, end_bound): - with pytest.raises(RuntimeError): + with pytest.raises(NotImplementedError, match=f"(?i){units}"): WindowFrame(units, start_bound, end_bound) def test_window_frame_defaults_match_postgres(partitioned_df): - # ref: https://github.com/apache/datafusion-python/issues/688 - - window_frame = WindowFrame("rows", None, None) - col_a = column("a") - # Using `f.window` with or without an unbounded window_frame produces the same - # results. These tests are included as a regression check but can be removed when - # f.window() is deprecated in favor of using the .over() approach. - no_frame = f.window("avg", [col_a]).alias("no_frame") - with_frame = f.window("avg", [col_a], window_frame=window_frame).alias("with_frame") - df_1 = partitioned_df.select(col_a, no_frame, with_frame) + # When order is not set, the default frame should be unbounded preceding to + # unbounded following. When order is set, the default frame is unbounded preceding + # to current row. + no_order = f.avg(col_a).over(Window()).alias("over_no_order") + with_order = f.avg(col_a).over(Window(order_by=[col_a])).alias("over_with_order") + df = partitioned_df.select(col_a, no_order, with_order) expected = { "a": [0, 1, 2, 3, 4, 5, 6], - "no_frame": [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], - "with_frame": [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], + "over_no_order": [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], + "over_with_order": [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], } - assert df_1.sort(col_a).to_pydict() == expected + assert df.sort(col_a).to_pydict() == expected - # When order is not set, the default frame should be unounded preceeding to - # unbounded following. When order is set, the default frame is unbounded preceeding - # to current row. 
- no_order = f.avg(col_a).over(Window()).alias("over_no_order") - with_order = f.avg(col_a).over(Window(order_by=[col_a])).alias("over_with_order") - df_2 = partitioned_df.select(col_a, no_order, with_order) - expected = { - "a": [0, 1, 2, 3, 4, 5, 6], - "over_no_order": [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], - "over_with_order": [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], - } +def _build_last_value_df(df): + return df.select( + f.last_value(column("a")) + .over( + Window( + partition_by=[column("c")], + order_by=[column("b")], + window_frame=WindowFrame("rows", None, None), + ) + ) + .alias("expr"), + f.last_value(column("a")) + .over( + Window( + partition_by=[column("c")], + order_by="b", + window_frame=WindowFrame("rows", None, None), + ) + ) + .alias("str"), + ) + - assert df_2.sort(col_a).to_pydict() == expected +def _build_nth_value_df(df): + return df.select( + f.nth_value(column("b"), 3).over(Window(order_by=[column("a")])).alias("expr"), + f.nth_value(column("b"), 3).over(Window(order_by="a")).alias("str"), + ) + + +def _build_rank_df(df): + return df.select( + f.rank(order_by=[column("b")]).alias("expr"), + f.rank(order_by="b").alias("str"), + ) + + +def _build_array_agg_df(df): + return df.aggregate( + [column("c")], + [ + f.array_agg(column("a"), order_by=[column("a")]).alias("expr"), + f.array_agg(column("a"), order_by="a").alias("str"), + ], + ).sort(column("c")) + + +@pytest.mark.parametrize( + ("builder", "expected"), + [ + pytest.param(_build_last_value_df, [3, 3, 3, 3, 6, 6, 6], id="last_value"), + pytest.param(_build_nth_value_df, [None, None, 7, 7, 7, 7, 7], id="nth_value"), + pytest.param(_build_rank_df, [1, 1, 3, 3, 5, 6, 6], id="rank"), + pytest.param(_build_array_agg_df, [[0, 1, 2, 3], [4, 5, 6]], id="array_agg"), + ], +) +def test_order_by_string_equivalence(partitioned_df, builder, expected): + df = builder(partitioned_df) + table = pa.Table.from_batches(df.collect()) + assert table.column("expr").to_pylist() == expected + assert table.column("expr").to_pylist() == table.column("str").to_pylist() def test_html_formatter_cell_dimension(df, clean_formatter_state): @@ -968,7 +1471,7 @@ def get_header_style(self) -> str: def test_html_formatter_memory(df, clean_formatter_state): """Test the memory and row control parameters in DataFrameHtmlFormatter.""" - configure_formatter(max_memory_bytes=10, min_rows_display=1) + configure_formatter(max_memory_bytes=10, min_rows=1) html_output = df._repr_html_() # Count the number of table rows in the output @@ -978,7 +1481,7 @@ def test_html_formatter_memory(df, clean_formatter_state): assert tr_count == 2 # 1 for header row, 1 for data row assert "data truncated" in html_output.lower() - configure_formatter(max_memory_bytes=10 * MB, min_rows_display=1) + configure_formatter(max_memory_bytes=10 * MB, min_rows=1) html_output = df._repr_html_() # With larger memory limit and min_rows=2, should display all rows tr_count = count_table_rows(html_output) @@ -988,19 +1491,140 @@ def test_html_formatter_memory(df, clean_formatter_state): assert "data truncated" not in html_output.lower() -def test_html_formatter_repr_rows(df, clean_formatter_state): - configure_formatter(min_rows_display=2, repr_rows=2) +def test_html_formatter_memory_boundary_conditions(large_df, clean_formatter_state): + """Test memory limit behavior at boundary conditions with large dataset. 
+ + This test validates that the formatter correctly handles edge cases when + the memory limit is reached with a large dataset (100,000 rows), ensuring + that the min_rows constraint is still honored under aggressive memory limits. + Uses large_df so that memory limit behavior is tested with realistic data sizes. + """ + + # Get the raw size of the data to test boundary conditions + # First, capture output with no limits + # NOTE: max_rows=200000 is set well above the dataset size (100k rows) to ensure + # we're testing memory limits, not row limits. Default max_rows=10 would + # truncate before memory limit is reached. + configure_formatter(max_memory_bytes=10 * MB, min_rows=1, max_rows=200000) + unrestricted_output = large_df._repr_html_() + unrestricted_rows = count_table_rows(unrestricted_output) + + # Test 1: Very small memory limit should still respect min_rows + # With large dataset, this should definitely hit memory limit before min_rows + configure_formatter(max_memory_bytes=10, min_rows=1) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 2 # At least header + 1 data row (minimum) + # Should show truncation since we limited memory so aggressively + assert "data truncated" in html_output.lower() + + # Test 2: Memory limit at default size (2MB) should truncate the large dataset + # Default max_rows would truncate at 10 rows, so we don't set it here to test + # that memory limit is respected even with default row limit + configure_formatter(max_memory_bytes=2 * MB, min_rows=1) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 2 # At least header + min_rows + # Should be truncated since full dataset is much larger than 2MB + assert tr_count < unrestricted_rows + + # Test 3: Very large memory limit should show much more data + # NOTE: max_rows=200000 is critical here - without it, default max_rows=10 + # would limit output to 10 rows even though we have 100MB of memory available + configure_formatter(max_memory_bytes=100 * MB, min_rows=1, max_rows=200000) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + # Should show significantly more rows, possibly all + assert tr_count > 100 # Should show substantially more rows + + # Test 4: Min rows should override memory limit + # With tiny memory and larger min_rows, min_rows should win + configure_formatter(max_memory_bytes=10, min_rows=2) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 3 # At least header + 2 data rows (min_rows) + # Should show truncation message despite min_rows being satisfied + assert "data truncated" in html_output.lower() + + # Test 5: With reasonable memory and min_rows settings + # NOTE: max_rows=200000 ensures we test memory limit behavior, not row limit + configure_formatter(max_memory_bytes=2 * MB, min_rows=10, max_rows=200000) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 11 # header + at least 10 data rows (min_rows) + # Should be truncated due to memory limit + assert tr_count < unrestricted_rows + + +def test_html_formatter_stream_early_termination( + large_multi_batch_df, clean_formatter_state +): + """Test that memory limits cause early stream termination with multi-batch data. + + This test specifically validates that the formatter stops collecting data when + the memory limit is reached, rather than collecting all data and then truncating.
+ The large_multi_batch_df fixture creates 10 record batches, allowing us to verify + that not all batches are consumed when memory limit is hit. + + Key difference from test_html_formatter_memory_boundary_conditions: + - Uses multi-batch DataFrame to verify stream termination behavior + - Tests with memory limit exceeded by 2-3 batches but not 1 batch + - Verifies partial data + truncation message + respects min_rows + """ + + # Get baseline: how much data fits without memory limit + configure_formatter(max_memory_bytes=100 * MB, min_rows=1, max_rows=200000) + unrestricted_output = large_multi_batch_df._repr_html_() + unrestricted_rows = count_table_rows(unrestricted_output) + + # Test 1: Memory limit exceeded by ~2 batches (each batch ~10k rows) + # With 1 batch (~1-2MB), we should have space. With 2-3 batches, we exceed limit. + # Set limit to ~3MB to ensure we collect ~1 batch before hitting limit + configure_formatter(max_memory_bytes=3 * MB, min_rows=1, max_rows=200000) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + # Should show significant truncation (not all 100k rows) + assert tr_count < unrestricted_rows, "Should be truncated by memory limit" + assert tr_count >= 2, "Should respect min_rows" + assert "data truncated" in html_output.lower(), "Should indicate truncation" + + # Test 2: Very tight memory limit should still respect min_rows + # Even with tiny memory (10 bytes), should show at least min_rows + configure_formatter(max_memory_bytes=10, min_rows=5, max_rows=200000) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + assert tr_count >= 6, "Should show header + at least min_rows (5)" + assert "data truncated" in html_output.lower(), "Should indicate truncation" + + # Test 3: Memory limit should take precedence over max_rows in early termination + # With max_rows=100 but small memory limit, should terminate early due to memory + configure_formatter(max_memory_bytes=2 * MB, min_rows=1, max_rows=100) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + # Should be truncated by memory limit (showing more than max_rows would suggest + # but less than unrestricted) + assert tr_count >= 2, "Should respect min_rows" + assert tr_count < unrestricted_rows, "Should be truncated" + # Output should indicate why truncation occurred + assert "data truncated" in html_output.lower() + + +def test_html_formatter_max_rows(df, clean_formatter_state): + configure_formatter(min_rows=2, max_rows=2) html_output = df._repr_html_() tr_count = count_table_rows(html_output) - # Tabe should have header row (1) + 2 data rows = 3 rows + # Table should have header row (1) + 2 data rows = 3 rows assert tr_count == 3 - configure_formatter(min_rows_display=2, repr_rows=3) + configure_formatter(min_rows=2, max_rows=3) html_output = df._repr_html_() tr_count = count_table_rows(html_output) - # Tabe should have header row (1) + 3 data rows = 4 rows + # Table should have header row (1) + 3 data rows = 4 rows assert tr_count == 4 @@ -1022,17 +1646,42 @@ def test_html_formatter_validation(): with pytest.raises(ValueError, match="max_memory_bytes must be a positive integer"): DataFrameHtmlFormatter(max_memory_bytes=-100) - with pytest.raises(ValueError, match="min_rows_display must be a positive integer"): - DataFrameHtmlFormatter(min_rows_display=0) + with pytest.raises(ValueError, match="min_rows must be a positive integer"): + DataFrameHtmlFormatter(min_rows=0) + + with 
pytest.raises(ValueError, match="min_rows must be a positive integer"): + DataFrameHtmlFormatter(min_rows=-5) - with pytest.raises(ValueError, match="min_rows_display must be a positive integer"): - DataFrameHtmlFormatter(min_rows_display=-5) + with pytest.raises(ValueError, match="max_rows must be a positive integer"): + DataFrameHtmlFormatter(max_rows=0) - with pytest.raises(ValueError, match="repr_rows must be a positive integer"): - DataFrameHtmlFormatter(repr_rows=0) + with pytest.raises(ValueError, match="max_rows must be a positive integer"): + DataFrameHtmlFormatter(max_rows=-10) + + with pytest.raises( + ValueError, match="min_rows must be less than or equal to max_rows" + ): + DataFrameHtmlFormatter(min_rows=5, max_rows=4) - with pytest.raises(ValueError, match="repr_rows must be a positive integer"): - DataFrameHtmlFormatter(repr_rows=-10) + +def test_repr_rows_backward_compatibility(clean_formatter_state): + """Test that repr_rows parameter still works as deprecated alias.""" + # Should work when not conflicting with max_rows + with pytest.warns(DeprecationWarning, match="repr_rows parameter is deprecated"): + formatter = DataFrameHtmlFormatter(repr_rows=15, min_rows=10) + assert formatter.max_rows == 15 + assert formatter.repr_rows == 15 + + # Should fail when conflicting with max_rows + with pytest.raises(ValueError, match="Cannot specify both repr_rows and max_rows"): + DataFrameHtmlFormatter(repr_rows=5, max_rows=10) + + # Setting repr_rows via property should warn + formatter2 = DataFrameHtmlFormatter() + with pytest.warns(DeprecationWarning, match="repr_rows is deprecated"): + formatter2.repr_rows = 7 + assert formatter2.max_rows == 7 + assert formatter2.repr_rows == 7 def test_configure_formatter(df, clean_formatter_state): @@ -1044,8 +1693,8 @@ def test_configure_formatter(df, clean_formatter_state): max_width = 500 max_height = 30 max_memory_bytes = 3 * MB - min_rows_display = 2 - repr_rows = 2 + min_rows = 2 + max_rows = 2 enable_cell_expansion = False show_truncation_message = False use_shared_styles = False @@ -1057,8 +1706,8 @@ def test_configure_formatter(df, clean_formatter_state): assert formatter_default.max_width != max_width assert formatter_default.max_height != max_height assert formatter_default.max_memory_bytes != max_memory_bytes - assert formatter_default.min_rows_display != min_rows_display - assert formatter_default.repr_rows != repr_rows + assert formatter_default.min_rows != min_rows + assert formatter_default.max_rows != max_rows assert formatter_default.enable_cell_expansion != enable_cell_expansion assert formatter_default.show_truncation_message != show_truncation_message assert formatter_default.use_shared_styles != use_shared_styles @@ -1069,8 +1718,8 @@ def test_configure_formatter(df, clean_formatter_state): max_width=max_width, max_height=max_height, max_memory_bytes=max_memory_bytes, - min_rows_display=min_rows_display, - repr_rows=repr_rows, + min_rows=min_rows, + max_rows=max_rows, enable_cell_expansion=enable_cell_expansion, show_truncation_message=show_truncation_message, use_shared_styles=use_shared_styles, @@ -1080,8 +1729,8 @@ def test_configure_formatter(df, clean_formatter_state): assert formatter_custom.max_width == max_width assert formatter_custom.max_height == max_height assert formatter_custom.max_memory_bytes == max_memory_bytes - assert formatter_custom.min_rows_display == min_rows_display - assert formatter_custom.repr_rows == repr_rows + assert formatter_custom.min_rows == min_rows + assert formatter_custom.max_rows == 
max_rows assert formatter_custom.enable_cell_expansion == enable_cell_expansion assert formatter_custom.show_truncation_message == show_truncation_message assert formatter_custom.use_shared_styles == use_shared_styles @@ -1196,7 +1845,6 @@ def test_execution_plan(aggregate_df): # indent plan will be different for everyone due to absolute path # to filename, so we just check for some expected content assert "AggregateExec:" in indent - assert "CoalesceBatchesExec:" in indent assert "RepartitionExec:" in indent assert "DataSourceExec:" in indent assert "file_type=csv" in indent @@ -1221,7 +1869,7 @@ def test_execution_plan(aggregate_df): @pytest.mark.asyncio async def test_async_iteration_of_df(aggregate_df): rows_returned = 0 - async for batch in aggregate_df.execute_stream(): + async for batch in aggregate_df: assert batch is not None rows_returned += len(batch.to_pyarrow()[0]) @@ -1236,6 +1884,14 @@ def test_repartition_by_hash(df): df.repartition_by_hash(column("a"), num=2) +def test_repartition_by_hash_sql_expression(df): + df.repartition_by_hash("a", num=2) + + +def test_repartition_by_hash_mix(df): + df.repartition_by_hash(column("a"), "b", num=2) + + def test_intersect(): ctx = SessionContext() @@ -1299,6 +1955,18 @@ def test_collect_partitioned(): assert [[batch]] == ctx.create_dataframe([[batch]]).collect_partitioned() +def test_collect_column(ctx: SessionContext): + batch_1 = pa.RecordBatch.from_pydict({"a": [1, 2, 3]}) + batch_2 = pa.RecordBatch.from_pydict({"a": [4, 5, 6]}) + batch_3 = pa.RecordBatch.from_pydict({"a": [7, 8, 9]}) + + ctx.register_record_batches("t", [[batch_1, batch_2], [batch_3]]) + + result = ctx.table("t").sort(column("a")).collect_column("a") + expected = pa.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + assert result == expected + + def test_union(ctx): batch = pa.RecordBatch.from_arrays( [pa.array([1, 2, 3]), pa.array([4, 5, 6])], @@ -1409,6 +2077,53 @@ def test_to_arrow_table(df): assert set(pyarrow_table.column_names) == {"a", "b", "c"} +def test_parquet_non_null_column_to_pyarrow(ctx, tmp_path): + path = tmp_path.joinpath("t.parquet") + + ctx.sql("create table t_(a int not null)").collect() + ctx.sql("insert into t_ values (1), (2), (3)").collect() + ctx.sql(f"copy (select * from t_) to '{path}'").collect() + + ctx.register_parquet("t", path) + pyarrow_table = ctx.sql("select max(a) as m from t").to_arrow_table() + assert pyarrow_table.to_pydict() == {"m": [3]} + + +def test_parquet_empty_batch_to_pyarrow(ctx, tmp_path): + path = tmp_path.joinpath("t.parquet") + + ctx.sql("create table t_(a int not null)").collect() + ctx.sql("insert into t_ values (1), (2), (3)").collect() + ctx.sql(f"copy (select * from t_) to '{path}'").collect() + + ctx.register_parquet("t", path) + pyarrow_table = ctx.sql("select * from t limit 0").to_arrow_table() + assert pyarrow_table.schema == pa.schema( + [ + pa.field("a", pa.int32(), nullable=False), + ] + ) + + +def test_parquet_null_aggregation_to_pyarrow(ctx, tmp_path): + path = tmp_path.joinpath("t.parquet") + + ctx.sql("create table t_(a int not null)").collect() + ctx.sql("insert into t_ values (1), (2), (3)").collect() + ctx.sql(f"copy (select * from t_) to '{path}'").collect() + + ctx.register_parquet("t", path) + pyarrow_table = ctx.sql( + "select max(a) as m from (select * from t where a < 0)" + ).to_arrow_table() + assert pyarrow_table.to_pydict() == {"m": [None]} + assert pyarrow_table.schema == pa.schema( + [ + pa.field("m", pa.int32(), nullable=True), + ] + ) + + def test_execute_stream(df): stream = df.execute_stream() 
assert all(batch is not None for batch in stream) @@ -1491,6 +2206,121 @@ def test_empty_to_arrow_table(df): assert set(pyarrow_table.column_names) == {"a", "b", "c"} +def test_iter_batches_dataframe(fail_collect): + ctx = SessionContext() + + batch1 = pa.record_batch([pa.array([1])], names=["a"]) + batch2 = pa.record_batch([pa.array([2])], names=["a"]) + df = ctx.create_dataframe([[batch1], [batch2]]) + + expected = [batch1, batch2] + results = [b.to_pyarrow() for b in df] + + assert len(results) == len(expected) + for exp in expected: + assert any(got.equals(exp) for got in results) + + +def test_arrow_c_stream_to_table_and_reader(fail_collect): + ctx = SessionContext() + + # Create a DataFrame with two separate record batches + batch1 = pa.record_batch([pa.array([1])], names=["a"]) + batch2 = pa.record_batch([pa.array([2])], names=["a"]) + df = ctx.create_dataframe([[batch1], [batch2]]) + + table = pa.Table.from_batches(batch.to_pyarrow() for batch in df) + batches = table.to_batches() + + assert len(batches) == 2 + expected = [batch1, batch2] + for exp in expected: + assert any(got.equals(exp) for got in batches) + assert table.schema == df.schema() + assert table.column("a").num_chunks == 2 + + reader = pa.RecordBatchReader.from_stream(df) + assert isinstance(reader, pa.RecordBatchReader) + reader_table = pa.Table.from_batches(reader) + expected = pa.Table.from_batches([batch1, batch2]) + assert reader_table.equals(expected) + + +def test_arrow_c_stream_order(): + ctx = SessionContext() + + batch1 = pa.record_batch([pa.array([1])], names=["a"]) + batch2 = pa.record_batch([pa.array([2])], names=["a"]) + + df = ctx.create_dataframe([[batch1, batch2]]) + + table = pa.Table.from_batches(batch.to_pyarrow() for batch in df) + expected = pa.Table.from_batches([batch1, batch2]) + + assert table.equals(expected) + col = table.column("a") + assert col.chunk(0)[0].as_py() == 1 + assert col.chunk(1)[0].as_py() == 2 + + +def test_arrow_c_stream_schema_selection(fail_collect): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, 2]), + pa.array([3, 4]), + pa.array([5, 6]), + ], + names=["a", "b", "c"], + ) + df = ctx.create_dataframe([[batch]]) + + requested_schema = pa.schema([("c", pa.int64()), ("a", pa.int64())]) + + c_schema = pa_cffi.ffi.new("struct ArrowSchema*") + address = int(pa_cffi.ffi.cast("uintptr_t", c_schema)) + requested_schema._export_to_c(address) + capsule_new = ctypes.pythonapi.PyCapsule_New + capsule_new.restype = ctypes.py_object + capsule_new.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p] + + reader = pa.RecordBatchReader.from_stream(df, schema=requested_schema) + + assert reader.schema == requested_schema + + batches = list(reader) + + assert len(batches) == 1 + expected_batch = pa.record_batch( + [pa.array([5, 6]), pa.array([1, 2])], names=["c", "a"] + ) + assert batches[0].equals(expected_batch) + + +def test_arrow_c_stream_schema_mismatch(fail_collect): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([3, 4])], names=["a", "b"] + ) + df = ctx.create_dataframe([[batch]]) + + bad_schema = pa.schema([("a", pa.string())]) + + c_schema = pa_cffi.ffi.new("struct ArrowSchema*") + address = int(pa_cffi.ffi.cast("uintptr_t", c_schema)) + bad_schema._export_to_c(address) + + capsule_new = ctypes.pythonapi.PyCapsule_New + capsule_new.restype = ctypes.py_object + capsule_new.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p] + bad_capsule = capsule_new(ctypes.c_void_p(address), 
b"arrow_schema", None) + + with pytest.raises(Exception, match="Fail to merge schema"): + df.__arrow_c_stream__(bad_capsule) + + def test_to_pylist(df): # Convert datafusion dataframe to Python list pylist = df.to_pylist() @@ -1545,6 +2375,69 @@ def test_write_csv(ctx, df, tmp_path, path_to_str): assert result == expected +def generate_test_write_params() -> list[tuple]: + # Overwrite and Replace are not implemented for many table writers + insert_ops = [InsertOp.APPEND, None] + sort_by_cases = [ + (None, [1, 2, 3], "unsorted"), + (column("c"), [2, 1, 3], "single_column_expr"), + (column("a").sort(ascending=False), [3, 2, 1], "single_sort_expr"), + ([column("c"), column("b")], [2, 1, 3], "list_col_expr"), + ( + [column("c").sort(ascending=False), column("b").sort(ascending=False)], + [3, 1, 2], + "list_sort_expr", + ), + ] + + formats = ["csv", "json", "parquet", "table"] + + return [ + pytest.param( + output_format, + insert_op, + sort_by, + expected_a, + id=f"{output_format}_{test_id}", + ) + for output_format, insert_op, ( + sort_by, + expected_a, + test_id, + ) in itertools.product(formats, insert_ops, sort_by_cases) + ] + + +@pytest.mark.parametrize( + ("output_format", "insert_op", "sort_by", "expected_a"), + generate_test_write_params(), +) +def test_write_files_with_options( + ctx, df, tmp_path, output_format, insert_op, sort_by, expected_a +) -> None: + write_options = DataFrameWriteOptions(insert_operation=insert_op, sort_by=sort_by) + + if output_format == "csv": + df.write_csv(tmp_path, with_header=True, write_options=write_options) + ctx.register_csv("test_table", tmp_path) + elif output_format == "json": + df.write_json(tmp_path, write_options=write_options) + ctx.register_json("test_table", tmp_path) + elif output_format == "parquet": + df.write_parquet(tmp_path, write_options=write_options) + ctx.register_parquet("test_table", tmp_path) + elif output_format == "table": + batch = pa.RecordBatch.from_arrays([[], [], []], schema=df.schema()) + ctx.register_record_batches("test_table", [[batch]]) + ctx.table("test_table").show() + df.write_table("test_table", write_options=write_options) + + result = ctx.table("test_table").to_pydict()["a"] + ctx.table("test_table").show() + + assert result == expected_a + + @pytest.mark.parametrize("path_to_str", [True, False]) def test_write_json(ctx, df, tmp_path, path_to_str): path = str(tmp_path) if path_to_str else tmp_path @@ -1632,6 +2525,428 @@ def test_write_compressed_parquet_default_compression_level(df, tmp_path, compre df.write_parquet(str(path), compression=compression) +def test_write_parquet_with_options_default_compression(df, tmp_path): + """Test that the default compression is ZSTD.""" + df.write_parquet(tmp_path) + + for file in tmp_path.rglob("*.parquet"): + metadata = pq.ParquetFile(file).metadata.to_dict() + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + assert col["compression"].lower() == "zstd" + + +@pytest.mark.parametrize( + "compression", + ["gzip(6)", "brotli(7)", "zstd(15)", "snappy", "uncompressed"], +) +def test_write_parquet_with_options_compression(df, tmp_path, compression): + import re + + path = tmp_path + df.write_parquet_with_options( + str(path), ParquetWriterOptions(compression=compression) + ) + + # test that the actual compression scheme is the one written + for _root, _dirs, files in os.walk(path): + for file in files: + if file.endswith(".parquet"): + metadata = pq.ParquetFile(tmp_path / file).metadata.to_dict() + for row_group in metadata["row_groups"]: + for col in 
row_group["columns"]: + assert col["compression"].lower() == re.sub( + r"\(\d+\)", "", compression + ) + + result = pq.read_table(str(path)).to_pydict() + expected = df.to_pydict() + + assert result == expected + + +@pytest.mark.parametrize( + "compression", + ["gzip(12)", "brotli(15)", "zstd(23)"], +) +def test_write_parquet_with_options_wrong_compression_level(df, tmp_path, compression): + path = tmp_path + + with pytest.raises(Exception, match=r"valid compression range .*? exceeded."): + df.write_parquet_with_options( + str(path), ParquetWriterOptions(compression=compression) + ) + + +@pytest.mark.parametrize("compression", ["wrong", "wrong(12)"]) +def test_write_parquet_with_options_invalid_compression(df, tmp_path, compression): + path = tmp_path + + with pytest.raises(Exception, match="Unknown or unsupported parquet compression"): + df.write_parquet_with_options( + str(path), ParquetWriterOptions(compression=compression) + ) + + +@pytest.mark.parametrize( + ("writer_version", "format_version"), + [("1.0", "1.0"), ("2.0", "2.6"), (None, "1.0")], +) +def test_write_parquet_with_options_writer_version( + df, tmp_path, writer_version, format_version +): + """Test the Parquet writer version. Note that writer_version=2.0 results in + format_version=2.6""" + if writer_version is None: + df.write_parquet_with_options(tmp_path, ParquetWriterOptions()) + else: + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(writer_version=writer_version) + ) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + assert metadata["format_version"] == format_version + + +@pytest.mark.parametrize("writer_version", ["1.2.3", "custom-version", "0"]) +def test_write_parquet_with_options_wrong_writer_version(df, tmp_path, writer_version): + """Test that invalid writer versions in Parquet throw an exception.""" + with pytest.raises(Exception, match="Invalid parquet writer version"): + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(writer_version=writer_version) + ) + + +@pytest.mark.parametrize("dictionary_enabled", [True, False, None]) +def test_write_parquet_with_options_dictionary_enabled( + df, tmp_path, dictionary_enabled +): + """Test enabling/disabling the dictionaries in Parquet.""" + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(dictionary_enabled=dictionary_enabled) + ) + # by default, the dictionary is enabled, so None results in True + result = dictionary_enabled if dictionary_enabled is not None else True + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + assert col["has_dictionary_page"] == result + + +@pytest.mark.parametrize( + ("statistics_enabled", "has_statistics"), + [("page", True), ("chunk", True), ("none", False), (None, True)], +) +def test_write_parquet_with_options_statistics_enabled( + df, tmp_path, statistics_enabled, has_statistics +): + """Test configuring the statistics in Parquet. 
+
+
+@pytest.mark.parametrize(
+    ("statistics_enabled", "has_statistics"),
+    [("page", True), ("chunk", True), ("none", False), (None, True)],
+)
+def test_write_parquet_with_options_statistics_enabled(
+    df, tmp_path, statistics_enabled, has_statistics
+):
+    """Test configuring the statistics in Parquet.
+
+    pyarrow only exposes column-level statistics, so "page" and "chunk" are
+    tested the same way.
+    """
+    df.write_parquet_with_options(
+        tmp_path, ParquetWriterOptions(statistics_enabled=statistics_enabled)
+    )
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+
+        for row_group in metadata["row_groups"]:
+            for col in row_group["columns"]:
+                if has_statistics:
+                    assert col["statistics"] is not None
+                else:
+                    assert col["statistics"] is None
+
+
+@pytest.mark.parametrize("max_row_group_size", [1000, 5000, 10000, 100000])
+def test_write_parquet_with_options_max_row_group_size(
+    large_df, tmp_path, max_row_group_size
+):
+    """Test configuring the maximum number of rows per row group in Parquet.
+
+    Because the total row count is a multiple of max_row_group_size, every row
+    group must contain exactly max_row_group_size rows.
+    """
+    path = f"{tmp_path}/t.parquet"
+    large_df.write_parquet_with_options(
+        path, ParquetWriterOptions(max_row_group_size=max_row_group_size)
+    )
+
+    parquet = pq.ParquetFile(path)
+    metadata = parquet.metadata.to_dict()
+    for row_group in metadata["row_groups"]:
+        assert row_group["num_rows"] == max_row_group_size
+
+
+@pytest.mark.parametrize("created_by", ["datafusion", "datafusion-python", "custom"])
+def test_write_parquet_with_options_created_by(df, tmp_path, created_by):
+    """Test configuring the created_by metadata in Parquet."""
+    df.write_parquet_with_options(tmp_path, ParquetWriterOptions(created_by=created_by))
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+        assert metadata["created_by"] == created_by
+
+
+@pytest.mark.parametrize("statistics_truncate_length", [5, 25, 50])
+def test_write_parquet_with_options_statistics_truncate_length(
+    tmp_path, statistics_truncate_length
+):
+    """Test configuring the truncate limit in Parquet's row-group-level statistics."""
+    ctx = SessionContext()
+    data = {
+        "a": [
+            "a_the_quick_brown_fox_jumps_over_the_lazy_dog",
+            "m_the_quick_brown_fox_jumps_over_the_lazy_dog",
+            "z_the_quick_brown_fox_jumps_over_the_lazy_dog",
+        ],
+        "b": ["a_smaller", "m_smaller", "z_smaller"],
+    }
+    df = ctx.from_arrow(pa.record_batch(data))
+    df.write_parquet_with_options(
+        tmp_path,
+        ParquetWriterOptions(statistics_truncate_length=statistics_truncate_length),
+    )
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+
+        for row_group in metadata["row_groups"]:
+            for col in row_group["columns"]:
+                statistics = col["statistics"]
+                assert len(statistics["min"]) <= statistics_truncate_length
+                assert len(statistics["max"]) <= statistics_truncate_length
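+
+
+# Illustrative arithmetic for the two tests above: if large_df holds 100_000
+# rows (an assumed fixture size), max_row_group_size=5000 yields exactly
+# 100_000 / 5000 = 20 row groups; and with statistics_truncate_length=5, the
+# stored min statistic for column "a" is at most the 5-character prefix
+# "a_the" of the full 45-character string.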
+
+
+def test_write_parquet_with_options_default_encoding(tmp_path):
+    """Test that, by default, Parquet files are written with dictionary encoding.
+
+    Note that dictionary encoding is not used for boolean values, so it is not
+    tested here.
+    """
+    ctx = SessionContext()
+    data = {
+        "a": [1, 2, 3],
+        "b": ["1", "2", "3"],
+        "c": [1.01, 2.02, 3.03],
+    }
+    df = ctx.from_arrow(pa.record_batch(data))
+    df.write_parquet_with_options(tmp_path, ParquetWriterOptions())
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+
+        for row_group in metadata["row_groups"]:
+            for col in row_group["columns"]:
+                assert col["encodings"] == ("PLAIN", "RLE", "RLE_DICTIONARY")
+
+
+@pytest.mark.parametrize(
+    ("encoding", "data_types", "result"),
+    [
+        ("plain", ["int", "float", "str", "bool"], ("PLAIN", "RLE")),
+        ("rle", ["bool"], ("RLE",)),
+        ("delta_binary_packed", ["int"], ("RLE", "DELTA_BINARY_PACKED")),
+        ("delta_length_byte_array", ["str"], ("RLE", "DELTA_LENGTH_BYTE_ARRAY")),
+        ("delta_byte_array", ["str"], ("RLE", "DELTA_BYTE_ARRAY")),
+        ("byte_stream_split", ["int", "float"], ("RLE", "BYTE_STREAM_SPLIT")),
+    ],
+)
+def test_write_parquet_with_options_encoding(tmp_path, encoding, data_types, result):
+    """Test different Parquet encodings on their respective supported column types."""
+    ctx = SessionContext()
+
+    data = {}
+    for data_type in data_types:
+        if data_type == "int":
+            data["int"] = [1, 2, 3]
+        elif data_type == "float":
+            data["float"] = [1.01, 2.02, 3.03]
+        elif data_type == "str":
+            data["str"] = ["a", "b", "c"]
+        elif data_type == "bool":
+            data["bool"] = [True, False, True]
+
+    df = ctx.from_arrow(pa.record_batch(data))
+    df.write_parquet_with_options(
+        tmp_path, ParquetWriterOptions(encoding=encoding, dictionary_enabled=False)
+    )
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+
+        for row_group in metadata["row_groups"]:
+            for col in row_group["columns"]:
+                assert col["encodings"] == result
+
+
+@pytest.mark.parametrize("encoding", ["bit_packed"])
+def test_write_parquet_with_options_unsupported_encoding(df, tmp_path, encoding):
+    """Test that unsupported Parquet encodings do not work."""
+    # BaseException is used since this throws a Rust panic: https://github.com/PyO3/pyo3/issues/3519
+    with pytest.raises(BaseException, match="Encoding .*? is not supported"):
+        df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
+
+
+@pytest.mark.parametrize("encoding", ["non_existent", "unknown", "plain123"])
+def test_write_parquet_with_options_invalid_encoding(df, tmp_path, encoding):
+    """Test that invalid Parquet encodings do not work."""
+    with pytest.raises(Exception, match="Unknown or unsupported parquet encoding"):
+        df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
+
+
+@pytest.mark.parametrize("encoding", ["plain_dictionary", "rle_dictionary"])
+def test_write_parquet_with_options_dictionary_encoding_fallback(
+    df, tmp_path, encoding
+):
+    """Test that the dictionary encoding cannot be used as fallback in Parquet."""
+    # BaseException is used since this throws a Rust panic: https://github.com/PyO3/pyo3/issues/3519
+    with pytest.raises(
+        BaseException, match="Dictionary encoding can not be used as fallback encoding"
+    ):
+        df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
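+
+
+# Minimal sketch of how the per-column encodings asserted above can be read
+# back from a single file; the helper name is illustrative, not a library API.
+def _column_encodings(path: str) -> dict[str, tuple]:
+    meta = pq.ParquetFile(path).metadata.to_dict()
+    return {
+        col["path_in_schema"]: col["encodings"]
+        for row_group in meta["row_groups"]
+        for col in row_group["columns"]
+    }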
+
+
+def test_write_parquet_with_options_bloom_filter(df, tmp_path):
+    """Test Parquet files with and without (default) bloom filters.
+
+    Since pyarrow does not expose any information about bloom filters, the
+    easiest way to confirm that they are actually written is to compare the
+    file sizes.
+    """
+    path_no_bloom_filter = tmp_path / "1"
+    path_bloom_filter = tmp_path / "2"
+
+    df.write_parquet_with_options(path_no_bloom_filter, ParquetWriterOptions())
+    df.write_parquet_with_options(
+        path_bloom_filter, ParquetWriterOptions(bloom_filter_on_write=True)
+    )
+
+    size_no_bloom_filter = sum(
+        file.stat().st_size for file in path_no_bloom_filter.rglob("*.parquet")
+    )
+    size_bloom_filter = sum(
+        file.stat().st_size for file in path_bloom_filter.rglob("*.parquet")
+    )
+
+    assert size_no_bloom_filter < size_bloom_filter
+
+
+def test_write_parquet_with_options_column_options(df, tmp_path):
+    """Test writing Parquet files with per-column options, which override the
+    global configs (when provided)."""
+    data = {
+        "a": [1, 2, 3],
+        "b": ["a", "b", "c"],
+        "c": [False, True, False],
+        "d": [1.01, 2.02, 3.03],
+        "e": [4, 5, 6],
+    }
+
+    column_specific_options = {
+        "a": ParquetColumnOptions(statistics_enabled="none"),
+        "b": ParquetColumnOptions(encoding="plain", dictionary_enabled=False),
+        "c": ParquetColumnOptions(
+            compression="snappy", encoding="rle", dictionary_enabled=False
+        ),
+        "d": ParquetColumnOptions(
+            compression="zstd(6)",
+            encoding="byte_stream_split",
+            dictionary_enabled=False,
+            statistics_enabled="none",
+        ),
+        # column "e" will use the global configs
+    }
+
+    results = {
+        "a": {
+            "statistics": False,
+            "compression": "brotli",
+            "encodings": ("PLAIN", "RLE", "RLE_DICTIONARY"),
+        },
+        "b": {
+            "statistics": True,
+            "compression": "brotli",
+            "encodings": ("PLAIN", "RLE"),
+        },
+        "c": {
+            "statistics": True,
+            "compression": "snappy",
+            "encodings": ("RLE",),
+        },
+        "d": {
+            "statistics": False,
+            "compression": "zstd",
+            "encodings": ("RLE", "BYTE_STREAM_SPLIT"),
+        },
+        "e": {
+            "statistics": True,
+            "compression": "brotli",
+            "encodings": ("PLAIN", "RLE", "RLE_DICTIONARY"),
+        },
+    }
+
+    ctx = SessionContext()
+    df = ctx.from_arrow(pa.record_batch(data))
+    df.write_parquet_with_options(
+        tmp_path,
+        ParquetWriterOptions(
+            compression="brotli(8)", column_specific_options=column_specific_options
+        ),
+    )
+
+    for file in tmp_path.rglob("*.parquet"):
+        parquet = pq.ParquetFile(file)
+        metadata = parquet.metadata.to_dict()
+
+        for row_group in metadata["row_groups"]:
+            for col in row_group["columns"]:
+                column_name = col["path_in_schema"]
+                result = results[column_name]
+                assert (col["statistics"] is not None) == result["statistics"]
+                assert col["compression"].lower() == result["compression"].lower()
+                assert col["encodings"] == result["encodings"]
+
+
+def test_write_parquet_options(df, tmp_path):
+    options = ParquetWriterOptions(compression="gzip", compression_level=6)
+    df.write_parquet(str(tmp_path), options)
+
+    result = pq.read_table(str(tmp_path)).to_pydict()
+    expected = df.to_pydict()
+
+    assert result == expected
+
+
+def test_write_parquet_options_error(df, tmp_path):
+    options = ParquetWriterOptions(compression="gzip", compression_level=6)
+    with pytest.raises(ValueError):
+        df.write_parquet(str(tmp_path), options, compression_level=1)
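+
+
+# Usage note for the two tests above: write_parquet accepts either a
+# ParquetWriterOptions instance or individual writer keyword arguments, but
+# not both at once; combining them raises ValueError.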
ctx.table("t").sort(column("a")).collect()[0][0].to_pylist() + expected = [-3, -2, -1, 1, 2, 3] + + assert result == expected + + def test_dataframe_export(df) -> None: # Guarantees that we have the canonical implementation # reading our dataframe export @@ -1755,27 +3070,15 @@ def test_html_formatter_shared_styles(df, clean_formatter_state): # First, ensure we're using shared styles configure_formatter(use_shared_styles=True) - # Get HTML output for first table - should include styles html_first = df._repr_html_() - - # Verify styles are included in first render - assert "
" - f"{field.name}
" - f"
" - "" - "" - f"{formatted_value}" - f"" - f"
" - f"
{formatted_value}