diff --git a/.acceptance.goreleaser.yml b/.acceptance.goreleaser.yml
index f96f03d5..b3e10084 100644
--- a/.acceptance.goreleaser.yml
+++ b/.acceptance.goreleaser.yml
@@ -12,10 +12,15 @@ builds:
env:
- CC=x86_64-linux-gnu-gcc
- CXX=x86_64-linux-gnu-g++
+ - CGO_ENABLED=1
+ - GOFLAGS=
+ - CGO_LDFLAGS=
ldflags:
- -s -w -X main.version={{.Version}} -X main.date={{.Date}} -X main.commit={{.Commit}} -X main.builtBy=goreleaser
+ tags: []
+
archives:
- id: homebrew
format: tar.gz
diff --git a/.cursor/rules/general.mdc b/.cursor/rules/general.mdc
new file mode 100644
index 00000000..1e956cb2
--- /dev/null
+++ b/.cursor/rules/general.mdc
@@ -0,0 +1,20 @@
+---
+description:
+globs:
+alwaysApply: false
+---
+# general rules to always apply
+## confirmation/avoid too much initiative
+- Do NOT make any change I have not explicitly asked for
+- NEVER make any changes if I have only asked you a question and have not explicitly asked you to take an action
+- Ask for confirmation before making ANY changes, with a summary of what you will do
+## format
+- Always use lower case for SQL
+## general attitude
+- Use a neutral tone of voice and do not be too positive/enthusiastic.
+  - When I report a problem, do NOT say "perfect, I see the problem", as that sounds like you already know the solution
+  - When you have made a change, do NOT say "now everything will be working" until you have confirmation that it does work
+  - Always review my ideas and suggestions critically and look for flaws in my logic
\ No newline at end of file
diff --git a/.github/workflows/01-tailpipe-release.yaml b/.github/workflows/01-tailpipe-release.yaml
index 293b0df3..4345a0fd 100644
--- a/.github/workflows/01-tailpipe-release.yaml
+++ b/.github/workflows/01-tailpipe-release.yaml
@@ -5,12 +5,12 @@ on:
inputs:
environment:
type: choice
- description: 'Select Release Type'
+ description: "Select Release Type"
options:
- # to change the values in this option, we also need to update the condition test below in at least 3 location. Search for github.event.inputs.environment
- - Development (alpha)
- - Development (beta)
- - Final (RC and final release)
+        # to change the values in this option, we also need to update the condition test below in at least 3 locations. Search for github.event.inputs.environment
+ - Development (alpha)
+ - Development (beta)
+ - Final (RC and final release)
required: true
version:
description: "Version (without 'v')"
@@ -67,7 +67,7 @@ jobs:
build_and_release:
name: Build and Release Tailpipe
needs: [ensure_branch_in_homebrew]
- runs-on: ubuntu-latest
+ runs-on: ubuntu-24.04-arm
steps:
- name: validate
if: github.ref == 'refs/heads/develop'
@@ -110,6 +110,20 @@ jobs:
token: ${{ secrets.GH_ACCESS_TOKEN }}
ref: main
+ - name: Set up Docker
+ uses: docker/setup-buildx-action@v3
+
+ - name: Install Docker (if needed)
+ run: |
+ if ! command -v docker &> /dev/null; then
+ sudo apt-get update
+ sudo apt-get install -y docker.io
+ fi
+
+ - name: Verify Docker installation
+ run: |
+ docker --version
+
- name: Calculate version
id: calculate_version
run: |
@@ -132,7 +146,7 @@ jobs:
# this is required, check golangci-lint-action docs
- uses: actions/setup-go@19bb51245e9c80abacb2e91cc42b33fa478b8639 # v4.2.1
with:
- go-version: '1.23'
+ go-version: "1.23"
cache: false # setup-go v4 caches by default, do not change this parameter, check golangci-lint-action doc: https://github.com/golangci/golangci-lint-action/pull/704
- name: Setup release environment
@@ -143,6 +157,8 @@ jobs:
- name: Release publish
run: |-
cd tailpipe
+ git config --global user.name "Tailpipe GitHub Actions Bot"
+ git config --global user.email noreply@github.com
make release
create_pr_in_homebrew:
@@ -223,3 +239,117 @@ jobs:
git add .
git commit -m "Versioning brew formulas"
git push origin $VERSION
+
+ update_homebrew_tap:
+ name: Update homebrew-tap formula
+ if: ${{ github.event.inputs.environment == 'Final (RC and final release)' }}
+ needs: update_pr_for_versioning
+ runs-on: ubuntu-latest
+ steps:
+ - name: Calculate version
+ id: calculate_version
+ run: |
+ echo "VERSION=v${{ github.event.inputs.version }}" >> $GITHUB_ENV
+
+ - name: Parse semver string
+ id: semver_parser
+ uses: booxmedialtd/ws-action-parse-semver@7784200024d6b3fc01253e617ec0168daf603de3 # v1.4.7
+ with:
+ input_string: ${{ github.event.inputs.version }}
+
+ - name: Checkout
+ if: steps.semver_parser.outputs.prerelease == ''
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ repository: turbot/homebrew-tap
+ token: ${{ secrets.GH_ACCESS_TOKEN }}
+ ref: main
+
+ - name: Get pull request title
+ if: steps.semver_parser.outputs.prerelease == ''
+ id: pr_title
+ run: >-
+ echo "PR_TITLE=$(
+ gh pr view $VERSION --json title | jq .title | tr -d '"'
+ )" >> $GITHUB_OUTPUT
+
+ - name: Output
+ if: steps.semver_parser.outputs.prerelease == ''
+ run: |
+ echo ${{ steps.pr_title.outputs.PR_TITLE }}
+ echo ${{ env.VERSION }}
+
+ - name: Fail if PR title does not match with version
+ if: steps.semver_parser.outputs.prerelease == ''
+ run: |
+ if [[ "${{ steps.pr_title.outputs.PR_TITLE }}" == "Tailpipe ${{ env.VERSION }}" ]]; then
+ echo "Correct version"
+ else
+ echo "Incorrect version"
+ exit 1
+ fi
+
+ - name: Merge pull request to update brew formula
+ if: steps.semver_parser.outputs.prerelease == ''
+ run: |
+ git fetch --all
+ gh pr merge $VERSION --squash --delete-branch
+ git push origin --delete bump-brew
+
+ trigger_smoke_tests:
+ name: Trigger smoke tests
+ if: ${{ github.event.inputs.environment == 'Final (RC and final release)' }}
+ needs: [update_homebrew_tap]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Calculate version
+ id: calculate_version
+ run: |
+ echo "VERSION=v${{ github.event.inputs.version }}" >> $GITHUB_ENV
+
+ - name: Parse semver string
+ id: semver_parser
+ uses: booxmedialtd/ws-action-parse-semver@3576f3a20a39f8752fe0d8195f5ed384090285dc # v1.3.0
+ with:
+ input_string: ${{ github.event.inputs.version }}
+
+ - name: Trigger smoke test workflow
+ if: steps.semver_parser.outputs.prerelease == ''
+ run: |
+ echo "Triggering smoke test workflow for version $VERSION..."
+ gh workflow run "12-test-post-release-linux-distros.yaml" \
+ --ref ${{ github.ref }} \
+ --field version=$VERSION \
+ --repo ${{ github.repository }}
+ env:
+ GH_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
+
+ - name: Get smoke test workflow run URL
+ if: steps.semver_parser.outputs.prerelease == ''
+ run: |
+ echo "Waiting for smoke test workflow to start..."
+ sleep 10
+
+ # Get the most recent run of the smoke test workflow
+ RUN_ID=$(gh run list \
+ --workflow="12-test-post-release-linux-distros.yaml" \
+ --repo ${{ github.repository }} \
+ --limit 1 \
+ --json databaseId \
+ --jq '.[0].databaseId')
+
+ if [ -n "$RUN_ID" ]; then
+ WORKFLOW_URL="https://github.com/${{ github.repository }}/actions/runs/$RUN_ID"
+ echo "✅ Smoke test workflow triggered successfully!"
+ echo "🔗 Monitor progress at: $WORKFLOW_URL"
+ echo ""
+ echo "Workflow details:"
+ echo " - Version: $VERSION"
+ echo " - Workflow: 12-test-post-release-linux-distros.yaml"
+ echo " - Run ID: $RUN_ID"
+ else
+ echo "⚠️ Could not retrieve workflow run ID. Check manually at:"
+ echo "https://github.com/${{ github.repository }}/actions/workflows/12-test-post-release-linux-distros.yaml"
+ fi
+ env:
+ GH_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
diff --git a/.github/workflows/10-test-lint.yaml b/.github/workflows/10-test-lint.yaml
index 821ef7fc..049ba71e 100644
--- a/.github/workflows/10-test-lint.yaml
+++ b/.github/workflows/10-test-lint.yaml
@@ -31,7 +31,6 @@ jobs:
with:
repository: turbot/tailpipe-plugin-sdk
path: tailpipe-plugin-sdk
- token: ${{ secrets.GH_ACCESS_TOKEN }}
ref: develop
- name: Checkout Tailpipe Core Plugin repository
@@ -39,7 +38,6 @@ jobs:
with:
repository: turbot/tailpipe-plugin-core
path: tailpipe-plugin-core
- token: ${{ secrets.GH_ACCESS_TOKEN }}
ref: main
# this is required, check golangci-lint-action docs
diff --git a/.github/workflows/11-test-acceptance.yaml b/.github/workflows/11-test-acceptance.yaml
index 6717c664..cbcbc901 100644
--- a/.github/workflows/11-test-acceptance.yaml
+++ b/.github/workflows/11-test-acceptance.yaml
@@ -16,7 +16,7 @@ env:
jobs:
goreleaser:
name: Build
- runs-on: ubuntu-latest
+ runs-on: ubuntu-24.04-arm
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -36,7 +36,6 @@ jobs:
with:
repository: turbot/tailpipe-plugin-sdk
path: tailpipe-plugin-sdk
- token: ${{ secrets.GH_ACCESS_TOKEN }}
ref: develop
- name: Checkout Tailpipe Core Plugin repository
@@ -44,7 +43,6 @@ jobs:
with:
repository: turbot/tailpipe-plugin-core
path: tailpipe-plugin-core
- token: ${{ secrets.GH_ACCESS_TOKEN }}
ref: main
# this is required, check golangci-lint-action docs
@@ -97,6 +95,8 @@ jobs:
- "partition_delete"
- "core_formats"
- "table_block"
+ - "config_precedence"
+ - "plugin"
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout
diff --git a/.github/workflows/12-test-post-release-linux-distros.yaml b/.github/workflows/12-test-post-release-linux-distros.yaml
new file mode 100644
index 00000000..4e58287a
--- /dev/null
+++ b/.github/workflows/12-test-post-release-linux-distros.yaml
@@ -0,0 +1,226 @@
+name: "12 - Test: Linux Distros (Post-release)"
+
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: "Version to test (with 'v' prefix, e.g., v1.0.0)"
+ required: true
+ type: string
+
+env:
+ # Version from input
+ VERSION: ${{ github.event.inputs.version }}
+ # Disable update checks during smoke tests
+ TAILPIPE_UPDATE_CHECK: false
+ SLACK_WEBHOOK_URL: ${{ secrets.PIPELING_RELEASE_BOT_WEBHOOK_URL }}
+
+jobs:
+ smoke_test_ubuntu_24:
+ name: Smoke test (Ubuntu 24, x86_64)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Download Linux Release Artifact
+ run: |
+ mkdir -p ./artifacts
+ gh release download ${{ env.VERSION }} \
+ --pattern "tailpipe.linux.amd64.tar.gz" \
+ --dir ./artifacts \
+ --repo ${{ github.repository }}
+ # Rename to expected format
+ mv ./artifacts/tailpipe.linux.amd64.tar.gz ./artifacts/linux.tar.gz
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+
+ - name: Pull Ubuntu latest Image
+ run: docker pull ubuntu:latest
+
+ - name: Create and Start Ubuntu latest Container
+ run: |
+ docker run -d --name ubuntu-24-test -v ${{ github.workspace }}/artifacts:/artifacts -v ${{ github.workspace }}/scripts:/scripts ubuntu:latest tail -f /dev/null
+
+ - name: Get runner/container info
+ run: |
+ docker exec ubuntu-24-test /scripts/linux_container_info.sh
+
+ - name: Install dependencies and setup environment
+ run: |
+ docker exec ubuntu-24-test /scripts/prepare_ubuntu_container.sh
+
+ - name: Run smoke tests
+ run: |
+ docker exec ubuntu-24-test /scripts/smoke_test.sh
+
+ - name: Stop and Remove Container
+ run: |
+ docker stop ubuntu-24-test
+ docker rm ubuntu-24-test
+
+ smoke_test_centos_9:
+ name: Smoke test (CentOS Stream 9, x86_64)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Download Linux Release Artifact
+ run: |
+ mkdir -p ./artifacts
+ gh release download ${{ env.VERSION }} \
+ --pattern "tailpipe.linux.amd64.tar.gz" \
+ --dir ./artifacts \
+ --repo ${{ github.repository }}
+ # Rename to expected format
+ mv ./artifacts/tailpipe.linux.amd64.tar.gz ./artifacts/linux.tar.gz
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+
+ - name: Pull CentOS Stream 9 image
+ run: docker pull quay.io/centos/centos:stream9
+
+ - name: Create and Start CentOS stream9 Container
+ run: |
+ docker run -d --name centos-stream9-test -v ${{ github.workspace }}/artifacts:/artifacts -v ${{ github.workspace }}/scripts:/scripts quay.io/centos/centos:stream9 tail -f /dev/null
+
+ - name: Get runner/container info
+ run: |
+ docker exec centos-stream9-test /scripts/linux_container_info.sh
+
+ - name: Install dependencies and setup environment
+ run: |
+ docker exec centos-stream9-test /scripts/prepare_centos_container.sh
+
+ - name: Run smoke tests
+ run: |
+ docker exec centos-stream9-test /scripts/smoke_test.sh
+
+ - name: Stop and Remove Container
+ run: |
+ docker stop centos-stream9-test
+ docker rm centos-stream9-test
+
+ smoke_test_amazonlinux:
+ name: Smoke test (Amazon Linux 2023, x86_64)
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Download Linux Release Artifact
+ run: |
+ mkdir -p ./artifacts
+ gh release download ${{ env.VERSION }} \
+ --pattern "tailpipe.linux.amd64.tar.gz" \
+ --dir ./artifacts \
+ --repo ${{ github.repository }}
+ # Rename to expected format
+ mv ./artifacts/tailpipe.linux.amd64.tar.gz ./artifacts/linux.tar.gz
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+
+ - name: Pull Amazon Linux 2023 Image
+ run: docker pull amazonlinux:2023
+
+ - name: Create and Start Amazon Linux 2023 Container
+ run: |
+ docker run -d --name amazonlinux-2023-test -v ${{ github.workspace }}/artifacts:/artifacts -v ${{ github.workspace }}/scripts:/scripts amazonlinux:2023 tail -f /dev/null
+
+ - name: Get runner/container info
+ run: |
+ docker exec amazonlinux-2023-test /scripts/linux_container_info.sh
+
+ - name: Install dependencies and setup environment
+ run: |
+ docker exec amazonlinux-2023-test /scripts/prepare_amazonlinux_container.sh
+
+ - name: Run smoke tests
+ run: |
+ docker exec amazonlinux-2023-test /scripts/smoke_test.sh
+
+ - name: Stop and Remove Container
+ run: |
+ docker stop amazonlinux-2023-test
+ docker rm amazonlinux-2023-test
+
+ smoke_test_linux_arm64:
+ name: Smoke test (Ubuntu 24, ARM64)
+ runs-on: ubuntu-24.04-arm
+ steps:
+ - name: Checkout
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ - name: Download Linux ARM64 Release Artifact
+ run: |
+ mkdir -p ./artifacts
+ gh release download ${{ env.VERSION }} \
+ --pattern "tailpipe.linux.arm64.tar.gz" \
+ --dir ./artifacts \
+ --repo ${{ github.repository }}
+ # Rename to expected format
+ mv ./artifacts/tailpipe.linux.arm64.tar.gz ./artifacts/linux.tar.gz
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract Linux Artifacts and Install Binary
+ run: |
+ sudo tar -xzf ./artifacts/linux.tar.gz -C /usr/local/bin
+ sudo chmod +x /usr/local/bin/tailpipe
+
+ - name: Install jq
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y jq
+
+ - name: Get runner/container info
+ run: |
+ uname -a
+ cat /etc/os-release
+
+ - name: Run smoke tests
+ run: |
+ chmod +x ${{ github.workspace }}/scripts/smoke_test.sh
+ ${{ github.workspace }}/scripts/smoke_test.sh
+
+ notify_completion:
+ name: Notify completion
+ runs-on: ubuntu-latest
+ needs:
+ [
+ smoke_test_ubuntu_24,
+ smoke_test_centos_9,
+ smoke_test_amazonlinux,
+ smoke_test_linux_arm64,
+ ]
+ if: always()
+ steps:
+ - name: Check results and notify
+ run: |
+ # Check if all jobs succeeded
+ UBUNTU_24_RESULT="${{ needs.smoke_test_ubuntu_24.result }}"
+ CENTOS_9_RESULT="${{ needs.smoke_test_centos_9.result }}"
+ AMAZONLINUX_RESULT="${{ needs.smoke_test_amazonlinux.result }}"
+ ARM64_RESULT="${{ needs.smoke_test_linux_arm64.result }}"
+
+ WORKFLOW_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+
+ if [ "$UBUNTU_24_RESULT" = "success" ] && [ "$CENTOS_9_RESULT" = "success" ] && [ "$AMAZONLINUX_RESULT" = "success" ] && [ "$ARM64_RESULT" = "success" ]; then
+ MESSAGE="✅ Tailpipe ${{ env.VERSION }} smoke tests passed!\n\n🔗 View details: $WORKFLOW_URL"
+ else
+ MESSAGE="❌ Tailpipe ${{ env.VERSION }} smoke tests failed!\n\n🔗 View details: $WORKFLOW_URL"
+ fi
+
+ curl -X POST -H 'Content-type: application/json' \
+ --data "{\"text\":\"$MESSAGE\"}" \
+ ${{ env.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/30-stale.yaml b/.github/workflows/30-stale.yaml
index 2b4cbbb1..36e32042 100644
--- a/.github/workflows/30-stale.yaml
+++ b/.github/workflows/30-stale.yaml
@@ -18,22 +18,26 @@ jobs:
id: stale-issues-and-prs
uses: actions/stale@1160a2240286f5da8ec72b1c0816ce2481aabf84 # v8.0.0
with:
- close-issue-message: |
- This issue was closed because it has been stalled for 90 days with no activity.
- close-issue-reason: 'not_planned'
- close-pr-message: |
- This PR was closed because it has been stalled for 90 days with no activity.
- # Set days-before-close to 30 because we want to close the issue/PR after 90 days total, since days-before-stale is set to 60
- days-before-close: 30
+ # TODO: Add back the closing of stale issue part later on
+ # close-issue-message: |
+ # This issue was closed because it has been stalled for 90 days with no activity.
+ # close-issue-reason: "not_planned"
+ # close-pr-message: |
+ # This PR was closed because it has been stalled for 90 days with no activity.
+ # # Set days-before-close to 30 because we want to close the issue/PR after 90 days total, since days-before-stale is set to 60
+ # days-before-close: 30
+ days-before-close: -1
days-before-stale: 60
debug-only: ${{ inputs.dryRun }}
- exempt-issue-labels: 'good first issue,help wanted,blocker'
+ exempt-issue-labels: "good first issue,help wanted,blocker"
repo-token: ${{ secrets.GITHUB_TOKEN }}
- stale-issue-label: 'stale'
+ stale-issue-label: "stale"
stale-issue-message: |
- This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days.
- stale-pr-label: 'stale'
+ This issue is stale because it has been open 60 days with no activity.
+ stale-pr-label: "stale"
stale-pr-message: |
- This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days.
+ This PR is stale because it has been open 60 days with no activity.
start-date: "2021-02-09"
operations-per-run: 1000
diff --git a/.github/workflows/31-add-issues-to-pipeling-issue-tracker.yaml b/.github/workflows/31-add-issues-to-pipeling-issue-tracker.yaml
new file mode 100644
index 00000000..594defea
--- /dev/null
+++ b/.github/workflows/31-add-issues-to-pipeling-issue-tracker.yaml
@@ -0,0 +1,13 @@
+name: Assign Issue to Project
+
+on:
+ issues:
+ types: [opened]
+
+jobs:
+ add-to-project:
+ uses: turbot/steampipe-workflows/.github/workflows/assign-issue-to-pipeling-issue-tracker.yml@main
+ with:
+ issue_number: ${{ github.event.issue.number }}
+ repository: ${{ github.repository }}
+ secrets: inherit
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 6f1db318..e16ec82a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,8 @@
*.dll
*.so
*.dylib
-
+/test_apps/
+/memtest
+
# Editor cache and lock files
*.swp
*.swo
@@ -28,4 +29,7 @@
go.work
# Dist directory is created by goreleaser
-/dist
\ No newline at end of file
+/dist
+
+# Sysroot directory is created by make build-sysroot
+/sysroot
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
index b6748a19..f42ba77e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -70,3 +70,4 @@ run:
issues:
exclude-dirs:
- "tests/acceptance"
+ - "test_apps"
diff --git a/.goreleaser.yml b/.goreleaser.yml
index d8e8a667..f8834287 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,40 +1,50 @@
version: 2
builds:
- - id: tailpipe-linux-arm64
+ - id: tailpipe-linux-amd64
binary: tailpipe
goos:
- linux
goarch:
- - arm64
+ - amd64
env:
- - CC=aarch64-linux-gnu-gcc
- - CXX=aarch64-linux-gnu-g++
+ - CC=x86_64-linux-gnu-gcc
+ - CXX=x86_64-linux-gnu-g++
+ - CGO_ENABLED=1
+ - GOFLAGS=
+ - CGO_LDFLAGS=
# Custom ldflags.
#
# Default: '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} -X main.builtBy=goreleaser'
# Templates: allowed
ldflags:
- # Go Releaser analyzes your Git repository and identifies the most recent Git tag (typically the highest version number) as the version for your release.
+ # Goreleaser analyzes your Git repository and identifies the most recent Git tag (typically the highest version number) as the version for your release.
# This is how it determines the value of {{.Version}}.
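+      # For example, a Git tag of v0.7.1 would yield {{.Version}} = 0.7.1.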
- -s -w -X main.version={{.Version}} -X main.date={{.Date}} -X main.commit={{.Commit}} -X main.builtBy=goreleaser
- - id: tailpipe-linux-amd64
+ tags: []
+
+ - id: tailpipe-linux-arm64
binary: tailpipe
goos:
- linux
goarch:
- - amd64
+ - arm64
env:
- - CC=x86_64-linux-gnu-gcc
- - CXX=x86_64-linux-gnu-g++
+ - CC=gcc
+ - CXX=g++
+ - CGO_ENABLED=1
+ - GOFLAGS=
+ - CGO_LDFLAGS=
ldflags:
- -s -w -X main.version={{.Version}} -X main.date={{.Date}} -X main.commit={{.Commit}} -X main.builtBy=goreleaser
+ tags: []
+
- id: tailpipe-darwin-arm64
binary: tailpipe
goos:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a6048822..3508990a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,66 @@
+## v0.7.1 [2025-10-07]
+_Bug Fixes_
+- Build: Restored CentOS/RHEL 9 compatibility by pinning the build image to an older libstdc++/GCC baseline. The previous build linked against newer GLIBCXX symbols, causing Tailpipe to fail on CentOS/RHEL 9.
+
+## v0.7.0 [2025-09-22]
+
+### _Major Changes_
+* Replace native Parquet conversion with a **DuckLake database backend**. ([#546](https://github.com/turbot/tailpipe/issues/546))
+ - DuckLake is DuckDB’s new lakehouse format: data remains in Parquet files, but metadata is efficiently tracked in a
+ separate DuckDB database.
+ - DuckLake supports function-based partitioning, which allows data to be partitioned by year and month. This enables
+ efficient file pruning on `tp_timestamp` without needing a separate `tp_date` filter. A `tp_date` column will still
+ be present for compatibility, but it is no longer required for efficient query filtering.
+ - Existing data will be **automatically migrated** the next time Tailpipe runs. Migration does **not**
+ occur if progress output is disabled (`--progress=false`) or when using machine-readable output (`json`, `line`,
+ `csv`).
+
+ **Note:** For CentOS/RHEL users, the minimum supported version is now **CentOS Stream 10 / RHEL 10** due to `libstdc++` library compatibility.
+
+* The `connect` command now returns the path to an **initialisation SQL script** instead of the database path. ([#550](https://github.com/turbot/tailpipe/issues/550))
+ - The script sets up DuckDB with required extensions, attaches the Tailpipe database, and defines views with optional
+ filters.
+ - You can pass the generated script to DuckDB using the `--init` argument to immediately configure the session. For
+ example:
+ ```sh
+ duckdb --init $(tailpipe connect)
+ ```
+ **Note:** The minimum supported DuckDB version is 1.4.0.
+
+### _Bug Fixes_
+* Include partitions for local plugins in the `tailpipe plugin list` command. ([#538](https://github.com/turbot/tailpipe/issues/538))
+
+
+## v0.6.2 [2025-07-24]
+_Bug fixes_
+* Fix issue where `--to` was not respected for zero granularity data. ([#483](https://github.com/turbot/tailpipe/issues/483))
+* Fix issue where relative times passed to the `from/to` args were parsed incorrectly. ([#485](https://github.com/turbot/tailpipe/issues/485))
+* Fix issue where Tailpipe was crashing if the collection state file had nil trunk states from the previous collection. ([#489](https://github.com/turbot/tailpipe/issues/489))
+* Fix `.inspect` output to show the plugin name for custom tables. ([#360](https://github.com/turbot/tailpipe/issues/360))
+* Fix query JSON outputs to be consistent with DuckDB. ([#432](https://github.com/turbot/tailpipe/issues/432))
+
+_Dependencies_
+* Upgrade `go-viper/mapstructure/v2` and `oauth2` packages to remediate high and moderate vulnerabilities.
+
+## v0.6.1 [2025-07-02]
+_Bug fixes_
+* Update core version to v0.2.9 - fix issue where collection state is not being saved for zero granularity collection. ([#251](https://github.com/turbot/tailpipe-plugin-sdk/issues/251))
+
+## v0.6.0 [2025-07-02]
+_What's new_
+* Add `--to` flag for `collect`, allowing collection of standalone time ranges. ([#238](https://github.com/turbot/tailpipe/issues/238))
+* Add `--overwrite` flag for `collect`, allowing recollection of existing data. ([#454](https://github.com/turbot/tailpipe/issues/454))
+
+_Bug fixes_
+* Fix issue where collection state end-objects are cleared when collection is complete,
+meaning no further data will be collected for that day. ([#250](https://github.com/turbot/tailpipe-plugin-sdk/issues/250))
+
+_Behaviour Change_
+
+When passing a `from` time to a collection, the existing partition data is no longer cleared before the collection starts.
+This means that, by default, data is not recollected for time ranges that have already been collected.
+To recollect data for a time range, pass the new `--overwrite` flag to the `collect` command.
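+
+For example (using a hypothetical partition name), recollecting the last 7 days of a partition might look like:
+```sh
+tailpipe collect aws_cloudtrail_log.p1 --from T-7d --overwrite
+```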
+
## v0.5.0 [2025-06-20]
_What's new_
* Added `tp_index` property to partition HCL. Use this to specify the source column for the `tp_index`. ([#414](https://github.com/turbot/tailpipe/issues/414))
diff --git a/Makefile b/Makefile
index be9846cb..74f25773 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
OUTPUT_DIR?=/usr/local/bin
PACKAGE_NAME := github.com/turbot/tailpipe
-GOLANG_CROSS_VERSION ?= v1.23.2
+GOLANG_CROSS_VERSION ?= gcc13-osxcross-20251006102018
# sed 's/[\/_]/-/g': Replaces both slashes (/) and underscores (_) with hyphens (-).
# sed 's/[^a-zA-Z0-9.-]//g': Removes any character that isn’t alphanumeric, a dot (.), or a hyphen (-).
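+# For example, a value of "fix/DuckDB_1.4 rc" becomes "fix-DuckDB-1.4rc" after both substitutions.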
@@ -23,13 +23,14 @@ release-dry-run:
-v `pwd`/../tailpipe-plugin-sdk:/go/src/tailpipe-plugin-sdk \
-v `pwd`/../tailpipe-plugin-core:/go/src/tailpipe-plugin-core \
-w /go/src/tailpipe \
- ghcr.io/goreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \
+ ghcr.io/turbot/goreleaser-cross:${GOLANG_CROSS_VERSION} \
--clean --skip=validate --skip=publish --snapshot
.PHONY: release-acceptance
release-acceptance:
@docker run \
--rm \
+ --platform=linux/arm64 \
-e CGO_ENABLED=1 \
-v /var/run/docker.sock:/var/run/docker.sock \
-v `pwd`:/go/src/tailpipe \
@@ -37,7 +38,7 @@ release-acceptance:
-v `pwd`/../tailpipe-plugin-sdk:/go/src/tailpipe-plugin-sdk \
-v `pwd`/../tailpipe-plugin-core:/go/src/tailpipe-plugin-core \
-w /go/src/tailpipe \
- ghcr.io/goreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \
+ ghcr.io/turbot/goreleaser-cross:${GOLANG_CROSS_VERSION} \
--clean --skip=validate --skip=publish --snapshot --config=.acceptance.goreleaser.yml
.PHONY: release
@@ -48,6 +49,7 @@ release:
fi
docker run \
--rm \
+ --platform=linux/arm64 \
-e CGO_ENABLED=1 \
--env-file .release-env \
-v /var/run/docker.sock:/var/run/docker.sock \
@@ -56,5 +58,5 @@ release:
-v `pwd`/../tailpipe-plugin-sdk:/go/src/tailpipe-plugin-sdk \
-v `pwd`/../tailpipe-plugin-core:/go/src/tailpipe-plugin-core \
-w /go/src/tailpipe \
- ghcr.io/goreleaser/goreleaser-cross:${GOLANG_CROSS_VERSION} \
- release --clean --skip=validate
+ ghcr.io/turbot/goreleaser-cross:${GOLANG_CROSS_VERSION} \
+ release --clean --skip=validate
\ No newline at end of file
diff --git a/README.md b/README.md
index 3daa5a14..e8ae6611 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
-[](https://hub.tailpipe-io.vercel.app/)
-[](https://hub.tailpipe-io.vercel.app/)
-[](https://turbot.com/community/join?utm_id=gspreadme&utm_source=github&utm_medium=repo&utm_campaign=github&utm_content=readme)
+[](https://hub.tailpipe.io/)
+[](https://hub.tailpipe.io/)
+[](https://turbot.com/community/join?utm_id=gspreadme&utm_source=github&utm_medium=repo&utm_campaign=github&utm_content=readme)
[](https://turbot.com?utm_id=gspreadme&utm_source=github&utm_medium=repo&utm_campaign=github&utm_content=readme)
# select * from logs;
diff --git a/cmd/collect.go b/cmd/collect.go
index 590396d0..80f228af 100644
--- a/cmd/collect.go
+++ b/cmd/collect.go
@@ -5,21 +5,25 @@ import (
"errors"
"fmt"
"log/slog"
+ "os"
+ "strconv"
"strings"
"time"
- "github.com/danwakefield/fnmatch"
+ "github.com/hashicorp/hcl/v2"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
+ "github.com/turbot/pipe-fittings/v2/modconfig"
"github.com/turbot/pipe-fittings/v2/parse"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/collector"
"github.com/turbot/tailpipe/internal/config"
- "github.com/turbot/tailpipe/internal/parquet"
+ "github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/database"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
"github.com/turbot/tailpipe/internal/plugin"
"golang.org/x/exp/maps"
)
@@ -48,14 +52,17 @@ Every time you run tailpipe collect, Tailpipe refreshes its views over all colle
cmdconfig.OnCmd(cmd).
AddBoolFlag(pconstants.ArgCompact, true, "Compact the parquet files after collection").
AddStringFlag(pconstants.ArgFrom, "", "Collect days newer than a relative or absolute date (collection defaulting to 7 days if not specified)").
- AddBoolFlag(pconstants.ArgProgress, true, "Show active progress of collection, set to false to disable")
+ AddStringFlag(pconstants.ArgTo, "", "Collect days older than a relative or absolute date (defaulting to now if not specified)").
+ AddBoolFlag(pconstants.ArgProgress, true, "Show active progress of collection, set to false to disable").
+ AddBoolFlag(pconstants.ArgOverwrite, false, "Recollect data from the source even if it has already been collected")
return cmd
}
func runCollectCmd(cmd *cobra.Command, args []string) {
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
+ ctx, cancel := context.WithCancel(ctx) //nolint:govet // cancel is needed for the doCollect func
var err error
defer func() {
@@ -64,30 +71,49 @@ func runCollectCmd(cmd *cobra.Command, args []string) {
}
if err != nil {
- error_helpers.ShowError(ctx, err)
+ if error_helpers.IsCancelledError(err) {
+ fmt.Println("tailpipe collect command cancelled.") //nolint:forbidigo // ui output
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
setExitCodeForCollectError(err)
}
}()
- err = doCollect(ctx, cancel, args)
- if errors.Is(err, context.Canceled) {
- // clear error so we don't show it with normal error reporting
- err = nil
- fmt.Println("Collection cancelled.") //nolint:forbidigo // ui output
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return //nolint:govet // this is explicitly used in tests
}
+
+ err = doCollect(ctx, cancel, args)
+
}
func doCollect(ctx context.Context, cancel context.CancelFunc, args []string) error {
// arg `from` accepts ISO 8601 date(2024-01-01), ISO 8601 datetime(2006-01-02T15:04:05), ISO 8601 datetime with ms(2006-01-02T15:04:05.000),
// RFC 3339 datetime with timezone(2006-01-02T15:04:05Z07:00) and relative time formats(T-2Y, T-10m, T-10W, T-180d, T-9H, T-10M)
var fromTime time.Time
+ // toTime defaults to now, but can be set to a specific time
+ toTime := time.Now()
+ var err error
if viper.IsSet(pconstants.ArgFrom) {
- var err error
- fromTime, err = parseFromTime(viper.GetString(pconstants.ArgFrom))
+ fromTime, err = parseFromToTime(viper.GetString(pconstants.ArgFrom))
if err != nil {
return err
}
}
+ if viper.IsSet(pconstants.ArgTo) {
+ toTime, err = parseFromToTime(viper.GetString(pconstants.ArgTo))
+ if err != nil {
+ return err
+ }
+ }
+ // validate from and to times
+ if err = validateCollectionTimeRange(fromTime, toTime); err != nil {
+ return err
+ }
+
partitions, err := getPartitions(args)
if err != nil {
return fmt.Errorf("failed to get partition config: %w", err)
@@ -97,7 +123,14 @@ func doCollect(ctx context.Context, cancel context.CancelFunc, args []string) er
for _, partition := range partitions {
partitionNames = append(partitionNames, partition.FullName)
}
- slog.Info("Starting collection", "partition(s)", partitionNames, "from", fromTime)
+ slog.Info("Starting collection", "partition(s)", partitionNames, "from", fromTime, "to", toTime)
+
+ // Create backup of metadata database before starting collection
+ if err := database.BackupDucklakeMetadata(); err != nil {
+ slog.Warn("Failed to backup metadata database", "error", err)
+ // Continue with collection - backup failure shouldn't block the operation
+ }
+
// now we have the partitions, we can start collecting
// start the plugin manager
@@ -106,20 +139,10 @@ func doCollect(ctx context.Context, cancel context.CancelFunc, args []string) er
// collect each partition serially
var errList []error
+
for _, partition := range partitions {
- // if a from time is set, clear the partition data from that time forward
- if !fromTime.IsZero() {
- slog.Info("Deleting parquet files after the from time", "partition", partition.Name, "from", fromTime)
- _, err := parquet.DeleteParquetFiles(partition, fromTime)
- if err != nil {
- slog.Warn("Failed to delete parquet files after the from time", "partition", partition.Name, "from", fromTime, "error", err)
- errList = append(errList, err)
- continue
- }
- slog.Info("Completed deleting parquet files after the from time", "partition", partition.Name, "from", fromTime)
- }
// do the collection
- err = collectPartition(ctx, cancel, partition, fromTime, pluginManager)
+ err = collectPartition(ctx, cancel, partition, fromTime, toTime, pluginManager)
if err != nil {
errList = append(errList, err)
}
@@ -133,14 +156,27 @@ func doCollect(ctx context.Context, cancel context.CancelFunc, args []string) er
return nil
}
-func collectPartition(ctx context.Context, cancel context.CancelFunc, partition *config.Partition, fromTime time.Time, pluginManager *plugin.PluginManager) (err error) {
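+// validateCollectionTimeRange verifies that the 'from' time is not after the 'to' time, and that the 'to' time is not in the future.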
+func validateCollectionTimeRange(fromTime time.Time, toTime time.Time) error {
+ if !fromTime.IsZero() && !toTime.IsZero() && fromTime.After(toTime) {
+ return fmt.Errorf("invalid time range: 'from' time %s is after 'to' time %s", fromTime.Format(time.DateOnly), toTime.Format(time.DateOnly))
+ }
+ if toTime.After(time.Now()) {
+ return fmt.Errorf("invalid time range: 'to' time %s is in the future", toTime.Format(time.DateOnly))
+ }
+ return nil
+}
+
+func collectPartition(ctx context.Context, cancel context.CancelFunc, partition *config.Partition, fromTime time.Time, toTime time.Time, pluginManager *plugin.PluginManager) (err error) {
+ t := time.Now()
c, err := collector.New(pluginManager, partition, cancel)
if err != nil {
return fmt.Errorf("failed to create collector: %w", err)
}
defer c.Close()
- if err = c.Collect(ctx, fromTime); err != nil {
+ overwrite := viper.GetBool(pconstants.ArgOverwrite)
+
+ if err = c.Collect(ctx, fromTime, toTime, overwrite); err != nil {
return err
}
@@ -151,13 +187,14 @@ func collectPartition(ctx context.Context, cancel context.CancelFunc, partition
return err
}
- slog.Info("Collection complete", "partition", partition.Name)
+ slog.Info("Collection complete", "partition", partition.Name, "duration", time.Since(t).Seconds())
// compact the parquet files
if viper.GetBool(pconstants.ArgCompact) {
err = c.Compact(ctx)
if err != nil {
return err
}
+
}
// update status to show complete and display collection summary
@@ -166,6 +203,7 @@ func collectPartition(ctx context.Context, cancel context.CancelFunc, partition
return nil
}
+// getPartitions resolves the provided args to a list of partitions.
func getPartitions(args []string) ([]*config.Partition, error) {
// we have loaded tailpipe config by this time
tailpipeConfig := config.GlobalConfig
@@ -179,7 +217,12 @@ func getPartitions(args []string) ([]*config.Partition, error) {
var partitions []*config.Partition
for _, arg := range args {
- partitionNames, err := getPartitionsForArg(maps.Keys(tailpipeConfig.Partitions), arg)
+ if syntheticPartition, ok := getSyntheticPartition(arg); ok {
+ partitions = append(partitions, syntheticPartition)
+ continue
+ }
+
+ partitionNames, err := database.GetPartitionsForArg(tailpipeConfig.Partitions, arg)
if err != nil {
errorList = append(errorList, err)
} else if len(partitionNames) == 0 {
@@ -192,73 +235,135 @@ func getPartitions(args []string) ([]*config.Partition, error) {
}
if len(errorList) > 0 {
- // TODO #errors better formating/error message https://github.com/turbot/tailpipe/issues/106
- return nil, errors.Join(errorList...)
+ // Return a well-formatted multi-error with a count and indented bullet list
+ return nil, formatErrorsWithCount(errorList)
}
return partitions, nil
}
-func getPartitionsForArg(partitions []string, arg string) ([]string, error) {
- tablePattern, partitionPattern, err := getPartitionMatchPatternsForArg(partitions, arg)
- if err != nil {
- return nil, err
+// formatErrorsWithCount returns an error summarizing a list of errors with a count and indented lines
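+// Example output: "2 errors:" followed by each error message on its own indented line.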
+func formatErrorsWithCount(errs []error) error {
+ if len(errs) == 0 {
+ return nil
}
- // now match the partition
- var res []string
- for _, partition := range partitions {
- pattern := tablePattern + "." + partitionPattern
- if fnmatch.Match(pattern, partition, fnmatch.FNM_CASEFOLD) {
- res = append(res, partition)
- }
+ if len(errs) == 1 {
+ return errs[0]
}
- return res, nil
-}
-func getPartitionMatchPatternsForArg(partitions []string, arg string) (string, string, error) {
- var tablePattern, partitionPattern string
- parts := strings.Split(arg, ".")
- switch len(parts) {
- case 1:
- var err error
- tablePattern, partitionPattern, err = getPartitionMatchPatternsForSinglePartName(partitions, arg)
- if err != nil {
- return "", "", err
+ var b strings.Builder
+ b.WriteString(fmt.Sprintf("%d errors:\n", len(errs)))
+ for i, e := range errs {
+ b.WriteString(fmt.Sprintf(" %s", e.Error()))
+ if i < len(errs)-1 {
+ b.WriteString("\n")
}
- case 2:
- // use the args as provided
- tablePattern = parts[0]
- partitionPattern = parts[1]
- default:
- return "", "", fmt.Errorf("invalid partition name: %s", arg)
- }
- return tablePattern, partitionPattern, nil
+ }
+ return errors.New(b.String())
}
-// getPartitionMatchPatternsForSinglePartName returns the table and partition patterns for a single part name
-// e.g. if the arg is "aws*"
-func getPartitionMatchPatternsForSinglePartName(partitions []string, arg string) (string, string, error) {
- var tablePattern, partitionPattern string
- // '*' is not valid for a single part arg
- if arg == "*" {
- return "", "", fmt.Errorf("invalid partition name: %s", arg)
+// getSyntheticPartition parses a synthetic partition specification string and creates a test partition configuration.
+// This function enables testing and performance benchmarking by generating dummy data instead of collecting from real sources.
+//
+// Synthetic partition format: synthetic_cols_rows_chunk_ms
+// Example: "synthetic_50cols_2000000rows_10000chunk_100ms"
+// - 50cols: Number of columns to generate in the synthetic table
+// - 2000000rows: Total number of rows to generate
+// - 10000chunk: Number of rows per chunk (affects memory usage and processing)
+// - 100ms: Delivery interval between chunks (simulates real-time data collection)
+//
+// The function validates the format and numeric values, returning a properly configured Partition
+// with SyntheticMetadata that will be used by the collector to generate test data.
+//
+// Returns:
+// - *config.Partition: The configured synthetic partition if parsing succeeds
+// - bool: true if the argument was a valid synthetic partition, false otherwise
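+//
+// For example, `tailpipe collect synthetic_50cols_2000000rows_10000chunk_100ms` generates
+// a table with 50 columns and 2,000,000 rows, in 10,000-row chunks delivered every 100ms.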
+func getSyntheticPartition(arg string) (*config.Partition, bool) {
+ // Check if this is a synthetic partition by looking for the "synthetic_" prefix
+ if !strings.HasPrefix(arg, "synthetic_") {
+ return nil, false
}
- // check whether there is table with this name
- // partitions is a list of Unqualified names, i.e. .
- for _, partition := range partitions {
- table := strings.Split(partition, ".")[0]
- // if the arg matches a table name, set table pattern to the arg and partition pattern to *
- if fnmatch.Match(arg, table, fnmatch.FNM_CASEFOLD) {
- tablePattern = arg
- partitionPattern = "*"
- return tablePattern, partitionPattern, nil
- }
+ // Parse the synthetic partition parameters by splitting on underscores
+ // Expected format: synthetic_cols_rows_chunk_ms
+ parts := strings.Split(arg, "_")
+ if len(parts) != 5 {
+ // Invalid format - synthetic partitions must have exactly 5 parts
+ slog.Debug("Synthetic partition parsing failed: invalid format", "arg", arg, "parts", len(parts), "expected", 5)
+ return nil, false
}
- // so there IS NOT a table with this name - set table pattern to * and user provided partition name
- tablePattern = "*"
- partitionPattern = arg
- return tablePattern, partitionPattern, nil
+
+ // Extract and parse the numeric values from each part
+ // Remove the suffix to get just the numeric value
+ colsStr := strings.TrimSuffix(parts[1], "cols")
+ rowsStr := strings.TrimSuffix(parts[2], "rows")
+ chunkStr := strings.TrimSuffix(parts[3], "chunk")
+ intervalStr := strings.TrimSuffix(parts[4], "ms")
+
+ // Parse columns count - determines how many columns the synthetic table will have
+ cols, err := strconv.Atoi(colsStr)
+ if err != nil {
+ // Invalid columns value, not a synthetic partition
+ slog.Debug("Synthetic partition parsing failed: invalid columns value", "arg", arg, "colsStr", colsStr, "error", err)
+ return nil, false
+ }
+
+ // Parse rows count - total number of rows to generate
+ rows, err := strconv.Atoi(rowsStr)
+ if err != nil {
+ // Invalid rows value, not a synthetic partition
+ slog.Debug("Synthetic partition parsing failed: invalid rows value", "arg", arg, "rowsStr", rowsStr, "error", err)
+ return nil, false
+ }
+
+ // Parse chunk size - number of rows per chunk (affects memory usage and processing efficiency)
+ chunk, err := strconv.Atoi(chunkStr)
+ if err != nil {
+ // Invalid chunk value, not a synthetic partition
+ slog.Debug("Synthetic partition parsing failed: invalid chunk value", "arg", arg, "chunkStr", chunkStr, "error", err)
+ return nil, false
+ }
+
+ // Parse delivery interval - milliseconds between chunk deliveries (simulates real-time data flow)
+ interval, err := strconv.Atoi(intervalStr)
+ if err != nil {
+ // Invalid interval value, not a synthetic partition
+ slog.Debug("Synthetic partition parsing failed: invalid interval value", "arg", arg, "intervalStr", intervalStr, "error", err)
+ return nil, false
+ }
+
+ // Validate the parsed values - all must be positive integers
+ if cols <= 0 || rows <= 0 || chunk <= 0 || interval <= 0 {
+ // Invalid values, not a synthetic partition
+ slog.Debug("Synthetic partition parsing failed: invalid values", "arg", arg, "cols", cols, "rows", rows, "chunk", chunk, "interval", interval)
+ return nil, false
+ }
+
+ // Create a synthetic partition with proper HCL block structure
+ // This mimics the structure that would be created from a real HCL configuration file
+ block := &hcl.Block{
+ Type: "partition",
+ Labels: []string{"synthetic", arg},
+ }
+
+ // Create the partition configuration with synthetic metadata
+ partition := &config.Partition{
+ HclResourceImpl: modconfig.NewHclResourceImpl(block, fmt.Sprintf("partition.synthetic.%s", arg)),
+ TableName: "synthetic", // All synthetic partitions use the "synthetic" table name
+ TpIndexColumn: "'default'", // Use a default index column for synthetic data
+ SyntheticMetadata: &config.SyntheticMetadata{
+ Columns: cols, // Number of columns to generate
+ Rows: rows, // Total number of rows to generate
+ ChunkSize: chunk, // Rows per chunk
+ DeliveryIntervalMs: interval, // Milliseconds between chunk deliveries
+ },
+ }
+
+ // Set the unqualified name for the partition (used in logging and identification)
+ partition.UnqualifiedName = fmt.Sprintf("%s.%s", partition.TableName, partition.ShortName)
+
+ slog.Debug("Synthetic partition parsed successfully", "arg", arg, "columns", cols, "rows", rows, "chunkSize", chunk, "deliveryIntervalMs", interval)
+ return partition, true
}
func setExitCodeForCollectError(err error) {
@@ -266,21 +371,25 @@ func setExitCodeForCollectError(err error) {
if exitCode != 0 || err == nil {
return
}
+ // set exit code for cancellation
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
+ }
- // TODO #errors - assign exit codes https://github.com/turbot/tailpipe/issues/106
- exitCode = 1
+ exitCode = pconstants.ExitCodeCollectionFailed
}
// parse the from time
-func parseFromTime(fromArg string) (time.Time, error) {
+func parseFromToTime(arg string) (time.Time, error) {
now := time.Now()
// validate the granularity
granularity := time.Hour * 24
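+	// Truncate(24h) rounds down to a UTC day boundary, e.g. 2025-01-02T15:04:05Z -> 2025-01-02T00:00:00Z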
- fromTime, err := parse.ParseTime(fromArg, now)
+ fromTime, err := parse.ParseTime(arg, now)
if err != nil {
- return time.Time{}, fmt.Errorf("failed to parse 'from' argument: %w", err)
+ return time.Time{}, fmt.Errorf("failed to parse '%s' argument: %w", arg, err)
}
return fromTime.Truncate(granularity), nil
diff --git a/cmd/collect_test.go b/cmd/collect_test.go
index 725c27dc..73fc0c99 100644
--- a/cmd/collect_test.go
+++ b/cmd/collect_test.go
@@ -1,254 +1,140 @@
package cmd
import (
- "reflect"
"testing"
+
+ "github.com/turbot/tailpipe/internal/config"
)
-func Test_getPartition(t *testing.T) {
- type args struct {
- partitions []string
- name string
- }
+func Test_getSyntheticPartition(t *testing.T) {
tests := []struct {
- name string
- args args
- want []string
- wantErr bool
+ name string
+ arg string
+ wantPart *config.Partition
+ wantOk bool
}{
{
- name: "Invalid partition name",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "*",
- },
- wantErr: true,
- },
- {
- name: "Full partition name, exists",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log.p1",
- },
- want: []string{"aws_s3_cloudtrail_log.p1"},
- },
- {
- name: "Full partition name, does not exist",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log.p3",
+ name: "Valid synthetic partition",
+ arg: "synthetic_50cols_2000000rows_10000chunk_100ms",
+ wantOk: true,
+ wantPart: &config.Partition{
+ TableName: "synthetic",
+ SyntheticMetadata: &config.SyntheticMetadata{
+ Columns: 50,
+ Rows: 2000000,
+ ChunkSize: 10000,
+ DeliveryIntervalMs: 100,
+ },
},
- want: nil,
},
{
- name: "Table name",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "Not a synthetic partition",
+ arg: "aws_cloudtrail_log.p1",
+ wantOk: false,
},
{
- name: "Table name (exists) with wildcard",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log.*",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "Invalid synthetic partition format - too few parts",
+ arg: "synthetic_50cols_2000000rows_10000chunk",
+ wantOk: false,
},
{
- name: "Table name (exists) with ?",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log.p?",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "Invalid synthetic partition format - too many parts",
+ arg: "synthetic_50cols_2000000rows_10000chunk_100ms_extra",
+ wantOk: false,
},
{
- name: "Table name (exists) with non matching partition wildacard",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "aws_s3_cloudtrail_log.d*?",
- },
- want: nil,
+ name: "Invalid synthetic partition - non-numeric columns",
+ arg: "synthetic_abccols_2000000rows_10000chunk_100ms",
+ wantOk: false,
},
{
- name: "Table name (does not exist)) with wildcard",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- name: "foo.*",
- },
- want: nil,
+ name: "Invalid synthetic partition - non-numeric rows",
+ arg: "synthetic_50cols_abcrows_10000chunk_100ms",
+ wantOk: false,
},
{
- name: "Partition short name, exists",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
- name: "p1",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ name: "Invalid synthetic partition - non-numeric chunk",
+ arg: "synthetic_50cols_2000000rows_abcchunk_100ms",
+ wantOk: false,
},
{
- name: "Table wildcard, partition short name, exists",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
- name: "*.p1",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ name: "Invalid synthetic partition - non-numeric interval",
+ arg: "synthetic_50cols_2000000rows_10000chunk_abcms",
+ wantOk: false,
},
{
- name: "Partition short name, does not exist",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
- name: "p3",
- },
- want: nil,
+ name: "Invalid synthetic partition - zero values",
+ arg: "synthetic_0cols_2000000rows_10000chunk_100ms",
+ wantOk: false,
},
{
- name: "Table wildcard, partition short name, does not exist",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
- name: "*.p3",
- },
- want: nil,
+ name: "Invalid synthetic partition - negative values",
+ arg: "synthetic_-50cols_2000000rows_10000chunk_100ms",
+ wantOk: false,
},
{
- name: "Table wildcard, no dot",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
- name: "aws*",
- },
- want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "Invalid synthetic partition - zero interval",
+ arg: "synthetic_50cols_2000000rows_10000chunk_0ms",
+ wantOk: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := getPartitionsForArg(tt.args.partitions, tt.args.name)
- if (err != nil) != tt.wantErr {
- t.Errorf("getPartitions() error = %v, wantErr %v", err, tt.wantErr)
+ gotPart, gotOk := getSyntheticPartition(tt.arg)
+ if gotOk != tt.wantOk {
+ t.Errorf("getSyntheticPartition() gotOk = %v, want %v", gotOk, tt.wantOk)
return
}
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("getPartitions() got = %v, want %v", got, tt.want)
+ if gotOk {
+ if gotPart.TableName != tt.wantPart.TableName {
+ t.Errorf("getSyntheticPartition() TableName = %v, want %v", gotPart.TableName, tt.wantPart.TableName)
+ }
+ if gotPart.SyntheticMetadata == nil {
+ t.Errorf("getSyntheticPartition() SyntheticMetadata is nil")
+ return
+ }
+ if gotPart.SyntheticMetadata.Columns != tt.wantPart.SyntheticMetadata.Columns {
+ t.Errorf("getSyntheticPartition() Columns = %v, want %v", gotPart.SyntheticMetadata.Columns, tt.wantPart.SyntheticMetadata.Columns)
+ }
+ if gotPart.SyntheticMetadata.Rows != tt.wantPart.SyntheticMetadata.Rows {
+ t.Errorf("getSyntheticPartition() Rows = %v, want %v", gotPart.SyntheticMetadata.Rows, tt.wantPart.SyntheticMetadata.Rows)
+ }
+ if gotPart.SyntheticMetadata.ChunkSize != tt.wantPart.SyntheticMetadata.ChunkSize {
+ t.Errorf("getSyntheticPartition() ChunkSize = %v, want %v", gotPart.SyntheticMetadata.ChunkSize, tt.wantPart.SyntheticMetadata.ChunkSize)
+ }
+ if gotPart.SyntheticMetadata.DeliveryIntervalMs != tt.wantPart.SyntheticMetadata.DeliveryIntervalMs {
+ t.Errorf("getSyntheticPartition() DeliveryIntervalMs = %v, want %v", gotPart.SyntheticMetadata.DeliveryIntervalMs, tt.wantPart.SyntheticMetadata.DeliveryIntervalMs)
+ }
}
})
}
}
-func Test_getPartitionMatchPatternsForArg(t *testing.T) {
- type args struct {
- partitions []string
- arg string
- }
- tests := []struct {
- name string
- args args
- wantTablePattern string
- wantPartPattern string
- wantErr bool
+func Test_getSyntheticPartition_Logging(t *testing.T) {
+ // Test that logging works for various failure scenarios
+ testCases := []struct {
+ name string
+ arg string
}{
- {
- name: "Valid table and partition pattern",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- arg: "aws_s3_cloudtrail_log.p1",
- },
- wantTablePattern: "aws_s3_cloudtrail_log",
- wantPartPattern: "p1",
- },
- {
- name: "Wildcard partition pattern",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1"},
- arg: "aws_s3_cloudtrail_log.*",
- },
- wantTablePattern: "aws_s3_cloudtrail_log",
- wantPartPattern: "*",
- },
- {
- name: "Wildcard in table and partition both",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1"},
- arg: "aws*.*",
- },
- wantTablePattern: "aws*",
- wantPartPattern: "*",
- },
- {
- name: "Wildcard table pattern",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
- arg: "*.p1",
- },
- wantTablePattern: "*",
- wantPartPattern: "p1",
- },
- {
- name: "Invalid partition name",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- arg: "*",
- },
- wantErr: true,
- },
- {
- name: "Table exists without partition",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
- arg: "aws_s3_cloudtrail_log",
- },
- wantTablePattern: "aws_s3_cloudtrail_log",
- wantPartPattern: "*",
- },
- {
- name: "Partition only, multiple tables",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
- arg: "p1",
- },
- wantTablePattern: "*",
- wantPartPattern: "p1",
- },
- {
- name: "Invalid argument with multiple dots",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1"},
- arg: "aws.s3.cloudtrail",
- },
- wantErr: true,
- },
- {
- name: "Non-existing table name",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1"},
- arg: "non_existing_table.p1",
- },
- wantTablePattern: "non_existing_table",
- wantPartPattern: "p1",
- },
- {
- name: "Partition name does not exist",
- args: args{
- partitions: []string{"aws_s3_cloudtrail_log.p1"},
- arg: "p2",
- },
- wantTablePattern: "*",
- wantPartPattern: "p2",
- },
+ {"Invalid format", "synthetic_50cols_2000000rows_10000chunk"},
+ {"Invalid columns", "synthetic_abccols_2000000rows_10000chunk_100ms"},
+ {"Invalid rows", "synthetic_50cols_abcrows_10000chunk_100ms"},
+ {"Invalid chunk", "synthetic_50cols_2000000rows_abcchunk_100ms"},
+ {"Invalid interval", "synthetic_50cols_2000000rows_10000chunk_abcms"},
+ {"Zero values", "synthetic_0cols_2000000rows_10000chunk_100ms"},
+ {"Valid partition", "synthetic_50cols_2000000rows_10000chunk_100ms"},
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- gotTablePattern, gotPartPattern, err := getPartitionMatchPatternsForArg(tt.args.partitions, tt.args.arg)
- if (err != nil) != tt.wantErr {
- t.Errorf("getPartitionMatchPatternsForArg() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotTablePattern != tt.wantTablePattern {
- t.Errorf("getPartitionMatchPatternsForArg() gotTablePattern = %v, want %v", gotTablePattern, tt.wantTablePattern)
- }
- if gotPartPattern != tt.wantPartPattern {
- t.Errorf("getPartitionMatchPatternsForArg() gotPartPattern = %v, want %v", gotPartPattern, tt.wantPartPattern)
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // This test ensures the function doesn't panic and handles logging gracefully
+ // The actual log output would be visible when running with debug level enabled
+ _, ok := getSyntheticPartition(tc.arg)
+
+ // Just verify the function completes without error
+ // The logging is a side effect that we can't easily test without capturing log output
+ if tc.name == "Valid partition" && !ok {
+ t.Errorf("Expected valid partition to return true")
}
})
}
diff --git a/cmd/compact.go b/cmd/compact.go
index 272dd08e..d95d8e4e 100644
--- a/cmd/compact.go
+++ b/cmd/compact.go
@@ -2,28 +2,29 @@ package cmd
import (
"context"
- "errors"
"fmt"
- "golang.org/x/exp/maps"
"log/slog"
"os"
"time"
"github.com/briandowns/spinner"
"github.com/spf13/cobra"
+ "github.com/spf13/viper"
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/config"
- "github.com/turbot/tailpipe/internal/parquet"
+ "github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/database"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
+ "golang.org/x/exp/maps"
)
func compactCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "compact [table|table.partition] [flags]",
- Args: cobra.ArbitraryArgs,
+ Args: cobra.MaximumNArgs(1),
Run: runCompactCmd,
Short: "Compact multiple parquet files per day to one per day",
Long: `Compact multiple parquet files per day to one per day.`,
@@ -36,8 +37,8 @@ func compactCmd() *cobra.Command {
func runCompactCmd(cmd *cobra.Command, args []string) {
var err error
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
defer func() {
if r := recover(); r != nil {
@@ -45,66 +46,81 @@ func runCompactCmd(cmd *cobra.Command, args []string) {
}
if err != nil {
setExitCodeForCompactError(err)
- error_helpers.ShowError(ctx, err)
+
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui
+ fmt.Println("tailpipe compact command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
slog.Info("Compacting parquet files")
+ db, err := database.NewDuckDb(database.WithDuckLake())
+ error_helpers.FailOnError(err)
+ defer db.Close()
+
// verify that the provided args resolve to at least one partition
if _, err := getPartitions(args); err != nil {
error_helpers.FailOnError(err)
}
// Get table and partition patterns
- patterns, err := getPartitionPatterns(args, maps.Keys(config.GlobalConfig.Partitions))
+ patterns, err := database.GetPartitionPatternsForArgs(maps.Keys(config.GlobalConfig.Partitions), args...)
error_helpers.FailOnErrorWithMessage(err, "failed to get partition patterns")
- status, err := doCompaction(ctx, patterns...)
- if errors.Is(err, context.Canceled) {
- // clear error so we don't show it with normal error reporting
- err = nil
+ // Create backup of metadata database before starting compaction
+ if err := database.BackupDucklakeMetadata(); err != nil {
+ slog.Warn("Failed to backup metadata database", "error", err)
+ // Continue with compaction - backup failure shouldn't block the operation
}
- if err == nil {
- // print the final status
- statusString := status.VerboseString()
- if statusString == "" {
- statusString = "No files to compact."
- }
- if ctx.Err() != nil {
- // instead show the status as cancelled
- statusString = "Compaction cancelled: " + statusString
- }
+ // do the compaction
+ status, err := doCompaction(ctx, db, patterns)
+ // print the final status
+ statusString := status.VerboseString()
+ if err == nil {
fmt.Println(statusString) //nolint:forbidigo // ui
}
// defer block will show the error
}
-func doCompaction(ctx context.Context, patterns ...parquet.PartitionPattern) (*parquet.CompactionStatus, error) {
+func doCompaction(ctx context.Context, db *database.DuckDb, patterns []*database.PartitionPattern) (*database.CompactionStatus, error) {
s := spinner.New(
spinner.CharSets[14],
100*time.Millisecond,
spinner.WithHiddenCursor(true),
spinner.WithWriter(os.Stdout),
)
+ // if the flag was provided, migrate the tp_index files
+ reindex := viper.GetBool(pconstants.ArgReindex)
// start and stop spinner around the processing
s.Start()
defer s.Stop()
s.Suffix = " compacting parquet files"
-
// define func to update the spinner suffix with the number of files compacted
- var status = parquet.NewCompactionStatus()
- updateTotals := func(counts parquet.CompactionStatus) {
- status.Update(counts)
- s.Suffix = fmt.Sprintf(" compacting parquet files (%d files -> %d files)", status.Source, status.Dest)
+ var status = database.NewCompactionStatus()
+
+ updateTotals := func(updatedStatus database.CompactionStatus) {
+ status = &updatedStatus
+ if status.Message != "" {
+ s.Suffix = " compacting parquet files: " + status.Message
+ }
}
// do compaction
- err := parquet.CompactDataFiles(ctx, updateTotals, patterns...)
+ err := database.CompactDataFiles(ctx, db, updateTotals, reindex, patterns...)
return status, err
}
@@ -114,5 +130,11 @@ func setExitCodeForCompactError(err error) {
if exitCode != 0 || err == nil {
return
}
- exitCode = 1
+ // set exit code for cancellation
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
+ }
+
+ exitCode = pconstants.ExitCodeCompactFailed
}
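
`doCompaction` now threads progress from `database.CompactDataFiles` to the spinner through a status callback rather than polling. A runnable sketch of that callback pattern, with a stand-in status type in place of `database.CompactionStatus`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/briandowns/spinner"
)

// status is a stand-in for database.CompactionStatus.
type status struct{ Message string }

// compact simulates a long-running job that reports progress
// through the onUpdate callback, as CompactDataFiles does.
func compact(onUpdate func(status)) error {
	for i := 1; i <= 3; i++ {
		time.Sleep(300 * time.Millisecond)
		onUpdate(status{Message: fmt.Sprintf("batch %d of 3", i)})
	}
	return nil
}

func main() {
	s := spinner.New(spinner.CharSets[14], 100*time.Millisecond)
	s.Suffix = " compacting parquet files"
	s.Start()

	// the callback owns the latest status and updates the spinner text
	var latest status
	err := compact(func(st status) {
		latest = st
		s.Suffix = " compacting parquet files: " + st.Message
	})
	s.Stop()
	if err == nil {
		fmt.Println("final status:", latest.Message)
	}
}
```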
diff --git a/cmd/connect.go b/cmd/connect.go
index 2dcafd46..7b83a59d 100644
--- a/cmd/connect.go
+++ b/cmd/connect.go
@@ -4,8 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
- "golang.org/x/exp/maps"
- "io"
"log"
"os"
"path/filepath"
@@ -19,14 +17,14 @@ import (
"github.com/turbot/pipe-fittings/v2/cmdconfig"
"github.com/turbot/pipe-fittings/v2/connection"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
pfilepaths "github.com/turbot/pipe-fittings/v2/filepaths"
"github.com/turbot/pipe-fittings/v2/parse"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/constants"
"github.com/turbot/tailpipe/internal/database"
- "github.com/turbot/tailpipe/internal/filepaths"
- "github.com/turbot/tailpipe/internal/parquet"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
+ "golang.org/x/exp/maps"
)
// variable used to assign the output mode flag
@@ -37,8 +35,41 @@ func connectCmd() *cobra.Command {
Use: "connect [flags]",
Args: cobra.ArbitraryArgs,
Run: runConnectCmd,
- Short: "Return a connection string for a database, with a schema determined by the provided parameters",
- Long: `Return a connection string for a database, with a schema determined by the provided parameters.`,
+ Short: "Return the path of SQL script to initialise DuckDB to use the tailpipe database",
+ Long: `Return the path of SQL script to initialise DuckDB to use the tailpipe database.
+
+The generated SQL script contains:
+- DuckDB extension installations (sqlite, ducklake)
+- Database attachment configuration
+- View definitions with optional filters
+
+Examples:
+ # Basic usage - generate init script
+ tailpipe connect
+
+ # Filter by time range
+ tailpipe connect --from "2024-01-01" --to "2024-01-31"
+
+ # Filter by specific partitions
+ tailpipe connect --partition "aws_cloudtrail_log.recent"
+
+ # Filter by indexes with wildcards
+ tailpipe connect --index "prod-*" --index "staging"
+
+ # Combine multiple filters
+ tailpipe connect --from "T-7d" --partition "aws.*" --index "prod-*"
+
+ # Output as JSON
+ tailpipe connect --output json
+
+Time formats supported:
+ - ISO 8601 date: 2024-01-01
+ - ISO 8601 datetime: 2024-01-01T15:04:05
+ - RFC 3339 with timezone: 2024-01-01T15:04:05Z
+ - Relative time: T-7d, T-2Y, T-10m, T-180d
+
+The generated script can be used with DuckDB:
+ duckdb -init /path/to/generated/script.sql`,
}
// args `from` and `to` accept:
@@ -62,67 +93,153 @@ func connectCmd() *cobra.Command {
func runConnectCmd(cmd *cobra.Command, _ []string) {
var err error
- var databaseFilePath string
+ var initFilePath string
+ // use the signal-aware/cancelable context created upstream in preRunHook
ctx := cmd.Context()
defer func() {
if r := recover(); r != nil {
err = helpers.ToError(r)
}
- setExitCodeForConnectError(err)
- displayOutput(ctx, databaseFilePath, err)
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ fmt.Println("tailpipe connect command cancelled.") //nolint:forbidigo // ui output
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForConnectError(err)
+ }
+ displayOutput(ctx, initFilePath, err)
}()
- databaseFilePath, err = generateDbFile(ctx)
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ initFilePath, err = generateInitFile(ctx)
// we are done - the defer block will print either the filepath (if successful) or the error (if not)
-}
-func generateDbFile(ctx context.Context) (string, error) {
- databaseFilePath := generateTempDBFilename(config.GlobalWorkspaceProfile.GetDataDir())
+}
+func generateInitFile(ctx context.Context) (string, error) {
// cleanup the old db files if not in use
- err := cleanupOldDbFiles()
+ err := cleanupOldInitFiles()
if err != nil {
return "", err
}
- // first build the filters
+ // generate a filename to write the init sql to, inside the data dir
+ initFilePath := generateInitFilename(config.GlobalWorkspaceProfile.GetDataDir())
+
+ // get the sql to attach readonly to the database
+ commands := database.GetDucklakeInitCommands(true)
+
+ // build the filters from the to, from and index args
+ // these will be used in the view definitions
filters, err := getFilters()
if err != nil {
return "", fmt.Errorf("error building filters: %w", err)
}
- // if there are no filters, just copy the db file
- if len(filters) == 0 {
- err = copyDBFile(filepaths.TailpipeDbFilePath(), databaseFilePath)
- return databaseFilePath, err
+ // create a temporary duckdb instance to use when building the view definitions
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly())
+ if err != nil {
+ return "", fmt.Errorf("failed to create duckdb: %w", err)
}
+ defer db.Close()
- // Open a DuckDB connection (creates the file if it doesn't exist)
- db, err := database.NewDuckDb(database.WithDbFile(databaseFilePath))
+ // get the view creation SQL, with filters applied
+ viewCommands, err := database.GetCreateViewsSql(ctx, db, filters...)
+ if err != nil {
+ return "", err
+ }
+ commands = append(commands, viewCommands...)
+ // now build the init script contents
+ var str strings.Builder
+ for _, cmd := range commands {
+ str.WriteString(fmt.Sprintf("-- %s\n%s;\n\n", cmd.Description, cmd.Command))
+ }
+ // write out the init file
+ err = os.WriteFile(initFilePath, []byte(str.String()), 0644) //nolint:gosec // we want the init file to be readable
if err != nil {
- return "", fmt.Errorf("failed to open DuckDB connection: %w", err)
+ return "", fmt.Errorf("failed to write init file: %w", err)
}
- defer db.Close()
+ return initFilePath, err
+}
+
+// cleanupOldInitFiles deletes old init files (older than a day)
+func cleanupOldInitFiles() error {
+ baseDir := pfilepaths.GetDataDir()
+ log.Printf("[INFO] Cleaning up old init files in %s\n", baseDir)
+ cutoffTime := time.Now().Add(-constants.InitFileMaxAge) // Files older than 1 day
+
+ // The baseDir ("$TAILPIPE_INSTALL_DIR/data") is expected to have subdirectories for different workspace
+ // profiles (default, work, etc.). Each subdirectory may contain multiple init .sql files.
+ // Example structure:
+ // data/
+ // ├── default/
+ // │ ├── tailpipe_init_20250115182129.sql
+ // │ ├── tailpipe_init_20250115193816.sql
+ // │ └── ...
+ // ├── work/
+ // │ ├── tailpipe_init_20250115182129.sql
+ // │ ├── tailpipe_init_20250115193816.sql
+ // │ └── ...
+ // So we traverse all these subdirectories for each workspace and process the relevant files.
+ err := filepath.Walk(baseDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return fmt.Errorf("error accessing path %s: %v", path, err)
+ }
+
+ // skip directories and non-`.sql` files
+ if info.IsDir() || !strings.HasSuffix(info.Name(), ".sql") {
+ return nil
+ }
+
+ // only process `tailpipe_init_*.sql` files
+ if !strings.HasPrefix(info.Name(), "tailpipe_init_") {
+ return nil
+ }
+
+ // check if the file is older than the cutoff time
+ if info.ModTime().After(cutoffTime) {
+ log.Printf("[DEBUG] Skipping deleting file %s(%s) as it is not older than %s\n", path, info.ModTime().String(), cutoffTime)
+ return nil
+ }
+
+ err = os.Remove(path)
+ if err != nil {
+ log.Printf("[INFO] Failed to delete db file %s: %v", path, err)
+ } else {
+ log.Printf("[DEBUG] Cleaned up old unused db file: %s\n", path)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+ return nil
- err = database.AddTableViews(ctx, db, filters...)
- return databaseFilePath, err
}
-func displayOutput(ctx context.Context, databaseFilePath string, err error) {
+func displayOutput(ctx context.Context, initFilePath string, err error) {
switch viper.GetString(pconstants.ArgOutput) {
case pconstants.OutputFormatText:
if err == nil {
// output the filepath
- fmt.Println(databaseFilePath) //nolint:forbidigo // ui output
+ fmt.Println(initFilePath) //nolint:forbidigo // ui output
} else {
error_helpers.ShowError(ctx, err)
}
case pconstants.OutputFormatJSON:
res := connection.TailpipeConnectResponse{
- DatabaseFilepath: databaseFilePath,
+ InitScriptPath: initFilePath,
}
if err != nil {
res.Error = err.Error()
@@ -140,6 +257,8 @@ func displayOutput(ctx context.Context, databaseFilePath string, err error) {
}
}
+// getFilters builds a set of SQL filters based on the provided command line args
+// supported args are `from`, `to`, `partition` and `index`
func getFilters() ([]string, error) {
var result []string
if viper.IsSet(pconstants.ArgFrom) {
@@ -152,9 +271,8 @@ func getFilters() ([]string, error) {
return nil, fmt.Errorf("invalid date format for 'from': %s", from)
}
// format as SQL timestamp
- fromDate := t.Format(time.DateOnly)
fromTimestamp := t.Format(time.DateTime)
- result = append(result, fmt.Sprintf("tp_date >= date '%s' and tp_timestamp >= timestamp '%s'", fromDate, fromTimestamp))
+ result = append(result, fmt.Sprintf("tp_timestamp >= timestamp '%s'", fromTimestamp))
}
if viper.IsSet(pconstants.ArgTo) {
to := viper.GetString(pconstants.ArgTo)
@@ -166,9 +284,8 @@ func getFilters() ([]string, error) {
return nil, fmt.Errorf("invalid date format for 'to': %s", to)
}
// format as SQL timestamp
- toDate := t.Format(time.DateOnly)
toTimestamp := t.Format(time.DateTime)
- result = append(result, fmt.Sprintf("tp_date <= date '%s' and tp_timestamp <= timestamp '%s'", toDate, toTimestamp))
+ result = append(result, fmt.Sprintf("tp_timestamp <= timestamp '%s'", toTimestamp))
}
if viper.IsSet(pconstants.ArgPartition) {
// we have loaded tailpipe config by this time
@@ -193,115 +310,10 @@ func getFilters() ([]string, error) {
return result, nil
}
-// generateTempDBFilename generates a temporary filename with a timestamp
-func generateTempDBFilename(dataDir string) string {
- timestamp := time.Now().Format("20060102150405") // e.g., 20241031103000
- return filepath.Join(dataDir, fmt.Sprintf("tailpipe_%s.db", timestamp))
-}
-
-func setExitCodeForConnectError(err error) {
- // if exit code already set, leave as is
- // NOTE: DO NOT set exit code if the output format is JSON
- if exitCode != 0 || err == nil || viper.GetString(pconstants.ArgOutput) == pconstants.OutputFormatJSON {
- return
- }
-
- exitCode = 1
-}
-
-// copyDBFile copies the source database file to the destination
-func copyDBFile(src, dst string) error {
- sourceFile, err := os.Open(src)
- if err != nil {
- return err
- }
- defer sourceFile.Close()
-
- destFile, err := os.Create(dst)
- if err != nil {
- return err
- }
- defer destFile.Close()
-
- _, err = io.Copy(destFile, sourceFile)
- return err
-}
-
-// cleanupOldDbFiles deletes old db files(older than a day) that are not in use
-func cleanupOldDbFiles() error {
- baseDir := pfilepaths.GetDataDir()
- log.Printf("[INFO] Cleaning up old db files in %s\n", baseDir)
- cutoffTime := time.Now().Add(-constants.DbFileMaxAge) // Files older than 1 day
-
- // The baseDir ("$TAILPIPE_INSTALL_DIR/data") is expected to have subdirectories for different workspace
- // profiles(default, work etc). Each subdirectory may contain multiple .db files.
- // Example structure:
- // data/
- // ├── default/
- // │ ├── tailpipe_20250115182129.db
- // │ ├── tailpipe_20250115193816.db
- // │ ├── tailpipe.db
- // │ └── ...
- // ├── work/
- // │ ├── tailpipe_20250115182129.db
- // │ ├── tailpipe_20250115193816.db
- // │ ├── tailpipe.db
- // │ └── ...
- // So we traverse all these subdirectories for each workspace and process the relevant files.
- err := filepath.Walk(baseDir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return fmt.Errorf("error accessing path %s: %v", path, err)
- }
-
- // skip directories and non-`.db` files
- if info.IsDir() || !strings.HasSuffix(info.Name(), ".db") {
- return nil
- }
-
- // skip `tailpipe.db` file
- if info.Name() == "tailpipe.db" {
- return nil
- }
-
- // only process `tailpipe_*.db` files
- if !strings.HasPrefix(info.Name(), "tailpipe_") {
- return nil
- }
-
- // check if the file is older than the cutoff time
- if info.ModTime().After(cutoffTime) {
- log.Printf("[DEBUG] Skipping deleting file %s(%s) as it is not older than %s\n", path, info.ModTime().String(), cutoffTime)
- return nil
- }
-
- // check for a lock on the file
- db, err := database.NewDuckDb(database.WithDbFile(path))
- if err != nil {
- log.Printf("[INFO] Skipping deletion of file %s due to error: %v\n", path, err)
- return nil
- }
- defer db.Close()
-
- // if no lock, delete the file
- err = os.Remove(path)
- if err != nil {
- log.Printf("[INFO] Failed to delete db file %s: %v", path, err)
- } else {
- log.Printf("[DEBUG] Cleaned up old unused db file: %s\n", path)
- }
-
- return nil
- })
-
- if err != nil {
- return err
- }
- return nil
-}
-
+// getPartitionSqlFilters builds SQL filters for the provided partition args
func getPartitionSqlFilters(partitionArgs []string, availablePartitions []string) (string, error) {
- // Get table and partition patterns using getPartitionPatterns
- patterns, err := getPartitionPatterns(partitionArgs, availablePartitions)
+ // Get table and partition patterns using GetPartitionPatternsForArgs
+ patterns, err := database.GetPartitionPatternsForArgs(availablePartitions, partitionArgs...)
if err != nil {
return "", fmt.Errorf("error processing partition args: %w", err)
}
@@ -357,6 +369,7 @@ func getPartitionSqlFilters(partitionArgs []string, availablePartitions []string
return sqlFilters, nil
}
+// getIndexSqlFilters builds SQL filters for the provided index args
func getIndexSqlFilters(indexArgs []string) (string, error) {
// Return empty if no indexes provided
if len(indexArgs) == 0 {
@@ -385,30 +398,34 @@ func getIndexSqlFilters(indexArgs []string) (string, error) {
return sqlFilter, nil
}
-// getPartitionPatterns returns the table and partition patterns for the given partition args
-func getPartitionPatterns(partitionArgs []string, partitions []string) ([]parquet.PartitionPattern, error) {
- var res []parquet.PartitionPattern
- for _, arg := range partitionArgs {
- tablePattern, partitionPattern, err := getPartitionMatchPatternsForArg(partitions, arg)
- if err != nil {
- return nil, fmt.Errorf("error processing partition arg '%s': %w", arg, err)
- }
-
- res = append(res, parquet.PartitionPattern{Table: tablePattern, Partition: partitionPattern})
- }
-
- return res, nil
-}
-
// convert partition patterns with '*' wildcards to SQL '%' wildcards
-func replaceWildcards(patterns []parquet.PartitionPattern) []parquet.PartitionPattern {
- updatedPatterns := make([]parquet.PartitionPattern, len(patterns))
+func replaceWildcards(patterns []*database.PartitionPattern) []*database.PartitionPattern {
+ updatedPatterns := make([]*database.PartitionPattern, len(patterns))
for i, p := range patterns {
- updatedPatterns[i] = parquet.PartitionPattern{
+ updatedPatterns[i] = &database.PartitionPattern{
Table: strings.ReplaceAll(p.Table, "*", "%"),
Partition: strings.ReplaceAll(p.Partition, "*", "%")}
}
return updatedPatterns
}
+
+func setExitCodeForConnectError(err error) {
+ // if exit code already set, leave as is
+ // NOTE: DO NOT set exit code if the output format is JSON
+ if exitCode != 0 || err == nil || viper.GetString(pconstants.ArgOutput) == pconstants.OutputFormatJSON {
+ return
+ }
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
+ }
+ exitCode = pconstants.ExitCodeConnectFailed
+}
+
+// generateInitFilename generates a temporary filename with a timestamp
+func generateInitFilename(dataDir string) string {
+ timestamp := time.Now().Format("20060102150405") // e.g., 20241031103000
+ return filepath.Join(dataDir, fmt.Sprintf("tailpipe_init_%s.sql", timestamp))
+}
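
The init script written by `generateInitFile` renders each command as a `--` comment line followed by the statement and a blank line. A sketch of that rendering, assuming a simple command struct shaped like the `cmd.Description`/`cmd.Command` fields used above:

```go
package main

import (
	"fmt"
	"strings"
)

// sqlCommand mirrors the shape implied by cmd.Description / cmd.Command;
// the real type is defined in internal/database.
type sqlCommand struct {
	Description string
	Command     string
}

// renderInitScript produces the "-- description\nstatement;\n\n" layout
// the connect command writes to tailpipe_init_<timestamp>.sql.
func renderInitScript(commands []sqlCommand) string {
	var b strings.Builder
	for _, c := range commands {
		b.WriteString(fmt.Sprintf("-- %s\n%s;\n\n", c.Description, c.Command))
	}
	return b.String()
}

func main() {
	script := renderInitScript([]sqlCommand{
		{"install the sqlite extension", "install sqlite"},
		{"create filtered view", "create or replace view logs as select * from src where tp_timestamp >= timestamp '2024-01-01 00:00:00'"},
	})
	fmt.Print(script)
}
```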
diff --git a/cmd/connect_test.go b/cmd/connect_test.go
deleted file mode 100644
index 62fb47ec..00000000
--- a/cmd/connect_test.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package cmd
-
-import (
- "testing"
-)
-
-func Test_getPartitionSqlFilters(t *testing.T) {
- tests := []struct {
- name string
- partitions []string
- args []string
- wantFilters string
- wantErr bool
- }{
- {
- name: "Basic partition filters with wildcard",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- "aws_cloudtrail_log.p2",
- "github_audit_log.p1",
- },
- args: []string{"aws_cloudtrail_log.*", "github_audit_log.p1"},
- wantFilters: "tp_table = 'aws_cloudtrail_log' OR " +
- "(tp_table = 'github_audit_log' and tp_partition = 'p1')",
- wantErr: false,
- },
- {
- name: "Wildcard in table and exact partition",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- "sys_logs.p2",
- },
- args: []string{"aws*.p1", "sys_logs.*"},
- wantFilters: "(tp_table like 'aws%' and tp_partition = 'p1') OR " +
- "tp_table = 'sys_logs'",
- wantErr: false,
- },
- {
- name: "Exact table and partition",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- },
- args: []string{"aws_cloudtrail_log.p1"},
- wantFilters: "(tp_table = 'aws_cloudtrail_log' and tp_partition = 'p1')",
- wantErr: false,
- },
- {
- name: "Partition with full wildcard",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- },
- args: []string{"aws_cloudtrail_log.*"},
- wantFilters: "tp_table = 'aws_cloudtrail_log'",
- wantErr: false,
- },
- {
- name: "Table with full wildcard",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- },
- args: []string{"*.p1"},
- wantFilters: "tp_partition = 'p1'",
- wantErr: false,
- },
- {
- name: "Both table and partition with full wildcards",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- },
- args: []string{"*.*"},
- wantFilters: "",
- wantErr: false,
- },
- {
- name: "Empty input",
- partitions: []string{"aws_cloudtrail_log.p1"},
- args: []string{},
- wantFilters: "",
- wantErr: false,
- },
- {
- name: "Multiple wildcards in table and partition",
- partitions: []string{
- "aws_cloudtrail_log.p1",
- "sys_logs.p2",
- },
- args: []string{"aws*log.p*"},
- wantFilters: "(tp_table like 'aws%log' and tp_partition like 'p%')",
- wantErr: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- gotFilters, err := getPartitionSqlFilters(tt.args, tt.partitions)
- if (err != nil) != tt.wantErr {
- t.Errorf("getPartitionSqlFilters() name = %s error = %v, wantErr %v", tt.name, err, tt.wantErr)
- return
- }
- if gotFilters != tt.wantFilters {
- t.Errorf("getPartitionSqlFilters() name = %s got = %v, want %v", tt.name, gotFilters, tt.wantFilters)
- }
- })
- }
-}
-
-func Test_getIndexSqlFilters(t *testing.T) {
- tests := []struct {
- name string
- indexArgs []string
- wantFilters string
- wantErr bool
- }{
- {
- name: "Multiple indexes with wildcards and exact values",
- indexArgs: []string{"1234*", "456789012345", "98*76"},
- wantFilters: "cast(tp_index as varchar) like '1234%' OR " +
- "tp_index = '456789012345' OR " +
- "cast(tp_index as varchar) like '98%76'",
- wantErr: false,
- },
- {
- name: "Single index with wildcard",
- indexArgs: []string{"12345678*"},
- wantFilters: "cast(tp_index as varchar) like '12345678%'",
- wantErr: false,
- },
- {
- name: "No input provided",
- indexArgs: []string{},
- wantFilters: "",
- wantErr: false,
- },
- {
- name: "Fully wildcarded index",
- indexArgs: []string{"*"},
- wantFilters: "",
- wantErr: false,
- },
- {
- name: "Exact numeric index",
- indexArgs: []string{"123456789012"},
- wantFilters: "tp_index = '123456789012'",
- wantErr: false,
- },
- {
- name: "Mixed patterns",
- indexArgs: []string{"12*", "3456789", "9*76"},
- wantFilters: "cast(tp_index as varchar) like '12%' OR " +
- "tp_index = '3456789' OR " +
- "cast(tp_index as varchar) like '9%76'",
- wantErr: false,
- },
- {
- name: "Multiple exact values",
- indexArgs: []string{"123456789012", "987654321098"},
- wantFilters: "tp_index = '123456789012' OR tp_index = '987654321098'",
- wantErr: false,
- },
- {
- name: "Leading and trailing spaces in exact value",
- indexArgs: []string{" 123456789012 "},
- wantFilters: "tp_index = ' 123456789012 '", // Spaces preserved
- wantErr: false,
- },
- {
- name: "Combination of wildcards and exact values",
- indexArgs: []string{"*456*", "1234", "98*76"},
- wantFilters: "cast(tp_index as varchar) like '%456%' OR " +
- "tp_index = '1234' OR " +
- "cast(tp_index as varchar) like '98%76'",
- wantErr: false,
- },
- {
- name: "Empty string as index",
- indexArgs: []string{""},
- wantFilters: "tp_index = ''",
- wantErr: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- gotFilters, err := getIndexSqlFilters(tt.indexArgs)
- if (err != nil) != tt.wantErr {
- t.Errorf("getIndexSqlFilters() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotFilters != tt.wantFilters {
- t.Errorf("getIndexSqlFilters() got = %v, want %v", gotFilters, tt.wantFilters)
- }
- })
- }
-}
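
The deleted tests remain useful documentation for how `*` wildcards in partition and index args become SQL predicates: an exact value becomes an equality check, a wildcarded value becomes a `like` pattern with `*` mapped to `%`, and a bare `*` drops the filter entirely. A sketch of the index-arg translation they asserted:

```go
package main

import (
	"fmt"
	"strings"
)

// indexFilter reproduces the behaviour the deleted tests pinned down:
// exact values become equality checks, wildcarded values become
// like patterns, and a bare "*" matches everything (no filter).
func indexFilter(index string) string {
	if index == "*" {
		return "" // fully wildcarded: no filter needed
	}
	if strings.Contains(index, "*") {
		return fmt.Sprintf("cast(tp_index as varchar) like '%s'", strings.ReplaceAll(index, "*", "%"))
	}
	return fmt.Sprintf("tp_index = '%s'", index)
}

func main() {
	for _, arg := range []string{"1234*", "456789012345", "*"} {
		fmt.Printf("%-14q -> %q\n", arg, indexFilter(arg))
	}
}
```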
diff --git a/cmd/format.go b/cmd/format.go
index 15165e23..94f8759d 100644
--- a/cmd/format.go
+++ b/cmd/format.go
@@ -1,8 +1,8 @@
package cmd
import (
- "context"
"fmt"
+ "os"
"strings"
"github.com/spf13/cobra"
@@ -10,12 +10,12 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/pipe-fittings/v2/printers"
"github.com/turbot/pipe-fittings/v2/utils"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/constants"
"github.com/turbot/tailpipe/internal/display"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
)
// variable used to assign the output mode flag
@@ -67,18 +67,32 @@ func formatListCmd() *cobra.Command {
}
func runFormatListCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runFormatListCmd start")
+ var err error
defer func() {
utils.LogTime("runFormatListCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe format list command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForFormatError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// Get Resources
resources, err := display.ListFormatResources(ctx)
error_helpers.FailOnError(err)
@@ -91,8 +105,8 @@ func runFormatListCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
@@ -116,15 +130,23 @@ func formatShowCmd() *cobra.Command {
}
func runFormatShowCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runFormatShowCmd start")
+ var err error
defer func() {
utils.LogTime("runFormatShowCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe format show command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForFormatError(err)
}
}()
@@ -141,7 +163,21 @@ func runFormatShowCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
+ }
+}
+
+func setExitCodeForFormatError(err error) {
+ // set exit code only if an error occurred and no exit code is already set
+ if exitCode != 0 || err == nil {
+ return
+ }
+ // set exit code for cancellation
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
}
+ // no dedicated format exit code exists yet; use generic nonzero failure
+ exitCode = 1
}
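
`setExitCodeForFormatError` follows the same three-step convention now repeated across commands: keep an already-set exit code, map cancellation to its dedicated code, otherwise fall back to a command-specific failure code. A generic sketch of that convention, using `errors.Is(err, context.Canceled)` as an approximation of `error_helpers.IsCancelledError` and a stand-in constant:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

const exitCodeOperationCancelled = 42 // stand-in for pconstants.ExitCodeOperationCancelled

var exitCode int

// setExitCode applies the convention used by the per-command helpers:
// never overwrite an exit code that is already set, map cancellation
// to its dedicated code, otherwise use the supplied failure code.
func setExitCode(err error, failureCode int) {
	if exitCode != 0 || err == nil {
		return
	}
	if errors.Is(err, context.Canceled) {
		exitCode = exitCodeOperationCancelled
		return
	}
	exitCode = failureCode
}

func main() {
	setExitCode(context.Canceled, 1)
	fmt.Println(exitCode) // 42

	setExitCode(errors.New("boom"), 1)
	fmt.Println(exitCode) // still 42 - the first code wins
}
```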
diff --git a/cmd/partition.go b/cmd/partition.go
index a57b4d0f..7faf8be2 100644
--- a/cmd/partition.go
+++ b/cmd/partition.go
@@ -2,6 +2,7 @@ package cmd
import (
"context"
+ "errors"
"fmt"
"log/slog"
"os"
@@ -14,15 +15,16 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/pipe-fittings/v2/printers"
+ "github.com/turbot/pipe-fittings/v2/statushooks"
"github.com/turbot/pipe-fittings/v2/utils"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/database"
"github.com/turbot/tailpipe/internal/display"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
"github.com/turbot/tailpipe/internal/filepaths"
- "github.com/turbot/tailpipe/internal/parquet"
"github.com/turbot/tailpipe/internal/plugin"
)
@@ -73,20 +75,38 @@ func partitionListCmd() *cobra.Command {
}
func runPartitionListCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runPartitionListCmd start")
+ var err error
defer func() {
utils.LogTime("runPartitionListCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("taillpipe partition list command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPartitionError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly())
+ error_helpers.FailOnError(err)
+ defer db.Close()
+
// Get Resources
- resources, err := display.ListPartitionResources(ctx)
+ resources, err := display.ListPartitionResources(ctx, db)
error_helpers.FailOnError(err)
printableResource := display.NewPrintableResource(resources...)
@@ -97,15 +117,15 @@ func runPartitionListCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
// Show Partition
func partitionShowCmd() *cobra.Command {
var cmd = &cobra.Command{
- Use: "show",
+ Use: "show ",
Args: cobra.ExactArgs(1),
Run: runPartitionShowCmd,
Short: "Show details for a specific partition",
@@ -123,21 +143,53 @@ func partitionShowCmd() *cobra.Command {
}
func runPartitionShowCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
+ ctx := cmd.Context()
utils.LogTime("runPartitionShowCmd start")
+ var err error
defer func() {
utils.LogTime("runPartitionShowCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe partition show command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPartitionError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ // open a readonly db connection
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly())
+ error_helpers.FailOnError(err)
+ defer db.Close()
+
// Get Resources
- partitionName := args[0]
- resource, err := display.GetPartitionResource(partitionName)
+
+ partitions, err := getPartitions(args)
+ error_helpers.FailOnError(err)
+ // if no partitions are found, return an error
+ if len(partitions) == 0 {
+ error_helpers.FailOnError(fmt.Errorf("no partitions found matching %s", args[0]))
+ }
+ // if more than one partition is found, return an error
+ if len(partitions) > 1 {
+ error_helpers.FailOnError(fmt.Errorf("multiple partitions found matching %s, please specify a more specific partition name", args[0]))
+ }
+
+ resource, err := display.GetPartitionResource(cmd.Context(), partitions[0], db)
error_helpers.FailOnError(err)
printableResource := display.NewPrintableResource(resource)
@@ -148,14 +200,14 @@ func runPartitionShowCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
func partitionDeleteCmd() *cobra.Command {
cmd := &cobra.Command{
- Use: "delete ",
+ Use: "delete ",
Args: cobra.ExactArgs(1),
Run: runPartitionDeleteCmd,
Short: "Delete a partition for the specified period",
@@ -171,32 +223,67 @@ func partitionDeleteCmd() *cobra.Command {
cmdconfig.OnCmd(cmd).
AddStringFlag(pconstants.ArgFrom, "", "Specify the start time").
+ AddStringFlag(pconstants.ArgTo, "", "Specify the end time").
AddBoolFlag(pconstants.ArgForce, false, "Force delete without confirmation")
return cmd
}
func runPartitionDeleteCmd(cmd *cobra.Command, args []string) {
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
ctx := cmd.Context()
-
+ var err error
defer func() {
if r := recover(); r != nil {
- exitCode = pconstants.ExitCodeUnknownErrorPanic
- error_helpers.FailOnError(helpers.ToError(r))
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("Partition cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPartitionError(err)
}
}()
- // arg `fromTime` accepts ISO 8601 date(2024-01-01), ISO 8601 datetime(2006-01-02T15:04:05), ISO 8601 datetime with ms(2006-01-02T15:04:05.000),
- // RFC 3339 datetime with timezone(2006-01-02T15:04:05Z07:00) and relative time formats(T-2Y, T-10m, T-10W, T-180d, T-9H, T-10M)
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+ // args `fromTime` and `toTime` accept:
+ // - ISO 8601 date(2024-01-01)
+ // - ISO 8601 datetime(2006-01-02T15:04:05)
+ // - ISO 8601 datetime with ms(2006-01-02T15:04:05.000)
+ // - RFC 3339 datetime with timezone(2006-01-02T15:04:05Z07:00)
+ // - relative time formats(T-2Y, T-10m, T-10W, T-180d, T-9H, T-10M)
var fromTime time.Time
- var fromStr string
+ // toTime defaults to now, but can be set to a specific time
+ toTime := time.Now()
+ // strings used to describe the time range in the confirmation message
+ var fromStr, toStr string
+
if viper.IsSet(pconstants.ArgFrom) {
var err error
- fromTime, err = parseFromTime(viper.GetString(pconstants.ArgFrom))
- error_helpers.FailOnError(err)
+ fromTime, err = parseFromToTime(viper.GetString(pconstants.ArgFrom))
+ error_helpers.FailOnErrorWithMessage(err, "invalid from time")
fromStr = fmt.Sprintf(" from %s", fromTime.Format(time.DateOnly))
}
+ if viper.IsSet(pconstants.ArgTo) {
+ var err error
+ toTime, err = parseFromToTime(viper.GetString(pconstants.ArgTo))
+ error_helpers.FailOnErrorWithMessage(err, "invalid to time")
+ }
+ toStr = fmt.Sprintf(" to %s", toTime.Format(time.DateOnly))
+ if toTime.Before(fromTime) {
+ error_helpers.FailOnError(fmt.Errorf("to time %s cannot be before from time %s", toTime.Format(time.RFC3339), fromTime.Format(time.RFC3339)))
+ }
+ // retrieve the partition
partitionName := args[0]
partition, ok := config.GlobalConfig.Partitions[partitionName]
if !ok {
@@ -204,16 +291,37 @@ func runPartitionDeleteCmd(cmd *cobra.Command, args []string) {
}
if !viper.GetBool(pconstants.ArgForce) {
- // confirm deletion
- msg := fmt.Sprintf("Are you sure you want to delete partition %s%s?", partitionName, fromStr)
+ msg := fmt.Sprintf("Are you sure you want to delete partition %s%s%s?", partitionName, fromStr, toStr)
if !utils.UserConfirmationWithDefault(msg, true) {
fmt.Println("Deletion cancelled") //nolint:forbidigo//expected output
return
}
}
-
- filesDeleted, err := parquet.DeleteParquetFiles(partition, fromTime)
+ db, err := database.NewDuckDb(database.WithDuckLake())
error_helpers.FailOnError(err)
+ defer db.Close()
+
+ // Create backup before deletion
+ slog.Info("Creating backup before partition deletion", "partition", partitionName)
+ if err := database.BackupDucklakeMetadata(); err != nil {
+ slog.Warn("Failed to create backup before partition deletion", "error", err)
+ // Continue with deletion - backup failure should not prevent deletion
+ }
+
+ // show spinner while deleting the partition
+ spinner := statushooks.NewStatusSpinnerHook()
+ spinner.SetStatus(fmt.Sprintf("Deleting partition %s", partition.TableName))
+ spinner.Show()
+ rowsDeleted, err := database.DeletePartition(ctx, partition, fromTime, toTime, db)
+ spinner.Hide()
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ } else {
+ exitCode = 1
+ }
+ error_helpers.FailOnError(err)
+ }
// build the collection state path
collectionStatePath := partition.CollectionStatePath(config.GlobalWorkspaceProfile.GetCollectionDir())
@@ -222,6 +330,7 @@ func runPartitionDeleteCmd(cmd *cobra.Command, args []string) {
if fromTime.IsZero() {
err := os.Remove(collectionStatePath)
if err != nil && !os.IsNotExist(err) {
+ exitCode = 1
error_helpers.FailOnError(fmt.Errorf("failed to delete collection state file: %s", err.Error()))
}
} else {
@@ -230,24 +339,43 @@ func runPartitionDeleteCmd(cmd *cobra.Command, args []string) {
pluginManager := plugin.NewPluginManager()
defer pluginManager.Close()
err = pluginManager.UpdateCollectionState(ctx, partition, fromTime, collectionStatePath)
- error_helpers.FailOnError(err)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ } else {
+ exitCode = 1
+ }
+ error_helpers.FailOnError(err)
+ }
}
// now prune the collection folders
err = filepaths.PruneTree(config.GlobalWorkspaceProfile.GetCollectionDir())
if err != nil {
- slog.Warn("DeleteParquetFiles failed to prune empty collection folders", "error", err)
+ slog.Warn("DeletePartition failed to prune empty collection folders", "error", err)
}
- msg := buildStatusMessage(filesDeleted, partitionName, fromStr)
+ msg := buildStatusMessage(rowsDeleted, partitionName, fromStr)
fmt.Println(msg) //nolint:forbidigo//expected output
}
-func buildStatusMessage(filesDeleted int, partition string, fromStr string) interface{} {
- var deletedStr = " (no parquet files deleted)"
- if filesDeleted > 0 {
- deletedStr = fmt.Sprintf(" (deleted %d parquet %s)", filesDeleted, utils.Pluralize("file", filesDeleted))
+func buildStatusMessage(rowsDeleted int, partition string, fromStr string) interface{} {
+ var deletedStr = " (nothing deleted)"
+ if rowsDeleted > 0 {
+ deletedStr = fmt.Sprintf(" (deleted %d %s)", rowsDeleted, utils.Pluralize("rows", rowsDeleted))
}
return fmt.Sprintf("\nDeleted partition '%s'%s%s.\n", partition, fromStr, deletedStr)
}
+
+func setExitCodeForPartitionError(err error) {
+ if exitCode != 0 || err == nil {
+ return
+ }
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
+ }
+ // no dedicated partition exit code; use generic nonzero failure
+ exitCode = 1
+}
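
Partition delete now accepts both `--from` and `--to`, defaults the end of the range to now, and rejects ranges where `to` precedes `from`. A sketch of that validation flow; `parseArgTime` is a simplified stand-in for `parseFromToTime`, which also understands relative formats such as `T-7d`:

```go
package main

import (
	"fmt"
	"time"
)

// parseArgTime is a simplified stand-in for parseFromToTime; the real
// helper also understands relative formats such as T-7d and T-2Y.
func parseArgTime(s string) (time.Time, error) {
	for _, layout := range []string{time.DateOnly, "2006-01-02T15:04:05", time.RFC3339} {
		if t, err := time.Parse(layout, s); err == nil {
			return t, nil
		}
	}
	return time.Time{}, fmt.Errorf("invalid time: %s", s)
}

func deleteRange(fromArg, toArg string) error {
	var from time.Time // zero value means "from the beginning"
	to := time.Now()   // to defaults to now, as in runPartitionDeleteCmd

	var err error
	if fromArg != "" {
		if from, err = parseArgTime(fromArg); err != nil {
			return fmt.Errorf("invalid from time: %w", err)
		}
	}
	if toArg != "" {
		if to, err = parseArgTime(toArg); err != nil {
			return fmt.Errorf("invalid to time: %w", err)
		}
	}
	if to.Before(from) {
		return fmt.Errorf("to time %s cannot be before from time %s",
			to.Format(time.RFC3339), from.Format(time.RFC3339))
	}
	// ... proceed with confirmation and database.DeletePartition ...
	return nil
}

func main() {
	fmt.Println(deleteRange("2024-02-01", "2024-01-01")) // to before from: error
}
```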
diff --git a/cmd/plugin.go b/cmd/plugin.go
index 55878629..25879a51 100644
--- a/cmd/plugin.go
+++ b/cmd/plugin.go
@@ -3,6 +3,7 @@ package cmd
import (
"context"
"fmt"
+ "os"
"strings"
"sync"
"time"
@@ -14,8 +15,7 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
+ "github.com/turbot/pipe-fittings/v2/filepaths"
"github.com/turbot/pipe-fittings/v2/installationstate"
pociinstaller "github.com/turbot/pipe-fittings/v2/ociinstaller"
pplugin "github.com/turbot/pipe-fittings/v2/plugin"
@@ -23,9 +23,11 @@ import (
"github.com/turbot/pipe-fittings/v2/statushooks"
"github.com/turbot/pipe-fittings/v2/utils"
"github.com/turbot/pipe-fittings/v2/versionfile"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/constants"
"github.com/turbot/tailpipe/internal/display"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
"github.com/turbot/tailpipe/internal/ociinstaller"
"github.com/turbot/tailpipe/internal/plugin"
)
@@ -182,10 +184,9 @@ Examples:
// Show plugin
func pluginShowCmd() *cobra.Command {
var cmd = &cobra.Command{
- Use: "show ",
- Args: cobra.ExactArgs(1),
- Run: runPluginShowCmd,
- // TODO improve descriptions https://github.com/turbot/tailpipe/issues/111
+ Use: "show <plugin>",
+ Args: cobra.ExactArgs(1),
+ Run: runPluginShowCmd,
Short: "Show details of a plugin",
Long: `Show the tables and sources provided by plugin`,
}
@@ -236,16 +237,37 @@ var pluginInstallSteps = []string{
}
func runPluginInstallCmd(cmd *cobra.Command, args []string) {
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
ctx := cmd.Context()
utils.LogTime("runPluginInstallCmd install")
+ var err error
defer func() {
utils.LogTime("runPluginInstallCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe plugin install command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPluginError(err, 1)
}
}()
+ // Clean up plugin temporary directories from previous crashes/interrupted installations
+ filepaths.CleanupPluginTempDirs()
+
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// args to 'plugin install' -- one or more plugins to install
// plugin names can be simple names for "standard" plugins, constraint suffixed names
// or full refs to the OCI image
@@ -287,7 +309,7 @@ func runPluginInstallCmd(cmd *cobra.Command, args []string) {
report := &pplugin.PluginInstallReport{
Plugin: pluginName,
Skipped: true,
- SkipReason: pconstants.InstallMessagePluginNotFound,
+ SkipReason: pconstants.InstallMessagePluginNotDistributedViaHub,
IsUpdateReport: false,
}
reportChannel <- report
@@ -363,16 +385,37 @@ func doPluginInstall(ctx context.Context, bar *uiprogress.Bar, pluginName string
}
func runPluginUpdateCmd(cmd *cobra.Command, args []string) {
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
ctx := cmd.Context()
utils.LogTime("runPluginUpdateCmd start")
+ var err error
defer func() {
utils.LogTime("runPluginUpdateCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe plugin update command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPluginError(err, 1)
}
}()
+ // Clean up plugin temporary directories from previous crashes/interrupted installations
+ filepaths.CleanupPluginTempDirs()
+
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// args to 'plugin update' -- one or more plugins to update
// These can be simple names for "standard" plugins, constraint suffixed names
// or full refs to the OCI image
@@ -441,6 +484,20 @@ func runPluginUpdateCmd(cmd *cobra.Command, args []string) {
return
}
} else {
+ // Plugin not installed locally. If it's a hub plugin, check if it exists in hub.
+ org, name, constraint := ref.GetOrgNameAndStream()
+ if ref.IsFromTurbotHub() {
+ if _, err := pplugin.GetLatestPluginVersionByConstraint(ctx, state.InstallationID, org, name, constraint); err != nil {
+ updateResults = append(updateResults, &pplugin.PluginInstallReport{
+ Skipped: true,
+ Plugin: p,
+ SkipReason: pconstants.InstallMessagePluginNotDistributedViaHub,
+ IsUpdateReport: true,
+ })
+ continue
+ }
+ }
+ // Exists on hub (or not a hub plugin) but not installed locally
exitCode = pconstants.ExitCodePluginNotFound
updateResults = append(updateResults, &pplugin.PluginInstallReport{
Skipped: true,
@@ -609,20 +666,46 @@ func installPlugin(ctx context.Context, resolvedPlugin pplugin.ResolvedPluginVer
}
func runPluginUninstallCmd(cmd *cobra.Command, args []string) {
- // setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
+ ctx := cmd.Context()
utils.LogTime("runPluginUninstallCmd uninstall")
-
+ var err error
defer func() {
utils.LogTime("runPluginUninstallCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe plugin uninstall command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPluginError(err, 1)
}
}()
+ // Clean up plugin temporary directories from previous crashes/interrupted installations
+ filepaths.CleanupPluginTempDirs()
+
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ // load installation state (needed for hub existence checks)
+ state, err := installationstate.Load()
+ if err != nil {
+ error_helpers.ShowError(ctx, fmt.Errorf("could not load state"))
+ exitCode = pconstants.ExitCodePluginLoadingError
+ return
+ }
+
if len(args) == 0 {
fmt.Println() //nolint:forbidigo // ui output
error_helpers.ShowError(ctx, fmt.Errorf("you need to provide at least one plugin to uninstall"))
@@ -640,6 +723,18 @@ func runPluginUninstallCmd(cmd *cobra.Command, args []string) {
if report, err := plugin.Remove(ctx, p); err != nil {
if strings.Contains(err.Error(), "not found") {
exitCode = pconstants.ExitCodePluginNotFound
+ // check hub existence to tailor message
+ ref := pociinstaller.NewImageRef(p)
+ if ref.IsFromTurbotHub() {
+ org, name, constraint := ref.GetOrgNameAndStream()
+ if _, herr := pplugin.GetLatestPluginVersionByConstraint(ctx, state.InstallationID, org, name, constraint); herr != nil {
+ // Not on hub and not installed locally
+ error_helpers.ShowError(ctx, fmt.Errorf("Failed to uninstall '%s' not found on hub and not installed locally.", p))
+ continue
+ }
+ }
+ } else if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
}
error_helpers.ShowErrorWithMessage(ctx, err, fmt.Sprintf("Failed to uninstall plugin '%s'", p))
} else {
@@ -672,19 +767,37 @@ func resolveUpdatePluginsFromArgs(args []string) ([]string, error) {
}
func runPluginListCmd(cmd *cobra.Command, _ []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runPluginListCmd list")
+
+ // Clean up plugin temporary directories from previous crashes/interrupted installations
+ filepaths.CleanupPluginTempDirs()
+
+ var err error
defer func() {
utils.LogTime("runPluginListCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe plugin list command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPluginError(err, pconstants.ExitCodePluginListFailure)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// Get Resource(s)
resources, err := display.ListPlugins(ctx)
error_helpers.FailOnError(err)
@@ -701,32 +814,52 @@ func runPluginListCmd(cmd *cobra.Command, _ []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodePluginListFailure
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
func runPluginShowCmd(cmd *cobra.Command, args []string) {
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
+ ctx := cmd.Context()
+
// we expect 1 argument, the plugin name
if len(args) != 1 {
- error_helpers.ShowError(cmd.Context(), fmt.Errorf("you need to provide the name of a plugin"))
+ error_helpers.ShowError(ctx, fmt.Errorf("you need to provide the name of a plugin"))
exitCode = pconstants.ExitCodeInsufficientOrWrongInputs
return
}
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
-
utils.LogTime("runPluginShowCmd start")
+
+ // Clean up plugin temporary directories from previous crashes/interrupted installations
+ filepaths.CleanupPluginTempDirs()
+
+ var err error
defer func() {
utils.LogTime("runPluginShowCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe plugin show command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForPluginError(err, pconstants.ExitCodePluginShowFailure)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// Get Resource(s)
resource, err := display.GetPluginResource(ctx, args[0])
error_helpers.FailOnError(err)
@@ -739,7 +872,18 @@ func runPluginShowCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodePluginListFailure
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
+ }
+}
+
+func setExitCodeForPluginError(err error, nonCancelCode int) {
+ if exitCode != 0 || err == nil {
+ return
+ }
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
}
+ exitCode = nonCancelCode
}
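
The uninstall path distinguishes a plugin that is simply not installed from one that does not exist on the hub at all, by attempting a hub lookup before printing the failure message. A sketch of that branching with the hub lookup abstracted behind a function type (the real check goes through `pplugin.GetLatestPluginVersionByConstraint`):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// hubChecker abstracts the hub lookup; in tailpipe this is
// pplugin.GetLatestPluginVersionByConstraint behind an ImageRef check.
type hubChecker func(ctx context.Context, org, name, constraint string) error

// uninstallMessage mirrors the branching in runPluginUninstallCmd:
// a plugin missing locally gets a different message depending on
// whether it exists on the hub at all.
func uninstallMessage(ctx context.Context, plugin string, fromHub bool, check hubChecker) string {
	if !fromHub {
		return fmt.Sprintf("failed to uninstall plugin '%s'", plugin)
	}
	if err := check(ctx, "turbot", plugin, "latest"); err != nil {
		return fmt.Sprintf("failed to uninstall '%s': not found on hub and not installed locally", plugin)
	}
	return fmt.Sprintf("failed to uninstall plugin '%s' (it exists on the hub but is not installed)", plugin)
}

func main() {
	notOnHub := func(context.Context, string, string, string) error { return errors.New("not found") }
	fmt.Println(uninstallMessage(context.Background(), "aws", true, notOnHub))
}
```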
diff --git a/cmd/query.go b/cmd/query.go
index d767de7d..cdc6e011 100644
--- a/cmd/query.go
+++ b/cmd/query.go
@@ -1,8 +1,8 @@
package cmd
import (
- "context"
"fmt"
+ "os"
"strings"
"github.com/spf13/cobra"
@@ -12,6 +12,7 @@ import (
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
"github.com/turbot/pipe-fittings/v2/error_helpers"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/constants"
"github.com/turbot/tailpipe/internal/database"
"github.com/turbot/tailpipe/internal/interactive"
@@ -72,16 +73,27 @@ func runQueryCmd(cmd *cobra.Command, args []string) {
}
if err != nil {
error_helpers.ShowError(ctx, err)
- setExitCodeForQueryError(err)
+ exitCode = pconstants.ExitCodeInitializationFailed
}
}()
- // get a connection to the database
- var db *database.DuckDb
- db, err = openDatabaseConnection(ctx)
- if err != nil {
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
return
}
+
+ // build the filters from the to, from and index args
+ filters, err := getFilters()
+ if err != nil {
+ error_helpers.FailOnError(fmt.Errorf("error building filters: %w", err))
+ }
+
+ // now create a readonly connection to the database, passing in any filters
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly(filters...))
+ if err != nil {
+ error_helpers.FailOnError(err)
+ }
defer db.Close()
// if an arg was passed, just execute the query
@@ -99,25 +111,4 @@ func runQueryCmd(cmd *cobra.Command, args []string) {
// if there were any errors, they would have been shown already from `RunBatchSession` - just set the exit code
exitCode = pconstants.ExitCodeQueryExecutionFailed
}
-
-}
-
-// generate a db file - this will respect any time/index filters specified in the command args
-func openDatabaseConnection(ctx context.Context) (*database.DuckDb, error) {
- dbFilePath, err := generateDbFile(ctx)
- if err != nil {
- return nil, err
- }
- // Open a DuckDB connection
- return database.NewDuckDb(database.WithDbFile(dbFilePath))
-}
-
-func setExitCodeForQueryError(err error) {
- // if exit code already set, leave as is
- if exitCode != 0 || err == nil {
- return
- }
-
- // TODO #errors - assign exit codes https://github.com/turbot/tailpipe/issues/106
- exitCode = 1
}
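
The query command now opens the database through `database.NewDuckDb(database.WithDuckLakeReadonly(filters...))` instead of generating a db file. Only the call shape is visible in this diff; the sketch below reconstructs the implied functional-options pattern with hypothetical `config` and option internals.

```go
package main

import "fmt"

// config collects settings applied before the connection is opened (hypothetical).
type config struct {
	readonly bool
	filters  []string
}

// DuckDbOption is the functional-option shape implied by the call
// database.NewDuckDb(database.WithDuckLakeReadonly(filters...)).
type DuckDbOption func(*config)

// WithDuckLakeReadonly is assumed to attach the ducklake catalog read-only
// and push any row filters (built from --from/--to/--index) into the views.
func WithDuckLakeReadonly(filters ...string) DuckDbOption {
	return func(c *config) {
		c.readonly = true
		c.filters = append(c.filters, filters...)
	}
}

// DuckDb stands in for the internal/database wrapper; the real type wraps an
// open connection.
type DuckDb struct{ cfg config }

func NewDuckDb(opts ...DuckDbOption) (*DuckDb, error) {
	var cfg config
	for _, opt := range opts {
		opt(&cfg)
	}
	// the real constructor would open the DuckDB connection here
	return &DuckDb{cfg: cfg}, nil
}

func (d *DuckDb) Close() error { return nil }

func main() {
	db, err := NewDuckDb(WithDuckLakeReadonly("tp_timestamp >= date '2025-01-01'"))
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Printf("readonly=%v filters=%v\n", db.cfg.readonly, db.cfg.filters)
}
```

One advantage of this shape over the removed `openDatabaseConnection` helper: callers that need no filters (like `table list` below) can call `NewDuckDb(WithDuckLakeReadonly())` without a filter-building step.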
diff --git a/cmd/root.go b/cmd/root.go
index d33ca2fa..402c3c0e 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -1,6 +1,7 @@
package cmd
import (
+ "errors"
"os"
"github.com/spf13/cobra"
@@ -10,8 +11,8 @@ import (
"github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/pipe-fittings/v2/filepaths"
"github.com/turbot/pipe-fittings/v2/utils"
- localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/migration"
)
var exitCode int
@@ -35,7 +36,6 @@ func rootCommand() *cobra.Command {
rootCmd.SetVersionTemplate("Tailpipe v{{.Version}}\n")
- // TODO #config this will not reflect changes to install-dir - do we need to default in a different way https://github.com/turbot/tailpipe/issues/112
defaultConfigPath := filepaths.EnsureConfigDir()
cmdconfig.
@@ -63,18 +63,21 @@ func rootCommand() *cobra.Command {
}
func Execute() int {
- // if diagnostic mode is set, print out config and return
- if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
- localcmdconfig.DisplayConfig()
- return 0
- }
-
- rootCmd := rootCommand()
utils.LogTime("cmd.root.Execute start")
defer utils.LogTime("cmd.root.Execute end")
+ rootCmd := rootCommand()
+
+ // set the error output to stdout (as it's common to redirect stderr to a file to capture logs)

+ rootCmd.SetErr(os.Stdout)
+ // if the error is due to an unsupported migration, set a specific exit code - this will be picked up by powerpipe
if err := rootCmd.Execute(); err != nil {
- exitCode = -1
+ var unsupportedErr *migration.UnsupportedError
+ if errors.As(err, &unsupportedErr) {
+ exitCode = pconstants.ExitCodeMigrationUnsupported
+ } else {
+ exitCode = 1
+ }
}
return exitCode
}
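
`Execute()` now uses `errors.As` to surface a dedicated exit code for `migration.UnsupportedError`. Here is a runnable sketch of that branch, with a stand-in error type and an illustrative exit-code value; because `errors.As` unwraps, the typed error is found even when wrapped with `fmt.Errorf`'s `%w`.

```go
package main

import (
	"errors"
	"fmt"
)

const exitCodeMigrationUnsupported = 250 // stand-in for pconstants.ExitCodeMigrationUnsupported

// UnsupportedError mirrors the shape implied by migration.UnsupportedError.
type UnsupportedError struct{ Version string }

func (e *UnsupportedError) Error() string {
	return fmt.Sprintf("migration from workspace version %s is not supported", e.Version)
}

// exitCodeFor maps a command error to a process exit code, as in Execute().
func exitCodeFor(err error) int {
	var unsupportedErr *UnsupportedError
	if errors.As(err, &unsupportedErr) {
		return exitCodeMigrationUnsupported
	}
	return 1
}

func main() {
	// the typed error is still detected behind a %w wrap
	err := fmt.Errorf("root command failed: %w", &UnsupportedError{Version: "0.4"})
	fmt.Println(exitCodeFor(err))                // 250
	fmt.Println(exitCodeFor(errors.New("boom"))) // 1
}
```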
diff --git a/cmd/source.go b/cmd/source.go
index 1067c36d..dcb9d5e6 100644
--- a/cmd/source.go
+++ b/cmd/source.go
@@ -1,8 +1,8 @@
package cmd
import (
- "context"
"fmt"
+ "os"
"strings"
"github.com/spf13/cobra"
@@ -10,10 +10,10 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
"github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/pipe-fittings/v2/printers"
"github.com/turbot/pipe-fittings/v2/utils"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/constants"
"github.com/turbot/tailpipe/internal/display"
)
@@ -64,18 +64,32 @@ func sourceListCmd() *cobra.Command {
}
func runSourceListCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runSourceListCmd start")
+ var err error
defer func() {
utils.LogTime("runSourceListCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe source list command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForSourceError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// Get Resources
resources, err := display.ListSourceResources(ctx)
error_helpers.FailOnError(err)
@@ -88,8 +102,8 @@ func runSourceListCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
@@ -113,18 +127,34 @@ func sourceShowCmd() *cobra.Command {
}
func runSourceShowCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ // TODO: https://github.com/turbot/tailpipe/issues/563 none of the functions called in this command will return a
+ // cancellation error. Cancellation won't work right now
+ ctx := cmd.Context()
utils.LogTime("runSourceShowCmd start")
+ var err error
defer func() {
utils.LogTime("runSourceShowCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe source show command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForSourceError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
// Get Resources
resourceName := args[0]
resource, err := display.GetSourceResource(ctx, resourceName)
@@ -138,7 +168,18 @@ func runSourceShowCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
+ }
+}
+
+func setExitCodeForSourceError(err error) {
+ if exitCode != 0 || err == nil {
+ return
+ }
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
}
+ exitCode = 1
}
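
Each command above now relies on the "signal-aware/cancelable context created upstream in preRunHook". `preRunHook` itself is not part of this diff; a minimal equivalent of such a context can be built with `signal.NotifyContext` from the standard library, sketched here under that assumption.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// ctx is cancelled on SIGINT/SIGTERM; stop releases the signal handler.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	// A command body would receive this ctx via cmd.Context() and return
	// ctx.Err() when cancelled - the deferred handlers in the hunks above then
	// map that to the "command cancelled" message and exit code.
	select {
	case <-ctx.Done():
		fmt.Println("cancelled:", ctx.Err())
	case <-time.After(100 * time.Millisecond):
		fmt.Println("finished")
	}
}
```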
diff --git a/cmd/table.go b/cmd/table.go
index 1ee88718..7574d84d 100644
--- a/cmd/table.go
+++ b/cmd/table.go
@@ -1,8 +1,8 @@
package cmd
import (
- "context"
"fmt"
+ "os"
"strings"
"github.com/spf13/cobra"
@@ -10,12 +10,13 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/contexthelpers"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/pipe-fittings/v2/printers"
"github.com/turbot/pipe-fittings/v2/utils"
+ localcmdconfig "github.com/turbot/tailpipe/internal/cmdconfig"
"github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/database"
"github.com/turbot/tailpipe/internal/display"
+ "github.com/turbot/tailpipe/internal/error_helpers"
)
func tableCmd() *cobra.Command {
@@ -65,20 +66,39 @@ func tableListCmd() *cobra.Command {
}
func runTableListCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runSourceListCmd start")
+ var err error
defer func() {
utils.LogTime("runSourceListCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe table list command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForTableError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ // open a readonly db connection
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly())
+ error_helpers.FailOnError(err)
+ defer db.Close()
+
// Get Resources
- resources, err := display.ListTableResources(ctx)
+ resources, err := display.ListTableResources(ctx, db)
error_helpers.FailOnError(err)
printableResource := display.NewPrintableResource(resources...)
@@ -89,8 +109,8 @@ func runTableListCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
}
}
@@ -115,20 +135,39 @@ func tableShowCmd() *cobra.Command {
}
func runTableShowCmd(cmd *cobra.Command, args []string) {
- //setup a cancel context and start cancel handler
- ctx, cancel := context.WithCancel(cmd.Context())
- contexthelpers.StartCancelHandler(cancel)
+ // use the signal-aware/cancelable context created upstream in preRunHook
+ ctx := cmd.Context()
utils.LogTime("runTableShowCmd start")
+ var err error
defer func() {
utils.LogTime("runTableShowCmd end")
if r := recover(); r != nil {
- error_helpers.ShowError(ctx, helpers.ToError(r))
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ err = helpers.ToError(r)
+ }
+ if err != nil {
+ if error_helpers.IsCancelledError(err) {
+ //nolint:forbidigo // ui output
+ fmt.Println("tailpipe table show command cancelled.")
+ } else {
+ error_helpers.ShowError(ctx, err)
+ }
+ setExitCodeForTableError(err)
}
}()
+ // if diagnostic mode is set, print out config and return
+ if _, ok := os.LookupEnv(constants.EnvConfigDump); ok {
+ localcmdconfig.DisplayConfig()
+ return
+ }
+
+ // open a readonly db connection
+ db, err := database.NewDuckDb(database.WithDuckLakeReadonly())
+ error_helpers.FailOnError(err)
+ defer db.Close()
+
// Get Resources
- resource, err := display.GetTableResource(ctx, args[0])
+ resource, err := display.GetTableResource(ctx, args[0], db)
error_helpers.FailOnError(err)
printableResource := display.NewPrintableResource(resource)
@@ -139,7 +178,18 @@ func runTableShowCmd(cmd *cobra.Command, args []string) {
// Print
err = printer.PrintResource(ctx, printableResource, cmd.OutOrStdout())
if err != nil {
- error_helpers.ShowError(ctx, err)
- exitCode = pconstants.ExitCodeUnknownErrorPanic
+ exitCode = pconstants.ExitCodeOutputRenderingFailed
+ return
+ }
+}
+
+func setExitCodeForTableError(err error) {
+ if exitCode != 0 || err == nil {
+ return
+ }
+ if error_helpers.IsCancelledError(err) {
+ exitCode = pconstants.ExitCodeOperationCancelled
+ return
}
+ exitCode = 1
}
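
The same `setExitCodeFor*Error` helper now appears in plugin.go, source.go, and table.go. Below is a hypothetical table-driven test for the table variant, assuming (as the cancellation branches in this diff suggest) that `error_helpers.IsCancelledError` treats `context.Canceled` as a cancellation; that behaviour is not itself shown here.

```go
package cmd

import (
	"context"
	"errors"
	"testing"

	pconstants "github.com/turbot/pipe-fittings/v2/constants"
)

func TestSetExitCodeForTableError(t *testing.T) {
	tests := []struct {
		name     string
		existing int // exitCode already set before the call
		err      error
		want     int
	}{
		{"nil error leaves code unset", 0, nil, 0},
		{"already-set code wins", 7, errors.New("boom"), 7},
		// assumes IsCancelledError(context.Canceled) == true
		{"cancellation maps to dedicated code", 0, context.Canceled, pconstants.ExitCodeOperationCancelled},
		{"other errors map to 1", 0, errors.New("boom"), 1},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			exitCode = tc.existing // package-level variable, as in the diff
			setExitCodeForTableError(tc.err)
			if exitCode != tc.want {
				t.Fatalf("exitCode = %d, want %d", exitCode, tc.want)
			}
		})
	}
}
```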
diff --git a/go.mod b/go.mod
index ce80add4..0a633771 100644
--- a/go.mod
+++ b/go.mod
@@ -8,22 +8,21 @@ replace (
github.com/c-bata/go-prompt => github.com/turbot/go-prompt v0.2.6-steampipe.0.0.20221028122246-eb118ec58d50
//github.com/turbot/pipe-fittings/v2 => ../pipe-fittings
//github.com/turbot/tailpipe-plugin-core => ../tailpipe-plugin-core
-//github.com/turbot/tailpipe-plugin-sdk => ../tailpipe-plugin-sdk
+// github.com/turbot/tailpipe-plugin-sdk => ../tailpipe-plugin-sdk
)
require (
- github.com/Masterminds/semver/v3 v3.2.1
- github.com/hashicorp/hcl/v2 v2.20.1
+ github.com/Masterminds/semver/v3 v3.4.0
+ github.com/hashicorp/hcl/v2 v2.24.0
github.com/mattn/go-isatty v0.0.20
- github.com/spf13/cobra v1.8.1
+ github.com/spf13/cobra v1.9.1
github.com/spf13/viper v1.19.0
- github.com/stretchr/testify v1.10.0
+ github.com/stretchr/testify v1.11.0
github.com/turbot/go-kit v1.3.0
- github.com/turbot/pipe-fittings/v2 v2.5.1
- github.com/turbot/tailpipe-plugin-sdk v0.8.0-rc.1
- github.com/zclconf/go-cty v1.14.4
- golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
-
+ github.com/turbot/pipe-fittings/v2 v2.7.0
+ github.com/turbot/tailpipe-plugin-sdk v0.9.3
+ github.com/zclconf/go-cty v1.16.3
+ golang.org/x/exp v0.0.0-20250718183923-645b1fa84792
)
require (
@@ -33,39 +32,46 @@ require (
github.com/charmbracelet/bubbletea v1.2.4
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964
github.com/dustin/go-humanize v1.0.1
- github.com/fsnotify/fsnotify v1.8.0
+ github.com/fatih/color v1.18.0
+ github.com/fsnotify/fsnotify v1.9.0
github.com/gosuri/uiprogress v0.0.1
github.com/hashicorp/go-hclog v1.6.3
github.com/hashicorp/go-plugin v1.6.1
github.com/hashicorp/go-version v1.7.0
github.com/jedib0t/go-pretty/v6 v6.5.9
- github.com/marcboeker/go-duckdb/v2 v2.1.0
+ github.com/marcboeker/go-duckdb/v2 v2.4.0
+ github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02
github.com/thediveo/enumflag/v2 v2.0.5
- github.com/turbot/tailpipe-plugin-core v0.2.7
- golang.org/x/sync v0.12.0
- golang.org/x/text v0.23.0
- google.golang.org/protobuf v1.36.1
+ github.com/turbot/tailpipe-plugin-core v0.2.10
+ golang.org/x/text v0.28.0
+ google.golang.org/grpc v1.75.0
+ google.golang.org/protobuf v1.36.8
)
require (
github.com/goccy/go-json v0.10.5 // indirect
- github.com/google/flatbuffers v25.1.24+incompatible // indirect
- github.com/klauspost/cpuid/v2 v2.2.9 // indirect
+ github.com/google/flatbuffers v25.2.10+incompatible // indirect
+ github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
)
require (
- cloud.google.com/go v0.115.0 // indirect
- cloud.google.com/go/auth v0.7.2 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
- cloud.google.com/go/compute/metadata v0.5.2 // indirect
- cloud.google.com/go/iam v1.1.10 // indirect
- cloud.google.com/go/storage v1.42.0 // indirect
+ cel.dev/expr v0.24.0 // indirect
+ cloud.google.com/go v0.121.0 // indirect
+ cloud.google.com/go/auth v0.16.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+ cloud.google.com/go/compute/metadata v0.7.0 // indirect
+ cloud.google.com/go/iam v1.5.0 // indirect
+ cloud.google.com/go/monitoring v1.24.0 // indirect
+ cloud.google.com/go/storage v1.52.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/agext/levenshtein v1.2.3 // indirect
- github.com/apache/arrow-go/v18 v18.1.0 // indirect
+ github.com/apache/arrow-go/v18 v18.4.1 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/aws/aws-sdk-go v1.44.183 // indirect
@@ -91,6 +97,7 @@ require (
github.com/charmbracelet/lipgloss v1.0.0 // indirect
github.com/charmbracelet/x/ansi v0.4.5 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
+ github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/containerd/containerd v1.7.27 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -99,40 +106,42 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgraph-io/ristretto v0.2.0 // indirect
github.com/dlclark/regexp2 v1.4.0 // indirect
- github.com/duckdb/duckdb-go-bindings v0.1.13 // indirect
- github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.8 // indirect
- github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.8 // indirect
- github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.8 // indirect
- github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.8 // indirect
- github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.8 // indirect
+ github.com/duckdb/duckdb-go-bindings v0.1.19 // indirect
+ github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.19 // indirect
+ github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.19 // indirect
+ github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.19 // indirect
+ github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.19 // indirect
+ github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.19 // indirect
github.com/elastic/go-grok v0.3.1 // indirect
+ github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
- github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gertd/go-pluralize v0.2.1 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.0 // indirect
github.com/go-git/go-git/v5 v5.13.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-jose/go-jose/v4 v4.1.1 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
- github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
- github.com/goccy/go-yaml v1.11.2 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/goccy/go-yaml v1.17.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/gosuri/uilive v0.0.4 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
- github.com/hashicorp/go-getter v1.7.5 // indirect
+ github.com/hashicorp/go-getter v1.7.9 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
@@ -160,8 +169,8 @@ require (
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
- github.com/marcboeker/go-duckdb/arrowmapping v0.0.6 // indirect
- github.com/marcboeker/go-duckdb/mapping v0.0.6 // indirect
+ github.com/marcboeker/go-duckdb/arrowmapping v0.0.19 // indirect
+ github.com/marcboeker/go-duckdb/mapping v0.0.19 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
@@ -169,7 +178,7 @@ require (
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
- github.com/mitchellh/go-wordwrap v1.0.0 // indirect
+ github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
@@ -183,6 +192,7 @@ require (
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/term v1.1.0 // indirect
+ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rs/xid v1.5.0 // indirect
@@ -190,13 +200,13 @@ require (
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/satyrius/gonx v1.4.0 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
- github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/pflag v1.0.10 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/stevenle/topsort v0.2.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -204,32 +214,36 @@ require (
github.com/tkrajina/go-reflector v0.5.8 // indirect
github.com/turbot/pipes-sdk-go v0.12.0 // indirect
github.com/turbot/terraform-components v0.0.0-20231213122222-1f3526cab7a7 // indirect
- github.com/ulikunitz/xz v0.5.10 // indirect
+ github.com/ulikunitz/xz v0.5.14 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zclconf/go-cty-yaml v1.0.3 // indirect
- go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.31.0 // indirect
- go.opentelemetry.io/otel/metric v1.31.0 // indirect
- go.opentelemetry.io/otel/trace v1.31.0 // indirect
+ github.com/zeebo/errs v1.4.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.37.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/crypto v0.36.0 // indirect
- golang.org/x/mod v0.22.0 // indirect
- golang.org/x/net v0.38.0 // indirect
- golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/term v0.30.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.29.0 // indirect
+ golang.org/x/crypto v0.41.0 // indirect
+ golang.org/x/mod v0.27.0 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sync v0.16.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/term v0.34.0 // indirect
+ golang.org/x/time v0.11.0 // indirect
+ golang.org/x/tools v0.36.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
- google.golang.org/api v0.189.0 // indirect
- google.golang.org/genproto v0.0.0-20240722135656-d784300faade // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
- google.golang.org/grpc v1.69.2 // indirect
+ google.golang.org/api v0.230.0 // indirect
+ google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 7c5b3309..1640c3f0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,11 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -15,6 +18,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -26,32 +30,96 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
-cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
-cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go v0.121.0 h1:pgfwva8nGw7vivjZiRfrmglGWiCJBP+0OmDpenG/Fwg=
+cloud.google.com/go v0.121.0/go.mod h1:rS7Kytwheu/y9buoDmu5EIpMMCI4Mb8ND4aeN4Vwj7Q=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
-cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
-cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
-cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
-cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU=
+cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -59,12 +127,44 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
@@ -72,129 +172,466 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
-cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
-cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
-cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI=
-cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.5.0 h1:QlLcVMhbLGOjRcGe6VTGGTyQib8dRLK2B/kYNV0+2xs=
+cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
-cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k=
-cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
+cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.6.6 h1:XJNDo5MUfMM05xK3ewpbSdmt7R2Zw+aQEMbdQR65Rbw=
+cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM=
+cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc=
cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
-cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU=
-cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.52.0 h1:ROpzMW/IwipKtatA69ikxibdzQSiXJrY9f6IgBa9AlA=
+cloud.google.com/go/storage v1.52.0/go.mod h1:4wrBAbAYUvYkbrf19ahGm4I5kDQhESSqN3CGEkMGvOY=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE=
+cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
@@ -204,15 +641,23 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s=
-github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
-github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
+github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow-go/v18 v18.1.0 h1:agLwJUiVuwXZdwPYVrlITfx7bndULJ/dggbnLFgDp/Y=
-github.com/apache/arrow-go/v18 v18.1.0/go.mod h1:tigU/sIgKNXaesf5d7Y95jBBKS5KsxTqYBKXFsvKzo0=
-github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
-github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw=
+github.com/apache/arrow-go/v18 v18.4.1 h1:q/jVkBWCJOB9reDgaIZIdruLQUb1kbkvOnOFezVH1C4=
+github.com/apache/arrow-go/v18 v18.4.1/go.mod h1:tLyFubsAl17bvFdUAy24bsSvA/6ww95Iqi67fTpGu3E=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc=
+github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=
@@ -256,6 +701,8 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
github.com/btubbs/datetime v0.1.1 h1:KuV+F9tyq/hEnezmKZNGk8dzqMVsId6EpFVrQCfA3To=
@@ -263,8 +710,11 @@ github.com/btubbs/datetime v0.1.1/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxA
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
@@ -284,11 +734,17 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
@@ -301,7 +757,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
@@ -316,18 +773,20 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/duckdb/duckdb-go-bindings v0.1.13 h1:3Ec0SjMBuzt7wExde5ZoMXd1Nk91LJmpopq2Ee6g9Pw=
-github.com/duckdb/duckdb-go-bindings v0.1.13/go.mod h1:pBnfviMzANT/9hi4bg+zW4ykRZZPCXlVuvBWEcZofkc=
-github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.8 h1:n4RNMqiUPao53YKmlh36zGEr49CnUXGVKOtOMCEhwFE=
-github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.8/go.mod h1:Ezo7IbAfB8NP7CqPIN8XEHKUg5xdRRQhcPPlCXImXYA=
-github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.8 h1:3ZBS6wETlZp9UDmaWJ4O4k7ZSjqQjyhMW5aZZBXThqM=
-github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.8/go.mod h1:eS7m/mLnPQgVF4za1+xTyorKRBuK0/BA44Oy6DgrGXI=
-github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.8 h1:KCUI9KSAUKbYasNlTcjky30nbDtF18S6s6R3usXWLqk=
-github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.8/go.mod h1:1GOuk1PixiESxLaCGFhag+oFi7aP+9W8byymRAvunBk=
-github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.8 h1:QgKzpNG7EMPq3ayYcr0LzGfC+dCzGA/Gm6Y7ndbrXHg=
-github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.8/go.mod h1:o7crKMpT2eOIi5/FY6HPqaXcvieeLSqdXXaXbruGX7w=
-github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.8 h1:lmseSULUmuVycRBJ6DVH86eFOQhHz32hN8mfxF7z+0w=
-github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.8/go.mod h1:IlOhJdVKUJCAPj3QsDszUo8DVdvp1nBFp4TUJVdw99s=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/duckdb/duckdb-go-bindings v0.1.19 h1:t8fwgKlr/5BEa5TJzvo3Vdr3yAgoYiR7L/TqyMuUQ2k=
+github.com/duckdb/duckdb-go-bindings v0.1.19/go.mod h1:pBnfviMzANT/9hi4bg+zW4ykRZZPCXlVuvBWEcZofkc=
+github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.19 h1:CdNZfRcFUFxI4Q+1Tu4TBFln9tkIn6bDwVwh9LeEsoo=
+github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.19/go.mod h1:Ezo7IbAfB8NP7CqPIN8XEHKUg5xdRRQhcPPlCXImXYA=
+github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.19 h1:mVijr3WFz3TXZLtAm5Hb6qEnstacZdFI5QQNuE9R2QQ=
+github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.19/go.mod h1:eS7m/mLnPQgVF4za1+xTyorKRBuK0/BA44Oy6DgrGXI=
+github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.19 h1:jhchUY24T5bQLOwGyK0BzB6+HQmsRjAbgUZDKWo4ajs=
+github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.19/go.mod h1:1GOuk1PixiESxLaCGFhag+oFi7aP+9W8byymRAvunBk=
+github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.19 h1:CFcH+Bze2OgTaTLM94P3gJ554alnCCDnt1BH/nO8RJ8=
+github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.19/go.mod h1:o7crKMpT2eOIi5/FY6HPqaXcvieeLSqdXXaXbruGX7w=
+github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.19 h1:x/8t04sgCVU8JL0XLUZWmC1FAX13ZjM58EmsyPjvrvY=
+github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.19/go.mod h1:IlOhJdVKUJCAPj3QsDszUo8DVdvp1nBFp4TUJVdw99s=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U=
@@ -343,24 +802,44 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
-github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlLgiA=
github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.6.0 h1:w2hPNtoehvJIxR00Vb4xX94qHQi/ApZfX+nBE2Cjio8=
@@ -372,13 +851,19 @@ github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkv
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
@@ -391,15 +876,19 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
-github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
-github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ=
-github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
+github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY=
+github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -431,15 +920,18 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o=
-github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
+github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -455,13 +947,15 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -473,6 +967,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -481,8 +976,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -490,8 +985,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -501,9 +998,12 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
-github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
-github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gosuri/uilive v0.0.4 h1:hUEBpQDj8D8jXgtCdBu7sWsy5sbW/5GhuO8KBwJ2jyY=
@@ -511,13 +1011,15 @@ github.com/gosuri/uilive v0.0.4/go.mod h1:V/epo5LjjlDE5RJUcqx8dbw+zc93y5Ya3yg8tf
github.com/gosuri/uiprogress v0.0.1 h1:0kpv/XY/qTmFWl/SkaJykZXrBBzwwadmW8fRb7RJSxw=
github.com/gosuri/uiprogress v0.0.1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4=
-github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
+github.com/hashicorp/go-getter v1.7.9 h1:G9gcjrDixz7glqJ+ll5IWvggSBR+R0B54DSRt4qfdC4=
+github.com/hashicorp/go-getter v1.7.9/go.mod h1:dyFCmT1AQkDfOIt9NH8pw9XBDqNrIKJT5ylbpi7zPNE=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -535,8 +1037,8 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
-github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
+github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
+github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
github.com/hashicorp/terraform-registry-address v0.2.1 h1:QuTf6oJ1+WSflJw6WYOHhLgwUiQ0FrROpHPYFtwTYWM=
github.com/hashicorp/terraform-registry-address v0.2.1/go.mod h1:BSE9fIFzp0qWsJUUyGquo4ldV9k2n+psif6NYkBRS3Y=
github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ=
@@ -545,6 +1047,7 @@ github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE
github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8=
github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -584,17 +1087,25 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/karrick/gows v0.3.0 h1:/FGSuBiJMUqNOJPsAdLvHFg7RnkFoWBS8USpdco5ONQ=
github.com/karrick/gows v0.3.0/go.mod h1:kdZ/jfdo8yqKYn+BMjBkhP+/oRKUABR1abaomzRi/n8=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
-github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
+github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -607,16 +1118,19 @@ github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczG
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/marcboeker/go-duckdb/arrowmapping v0.0.6 h1:FaNX2JP4pKw7Xh2rMBCCvqWIafhX3nSXrUffexNRB68=
-github.com/marcboeker/go-duckdb/arrowmapping v0.0.6/go.mod h1:WjLM334CLZux/OtAeF0DT2n9LyNqquqT3EhCHQcflNk=
-github.com/marcboeker/go-duckdb/mapping v0.0.6 h1:Y+nHQDHXqo78i8MM4UP7qVmFgTAofbdvpUdRdxJXjSk=
-github.com/marcboeker/go-duckdb/mapping v0.0.6/go.mod h1:k1lwBZvSza+RSpuA1kcMS/vxlNuqqFynoDef/clDD2M=
-github.com/marcboeker/go-duckdb/v2 v2.1.0 h1:mhAEwy+Ut9Iji+QvyjkB86HhhC/r/H0RRKpkwfANu88=
-github.com/marcboeker/go-duckdb/v2 v2.1.0/go.mod h1:W76KqN7EWTm8kpU2irA0V4f1R+6QEt3uLUVZ3wAtZ7M=
+github.com/marcboeker/go-duckdb/arrowmapping v0.0.19 h1:kMxJBauR2+jwRoSFjiL/DysQtKRBCkNSLZz7GUvEG8A=
+github.com/marcboeker/go-duckdb/arrowmapping v0.0.19/go.mod h1:19JWoch6I++gIrWUz1MLImIoFGri9yL54JaWn/Ujvbo=
+github.com/marcboeker/go-duckdb/mapping v0.0.19 h1:xZ7LCyFZZm/4X631lOZY74p3QHINMnWJ+OakKw5d3Ao=
+github.com/marcboeker/go-duckdb/mapping v0.0.19/go.mod h1:Kz9xYOkhhkgCaGgAg34ciKaks9ED2V7BzHzG6dnVo/o=
+github.com/marcboeker/go-duckdb/v2 v2.4.0 h1:XztCDzB0fYvokiVer1myuFX4QvOdnicdTPRp4D+x2Ok=
+github.com/marcboeker/go-duckdb/v2 v2.4.0/go.mod h1:qpTBjqtTS5+cfD3o2Sl/W70cmxKj6zhjtvVxs1Wuy7k=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -624,7 +1138,6 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -639,6 +1152,7 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-tty v0.0.3 h1:5OfyWorkyO7xP52Mq7tB36ajHDG5OHrmBGIS/DtakQI=
github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
@@ -651,8 +1165,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
-github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -681,28 +1195,45 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk=
github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
@@ -724,22 +1255,29 @@ github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
github.com/stevenle/topsort v0.2.0 h1:LLWgtp34HPX6/RBDRS0kElVxGOTzGBLI1lSAa5Lb46k=
github.com/stevenle/topsort v0.2.0/go.mod h1:ck2WG2/ZrOr6dLApQ/5Xrqy5wv3T0qhKYWE7r9tkibc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
@@ -749,10 +1287,11 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
+github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/thediveo/enumflag/v2 v2.0.5 h1:VJjvlAqUb6m6mxOrB/0tfBJI0Kvi9wJ8ulh38xK87i8=
@@ -769,18 +1308,19 @@ github.com/turbot/go-kit v1.3.0 h1:6cIYPAO5hO9fG7Zd5UBC4Ch3+C6AiiyYS0UQnrUlTV0=
github.com/turbot/go-kit v1.3.0/go.mod h1:piKJMYCF8EYmKf+D2B78Csy7kOHGmnQVOWingtLKWWQ=
github.com/turbot/go-prompt v0.2.6-steampipe.0.0.20221028122246-eb118ec58d50 h1:zs87uA6QZsYLk4RRxDOIxt8ro/B2V6HzoMWm05Lo7ao=
github.com/turbot/go-prompt v0.2.6-steampipe.0.0.20221028122246-eb118ec58d50/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw=
-github.com/turbot/pipe-fittings/v2 v2.5.1 h1:Y5qiKaUQSWcS7vy+KOLLyIDoNUY5jUdIopHpxcyH5nU=
-github.com/turbot/pipe-fittings/v2 v2.5.1/go.mod h1:szte433cBDCaZcGe5zMVGG7uTl9HMaEYaQmuvzZRYIQ=
+github.com/turbot/pipe-fittings/v2 v2.7.0 h1:eCmpMNlVtV3AxOzsn8njE3O6aoHc74WVAHOntia2hqY=
+github.com/turbot/pipe-fittings/v2 v2.7.0/go.mod h1:V619+tgfLaqoEXFDNzA2p24TBZVf4IkDL9FDLQecMnE=
github.com/turbot/pipes-sdk-go v0.12.0 h1:esbbR7bALa5L8n/hqroMPaQSSo3gNM/4X0iTmHa3D6U=
github.com/turbot/pipes-sdk-go v0.12.0/go.mod h1:Mb+KhvqqEdRbz/6TSZc2QWDrMa5BN3E4Xw+gPt2TRkc=
-github.com/turbot/tailpipe-plugin-core v0.2.7 h1:nLXyg1X6j6eKD6cTi+opX+tBCbkR2t1GMNuz8wCfd38=
-github.com/turbot/tailpipe-plugin-core v0.2.7/go.mod h1:bI4haOVPeqi10PM6ALKIydoTfCsNLEPrq/9Omo1Mf1g=
-github.com/turbot/tailpipe-plugin-sdk v0.8.0-rc.1 h1:sjDzgzMdLnZlIbyd5LzlrWyq/0yDkZyJhxoGw9jJFs0=
-github.com/turbot/tailpipe-plugin-sdk v0.8.0-rc.1/go.mod h1:kpvafTVw6KUx/kpFMshbzQLuZ6ApdWMS5ZqYQzp1q/A=
+github.com/turbot/tailpipe-plugin-core v0.2.10 h1:2+B7W4hzyS/pBr1y5ns9w84piWGq/x+WdCUjyPaPreQ=
+github.com/turbot/tailpipe-plugin-core v0.2.10/go.mod h1:dHzPUR1p5GksSvDqqEeZEvvJX6wTEwK/ZDev//9nSLw=
+github.com/turbot/tailpipe-plugin-sdk v0.9.3 h1:JpGpGPwehqdXnRO3aqkQTpd96Vx2blY+AkXP8lYB32g=
+github.com/turbot/tailpipe-plugin-sdk v0.9.3/go.mod h1:Egojp0j7+th/4Bh6muMuF6aZa5iE3MuiJ4pzBo0J2mg=
github.com/turbot/terraform-components v0.0.0-20231213122222-1f3526cab7a7 h1:qDMxFVd8Zo0rIhnEBdCIbR+T6WgjwkxpFZMN8zZmmjg=
github.com/turbot/terraform-components v0.0.0-20231213122222-1f3526cab7a7/go.mod h1:5hzpfalEjfcJWp9yq75/EZoEu2Mzm34eJAPm3HOW2tw=
-github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg=
+github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -788,17 +1328,20 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
-github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
-github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=
-github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
+github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk=
+github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
+github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sdFMLc=
github.com/zclconf/go-cty-yaml v1.0.3/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -810,21 +1353,29 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
@@ -832,25 +1383,48 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
-golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
+golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -874,9 +1448,17 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -907,11 +1489,14 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -922,10 +1507,23 @@ golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -949,10 +1547,14 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -966,9 +1568,15 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1009,11 +1617,14 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1022,9 +1633,12 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1043,17 +1657,39 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1063,16 +1699,31 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1085,6 +1736,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1113,18 +1765,26 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
-golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1135,8 +1795,16 @@ golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNq
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0=
-gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -1184,9 +1852,18 @@ google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaE
google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
-google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
-google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM=
+google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1229,7 +1906,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1262,6 +1941,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
@@ -1294,13 +1974,41 @@ google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53B
google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
-google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
-google.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg=
-google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
+google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
+google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU=
+google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1327,6 +2035,7 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
@@ -1336,8 +2045,13 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
-google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
+google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1354,8 +2068,11 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -1381,9 +2098,45 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
+modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
+modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
+modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
+modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
+modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
+modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
+modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/internal/cmdconfig/cmd_hooks.go b/internal/cmdconfig/cmd_hooks.go
index 2d7ecbce..2272aa00 100644
--- a/internal/cmdconfig/cmd_hooks.go
+++ b/internal/cmdconfig/cmd_hooks.go
@@ -13,7 +13,9 @@ import (
"github.com/turbot/pipe-fittings/v2/app_specific"
"github.com/turbot/pipe-fittings/v2/cmdconfig"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
+ "github.com/turbot/pipe-fittings/v2/contexthelpers"
+ perror_helpers "github.com/turbot/pipe-fittings/v2/error_helpers"
+
"github.com/turbot/pipe-fittings/v2/filepaths"
pparse "github.com/turbot/pipe-fittings/v2/parse"
"github.com/turbot/pipe-fittings/v2/task"
@@ -21,8 +23,8 @@ import (
"github.com/turbot/pipe-fittings/v2/workspace_profile"
"github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/constants"
- "github.com/turbot/tailpipe/internal/database"
"github.com/turbot/tailpipe/internal/logger"
+ "github.com/turbot/tailpipe/internal/migration"
"github.com/turbot/tailpipe/internal/parse"
"github.com/turbot/tailpipe/internal/plugin"
)
@@ -47,9 +49,8 @@ func preRunHook(cmd *cobra.Command, args []string) error {
ew := initGlobalConfig(ctx)
// display any warnings
ew.ShowWarnings()
- // TODO #errors sort exit code https://github.com/turbot/tailpipe/issues/106
// check for error
- error_helpers.FailOnError(ew.Error)
+ perror_helpers.FailOnError(ew.Error)
// pump in the initial set of logs (AFTER we have loaded the config, which may specify log level)
displayStartupLog()
@@ -60,7 +61,35 @@ func preRunHook(cmd *cobra.Command, args []string) error {
// set the max memory if specified
setMemoryLimit()
- return nil
+ // create cancel context and set back on command
+ baseCtx := cmd.Context()
+ ctx, cancel := context.WithCancel(baseCtx)
+
+ // start the cancel handler to call cancel on interrupt signals
+ contexthelpers.StartCancelHandler(cancel)
+ cmd.SetContext(ctx)
+
+ // migrate legacy data to DuckLake:
+ // Prior to Tailpipe v0.7.0 we stored data as native Parquet files alongside a tailpipe.db
+ // (DuckDB) that defined SQL views. From v0.7.0 onward Tailpipe uses DuckLake, which
+ // introduces a metadata database (metadata.sqlite). We run a one-time migration here to
+ // move existing user data into DuckLake’s layout so it can be queried and managed via
+ // the new metadata model.
+ // start migration
+ err := migration.MigrateDataToDucklake(cmd.Context())
+ if err != nil {
+		// we do not want Cobra usage or error output for migration errors - suppress them
+ cmd.SilenceUsage = true
+ // for cancelled errors, also silence the error message
+ if perror_helpers.IsCancelledError(err) {
+ cmd.SilenceErrors = true
+ }
+ }
+
+ // return (possibly nil) error from migration
+ return err
}
func displayStartupLog() {
@@ -128,7 +157,7 @@ func runScheduledTasks(ctx context.Context, cmd *cobra.Command, args []string) c
}
// initConfig reads in config file and ENV variables if set.
-func initGlobalConfig(ctx context.Context) error_helpers.ErrorAndWarnings {
+func initGlobalConfig(ctx context.Context) perror_helpers.ErrorAndWarnings {
utils.LogTime("cmdconfig.initGlobalConfig start")
defer utils.LogTime("cmdconfig.initGlobalConfig end")
@@ -145,20 +174,14 @@ func initGlobalConfig(ctx context.Context) error_helpers.ErrorAndWarnings {
// load workspace profile from the configured install dir
loader, err := cmdconfig.GetWorkspaceProfileLoader[*workspace_profile.TailpipeWorkspaceProfile](parseOpts...)
if err != nil {
- return error_helpers.NewErrorsAndWarning(err)
+ return perror_helpers.NewErrorsAndWarning(err)
}
config.GlobalWorkspaceProfile = loader.GetActiveWorkspaceProfile()
// create the required data and internal folder for this workspace if needed
err = config.GlobalWorkspaceProfile.EnsureWorkspaceDirs()
if err != nil {
- return error_helpers.NewErrorsAndWarning(err)
- }
-
- // ensure we have a database file for this workspace
- err = database.EnsureDatabaseFile(ctx)
- if err != nil {
- return error_helpers.NewErrorsAndWarning(err)
+ return perror_helpers.NewErrorsAndWarning(err)
}
var cmd = viper.Get(pconstants.ConfigKeyActiveCommand).(*cobra.Command)
@@ -179,33 +202,17 @@ func initGlobalConfig(ctx context.Context) error_helpers.ErrorAndWarnings {
// NOTE: if this installed the core plugin, the plugin version file will be updated and the updated file returned
pluginVersionFile, err := plugin.EnsureCorePlugin(ctx)
if err != nil {
- return error_helpers.NewErrorsAndWarning(err)
+ return perror_helpers.NewErrorsAndWarning(err)
}
 	// load the connection config and HCL options (passing plugin versions)
tailpipeConfig, loadConfigErrorsAndWarnings := parse.LoadTailpipeConfig(pluginVersionFile)
- if loadConfigErrorsAndWarnings.Error != nil {
- return loadConfigErrorsAndWarnings
- }
+ if loadConfigErrorsAndWarnings.Error == nil {
+ // store global config
+ config.GlobalConfig = tailpipeConfig
- if loadConfigErrorsAndWarnings.Warnings != nil {
- for _, warning := range loadConfigErrorsAndWarnings.Warnings {
- error_helpers.ShowWarning(warning)
- }
}
- // store global config
- config.GlobalConfig = tailpipeConfig
-
- // now validate all config values have appropriate values
- return validateConfig()
-}
-
-// now validate config values have appropriate values
-func validateConfig() error_helpers.ErrorAndWarnings {
- var res = error_helpers.ErrorAndWarnings{}
-
- // TODO #config validate
- return res
+ return loadConfigErrorsAndWarnings
}
diff --git a/internal/cmdconfig/diagnostics.go b/internal/cmdconfig/diagnostics.go
index b40a97ec..ea2d26fa 100644
--- a/internal/cmdconfig/diagnostics.go
+++ b/internal/cmdconfig/diagnostics.go
@@ -9,8 +9,8 @@ import (
"strings"
"github.com/spf13/viper"
- "github.com/turbot/pipe-fittings/v2/error_helpers"
"github.com/turbot/tailpipe/internal/constants"
+ error_helpers "github.com/turbot/tailpipe/internal/error_helpers"
)
// DisplayConfig prints all config set via WorkspaceProfile or HCL options
@@ -51,7 +51,7 @@ func DisplayConfig() {
sort.Strings(lines)
var b strings.Builder
- b.WriteString("\n================\nSteampipe Config\n================\n\n")
+ b.WriteString("\n================\nTailpipe Config\n================\n\n")
for _, line := range lines {
b.WriteString(line)
diff --git a/internal/collector/collector.go b/internal/collector/collector.go
index ffced4ca..0d04b0a5 100644
--- a/internal/collector/collector.go
+++ b/internal/collector/collector.go
@@ -13,13 +13,14 @@ import (
tea "github.com/charmbracelet/bubbletea"
"github.com/spf13/viper"
pconstants "github.com/turbot/pipe-fittings/v2/constants"
+	"github.com/turbot/pipe-fittings/v2/filepaths"
+	"github.com/turbot/pipe-fittings/v2/statushooks"
"github.com/turbot/tailpipe-plugin-sdk/events"
sdkfilepaths "github.com/turbot/tailpipe-plugin-sdk/filepaths"
"github.com/turbot/tailpipe-plugin-sdk/row_source"
"github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/database"
- "github.com/turbot/tailpipe/internal/filepaths"
- "github.com/turbot/tailpipe/internal/parquet"
+	localfilepaths "github.com/turbot/tailpipe/internal/filepaths"
"github.com/turbot/tailpipe/internal/plugin"
)
@@ -38,7 +39,7 @@ type Collector struct {
// the execution is used to manage the state of the collection
execution *execution
// the parquet convertor is used to convert the JSONL files to parquet
- parquetConvertor *parquet.Converter
+ parquetConvertor *database.Converter
// the current plugin status - used to update the spinner
status status
@@ -51,6 +52,9 @@ type Collector struct {
// the path to the JSONL files - the plugin will write to this path
sourcePath string
+ // database connection
+ db *database.DuckDb
+
// bubble tea app
app *tea.Program
cancel context.CancelFunc
@@ -61,9 +65,12 @@ func New(pluginManager *plugin.PluginManager, partition *config.Partition, cance
// get the temp data dir for this collection
// - this is located in ~/.turbot/internal/collection//
// first clear out any old collection temp dirs
- filepaths.CleanupCollectionTempDirs()
+ // get the collection directory for this workspace
+ collectionDir := config.GlobalWorkspaceProfile.GetCollectionDir()
+
+ filepaths.CleanupPidTempDirs(collectionDir)
// then create a new collection temp dir
- collectionTempDir := filepaths.EnsureCollectionTempDir()
+ collectionTempDir := localfilepaths.EnsureCollectionTempDir()
// create the collector
c := &Collector{
@@ -84,6 +91,18 @@ func New(pluginManager *plugin.PluginManager, partition *config.Partition, cance
}
c.sourcePath = sourcePath
+ // create the DuckDB connection
+ // load inet extension in addition to the DuckLake extension
+ db, err := database.NewDuckDb(
+ database.WithDuckDbExtensions(pconstants.DuckDbExtensions),
+ database.WithDuckLake(),
+ )
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to create DuckDB connection: %w", err)
+ }
+ c.db = db
+
return c, nil
}
@@ -95,15 +114,16 @@ func New(pluginManager *plugin.PluginManager, partition *config.Partition, cance
func (c *Collector) Close() {
close(c.Events)
- if c.parquetConvertor != nil {
- c.parquetConvertor.Close()
- }
-
// if inbox path is empty, remove it (ignore errors)
_ = os.Remove(c.sourcePath)
// delete the collection temp dir
_ = os.RemoveAll(c.collectionTempDir)
+
+ // close the tea app
+ if c.app != nil {
+ c.app.Quit()
+ }
}
// Collect asynchronously starts the collection process
@@ -113,7 +133,7 @@ func (c *Collector) Close() {
// - starts the collection UI
// - creates a parquet writer, which will process the JSONL files as they are written
// - starts listening to plugin events
-func (c *Collector) Collect(ctx context.Context, fromTime time.Time) (err error) {
+func (c *Collector) Collect(ctx context.Context, fromTime, toTime time.Time, overwrite bool) (err error) {
if c.execution != nil {
return errors.New("collection already in progress")
}
@@ -127,22 +147,20 @@ func (c *Collector) Collect(ctx context.Context, fromTime time.Time) (err error)
}
}()
- // create the execution
- // NOTE: create _before_ calling the plugin to ensure it is ready to receive the started event
- c.execution = newExecution(c.partition)
-
- // tell plugin to start collecting
- collectResponse, err := c.pluginManager.Collect(ctx, c.partition, fromTime, c.collectionTempDir)
- if err != nil {
- return err
+ var collectResponse *plugin.CollectResponse
+ // is this is a synthetic partition?
+	// is this a synthetic partition?
+ if collectResponse, err = c.doCollectSynthetic(ctx, fromTime, toTime, overwrite); err != nil {
+ return err
+ }
+ } else {
+ if collectResponse, err = c.doCollect(ctx, fromTime, toTime, overwrite); err != nil {
+ return err
+ }
}
- // _now_ set the execution id
- c.execution.id = collectResponse.ExecutionId
-
// validate the schema returned by the plugin
- err = collectResponse.Schema.Validate()
- if err != nil {
+ if err = collectResponse.Schema.Validate(); err != nil {
err := fmt.Errorf("table '%s' returned invalid schema: %w", c.partition.TableName, err)
// set execution to error
c.execution.done(err)
@@ -152,20 +170,33 @@ func (c *Collector) Collect(ctx context.Context, fromTime time.Time) (err error)
// determine the time to start collecting from
resolvedFromTime := collectResponse.FromTime
+ // if we are overwriting, we need to delete any existing data in the partition
+ if overwrite {
+ // show spinner while deleting the partition
+ spinner := statushooks.NewStatusSpinnerHook()
+ spinner.SetStatus(fmt.Sprintf("Deleting partition %s", c.partition.TableName))
+ spinner.Show()
+ err := c.deletePartitionData(ctx, resolvedFromTime.Time, toTime)
+ spinner.Hide()
+ if err != nil {
+ // set execution to error
+ c.execution.done(err)
+ // and return error
+ return fmt.Errorf("failed to delete partition data: %w", err)
+ }
+ }
+
// display the progress UI
- err = c.showCollectionStatus(resolvedFromTime)
+ err = c.showCollectionStatus(resolvedFromTime, toTime)
if err != nil {
return err
}
- // if there is a from time, add a filter to the partition - this will be used by the parquet writer
- if !resolvedFromTime.Time.IsZero() {
- // NOTE: handle null timestamp so we get a validation error for null timestamps, rather than excluding the row
- c.partition.AddFilter(fmt.Sprintf("(tp_timestamp is null or tp_timestamp >= '%s')", resolvedFromTime.Time.Format("2006-01-02T15:04:05")))
- }
+ // if we have a from or to time, add filters to the partition
+ c.addTimeRangeFilters(resolvedFromTime, toTime)
// create a parquet writer
- parquetConvertor, err := parquet.NewParquetConverter(ctx, cancel, c.execution.id, c.partition, c.sourcePath, collectResponse.Schema, c.updateRowCount)
+ parquetConvertor, err := database.NewParquetConverter(ctx, cancel, c.execution.id, c.partition, c.sourcePath, collectResponse.Schema, c.updateRowCount, c.db)
if err != nil {
return fmt.Errorf("failed to create parquet writer: %w", err)
}
@@ -177,6 +208,36 @@ func (c *Collector) Collect(ctx context.Context, fromTime time.Time) (err error)
return nil
}
+func (c *Collector) doCollect(ctx context.Context, fromTime time.Time, toTime time.Time, overwrite bool) (*plugin.CollectResponse, error) {
+ // create the execution
+ // NOTE: create _before_ calling the plugin to ensure it is ready to receive the started event
+ c.execution = newExecution(c.partition)
+
+ // tell plugin to start collecting
+ collectResponse, err := c.pluginManager.Collect(ctx, c.partition, fromTime, toTime, overwrite, c.collectionTempDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // _now_ set the execution id
+ c.execution.id = collectResponse.ExecutionId
+ return collectResponse, nil
+}
+
+// addTimeRangeFilters adds filters to the partition based on the from and to time
+func (c *Collector) addTimeRangeFilters(resolvedFromTime *row_source.ResolvedFromTime, toTime time.Time) {
+ // if there is a from time, add a filter to the partition - this will be used by the parquet writer
+ if !resolvedFromTime.Time.IsZero() {
+ // NOTE: handle null timestamp so we get a validation error for null timestamps, rather than excluding the row
+ c.partition.AddFilter(fmt.Sprintf("(tp_timestamp is null or tp_timestamp >= '%s')", resolvedFromTime.Time.Format("2006-01-02T15:04:05")))
+ }
+ // if to time was set as arg, add that filter as well
+ if viper.IsSet(pconstants.ArgTo) {
+ // NOTE: handle null timestamp so we get a validation error for null timestamps, rather than excluding the row
+ c.partition.AddFilter(fmt.Sprintf("(tp_timestamp is null or tp_timestamp < '%s')", toTime.Format("2006-01-02T15:04:05")))
+ }
+}
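+
+// For example (illustrative values only): a resolved from time of
+// 2025-01-01T00:00:00 with --to set to 2025-02-01T00:00:00 would add the
+// partition filters:
+//
+//	(tp_timestamp is null or tp_timestamp >= '2025-01-01T00:00:00')
+//	(tp_timestamp is null or tp_timestamp < '2025-02-01T00:00:00')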
+
// Notify implements observer.Observer
// send an event down the channel to be picked up by the handlePluginEvent goroutine
func (c *Collector) Notify(_ context.Context, event events.Event) error {
@@ -203,14 +264,18 @@ func (c *Collector) Compact(ctx context.Context) error {
c.updateApp(AwaitingCompactionMsg{})
- updateAppCompactionFunc := func(compactionStatus parquet.CompactionStatus) {
+ updateAppCompactionFunc := func(status database.CompactionStatus) {
c.statusLock.Lock()
defer c.statusLock.Unlock()
- c.status.UpdateCompactionStatus(&compactionStatus)
+ c.status.compactionStatus = &status
c.updateApp(CollectionStatusUpdateMsg{status: c.status})
}
- partitionPattern := parquet.NewPartitionPattern(c.partition)
- err := parquet.CompactDataFiles(ctx, updateAppCompactionFunc, partitionPattern)
+ partitionPattern := database.NewPartitionPattern(c.partition)
+
+ // NOTE: we DO NOT reindex when compacting after collection
+ reindex := false
+ err := database.CompactDataFiles(ctx, c.db, updateAppCompactionFunc, reindex, &partitionPattern)
+
if err != nil {
return fmt.Errorf("failed to compact data files: %w", err)
}
@@ -230,89 +295,20 @@ func (c *Collector) Completed() {
}
}
-// handlePluginEvent handles an event from a plugin
-func (c *Collector) handlePluginEvent(ctx context.Context, e events.Event) {
- // handlePluginEvent the event
- // switch based on the struct of the event
- switch ev := e.(type) {
- case *events.Started:
- slog.Info("Started event", "execution", ev.ExecutionId)
- c.execution.state = ExecutionState_STARTED
- case *events.Status:
- c.statusLock.Lock()
- defer c.statusLock.Unlock()
- c.status.UpdateWithPluginStatus(ev)
- c.updateApp(CollectionStatusUpdateMsg{status: c.status})
- case *events.Chunk:
-
- executionId := ev.ExecutionId
- chunkNumber := ev.ChunkNumber
-
- // log every 100 chunks
- if ev.ChunkNumber%100 == 0 {
- slog.Debug("Chunk event", "execution", ev.ExecutionId, "chunk", ev.ChunkNumber)
- }
-
- err := c.parquetConvertor.AddChunk(executionId, chunkNumber)
- if err != nil {
- slog.Error("failed to add chunk to parquet writer", "error", err)
- c.execution.done(err)
- }
- case *events.Complete:
- slog.Info("Complete event", "execution", ev.ExecutionId)
-
- // was there an error?
- if ev.Err != nil {
- slog.Error("execution error", "execution", ev.ExecutionId, "error", ev.Err)
- // update the execution
- c.execution.done(ev.Err)
- return
- }
- // this event means all JSON files have been written - we need to wait for all to be converted to parquet
- // we then combine the parquet files into a single file
-
- // start thread waiting for conversion to complete
- // - this will wait for all parquet files to be written, and will then combine these into a single parquet file
- slog.Info("handlePluginEvent - waiting for conversions to complete")
- go func() {
- err := c.waitForConversions(ctx, ev)
- if err != nil {
- slog.Error("error waiting for execution to complete", "error", err)
- c.execution.done(err)
- } else {
- slog.Info("handlePluginEvent - conversions all complete")
- }
- }()
-
- case *events.Error:
- // TODO #errors error events are deprecated an will only be sent for plugins not using sdk > v0.2.0
- // TODO #errors decide what (if anything) we should do with error events from old plugins https://github.com/turbot/tailpipe/issues/297
- //ev := e.GetErrorEvent()
- //// for now just store errors and display at end
- ////c.execution.state = ExecutionState_ERROR
- ////c.execution.error = fmt.Errorf("plugin error: %s", ev.Error)
- //slog.Warn("plugin error", "execution", ev.ExecutionId, "error", ev.Error)
- }
-}
-
-func (c *Collector) createTableView(ctx context.Context) error {
- // so we are done writing chunks - now update the db to add a view to this data
- // Open a DuckDB connection
- db, err := database.NewDuckDb(database.WithDbFile(filepaths.TailpipeDbFilePath()))
+// deletePartitionData deletes all parquet files in the partition between the fromTime and toTime
+func (c *Collector) deletePartitionData(ctx context.Context, fromTime, toTime time.Time) error {
+ slog.Info("Deleting parquet files after the from time", "partition", c.partition.Name, "from", fromTime)
+ _, err := database.DeletePartition(ctx, c.partition, fromTime, toTime, c.db)
if err != nil {
- return err
- }
- defer db.Close()
+ slog.Warn("Failed to delete parquet files after the from time", "partition", c.partition.Name, "from", fromTime, "error", err)
- err = database.AddTableView(ctx, c.execution.table, db)
- if err != nil {
- return err
}
- return nil
+ slog.Info("Completed deleting parquet files after the from time", "partition", c.partition.Name, "from", fromTime)
+ return err
}
-func (c *Collector) showCollectionStatus(resolvedFromTime *row_source.ResolvedFromTime) error {
- c.status.Init(c.partition.GetUnqualifiedName(), resolvedFromTime)
+func (c *Collector) showCollectionStatus(resolvedFromTime *row_source.ResolvedFromTime, toTime time.Time) error {
+ c.status.Init(c.partition.GetUnqualifiedName(), resolvedFromTime, toTime)
// if the progress flag is set, start the tea app to display the progress
if viper.GetBool(pconstants.ArgProgress) {
@@ -393,17 +389,7 @@ func (c *Collector) waitForConversions(ctx context.Context, ce *events.Complete)
}
// wait for the conversions to complete
- c.parquetConvertor.WaitForConversions(ctx)
-
- // create or update the table view for ths table being collected
- if err := c.createTableView(ctx); err != nil {
- slog.Error("error creating table view", "error", err)
- return err
- }
-
- slog.Info("handlePluginEvent - conversions all complete")
-
- return nil
+ return c.parquetConvertor.WaitForConversions(ctx)
}
// listenToEvents listens to the events channel and handles events
@@ -412,9 +398,65 @@ func (c *Collector) listenToEvents(ctx context.Context) {
select {
case <-ctx.Done():
return
- case event := <-c.Events:
- c.handlePluginEvent(ctx, event)
+ case e := <-c.Events:
+ c.handlePluginEvent(ctx, e)
+ }
+ }
+}
+
+// handlePluginEvent handles an event from a plugin
+func (c *Collector) handlePluginEvent(ctx context.Context, e events.Event) {
+ // handlePluginEvent the event
+ // switch based on the struct of the event
+ switch ev := e.(type) {
+ case *events.Started:
+ slog.Info("Started event", "execution", ev.ExecutionId)
+ c.execution.state = ExecutionState_STARTED
+ case *events.Status:
+ c.statusLock.Lock()
+ defer c.statusLock.Unlock()
+ c.status.UpdateWithPluginStatus(ev)
+ c.updateApp(CollectionStatusUpdateMsg{status: c.status})
+ case *events.Chunk:
+
+ executionId := ev.ExecutionId
+ chunkNumber := ev.ChunkNumber
+
+ // log every 100 chunks
+ if ev.ChunkNumber%100 == 0 {
+ slog.Debug("Chunk event", "execution", ev.ExecutionId, "chunk", ev.ChunkNumber)
+ }
+
+ err := c.parquetConvertor.AddChunk(executionId, chunkNumber)
+ if err != nil {
+ slog.Error("failed to add chunk to parquet writer", "error", err)
+ c.execution.done(err)
+ }
+ case *events.Complete:
+ slog.Info("Complete event", "execution", ev.ExecutionId)
+
+ // was there an error?
+ if ev.Err != nil {
+ slog.Error("execution error", "execution", ev.ExecutionId, "error", ev.Err)
+ // update the execution
+ c.execution.done(ev.Err)
+ return
}
+ // this event means all JSON files have been written - we need to wait for all to be converted to parquet
+ // we then combine the parquet files into a single file
+
+ // start thread waiting for conversion to complete
+ // - this will wait for all parquet files to be written, and will then combine these into a single parquet file
+ slog.Info("handlePluginEvent - waiting for conversions to complete")
+ go func() {
+ err := c.waitForConversions(ctx, ev)
+ if err != nil {
+ slog.Error("error waiting for execution to complete", "error", err)
+ c.execution.done(err)
+ } else {
+ slog.Info("all conversions complete")
+ }
+ }()
}
}
diff --git a/internal/collector/collector_synthetic.go b/internal/collector/collector_synthetic.go
new file mode 100644
index 00000000..57527ef7
--- /dev/null
+++ b/internal/collector/collector_synthetic.go
@@ -0,0 +1,649 @@
+package collector
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"math"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+ "github.com/turbot/tailpipe-plugin-sdk/events"
+ "github.com/turbot/tailpipe-plugin-sdk/row_source"
+ "github.com/turbot/tailpipe-plugin-sdk/schema"
+ "github.com/turbot/tailpipe-plugin-sdk/table"
+ "github.com/turbot/tailpipe/internal/config"
+ "github.com/turbot/tailpipe/internal/plugin"
+)
+
+// doCollectSynthetic initiates synthetic data collection for testing and performance benchmarking.
+// This function simulates the data collection process by generating dummy data instead of collecting from real sources.
+//
+// The function:
+// 1. Creates an execution context to track the synthetic collection process
+// 2. Builds a synthetic schema based on the number of columns specified in the partition metadata
+// 3. Starts a background goroutine to generate and write synthetic data in chunks
+// 4. Returns a CollectResponse that mimics what a real plugin would return
+//
+// This enables testing of the entire data collection pipeline without requiring actual data sources,
+// making it useful for performance testing, load testing, and development/debugging scenarios.
+//
+// Parameters:
+// - ctx: Context for cancellation and timeout handling
+// - fromTime: Start time for the synthetic data (timestamps will be distributed across this range)
+// - toTime: End time for the synthetic data
+// - overwrite: Whether to overwrite existing data (not used in synthetic collection)
+//
+// Returns:
+// - *plugin.CollectResponse: Response containing execution ID and schema information
+// - error: Any error that occurred during initialization
+func (c *Collector) doCollectSynthetic(ctx context.Context, fromTime time.Time, toTime time.Time, overwrite bool) (*plugin.CollectResponse, error) {
+ // Create the execution context to track the synthetic collection process
+ // This must be created before starting the collection goroutine to ensure proper event handling
+ c.execution = &execution{
+ id: "synthetic", // Use "synthetic" as the execution ID
+		partition: c.partition.UnqualifiedName, // Unqualified partition name for identification
+ table: c.partition.TableName, // Table name (always "synthetic" for synthetic partitions)
+ plugin: "synthetic", // Plugin name for logging and identification
+ state: ExecutionState_PENDING, // Initial state before collection starts
+ completionChan: make(chan error, 1), // Channel to signal completion or errors
+ }
+
+ // Build the synthetic schema based on the number of columns specified in the partition metadata
+ // This creates a table schema with the specified number of columns of various types
+	tableSchema := buildSyntheticSchema(c.partition.SyntheticMetadata.Columns)
+
+ // Start a background goroutine to perform the actual synthetic data generation
+ // This simulates the asynchronous nature of real data collection
+	go c.collectSynthetic(ctx, tableSchema, fromTime, toTime)
+
+ // Build a collect response that mimics what a real plugin would return
+ // This allows the synthetic collection to integrate seamlessly with the existing collection pipeline
+ collectResponse := &plugin.CollectResponse{
+		Schema:      tableSchema,   // The generated synthetic schema
+ Schema: schema, // The generated synthetic schema
+ FromTime: &row_source.ResolvedFromTime{
+ Time: fromTime, // Start time for the data collection
+ Source: "synthetic", // Source identifier for synthetic data
+ },
+ }
+
+ // Update the execution ID to match the response (in case it was modified)
+ c.execution.id = collectResponse.ExecutionId
+ return collectResponse, nil
+}
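+
+// The synthetic execution emits the same event sequence a plugin would:
+// Started, then one Chunk and one Status event per generated chunk, and
+// finally a Complete event (see collectSynthetic below).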
+
+// syntheticColumnTypes defines the available column types for synthetic data generation
+var syntheticColumnTypes = []struct {
+ Name string
+ SQLType string
+ StructFields []*schema.ColumnSchema
+}{
+ {"string_col", "VARCHAR", nil},
+ {"int_col", "INTEGER", nil},
+ {"float_col", "DOUBLE", nil},
+ {"bool_col", "BOOLEAN", nil},
+ {"json_col", "JSON", nil},
+ {"timestamp_col", "TIMESTAMP", nil},
+ {"array_col", "JSON", nil},
+ {"nested_json_col", "JSON", nil},
+ {"uuid_col", "VARCHAR", nil},
+ {"simple_struct_col", "STRUCT", []*schema.ColumnSchema{
+ {
+ SourceName: "id",
+ ColumnName: "id",
+ Type: "INTEGER",
+ Description: "Simple struct ID field",
+ },
+ {
+ SourceName: "name",
+ ColumnName: "name",
+ Type: "VARCHAR",
+ Description: "Simple struct name field",
+ },
+ {
+ SourceName: "active",
+ ColumnName: "active",
+ Type: "BOOLEAN",
+ Description: "Simple struct active field",
+ },
+ }},
+ {"nested_struct_col", "STRUCT", []*schema.ColumnSchema{
+ {
+ SourceName: "metadata",
+ ColumnName: "metadata",
+ Type: "STRUCT",
+ StructFields: []*schema.ColumnSchema{
+ {
+ SourceName: "created_at",
+ ColumnName: "created_at",
+ Type: "VARCHAR",
+ Description: "Creation timestamp",
+ },
+ {
+ SourceName: "version",
+ ColumnName: "version",
+ Type: "VARCHAR",
+ Description: "Version string",
+ },
+ },
+ Description: "Metadata information",
+ },
+ {
+ SourceName: "data",
+ ColumnName: "data",
+ Type: "STRUCT",
+ StructFields: []*schema.ColumnSchema{
+ {
+ SourceName: "field1",
+ ColumnName: "field1",
+ Type: "INTEGER",
+ Description: "Numeric field 1",
+ },
+ {
+ SourceName: "field2",
+ ColumnName: "field2",
+ Type: "VARCHAR",
+ Description: "String field 2",
+ },
+ {
+ SourceName: "field3",
+ ColumnName: "field3",
+ Type: "BOOLEAN",
+ Description: "Boolean field 3",
+ },
+ },
+ Description: "Data fields",
+ },
+ }},
+ {"complex_struct_col", "STRUCT", []*schema.ColumnSchema{
+ {
+ SourceName: "user",
+ ColumnName: "user",
+ Type: "STRUCT",
+ StructFields: []*schema.ColumnSchema{
+ {
+ SourceName: "id",
+ ColumnName: "id",
+ Type: "INTEGER",
+ Description: "User ID",
+ },
+ {
+ SourceName: "name",
+ ColumnName: "name",
+ Type: "VARCHAR",
+ Description: "User name",
+ },
+ {
+ SourceName: "profile",
+ ColumnName: "profile",
+ Type: "STRUCT",
+ StructFields: []*schema.ColumnSchema{
+ {
+ SourceName: "age",
+ ColumnName: "age",
+ Type: "INTEGER",
+ Description: "User age",
+ },
+ {
+ SourceName: "email",
+ ColumnName: "email",
+ Type: "VARCHAR",
+ Description: "User email",
+ },
+ {
+ SourceName: "verified",
+ ColumnName: "verified",
+ Type: "BOOLEAN",
+ Description: "Email verified",
+ },
+ },
+ Description: "User profile information",
+ },
+ },
+ Description: "User information",
+ },
+ {
+ SourceName: "settings",
+ ColumnName: "settings",
+ Type: "STRUCT",
+ StructFields: []*schema.ColumnSchema{
+ {
+ SourceName: "theme",
+ ColumnName: "theme",
+ Type: "VARCHAR",
+ Description: "UI theme",
+ },
+ {
+ SourceName: "notifications",
+ ColumnName: "notifications",
+ Type: "BOOLEAN",
+ Description: "Notifications enabled",
+ },
+ },
+ Description: "User settings",
+ },
+ }},
+}
+
+// ConcurrentDataGenerator handles concurrent data generation and marshaling
+type ConcurrentDataGenerator struct {
+ numWorkers int
+ rowChan chan []byte
+ errorChan chan error
+ doneChan chan bool
+}
+
+// NewConcurrentDataGenerator creates a new concurrent data generator
+func NewConcurrentDataGenerator(numWorkers int) *ConcurrentDataGenerator {
+ return &ConcurrentDataGenerator{
+ numWorkers: numWorkers,
+ rowChan: make(chan []byte, numWorkers*100), // Buffer for generated rows
+ errorChan: make(chan error, 1),
+ doneChan: make(chan bool, 1),
+ }
+}
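+
+// Usage sketch (assumes a partition, tableSchema, fromTime and interval are in
+// scope; mirrors writeOptimizedChunkToJSONLConcurrent below): workers marshal
+// rows into rowChan while a single consumer drains it in order of arrival:
+//
+//	cdg := NewConcurrentDataGenerator(4)
+//	go cdg.worker(0, 500, partition, tableSchema, fromTime, interval)
+//	for data := range cdg.rowChan { // data is one newline-terminated JSON row
+//		_, _ = bufWriter.Write(data)
+//	}
+//
+// (the real code closes rowChan once all workers finish)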
+
+// generateRowData generates a single row's JSON data
+func generateRowData(rowIndex int, partition *config.Partition, tableSchema *schema.TableSchema, fromTime time.Time, timestampInterval time.Duration) ([]byte, error) {
+ // Create row map
+ rowMap := make(map[string]any, len(tableSchema.Columns))
+ timestamp := fromTime.Add(time.Duration(rowIndex) * timestampInterval).Format("2006-01-02 15:04:05")
+
+ // Populate row map (skip tp_index and tp_date)
+ for _, column := range tableSchema.Columns {
+ if column.ColumnName == "tp_index" || column.ColumnName == "tp_date" {
+ continue
+ }
+
+ switch column.ColumnName {
+ case "tp_timestamp":
+ rowMap[column.ColumnName] = timestamp
+ case "tp_partition":
+ rowMap[column.ColumnName] = partition.ShortName
+ case "tp_table":
+ rowMap[column.ColumnName] = partition.TableName
+ default:
+ // Generate synthetic data for other columns
+ rowMap[column.ColumnName] = generateSyntheticValue(column, rowIndex)
+ }
+ }
+
+ // Marshal to JSON
+ data, err := json.Marshal(rowMap)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal row %d: %w", rowIndex, err)
+ }
+
+ // Add newline
+ data = append(data, '\n')
+ return data, nil
+}
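+
+// For example (illustrative values): with fromTime=2025-01-01 00:00:00 and
+// timestampInterval=1m, rowIndex 3 is stamped "2025-01-01 00:03:00"; every
+// other non-tp_ column receives a deterministic synthetic value, and the row
+// is returned as a single newline-terminated JSON object.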
+
+// worker generates data for a range of rows
+func (cdg *ConcurrentDataGenerator) worker(startRow, endRow int, partition *config.Partition, tableSchema *schema.TableSchema, fromTime time.Time, timestampInterval time.Duration) {
+ for rowIndex := startRow; rowIndex < endRow; rowIndex++ {
+ data, err := generateRowData(rowIndex, partition, tableSchema, fromTime, timestampInterval)
+ if err != nil {
+ select {
+ case cdg.errorChan <- err:
+ default:
+ }
+ return
+ }
+
+ select {
+ case cdg.rowChan <- data:
+ case <-cdg.doneChan:
+ return
+ }
+ }
+}
+
+// writeOptimizedChunkToJSONLConcurrent uses multiple goroutines for data generation
+func writeOptimizedChunkToJSONLConcurrent(path string, tableSchema *schema.TableSchema, rows int, startRowIndex int, partition *config.Partition, fromTime time.Time, timestampInterval time.Duration) error {
+	file, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to create file %s: %w", path, err)
+ }
+ defer file.Close()
+
+ // Use buffered writer for better I/O performance
+ bufWriter := bufio.NewWriter(file)
+ defer bufWriter.Flush()
+
+ // Determine number of workers (use CPU cores, but cap at reasonable number)
+ numWorkers := runtime.NumCPU()
+ if numWorkers > 8 {
+ numWorkers = 8 // Cap at 8 to avoid too much overhead
+ }
+ if numWorkers > rows {
+ numWorkers = rows // Don't create more workers than rows
+ }
+
+ // Create concurrent data generator
+ cdg := NewConcurrentDataGenerator(numWorkers)
+
+ // Calculate rows per worker
+ rowsPerWorker := rows / numWorkers
+ remainder := rows % numWorkers
+
+ // Start workers
+ var wg sync.WaitGroup
+ startRow := startRowIndex
+ for i := 0; i < numWorkers; i++ {
+ endRow := startRow + rowsPerWorker
+ if i < remainder {
+ endRow++ // Distribute remainder rows
+ }
+
+ wg.Add(1)
+ go func(start, end int) {
+ defer wg.Done()
+ cdg.worker(start, end, partition, tableSchema, fromTime, timestampInterval)
+ }(startRow, endRow)
+
+ startRow = endRow
+ }
+
+ // Start a goroutine to close the row channel when all workers are done
+ go func() {
+ wg.Wait()
+ close(cdg.rowChan)
+ }()
+
+ // Write rows from channel to file
+ rowsWritten := 0
+ for data := range cdg.rowChan {
+ if _, err := bufWriter.Write(data); err != nil {
+ close(cdg.doneChan) // Signal workers to stop
+ return fmt.Errorf("failed to write row %d: %w", rowsWritten, err)
+ }
+ rowsWritten++
+ }
+
+ // Check for errors
+ select {
+ case err := <-cdg.errorChan:
+ return fmt.Errorf("worker error: %w", err)
+ default:
+ }
+
+ if rowsWritten != rows {
+ return fmt.Errorf("expected %d rows, but wrote %d", rows, rowsWritten)
+ }
+
+ return nil
+}
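+
+// Worker partitioning example (illustrative): rows=10 with numWorkers=4 gives
+// rowsPerWorker=2 and remainder=2, so the workers cover the half-open row
+// ranges [0,3), [3,6), [6,8) and [8,10) - the first two workers each absorb
+// one remainder row.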
+
+func buildSyntheticSchema(columns int) *schema.TableSchema {
+ // Create a basic schema with the required number of columns
+ // Start with required tp_ fields
+ s := &schema.TableSchema{
+ Columns: make([]*schema.ColumnSchema, 0, columns+5), // +5 for tp_ fields (including tp_index and tp_date)
+ }
+
+ // Add required tp_ fields first
+ tpFields := []struct {
+ name string
+ columnType string
+ description string
+ }{
+ {"tp_timestamp", "TIMESTAMP", "Timestamp when the record was collected"},
+ {"tp_partition", "VARCHAR", "Partition identifier"},
+ {"tp_table", "VARCHAR", "Table identifier"},
+ {"tp_index", "VARCHAR", "Index identifier"},
+ {"tp_date", "VARCHAR", "Date identifier"},
+ }
+
+ for _, tpField := range tpFields {
+ column := &schema.ColumnSchema{
+ SourceName: tpField.name,
+ ColumnName: tpField.name,
+ Type: tpField.columnType,
+ StructFields: nil,
+ Description: tpField.description,
+ Required: true, // tp_ fields are always required
+ NullIf: "",
+ Transform: "",
+ }
+ s.Columns = append(s.Columns, column)
+ }
+
+ // Add the specified number of synthetic columns by cycling through the column types
+ for i := 0; i < columns; i++ {
+ // Cycle through the column types
+ typeIndex := i % len(syntheticColumnTypes)
+ baseType := syntheticColumnTypes[typeIndex]
+
+ // Create a unique column name
+ columnName := fmt.Sprintf("%s_%d", baseType.Name, i)
+
+ column := &schema.ColumnSchema{
+ SourceName: columnName,
+ ColumnName: columnName,
+ Type: baseType.SQLType,
+ StructFields: baseType.StructFields,
+ Description: fmt.Sprintf("Synthetic column of type %s", baseType.SQLType),
+ Required: false,
+ NullIf: "",
+ Transform: "",
+ }
+
+ s.Columns = append(s.Columns, column)
+ }
+
+ return s
+}
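+
+// For example, buildSyntheticSchema(3) yields the five required tp_ columns
+// plus string_col_0 (VARCHAR), int_col_1 (INTEGER) and float_col_2 (DOUBLE);
+// larger column counts keep cycling through syntheticColumnTypes.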
+
+// collectSynthetic generates synthetic data in chunks and writes it to JSONL files.
+// This function runs in a background goroutine and simulates the data collection process
+// by generating dummy data according to the synthetic partition specifications.
+//
+// The function:
+// 1. Notifies that collection has started
+// 2. Calculates timestamp intervals to distribute timestamps across the time range
+// 3. Generates data in chunks according to the specified chunk size
+// 4. Writes each chunk to a JSONL file using optimized concurrent writing
+// 5. Respects the delivery interval to simulate real-time data flow
+// 6. Sends progress events (chunk and status) to maintain the collection UI
+// 7. Handles cancellation and error conditions gracefully
+// 8. Notifies completion when all data has been generated
+//
+// Parameters:
+// - ctx: Context for cancellation and timeout handling
+// - tableSchema: The schema defining the structure of the synthetic data
+// - fromTime: Start time for timestamp generation
+// - toTime: End time for timestamp generation
+func (c *Collector) collectSynthetic(ctx context.Context, tableSchema *schema.TableSchema, fromTime time.Time, toTime time.Time) {
+ metadata := c.partition.SyntheticMetadata
+
+ // Set the execution state to started to indicate collection is in progress
+ c.execution.state = ExecutionState_STARTED
+
+ // Notify that collection has started - this triggers the collection UI to show progress
+ if err := c.Notify(ctx, &events.Started{ExecutionId: c.execution.id}); err != nil {
+ slog.Error("failed to notify started event", "error", err)
+ c.execution.completionChan <- fmt.Errorf("failed to notify started event: %w", err)
+ return
+ }
+
+ var chunkIdx int32 = 0 // Track the current chunk number
+ var totalRowsProcessed int64 = 0 // Track total rows processed for progress reporting
+
+ // Calculate timestamp interval to distribute timestamps evenly across the time range
+ // This ensures synthetic data has realistic timestamp progression
+ var timestampInterval time.Duration
+ if metadata.Rows > 1 {
+ // Distribute timestamps evenly between fromTime and toTime
+ timestampInterval = toTime.Sub(fromTime) / time.Duration(metadata.Rows-1)
+ } else {
+ // Single row case - no interval needed
+ timestampInterval = 0
+ }
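+	// For example (illustrative): Rows=3 spread between 10:00 and 10:10 gives a
+	// timestampInterval of 5m, so the generated rows are stamped 10:00, 10:05
+	// and 10:10.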
+
+ // Generate data in chunks according to the specified chunk size
+ // This allows for memory-efficient processing of large datasets
+ for rowCount := 0; rowCount < metadata.Rows; rowCount += metadata.ChunkSize {
+ t := time.Now() // Track chunk processing time for delivery interval calculation
+
+ // Check if context is cancelled - allows for graceful shutdown
+ select {
+ case <-ctx.Done():
+ c.execution.completionChan <- ctx.Err()
+ return
+ default:
+ }
+
+ // Calculate the number of rows for this chunk (may be less than chunk size for the last chunk)
+ rows := int(math.Min(float64(metadata.Rows-rowCount), float64(metadata.ChunkSize)))
+
+ // Generate filename for this chunk's JSONL file
+ filename := table.ExecutionIdToJsonlFileName(c.execution.id, chunkIdx)
+ chunkPath := filepath.Join(c.sourcePath, filename)
+
+ // Write the chunk to JSONL file using optimized concurrent approach
+ // This generates synthetic data and writes it efficiently to disk
+ if err := writeOptimizedChunkToJSONLConcurrent(chunkPath, tableSchema, rows, rowCount, c.partition, fromTime, timestampInterval); err != nil {
+ c.execution.completionChan <- fmt.Errorf("error writing chunk to JSONL file: %w", err)
+ return
+ }
+
+ dur := time.Since(t) // Calculate how long this chunk took to process
+
+ // Respect the delivery interval to simulate real-time data flow
+ // If processing was faster than the interval, wait for the remaining time
+ if metadata.DeliveryIntervalMs > 0 && dur < time.Duration(metadata.DeliveryIntervalMs)*time.Millisecond {
+ slog.Debug("Waiting for delivery interval", "duration", dur, "expected", time.Duration(metadata.DeliveryIntervalMs)*time.Millisecond)
+ select {
+ case <-time.After(time.Duration(metadata.DeliveryIntervalMs)*time.Millisecond - dur):
+ // Wait for the remaining time
+ case <-ctx.Done():
+ // Context was cancelled during wait
+ c.execution.completionChan <- ctx.Err()
+ return
+ }
+ }
+
+ // Send chunk event to notify that a chunk has been completed
+ // This updates the collection UI and allows other components to process the chunk
+ chunkEvent := &events.Chunk{ExecutionId: c.execution.id, ChunkNumber: chunkIdx}
+ if err := c.Notify(ctx, chunkEvent); err != nil {
+ slog.Error("failed to notify chunk event", "error", err)
+ c.execution.completionChan <- fmt.Errorf("failed to notify chunk event: %w", err)
+ return
+ }
+
+ // Update total rows processed and send status event
+ // This provides progress information to the collection UI
+ totalRowsProcessed += int64(rows)
+ statusEvent := &events.Status{ExecutionId: c.execution.id, RowsReceived: totalRowsProcessed, RowsEnriched: totalRowsProcessed}
+ if err := c.Notify(ctx, statusEvent); err != nil {
+ slog.Error("failed to notify status event", "error", err)
+ c.execution.completionChan <- fmt.Errorf("failed to notify status event: %w", err)
+ return
+ }
+
+ chunkIdx++ // Move to next chunk
+ }
+
+ // Send completion event to indicate all data has been generated
+ // This triggers final processing and updates the collection UI
+ if err := c.Notify(ctx, events.NewCompletedEvent(c.execution.id, int64(metadata.Rows), chunkIdx, nil)); err != nil {
+ slog.Error("failed to notify completed event", "error", err)
+ c.execution.completionChan <- fmt.Errorf("failed to notify completed event: %w", err)
+ return
+ }
+
+ // Signal completion by sending nil to the completion channel
+ // This allows the main collection process to know that synthetic data generation is complete
+ c.execution.completionChan <- nil
+}
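+
+// A worked example of the timestamp distribution above (illustrative values):
+// with metadata.Rows = 101 and a 100 minute time range, timestampInterval is
+// (toTime - fromTime) / 100 = 1 minute, so row 0 is stamped at fromTime,
+// row 100 at toTime, and the rows in between land one minute apart.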
+
+func generateSyntheticValue(column *schema.ColumnSchema, rowIndex int) any {
+ // Use the column's Type field directly instead of fuzzy matching on name
+ columnType := column.Type
+
+ // Generate value based on exact type match (case-insensitive)
+ switch strings.ToUpper(columnType) {
+ case "VARCHAR":
+ return fmt.Sprintf("%s_val%d", column.ColumnName, rowIndex%100000)
+ case "INTEGER":
+ return (rowIndex % 100000) + 1
+ case "DOUBLE":
+ return float64(rowIndex%100000) * 0.1
+ case "BOOLEAN":
+ return rowIndex%2 == 0
+ case "JSON":
+ return generateJSONValue(column, rowIndex)
+ case "TIMESTAMP":
+ return time.Now().AddDate(0, 0, -rowIndex%30).Format("2006-01-02 15:04:05")
+ default:
+ // Handle struct types and complex types
+ if strings.Contains(strings.ToUpper(columnType), "STRUCT") {
+ return generateStructValue(column, rowIndex)
+ }
+ // Any other unrecognized type is a programming error - panic
+ panic(fmt.Sprintf("Unsupported column type '%s' for column '%s'", columnType, column.ColumnName))
+ }
+}
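+
+// Example outputs (illustrative column names): for a VARCHAR column named
+// "string_col_0" and rowIndex 42 this returns "string_col_0_val42"; for an
+// INTEGER column it returns 43 ((42 % 100000) + 1); for a BOOLEAN column it
+// returns true (42 is even).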
+
+func generateJSONValue(column *schema.ColumnSchema, rowIndex int) any {
+ // Generate different JSON structures based on column name
+ if strings.Contains(column.ColumnName, "nested_json") {
+ return map[string]any{
+ "metadata": map[string]any{
+ "created_at": time.Now().AddDate(0, 0, -rowIndex%30).Format("2006-01-02"),
+ "version": fmt.Sprintf("v%d.%d", rowIndex%10, rowIndex%5),
+ },
+ "data": map[string]any{
+ "field1": rowIndex % 100000,
+ "field2": fmt.Sprintf("field_%d", rowIndex%100000),
+ "field3": rowIndex%2 == 0,
+ },
+ }
+ } else if strings.Contains(column.ColumnName, "array") {
+ return []any{
+ fmt.Sprintf("item_%d", rowIndex%100000),
+ rowIndex % 100000,
+ rowIndex%2 == 0,
+ float64(rowIndex%100000) * 0.1,
+ }
+ } else {
+ // Default JSON object
+ return map[string]any{
+ "id": rowIndex % 100000,
+ "name": fmt.Sprintf("item_%d", rowIndex%100000),
+ "value": (rowIndex % 100000) + 1,
+ "tags": []string{"tag1", "tag2", "tag3"},
+ }
+ }
+}
+
+func generateStructValue(column *schema.ColumnSchema, rowIndex int) any {
+ if column.StructFields == nil {
+ return map[string]any{
+ "id": rowIndex % 100000,
+ "name": fmt.Sprintf("struct_%d", rowIndex%100000),
+ }
+ }
+
+ result := make(map[string]any)
+ for _, field := range column.StructFields {
+ if field.StructFields != nil {
+ // Nested struct
+ result[field.ColumnName] = generateStructValue(field, rowIndex)
+ } else {
+ // Simple field
+ result[field.ColumnName] = generateSyntheticValue(field, rowIndex)
+ }
+ }
+ return result
+}
diff --git a/internal/collector/status.go b/internal/collector/status.go
index 6dc5d304..e1601db9 100644
--- a/internal/collector/status.go
+++ b/internal/collector/status.go
@@ -2,6 +2,7 @@ package collector
import (
"fmt"
+ "github.com/turbot/tailpipe/internal/database"
"path/filepath"
"strings"
"time"
@@ -11,7 +12,6 @@ import (
"github.com/turbot/tailpipe-plugin-sdk/events"
"github.com/turbot/tailpipe-plugin-sdk/logging"
"github.com/turbot/tailpipe-plugin-sdk/row_source"
- "github.com/turbot/tailpipe/internal/parquet"
)
const uiErrorsToDisplay = 15
@@ -28,14 +28,16 @@ type status struct {
complete bool
partitionName string
fromTime *row_source.ResolvedFromTime
- compactionStatus *parquet.CompactionStatus
+ compactionStatus *database.CompactionStatus
+ toTime time.Time
}
// Init initializes the status with the partition name and resolved from time of the collection and marks start of collection for timing
-func (s *status) Init(partitionName string, fromTime *row_source.ResolvedFromTime) {
+func (s *status) Init(partitionName string, fromTime *row_source.ResolvedFromTime, toTime time.Time) {
s.started = time.Now()
s.partitionName = partitionName
s.fromTime = fromTime
+ s.toTime = toTime
}
// UpdateWithPluginStatus updates the status with the values from the plugin status event
@@ -54,28 +56,15 @@ func (s *status) UpdateConversionStatus(rowsConverted, failedRows int64, errors
}
}
-// UpdateCompactionStatus updates the status with the values from the compaction status event
-func (s *status) UpdateCompactionStatus(compactionStatus *parquet.CompactionStatus) {
- if compactionStatus == nil {
- return
- }
-
- if s.compactionStatus == nil {
- s.compactionStatus = parquet.NewCompactionStatus()
- }
-
- s.compactionStatus.Update(*compactionStatus)
-}
-
// CollectionHeader returns a string to display at the top of the collection status for app or alone for non-progress display
func (s *status) CollectionHeader() string {
// wrap the source in parentheses if it exists
fromTimeSource := s.fromTime.Source
if s.fromTime.Source != "" {
- fromTimeSource = fmt.Sprintf("(%s)", s.fromTime.Source)
+ fromTimeSource = fmt.Sprintf(" (%s)", s.fromTime.Source)
}
- return fmt.Sprintf("\nCollecting logs for %s from %s %s\n\n", s.partitionName, s.fromTime.Time.Format(time.DateOnly), fromTimeSource)
+ return fmt.Sprintf("\nCollecting logs for %s from %s%s to %s\n\n", s.partitionName, s.fromTime.Time.Format(time.DateOnly), fromTimeSource, s.toTime.Format(time.DateOnly))
}
// String returns a string representation of the status used as body of app display or final output for non-progress display
@@ -218,14 +207,11 @@ func (s *status) displayFilesSection() string {
var out strings.Builder
out.WriteString("Files:\n")
- if s.compactionStatus.Source == 0 && s.compactionStatus.Uncompacted == 0 {
+ if s.compactionStatus.InitialFiles == 0 {
// no counts available, display status text
out.WriteString(fmt.Sprintf(" %s\n", statusText))
} else {
- // display counts source => dest
- l := int64(s.compactionStatus.Source + s.compactionStatus.Uncompacted)
- r := int64(s.compactionStatus.Dest + s.compactionStatus.Uncompacted)
- out.WriteString(fmt.Sprintf(" Compacted: %s => %s\n", humanize.Comma(l), humanize.Comma(r)))
+ out.WriteString(fmt.Sprintf(" %s\n", s.compactionStatus.String()))
}
out.WriteString("\n")
@@ -285,14 +271,22 @@ func (s *status) displayErrorsSection() string {
// displayTimingSection returns a string representation of the timing section of the status (time elapsed since start of collection)
func (s *status) displayTimingSection() string {
duration := time.Since(s.started)
- timeLabel := "Time:"
// if we're complete, change the time label to show this
if s.complete {
- timeLabel = "Completed:"
+ if s.compactionStatus != nil && s.compactionStatus.Duration > 0 {
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("Collection: %s\n", utils.HumanizeDuration(duration)))
+ sb.WriteString(fmt.Sprintf("Compaction: %s\n", utils.HumanizeDuration(s.compactionStatus.Duration)))
+ sb.WriteString(fmt.Sprintf("Total: %s\n", utils.HumanizeDuration(duration+s.compactionStatus.Duration)))
+ return sb.String()
+ }
+ return fmt.Sprintf("Completed: %s\n", utils.HumanizeDuration(duration))
+ } else {
+ // if not complete, show elapsed time
+ return fmt.Sprintf("Time: %s\n", utils.HumanizeDuration(duration))
}
- return fmt.Sprintf("%s %s\n", timeLabel, utils.HumanizeDuration(duration))
}
// writeCountLine returns a formatted string for a count line in the status display, used for alignment and readability
diff --git a/internal/collector/tui.go b/internal/collector/tui.go
index 7ff1f31f..e3e5b785 100644
--- a/internal/collector/tui.go
+++ b/internal/collector/tui.go
@@ -1,12 +1,11 @@
package collector
import (
+ "github.com/turbot/tailpipe/internal/database"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
-
- "github.com/turbot/tailpipe/internal/parquet"
)
type collectionModel struct {
@@ -64,7 +63,7 @@ func (c collectionModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
return c, nil
case AwaitingCompactionMsg:
// this doesn't do anything useful except trigger a view update with file compaction placeholder
- cs := parquet.CompactionStatus{}
+ cs := database.CompactionStatus{}
c.status.compactionStatus = &cs
return c, nil
case tickMsg:
diff --git a/internal/config/connection.go b/internal/config/connection.go
index e82416fa..400df1b6 100644
--- a/internal/config/connection.go
+++ b/internal/config/connection.go
@@ -37,12 +37,10 @@ func (c *TailpipeConnection) GetSubType() string {
func (c *TailpipeConnection) ToProto() *proto.ConfigData {
return &proto.ConfigData{
- //Target: c.Name(),
- // TODO fix connection parsing to populate name
+ // Target is of form `connection.`
Target: "connection." + c.Plugin,
-
- Hcl: c.Hcl,
- Range: proto.RangeToProto(c.DeclRange),
+ Hcl: c.Hcl,
+ Range: proto.RangeToProto(c.DeclRange),
}
}
@@ -71,47 +69,3 @@ func NewTailpipeConnection(block *hcl.Block, fullName string) (modconfig.HclReso
c.UnqualifiedName = fmt.Sprintf("%s.%s", c.Plugin, c.ShortName)
return c, nil
}
-
-// TODO implement if needed https://github.com/turbot/tailpipe/issues/34
-//
-//func CtyValueToConnection(value cty.Value) (_ *TailpipeConnection, err error) {
-// defer func() {
-// if r := recover(); r != nil {
-// err = perr.BadRequestWithMessage("unable to decode connection: " + r.(string))
-// }
-// }()
-//
-// // get the name, block type and range and use to construct a connection
-// shortName := value.GetAttr("short_name").AsString()
-// name := value.GetAttr("name").AsString()
-// block := &hcl.Block{
-// Labels: []string{"connection", name},
-// }
-//
-//
-//
-// // now instantiate an empty connection of the correct type
-// conn, err := NewTailpipeConnection(&hcl.Block{}, name)
-// if err != nil {
-// return nil, perr.BadRequestWithMessage("unable to decode connection: " + err.Error())
-// }
-//
-// // split the cty value into fields for ConnectionImpl and the derived connection,
-// // (NOTE: exclude the 'env', 'type', 'resource_type' fields, which are manually added)
-// baseValue, derivedValue, err := getKnownCtyFields(value, conn.GetConnectionImpl(), "env", "type", "resource_type")
-// if err != nil {
-// return nil, perr.BadRequestWithMessage("unable to decode connection: " + err.Error())
-// }
-// // decode the base fields into the ConnectionImpl
-// err = gocty.FromCtyValue(baseValue, conn.GetConnectionImpl())
-// if err != nil {
-// return nil, perr.BadRequestWithMessage("unable to decode ConnectionImpl: " + err.Error())
-// }
-// // decode remaining fields into the derived connection
-// err = gocty.FromCtyValue(derivedValue, &conn)
-// if err != nil {
-// return nil, perr.BadRequestWithMessage("unable to decode connection: " + err.Error())
-// }
-//
-// return nil, nil
-//}
diff --git a/internal/config/partition.go b/internal/config/partition.go
index 4407788b..8086f82f 100644
--- a/internal/config/partition.go
+++ b/internal/config/partition.go
@@ -21,6 +21,13 @@ func init() {
registerResourceWithSubType(schema.BlockTypePartition)
}
+type SyntheticMetadata struct {
+ Columns int
+ Rows int
+ ChunkSize int
+ DeliveryIntervalMs int
+}
+
type Partition struct {
modconfig.HclResourceImpl
// required to allow partial decoding
@@ -45,7 +52,10 @@ type Partition struct {
// an option filter in the format of a SQL where clause
Filter string `cty:"filter"`
// the sql column to use for the tp_index
- TpIndexColumn string `cty:"tp_index_column"`
+ TpIndexColumn string `cty:"tp_index"`
+
+ // if this is a synthetic partition for testing, this will be non-null
+ SyntheticMetadata *SyntheticMetadata
}
func NewPartition(block *hcl.Block, fullName string) (modconfig.HclResource, hcl.Diagnostics) {
@@ -78,7 +88,7 @@ func (p *Partition) SetConfigHcl(u *HclBytes) {
func (p *Partition) InferPluginName(v *versionfile.PluginVersionFile) string {
// NOTE: we cannot call the TailpipeConfig.GetPluginForTable function as tailpipe config is not populated yet
if p.CustomTable != nil {
- return constants.CorePluginName
+ return constants.CorePluginInstallStream()
}
return GetPluginForTable(p.TableName, v.Plugins)
diff --git a/internal/config/source.go b/internal/config/source.go
index 3efcf3c0..b5ab318c 100644
--- a/internal/config/source.go
+++ b/internal/config/source.go
@@ -1,6 +1,7 @@
package config
import (
+ "github.com/turbot/pipe-fittings/v2/hclhelpers"
"github.com/turbot/tailpipe-plugin-sdk/grpc/proto"
)
@@ -13,10 +14,23 @@ type Source struct {
Config *HclBytes `cty:"config"`
}
+func NewSource(sourceType string) *Source {
+ return &Source{
+ Type: sourceType,
+ Config: &HclBytes{
+ Hcl: []byte{},
+ Range: hclhelpers.Range{},
+ },
+ }
+}
func (s *Source) ToProto() *proto.ConfigData {
+ var hcl []byte
+ if s.Config != nil {
+ hcl = s.Config.Hcl
+ }
return &proto.ConfigData{
Target: "source." + s.Type,
- Hcl: s.Config.Hcl,
+ Hcl: hcl,
Range: proto.RangeToProto(s.Config.Range.HclRange()),
}
}
diff --git a/internal/config/table.go b/internal/config/table.go
index b8a4e521..154967ef 100644
--- a/internal/config/table.go
+++ b/internal/config/table.go
@@ -21,7 +21,7 @@ type Table struct {
// required to allow partial decoding
Remain hcl.Body `hcl:",remain" json:"-"`
- // the default format for this table (todo make a map keyed by source name?)
+ // the default format for this table
DefaultSourceFormat *Format `hcl:"format" cty:"format"`
Columns []Column `hcl:"column,block" cty:"columns"`
diff --git a/internal/constants/connect.go b/internal/constants/connect.go
new file mode 100644
index 00000000..c91b9948
--- /dev/null
+++ b/internal/constants/connect.go
@@ -0,0 +1,6 @@
+package constants
+
+import "time"
+
+// InitFileMaxAge is the maximum age of a db init file before it is cleaned up
+const InitFileMaxAge = 24 * time.Hour
diff --git a/internal/constants/database.go b/internal/constants/database.go
deleted file mode 100644
index f7667e5f..00000000
--- a/internal/constants/database.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package constants
-
-import "time"
-
-const (
- TailpipeDbName = "tailpipe.db"
- DbFileMaxAge = 24 * time.Hour
-)
diff --git a/internal/constants/duckdb_extensions.go b/internal/constants/duckdb_extensions.go
deleted file mode 100644
index e7d02979..00000000
--- a/internal/constants/duckdb_extensions.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package constants
-
-var DuckDbExtensions = []string{"json", "inet"}
diff --git a/internal/constants/metaquery_commands.go b/internal/constants/metaquery_commands.go
index 8f8ee7b5..978e51ad 100644
--- a/internal/constants/metaquery_commands.go
+++ b/internal/constants/metaquery_commands.go
@@ -3,9 +3,7 @@ package constants
// Metaquery commands
const (
- //CmdTableList = ".tables" // List all tables
- CmdOutput = ".output" // Set output mode
- //CmdTiming = ".timing" // Toggle query timer
+ CmdOutput = ".output" // Set output mode
CmdHeaders = ".header" // Toggle headers output
CmdSeparator = ".separator" // Set the column separator
CmdExit = ".exit" // Exit the interactive prompt
@@ -15,5 +13,4 @@ const (
CmdClear = ".clear" // clear the console
CmdHelp = ".help" // list all meta commands
CmdAutoComplete = ".autocomplete" // enable or disable auto complete
- TpPrefix = "tp_" // tailpipe prefix for tailpipe specific columns
)
diff --git a/internal/constants/plugin.go b/internal/constants/plugin.go
index 9d318cec..fc44e390 100644
--- a/internal/constants/plugin.go
+++ b/internal/constants/plugin.go
@@ -1,11 +1,61 @@
package constants
+import (
+ "strings"
+)
+
const (
- CorePluginName = "core"
- CorePluginFullName = "hub.tailpipe.io/plugins/turbot/core@latest"
- MinCorePluginVersion = "v0.2.7"
+
+ // MinCorePluginVersion should be set for production releases - it is the minimum version of the core plugin that is required
+ MinCorePluginVersion = "v0.2.10"
+ // CorePluginVersion may be set for pre-release versions - it allows us to pin a pre-release version of the core plugin
+ // NOTE: they must NOT both be set
+ CorePluginVersion = ""
// TailpipeHubOCIBase is the tailpipe hub URL
TailpipeHubOCIBase = "hub.tailpipe.io/"
+
// BaseImageRef is the prefix for all tailpipe plugin images
BaseImageRef = "ghcr.io/turbot/tailpipe"
)
+
+// CorePluginRequiredVersionConstraint returns a version constraint for the required core plugin version
+// normally we set the core version by setting constants.MinCorePluginVersion
+// However if we want to pin to a specific version (e.g. an rc version) we can set constants.CorePluginVersion instead
+// exactly one of constants.CorePluginVersion and constants.MinCorePluginVersion must be set
+// if both (or neither) are set, this is a bug
+func CorePluginRequiredVersionConstraint() (requiredConstraint string) {
+ if CorePluginVersion == "" && MinCorePluginVersion == "" {
+ panic("one of constants.CorePluginName or constants.MinCorePluginVersion must be set")
+ }
+ if CorePluginVersion != "" && MinCorePluginVersion != "" {
+ panic("both constants.CorePluginVersion and constants.MinCorePluginVersion are set, this is a bug")
+ }
+ if MinCorePluginVersion != "" {
+ requiredConstraint = ">=" + MinCorePluginVersion
+ return requiredConstraint
+ }
+
+ // so CorePluginVersion is set - return as-is
+ return CorePluginVersion
+}
+
+// CorePluginInstallStream returns the plugin stream used to install the core plugin
+// under normal circumstances (i.e. if MinCorePluginVersion is set) this is "core@latest"
+func CorePluginInstallStream() string {
+ var installConstraint string
+ if MinCorePluginVersion != "" {
+ installConstraint = "latest"
+ } else {
+ // so CorePluginVersion is set
+ // tactical - trim the leading 'v' as installation expects a bare version
+ installConstraint = strings.TrimPrefix(CorePluginVersion, "v")
+ }
+
+ return "core@" + installConstraint
+}
+
+func CorePluginFullName() string {
+ installStream := CorePluginInstallStream()
+ return "hub.tailpipe.io/plugins/turbot/" + installStream
+}
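+
+// Illustrative behaviour of the helpers above with the constants as currently
+// set (MinCorePluginVersion = "v0.2.10", CorePluginVersion = ""):
+//
+//   CorePluginRequiredVersionConstraint() // ">=v0.2.10"
+//   CorePluginInstallStream()             // "core@latest"
+//   CorePluginFullName()                  // "hub.tailpipe.io/plugins/turbot/core@latest"
+//
+// If instead CorePluginVersion were set to (say) "v0.3.0-rc.1" and
+// MinCorePluginVersion cleared, the install stream would be "core@0.3.0-rc.1".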
diff --git a/internal/database/backup.go b/internal/database/backup.go
new file mode 100644
index 00000000..05d106c4
--- /dev/null
+++ b/internal/database/backup.go
@@ -0,0 +1,138 @@
+package database
+
+import (
+ "fmt"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/turbot/pipe-fittings/v2/utils"
+ "github.com/turbot/tailpipe/internal/config"
+)
+
+// BackupDucklakeMetadata creates a timestamped backup of the DuckLake metadata database.
+// It creates backup files with format: metadata.sqlite.backup.YYYYMMDDHHMMSS
+// and also backs up the WAL file if it exists:
+// - metadata.sqlite-wal.backup.YYYYMMDDHHMMSS
+// It removes any existing backup files to maintain only the most recent backup.
+//
+// The backup is created in the same directory as the original database file.
+// If the database file doesn't exist, no backup is created and no error is returned.
+//
+// Returns an error if the backup operation fails.
+func BackupDucklakeMetadata() error {
+ // Get the path to the DuckLake metadata database
+ dbPath := config.GlobalWorkspaceProfile.GetDucklakeDbPath()
+
+ // Check if the database file exists
+ if _, err := os.Stat(dbPath); os.IsNotExist(err) {
+ slog.Debug("DuckLake metadata database does not exist, skipping backup", "path", dbPath)
+ return nil
+ } else if err != nil {
+ return fmt.Errorf("failed to check if database exists: %w", err)
+ }
+
+ // Generate timestamp for backup filename
+ timestamp := time.Now().Format("20060102150405") // YYYYMMDDHHMMSS format
+
+ // Create backup filenames
+ dbDir := filepath.Dir(dbPath)
+ mainBackupFilename := fmt.Sprintf("metadata.sqlite.backup.%s", timestamp)
+ mainBackupPath := filepath.Join(dbDir, mainBackupFilename)
+
+ // Also prepare paths for WAL file
+ walPath := dbPath + "-wal"
+ walBackupFilename := fmt.Sprintf("metadata.sqlite-wal.backup.%s", timestamp)
+ walBackupPath := filepath.Join(dbDir, walBackupFilename)
+
+ slog.Info("Creating backup of DuckLake metadata database", "source", dbPath, "backup", mainBackupPath)
+
+ // Create the main database backup first
+ if err := utils.CopyFile(dbPath, mainBackupPath); err != nil {
+ return fmt.Errorf("failed to create main database backup: %w", err)
+ }
+
+ // Backup WAL file if it exists
+ if _, err := os.Stat(walPath); err == nil {
+ if err := utils.CopyFile(walPath, walBackupPath); err != nil {
+ slog.Warn("Failed to backup WAL file", "source", walPath, "error", err)
+ // Continue - WAL backup failure is not critical
+ } else {
+ slog.Debug("Successfully backed up WAL file", "backup", walBackupPath)
+ }
+ }
+
+ slog.Info("Successfully created backup of DuckLake metadata database", "backup", mainBackupPath)
+
+ // Clean up old backup files after successfully creating the new one
+ if err := cleanupOldBackups(dbDir, timestamp); err != nil {
+ slog.Warn("Failed to clean up old backup files", "error", err)
+ // Don't return error - the backup was successful, cleanup is just housekeeping
+ }
+ return nil
+}
+
+// isBackupFile checks if a filename matches any of the backup patterns
+func isBackupFile(filename string) bool {
+ backupPrefixes := []string{
+ "metadata.sqlite.backup.",
+ "metadata.sqlite-wal.backup.",
+ }
+
+ for _, prefix := range backupPrefixes {
+ if strings.HasPrefix(filename, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// shouldRemoveBackup determines if a backup file should be removed
+func shouldRemoveBackup(filename, excludeTimestamp string) bool {
+ if !isBackupFile(filename) {
+ return false
+ }
+ // Don't remove files with the current timestamp
+ return !strings.HasSuffix(filename, "."+excludeTimestamp)
+}
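+
+// For example (illustrative timestamps), with excludeTimestamp "20250102130000":
+//
+//   shouldRemoveBackup("metadata.sqlite.backup.20250101120000", ts) // true - older backup
+//   shouldRemoveBackup("metadata.sqlite.backup.20250102130000", ts) // false - current backup
+//   shouldRemoveBackup("metadata.sqlite", ts)                       // false - not a backup file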
+
+// cleanupOldBackups removes all existing backup files in the specified directory,
+// except for the newly created backup files with the given timestamp.
+// Backup files are identified by the patterns:
+// - metadata.sqlite.backup.*
+// - metadata.sqlite-wal.backup.*
+func cleanupOldBackups(dir, excludeTimestamp string) error {
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ return fmt.Errorf("failed to read directory: %w", err)
+ }
+
+ var deletedCount int
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ filename := entry.Name()
+ if !shouldRemoveBackup(filename, excludeTimestamp) {
+ continue
+ }
+
+ backupPath := filepath.Join(dir, filename)
+ if err := os.Remove(backupPath); err != nil {
+ slog.Warn("Failed to remove old backup file", "file", backupPath, "error", err)
+ // Continue removing other files even if one fails
+ } else {
+ slog.Debug("Removed old backup file", "file", backupPath)
+ deletedCount++
+ }
+ }
+
+ if deletedCount > 0 {
+ slog.Debug("Cleaned up old backup files", "count", deletedCount)
+ }
+
+ return nil
+}
diff --git a/internal/database/cleanup.go b/internal/database/cleanup.go
new file mode 100644
index 00000000..5a189362
--- /dev/null
+++ b/internal/database/cleanup.go
@@ -0,0 +1,136 @@
+package database
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "time"
+
+ "github.com/turbot/pipe-fittings/v2/constants"
+ "github.com/turbot/tailpipe/internal/config"
+)
+
+// DeletePartition deletes data for the specified partition and date range from the given Ducklake connected database.
+func DeletePartition(ctx context.Context, partition *config.Partition, from, to time.Time, db *DuckDb) (rowCount int, err error) {
+ // First check if the table exists using DuckLake metadata
+ tableExistsQuery := fmt.Sprintf(`select exists (select 1 from %s.ducklake_table where table_name = ?)`, constants.DuckLakeMetadataCatalog)
+ var tableExists bool
+ if err := db.QueryRowContext(ctx, tableExistsQuery, partition.TableName).Scan(&tableExists); err != nil {
+ return 0, fmt.Errorf("failed to check if table exists: %w", err)
+ }
+
+ if !tableExists {
+ // Table doesn't exist, return 0 rows affected (not an error)
+ return 0, nil
+ }
+
+ // build a delete query for the partition
+ // Note: table names cannot be parameterized, so we use string formatting for the table name
+ query := fmt.Sprintf(`delete from "%s" where tp_partition = ? and tp_timestamp >= ? and tp_timestamp <= ?`, partition.TableName)
+ // Execute the query with parameters for the partition and date range
+ result, err := db.ExecContext(ctx, query, partition.ShortName, from, to)
+ if err != nil {
+ return 0, fmt.Errorf("failed to delete partition: %w", err)
+ }
+
+ // Get the number of rows affected by the delete operation
+ rowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get rows affected count: %w", err)
+ }
+ rowCount = int(rowsAffected)
+
+ // Only perform cleanup if we actually deleted some rows
+ if rowCount > 0 {
+ if err = DucklakeCleanup(ctx, db); err != nil {
+ return 0, err
+ }
+ }
+
+ return rowCount, nil
+}
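+
+// A hypothetical call site (names illustrative only) - delete the last 7 days
+// of data for a partition and report the number of rows removed:
+//
+//   from := time.Now().AddDate(0, 0, -7)
+//   rows, err := DeletePartition(ctx, partition, from, time.Now(), db)
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Printf("deleted %d rows\n", rows)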
+
+// DucklakeCleanup removes old snapshots and deletes expired and unused parquet files from the DuckDB database.
+func DucklakeCleanup(ctx context.Context, db *DuckDb) error {
+ slog.Info("Cleaning up DuckLake snapshots and expired files")
+ // now clean old snapshots
+ if err := expirePrevSnapshots(ctx, db); err != nil {
+ return err
+ }
+ // delete expired files
+ if err := cleanupExpiredFiles(ctx, db); err != nil {
+ return err
+ }
+ return nil
+}
+
+// expirePrevSnapshots expires all snapshots but the latest
+// Ducklake stores a snapshot corresponding to each database operation - this allows the tracking of the history of changes
+// However we do not currently take advantage of this ducklake functionality, so we can remove all but the latest snapshot
+// To do this we get the date of the most recent snapshot and then expire all snapshots older than that date.
+// We then call ducklake_cleanup to remove the expired files.
+func expirePrevSnapshots(ctx context.Context, db *DuckDb) error {
+ slog.Info("Expiring old DuckLake snapshots")
+ defer slog.Info("DuckLake snapshot expiration complete")
+
+ // 1) get the timestamp of the latest snapshot from the metadata schema
+ var latestTimestamp string
+ query := fmt.Sprintf(`select snapshot_time from %s.ducklake_snapshot order by snapshot_id desc limit 1`, constants.DuckLakeMetadataCatalog)
+
+ err := db.QueryRowContext(ctx, query).Scan(&latestTimestamp)
+ if err != nil {
+ return fmt.Errorf("failed to get latest snapshot timestamp: %w", err)
+ }
+
+ // Parse the snapshot time
+ // NOTE: rather than cast as timestamp, we read as a string then remove any timezone component
+ // This is because of the dubious behaviour of ducklake_expire_snapshots described below
+ // try various formats
+ formats := []string{
+ "2006-01-02 15:04:05.999-07:00", // +05:30
+ "2006-01-02 15:04:05.999-07", // +01
+ "2006-01-02 15:04:05.999", // no timezone
+ }
+ var parsedTime time.Time
+ for _, format := range formats {
+ parsedTime, err = time.Parse(format, latestTimestamp)
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("failed to parse snapshot time '%s': %w", latestTimestamp, err)
+ }
+
+ // format the time
+ // Note: ducklake_expire_snapshots expects a local time without timezone,
+ // i.e. if the time is '2025-08-26 13:25:10.365 +0100', we should pass '2025-08-26 13:25:10.365'
+ formattedTime := parsedTime.Format("2006-01-02 15:04:05.000")
+ slog.Debug("Latest snapshot timestamp", "timestamp", latestTimestamp)
+
+ // 2) expire all snapshots older than the latest one
+ // Note: ducklake_expire_snapshots uses named parameters which cannot be parameterized with standard SQL placeholders
+ expireQuery := fmt.Sprintf(`call ducklake_expire_snapshots('%s', older_than => '%s')`, constants.DuckLakeCatalog, formattedTime)
+
+ _, err = db.ExecContext(ctx, expireQuery)
+ if err != nil {
+ return fmt.Errorf("failed to expire old snapshots: %w", err)
+ }
+
+ return nil
+}
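+
+// For example (illustrative value): a latestTimestamp of
+// "2025-08-26 13:25:10.365+01" parses with the second format above and is
+// reformatted, timezone component dropped, to "2025-08-26 13:25:10.365"
+// before being passed to ducklake_expire_snapshots.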
+
+// cleanupExpiredFiles deletes any files marked as expired in the ducklake system.
+func cleanupExpiredFiles(ctx context.Context, db *DuckDb) error {
+ slog.Info("Cleaning up expired files in DuckLake")
+ defer slog.Info("DuckLake expired files cleanup complete")
+
+ cleanupQuery := fmt.Sprintf("call ducklake_cleanup_old_files('%s', cleanup_all => true)", constants.DuckLakeCatalog)
+
+ _, err := db.ExecContext(ctx, cleanupQuery)
+ if err != nil {
+ return fmt.Errorf("failed to cleanup expired files: %w", err)
+ }
+
+ return nil
+}
diff --git a/internal/database/compact.go b/internal/database/compact.go
new file mode 100644
index 00000000..c81b5a0a
--- /dev/null
+++ b/internal/database/compact.go
@@ -0,0 +1,435 @@
+package database
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "log/slog"
+ "strings"
+ "time"
+
+ "github.com/turbot/pipe-fittings/v2/backend"
+ "github.com/turbot/pipe-fittings/v2/constants"
+)
+
+const (
+ // maxCompactionRowsPerChunk is the maximum number of rows to compact in a single insert operation
+ maxCompactionRowsPerChunk = 5_000_000
+)
+
+func CompactDataFiles(ctx context.Context, db *DuckDb, updateFunc func(CompactionStatus), reindex bool, patterns ...*PartitionPattern) error {
+ slog.Info("Compacting DuckLake data files")
+
+ t := time.Now()
+
+ // get a list of partition key combinations which match any of the patterns
+ partitionKeys, err := getPartitionKeysMatchingPattern(ctx, db, patterns)
+ if err != nil {
+ return fmt.Errorf("failed to get partition keys requiring compaction: %w", err)
+ }
+
+ if len(partitionKeys) == 0 {
+ slog.Info("No matching partitions found for compaction")
+ return nil
+ }
+
+ status, err := orderDataFiles(ctx, db, updateFunc, partitionKeys, reindex)
+ if err != nil {
+ slog.Error("Failed to compact DuckLake parquet files", "error", err)
+ return err
+ }
+
+ // now expire unused snapshots
+ if err := expirePrevSnapshots(ctx, db); err != nil {
+ slog.Error("Failed to expire previous DuckLake snapshots", "error", err)
+ return err
+ }
+
+ // we should now have multiple, time-ordered parquet files
+ // now merge the parquet files in the duckdb database
+ // this will minimise the parquet file count to the optimum
+ if err := mergeParquetFiles(ctx, db); err != nil {
+ slog.Error("Failed to merge DuckLake parquet files", "error", err)
+ return err
+ }
+
+ // delete unused files
+ if err := cleanupExpiredFiles(ctx, db); err != nil {
+ slog.Error("Failed to cleanup expired files", "error", err)
+ return err
+ }
+
+ // get the file count after merging and cleanup
+ err = status.getFinalFileCounts(ctx, db, partitionKeys)
+ if err != nil {
+ // just log
+ slog.Error("Failed to get final file counts", "error", err)
+ }
+ // set the compaction time
+ status.Duration = time.Since(t)
+
+ // call final update
+ updateFunc(*status)
+
+ slog.Info("DuckLake compaction complete", "source_file_count", status.InitialFiles, "destination_file_count", status.FinalFiles)
+ return nil
+}
+
+// mergeParquetFiles combines adjacent parquet files in the DuckDB database.
+func mergeParquetFiles(ctx context.Context, db *DuckDb) error {
+ if _, err := db.ExecContext(ctx, "call merge_adjacent_files()"); err != nil {
+ if ctx.Err() != nil {
+ return err
+ }
+ return fmt.Errorf("failed to merge parquet files: %w", err)
+ }
+ return nil
+}
+
+// we order data files as follows:
+// - get list of partition keys matching patterns. For each key:
+// - analyze file fragmentation to identify overlapping time ranges
+// - for each overlapping time range, reorder all data in that range
+// - delete original unordered entries for that time range
+func orderDataFiles(ctx context.Context, db *DuckDb, updateFunc func(CompactionStatus), partitionKeys []*partitionKey, reindex bool) (*CompactionStatus, error) {
+ slog.Info("Ordering DuckLake data files")
+
+ status := NewCompactionStatus()
+ // get total file and row count into status
+ err := status.getInitialCounts(ctx, db, partitionKeys)
+ if err != nil {
+ return nil, err
+ }
+
+ // map of table columns, allowing us to lazy load them
+ tableColumnLookup := make(map[string][]string)
+
+ // build list of partition keys to reorder
+ var reorderList []*reorderMetadata
+
+ status.Message = "identifying files to reorder"
+ updateFunc(*status)
+
+ // Process each partition key to determine if we need to reorder
+ for _, pk := range partitionKeys {
+ // determine which files are not time ordered and build a set of time ranges which need reordering
+ // (NOTE: if we are reindexing, we need to reorder ALL the data for the partition key)
+ rm, err := getTimeRangesToReorder(ctx, db, pk, reindex)
+ if err != nil {
+ slog.Error("failed to get unorderedRanges", "partition", pk, "error", err)
+ return nil, err
+ }
+
+ // if any ranges need reordering, add to the reorder list
+ if rm != nil {
+ reorderList = append(reorderList, rm)
+ } else {
+ slog.Debug("Partition key is not out of order - skipping reordering",
+ "tp_table", pk.tpTable,
+ "tp_partition", pk.tpPartition,
+ // "tp_index", pk.tpIndex,
+ "year", pk.year,
+ "month", pk.month,
+ )
+ }
+ }
+
+ // now get the total rows to reorder
+ for _, rm := range reorderList {
+ status.InitialFiles += rm.pk.fileCount
+ status.RowsToCompact += rm.rowCount
+ }
+
+ // clear message - it will be sent on next update func
+ status.Message = ""
+
+ // now iterate over reorderList to do the reordering
+ for _, rm := range reorderList {
+ pk := rm.pk
+
+ // get the columns for this table - check map first - if not present, read from metadata and populate the map
+ columns, err := getColumns(ctx, db, pk.tpTable, tableColumnLookup)
+ if err != nil {
+ slog.Error("failed to get columns", "table", pk.tpTable, "error", err)
+ return nil, err
+ }
+
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ // This is a system failure - stop everything
+ return nil, fmt.Errorf("failed to begin transaction for partition %v: %w", pk, err)
+ }
+
+ slog.Debug("Compacting partition entries",
+ "tp_table", pk.tpTable,
+ "tp_partition", pk.tpPartition,
+ "tp_index", pk.tpIndex,
+ "year", pk.year,
+ "month", pk.month,
+ "unorderedRanges", len(rm.unorderedRanges),
+ )
+
+ // func to update status with number of rows compacted for this partition key
+ // - passed to orderPartitionKey
+ updateRowsFunc := func(rowsCompacted int64) {
+ status.RowsCompacted += rowsCompacted
+ if status.TotalRows > 0 {
+ status.UpdateProgress()
+ }
+ updateFunc(*status)
+ }
+
+ if err := orderPartitionKey(ctx, tx, pk, rm, updateRowsFunc, reindex, columns); err != nil {
+ slog.Error("failed to compact partition", "partition", pk, "error", err)
+ txErr := tx.Rollback()
+ if txErr != nil {
+ slog.Error("failed to rollback transaction after compaction", "partition", pk, "error", txErr)
+ }
+ return nil, err
+ }
+
+ if err := tx.Commit(); err != nil {
+ slog.Error("failed to commit transaction after compaction", "partition", pk, "error", err)
+ txErr := tx.Rollback()
+ if txErr != nil {
+ slog.Error("failed to rollback transaction after compaction", "partition", pk, "error", txErr)
+ }
+ return nil, err
+ }
+
+ slog.Info("Compacted and ordered all partition entries",
+ "tp_table", pk.tpTable,
+ "tp_partition", pk.tpPartition,
+ "tp_index", pk.tpIndex,
+ "year", pk.year,
+ "month", pk.month,
+ "input_files", pk.fileCount,
+ )
+
+ }
+
+ slog.Info("Finished ordering DuckLake data file")
+ return status, nil
+}
+
+// getColumns retrieves column information for a table, checking the map first and reading from metadata if not present
+func getColumns(ctx context.Context, db *DuckDb, table string, columns map[string][]string) ([]string, error) {
+ // Check if columns are already cached
+ if cachedColumns, exists := columns[table]; exists {
+ return cachedColumns, nil
+ }
+
+ // Read top level columns from DuckLake metadata
+ query := fmt.Sprintf(`
+ select c.column_name
+ from %s.ducklake_column c
+ join %s.ducklake_table t on c.table_id = t.table_id
+ where t.table_name = ?
+ and t.end_snapshot is null
+ and c.end_snapshot is null
+ and c.parent_column is null
+ order by c.column_order`, constants.DuckLakeMetadataCatalog, constants.DuckLakeMetadataCatalog)
+
+ rows, err := db.QueryContext(ctx, query, table)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get columns for table %s: %w", table, err)
+ }
+ defer rows.Close()
+
+ var columnNames []string
+ for rows.Next() {
+ var columnName string
+ if err := rows.Scan(&columnName); err != nil {
+ return nil, fmt.Errorf("failed to scan column: %w", err)
+ }
+ columnNames = append(columnNames, columnName)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error reading columns: %w", err)
+ }
+
+ // Cache the columns for future use
+ columns[table] = columnNames
+
+ // and return
+ return columnNames, nil
+}
+
+// orderPartitionKey processes overlapping time ranges for a partition key:
+// - iterates over each unordered time range
+// - reorders all data within each time range (potentially in chunks for large ranges)
+// - deletes original unordered entries for that time range
+func orderPartitionKey(ctx context.Context, tx *sql.Tx, pk *partitionKey, rm *reorderMetadata, updateRowsCompactedFunc func(int64), reindex bool, columns []string) error {
+
+ slog.Debug("partition statistics",
+ "tp_table", pk.tpTable,
+ "tp_partition", pk.tpPartition,
+ "tp_index", pk.tpIndex,
+ "year", pk.year,
+ "month", pk.month,
+ "row_count", rm.rowCount,
+ "total file_count", pk.fileCount,
+ "min_timestamp", rm.minTimestamp,
+ "max_timestamp", rm.maxTimestamp,
+ "total_ranges", len(rm.unorderedRanges),
+ )
+
+ // Process each overlapping time range
+ for i, timeRange := range rm.unorderedRanges {
+ slog.Debug("processing overlapping time range",
+ "range_index", i+1,
+ "start_time", timeRange.StartTime,
+ "end_time", timeRange.EndTime,
+ "row_count", timeRange.RowCount)
+
+ // Use the pre-calculated time range and row count from the struct
+ minTime := timeRange.StartTime
+ maxTime := timeRange.EndTime
+ rowCount := timeRange.RowCount
+
+ // Determine chunking strategy for this time range
+ chunks, intervalDuration := determineChunkingInterval(minTime, maxTime, rowCount)
+
+ slog.Debug("processing time range in chunks",
+ "range_index", i+1,
+ "row_count", rowCount,
+ "chunks", chunks,
+ "interval_duration", intervalDuration.String())
+
+ // Process this time range in chunks
+ currentStart := minTime
+ for chunkNum := 1; currentStart.Before(maxTime); chunkNum++ {
+ currentEnd := currentStart.Add(intervalDuration)
+ if currentEnd.After(maxTime) {
+ currentEnd = maxTime
+ }
+
+ // For the final chunk, make it inclusive to catch the last row
+ isFinalChunk := currentEnd.Equal(maxTime)
+
+ rowsInserted, err := insertOrderedDataForTimeRange(ctx, tx, pk, currentStart, currentEnd, isFinalChunk, reindex, columns)
+ if err != nil {
+ return fmt.Errorf("failed to insert ordered data for time range %s to %s: %w",
+ currentStart.Format("2006-01-02 15:04:05"),
+ currentEnd.Format("2006-01-02 15:04:05"), err)
+ }
+ updateRowsCompactedFunc(rowsInserted)
+ slog.Debug(fmt.Sprintf("processed chunk %d/%d for range %d", i, chunks, i+1))
+
+ // Ensure next chunk starts exactly where this one ended to prevent gaps
+ currentStart = currentEnd
+ }
+
+ // Delete original unordered entries for this time range
+ err := deleteUnorderedEntriesForTimeRange(ctx, tx, rm, minTime, maxTime)
+ if err != nil {
+ return fmt.Errorf("failed to delete unordered entries for time range: %w", err)
+ }
+
+ slog.Debug("completed time range",
+ "range_index", i+1)
+ }
+
+ return nil
+}
+
+// insertOrderedDataForTimeRange inserts ordered data for a specific time range within a partition key
+func insertOrderedDataForTimeRange(ctx context.Context, tx *sql.Tx, pk *partitionKey, startTime, endTime time.Time, isFinalChunk, reindex bool, columns []string) (int64, error) {
+ // sanitize table name
+ tableName, err := backend.SanitizeDuckDBIdentifier(pk.tpTable)
+ if err != nil {
+ return 0, err
+ }
+
+ // Build column list for insert
+ insertColumns := strings.Join(columns, ", ")
+
+ // Build select fields
+ selectFields := insertColumns
+ // For reindexing, replace tp_index with the partition config column
+ if reindex && pk.partitionConfig != nil {
+ selectFields = strings.ReplaceAll(selectFields, "tp_index", fmt.Sprintf("%s as tp_index", pk.partitionConfig.TpIndexColumn))
+ }
+ // For the final chunk, use inclusive end time to catch the last row
+ timeEndOperator := "<"
+ if isFinalChunk {
+ timeEndOperator = "<="
+ }
+
+ //nolint: gosec // sanitized
+ insertQuery := fmt.Sprintf(`insert into %s (%s)
+ select %s
+ from %s
+ where tp_timestamp >= ?
+ and tp_timestamp %s ?
+ and tp_partition = ?
+ and tp_index = ?
+ order by tp_timestamp`,
+ tableName,
+ insertColumns,
+ selectFields,
+ tableName,
+ timeEndOperator)
+ // For overlapping files, we need to reorder ALL rows in the overlapping time range
+ args := []interface{}{startTime, endTime, pk.tpPartition, pk.tpIndex}
+
+ result, err := tx.ExecContext(ctx, insertQuery, args...)
+ if err != nil {
+ return 0, fmt.Errorf("failed to insert ordered data for time range: %w", err)
+ }
+ rowsInserted, err := result.RowsAffected()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get rows affected count: %w", err)
+ }
+ return rowsInserted, nil
+}
+
+// deleteUnorderedEntriesForTimeRange deletes the original unordered entries for a specific time range within a partition key
+func deleteUnorderedEntriesForTimeRange(ctx context.Context, tx *sql.Tx, rm *reorderMetadata, startTime, endTime time.Time) error {
+ // Delete all rows in the time range for this partition key (we're re-inserting them in order)
+ tableName, err := backend.SanitizeDuckDBIdentifier(rm.pk.tpTable)
+ if err != nil {
+ return err
+ }
+ //nolint: gosec // sanitized
+ deleteQuery := fmt.Sprintf(`delete from %s
+ where tp_partition = ?
+ and tp_index = ?
+ and tp_timestamp >= ?
+ and tp_timestamp <= ?
+ and rowid <= ?`,
+ tableName)
+
+ args := []interface{}{rm.pk.tpPartition, rm.pk.tpIndex, startTime, endTime, rm.maxRowId}
+
+ _, err = tx.ExecContext(ctx, deleteQuery, args...)
+ if err != nil {
+ return fmt.Errorf("failed to delete unordered entries for time range: %w", err)
+ }
+
+ return nil
+}
+
+// determineChunkingInterval calculates the optimal chunking strategy for a time range based on row count.
+// It returns the number of chunks and the duration of each chunk interval.
+// For large datasets, it splits the time range into multiple chunks to stay within maxCompactionRowsPerChunk.
+// Ensures minimum chunk interval is at least 1 hour to avoid excessive fragmentation.
+func determineChunkingInterval(startTime, endTime time.Time, rowCount int64) (chunks int, intervalDuration time.Duration) {
+ intervalDuration = endTime.Sub(startTime)
+ chunks = 1
+
+ // If row count is greater than maxCompactionRowsPerChunk, calculate appropriate chunk interval
+ if rowCount > maxCompactionRowsPerChunk {
+ chunks = int((rowCount + maxCompactionRowsPerChunk - 1) / maxCompactionRowsPerChunk)
+ intervalDuration = intervalDuration / time.Duration(chunks)
+
+ // Ensure minimum interval is at least 1 hour
+ if intervalDuration < time.Hour {
+ intervalDuration = time.Hour
+ }
+ }
+
+ return chunks, intervalDuration
+}
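+
+// A worked example using the maxCompactionRowsPerChunk constant above: a time
+// range spanning 6 hours with rowCount = 12_000_000 gives
+// chunks = (12_000_000 + 4_999_999) / 5_000_000 = 3 and
+// intervalDuration = 6h / 3 = 2h, which is above the 1 hour floor and so is
+// kept as-is.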
diff --git a/internal/database/compaction_status.go b/internal/database/compaction_status.go
new file mode 100644
index 00000000..5b345f31
--- /dev/null
+++ b/internal/database/compaction_status.go
@@ -0,0 +1,195 @@
+package database
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/turbot/go-kit/types"
+ "github.com/turbot/pipe-fittings/v2/backend"
+ "github.com/turbot/pipe-fittings/v2/constants"
+ "github.com/turbot/pipe-fittings/v2/utils"
+)
+
+type CompactionStatus struct {
+ Message string
+ InitialFiles int
+ FinalFiles int
+ RowsCompacted int64
+ RowsToCompact int64
+ TotalRows int64
+ ProgressPercent float64
+
+ MigrateSource int // number of source files migrated
+ MigrateDest int // number of destination files after migration
+ PartitionIndexExpressions map[string]string // the index expression used for migration for each partition
+ Duration time.Duration // duration of the compaction process
+}
+
+func NewCompactionStatus() *CompactionStatus {
+ return &CompactionStatus{
+ PartitionIndexExpressions: make(map[string]string),
+ }
+}
+
+func (s *CompactionStatus) VerboseString() string {
+ var migratedString string
+ // Show migration status for each partition if any
+ if s.MigrateSource > 0 {
+ migratedString = fmt.Sprintf(`Migrated tp_index for %d %s`,
+ len(s.PartitionIndexExpressions),
+ utils.Pluralize("partition", len(s.PartitionIndexExpressions)),
+ )
+ if s.MigrateSource != s.MigrateDest {
+ migratedString += fmt.Sprintf(" (%d %s migrated to %d %s)",
+ s.MigrateSource,
+ utils.Pluralize("file", s.MigrateSource),
+ s.MigrateDest,
+ utils.Pluralize("file", s.MigrateDest))
+ }
+ migratedString += ".\n"
+ }
+
+ var compactedString string
+ if s.RowsCompacted == 0 {
+ compactedString = "\nNo files required compaction."
+ } else {
+ // if the file count is the same, we must have just ordered
+ if s.InitialFiles == s.FinalFiles {
+ compactedString = fmt.Sprintf("Ordered %s rows in %s files (%s).\n", s.TotalRowsString(), s.InitialFilesString(), s.DurationString())
+ } else {
+ compactedString = fmt.Sprintf("Compacted and ordered %s rows in %s files into %s files in (%s).\n", s.TotalRowsString(), s.InitialFilesString(), s.FinalFilesString(), s.DurationString())
+ }
+ }
+
+ return migratedString + compactedString
+}
+
+func (s *CompactionStatus) String() string {
+ var compactedString string
+ if s.RowsCompacted == 0 {
+ compactedString = "No files required compaction."
+ } else {
+ // if the file count is the same, we must have just ordered
+ if s.InitialFiles == s.FinalFiles {
+ compactedString = fmt.Sprintf("Ordered %s rows in %s files in %s.\n", s.TotalRowsString(), s.InitialFilesString(), s.Duration.String())
+ } else {
+ compactedString = fmt.Sprintf("Compacted and ordered %s rows in %s files into %s files in %s.\n", s.TotalRowsString(), s.InitialFilesString(), s.FinalFilesString(), s.Duration.String())
+ }
+ }
+
+ return compactedString
+}
+
+func (s *CompactionStatus) TotalRowsString() any {
+ return humanize.Comma(s.TotalRows)
+}
+func (s *CompactionStatus) InitialFilesString() any {
+ return humanize.Comma(int64(s.InitialFiles))
+}
+func (s *CompactionStatus) FinalFilesString() any {
+ return humanize.Comma(int64(s.FinalFiles))
+}
+func (s *CompactionStatus) DurationString() string {
+ return utils.HumanizeDuration(s.Duration)
+}
+func (s *CompactionStatus) RowsCompactedString() any {
+ return humanize.Comma(s.RowsCompacted)
+}
+func (s *CompactionStatus) ProgressPercentString() string {
+ return fmt.Sprintf("%.1f%%", s.ProgressPercent)
+}
+
+func (s *CompactionStatus) UpdateProgress() {
+ // calc percentage from RowsToCompact but print TotalRows in status message
+ // guard against a zero RowsToCompact, which would yield an Inf percentage
+ if s.RowsToCompact == 0 {
+ return
+ }
+ s.ProgressPercent = (float64(s.RowsCompacted) / float64(s.RowsToCompact)) * 100
+ s.Message = fmt.Sprintf(" (%0.1f%% of %s rows)", s.ProgressPercent, types.ToHumanisedString(s.TotalRows))
+}
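+
+// For example: with RowsCompacted = 2_500_000 and RowsToCompact = 5_000_000,
+// ProgressPercent is 50.0 and, assuming ToHumanisedString renders 8_000_000
+// as "8m", a TotalRows of 8_000_000 yields the message " (50.0% of 8m rows)".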
+
+func (s *CompactionStatus) getInitialCounts(ctx context.Context, db *DuckDb, partitionKeys []*partitionKey) error {
+ partitionNameMap := make(map[string]map[string]struct{})
+ for _, pk := range partitionKeys {
+ s.InitialFiles += pk.fileCount
+ if partitionNameMap[pk.tpTable] == nil {
+ partitionNameMap[pk.tpTable] = make(map[string]struct{})
+ }
+ partitionNameMap[pk.tpTable][pk.tpPartition] = struct{}{}
+ }
+
+ // get row count for each table
+ totalRows := int64(0)
+ for tpTable, tpPartitions := range partitionNameMap {
+
+ // Sanitize partition values for SQL injection protection
+ sanitizedPartitions := make([]string, 0, len(tpPartitions))
+ for partition := range tpPartitions {
+ sp, err := backend.SanitizeDuckDBIdentifier(partition)
+ if err != nil {
+ return fmt.Errorf("failed to sanitize partition %s: %w", partition, err)
+ }
+ // Quote the sanitized partition name for the IN clause
+ sanitizedPartitions = append(sanitizedPartitions, fmt.Sprintf("'%s'", sp))
+ }
+
+ tableName, err := backend.SanitizeDuckDBIdentifier(tpTable)
+ if err != nil {
+ return fmt.Errorf("failed to sanitize table name %s: %w", tpTable, err)
+ }
+
+ query := fmt.Sprintf("select count(*) from %s where tp_partition in (%s)",
+ tableName,
+ strings.Join(sanitizedPartitions, ", "))
+
+ var tableRowCount int64
+ err = db.QueryRowContext(ctx, query).Scan(&tableRowCount)
+ if err != nil {
+ return fmt.Errorf("failed to get row count for table %s: %w", tpTable, err)
+ }
+
+ totalRows += tableRowCount
+ }
+
+ s.TotalRows = totalRows
+ return nil
+}
+
+func (s *CompactionStatus) getFinalFileCounts(ctx context.Context, db *DuckDb, partitionKeys []*partitionKey) error {
+ // Get unique table names from partition keys
+ tableNames := make(map[string]struct{})
+ for _, pk := range partitionKeys {
+ tableNames[pk.tpTable] = struct{}{}
+ }
+
+ // Count files for each table from metadata
+ totalFileCount := 0
+ for tableName := range tableNames {
+ // Sanitize table name
+ sanitizedTableName, err := backend.SanitizeDuckDBIdentifier(tableName)
+ if err != nil {
+ return fmt.Errorf("failed to sanitize table name %s: %w", tableName, err)
+ }
+
+ // Query to count files for this table from DuckLake metadata
+ query := fmt.Sprintf(`select count(*) from %s.ducklake_data_file df
+ join %s.ducklake_table t on df.table_id = t.table_id
+ where t.table_name = '%s' and df.end_snapshot is null`,
+ constants.DuckLakeMetadataCatalog,
+ constants.DuckLakeMetadataCatalog,
+ sanitizedTableName)
+
+ var tableFileCount int
+ err = db.QueryRowContext(ctx, query).Scan(&tableFileCount)
+ if err != nil {
+ return fmt.Errorf("failed to get file count for table %s: %w", tableName, err)
+ }
+
+ totalFileCount += tableFileCount
+ }
+
+ s.FinalFiles = totalFileCount
+ return nil
+}
diff --git a/internal/database/compaction_types.go b/internal/database/compaction_types.go
new file mode 100644
index 00000000..2fc88d87
--- /dev/null
+++ b/internal/database/compaction_types.go
@@ -0,0 +1,178 @@
+package database
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// getTimeRangesToReorder analyzes file fragmentation for a partition key and identifies the time ranges that need reordering.
+// It queries DuckLake metadata to get all files for the partition, their timestamp ranges, and row counts.
+// Then it identifies groups of files with overlapping time ranges that need compaction.
+// Returns reorder metadata including row/file counts and the unordered time ranges.
+func getTimeRangesToReorder(ctx context.Context, db *DuckDb, pk *partitionKey, reindex bool) (*reorderMetadata, error) {
+ // NOTE: if we are reindexing, we must rewrite the entire partition key
+ // - return a single range for the entire partition key
+ if reindex {
+ rm, err := newReorderMetadata(ctx, db, pk)
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve stats for partition key: %w", err)
+ }
+
+ // make a single time range
+ rm.unorderedRanges = []unorderedDataTimeRange{
+ {
+ StartTime: rm.minTimestamp,
+ EndTime: rm.maxTimestamp,
+ RowCount: rm.rowCount,
+ },
+ }
+
+ return rm, nil
+ }
+
+ // first query the metadata to get a list of files, their timestamp ranges and row counts for this partition key
+ fileRanges, err := getFileRangesForPartitionKey(ctx, db, pk)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get file ranges for partition key: %w", err)
+ }
+
+ // Now identify which of these ranges overlap and for each overlapping set, build a superset time range
+ unorderedRanges, err := pk.findOverlappingFileRanges(fileRanges)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build unordered time ranges: %w", err)
+ }
+
+ // if there are no unordered ranges, return nil
+ if len(unorderedRanges) == 0 {
+ return nil, nil
+ }
+
+ // get stats for the partition key
+ rm, err := newReorderMetadata(ctx, db, pk)
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve stats for partition key: %w", err)
+ }
+ rm.unorderedRanges = unorderedRanges
+ return rm, nil
+}
+
+// query the metadata to get a list of files, their timestamp ranges and row counts for this partition key
+func getFileRangesForPartitionKey(ctx context.Context, db *DuckDb, pk *partitionKey) ([]fileTimeRange, error) {
+ query := `select
+ df.path,
+ cast(fcs.min_value as timestamp) as min_timestamp,
+ cast(fcs.max_value as timestamp) as max_timestamp,
+ df.record_count
+ from __ducklake_metadata_tailpipe_ducklake.ducklake_data_file df
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv1
+ on df.data_file_id = fpv1.data_file_id and fpv1.partition_key_index = 0
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv2
+ on df.data_file_id = fpv2.data_file_id and fpv2.partition_key_index = 1
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv3
+ on df.data_file_id = fpv3.data_file_id and fpv3.partition_key_index = 2
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv4
+ on df.data_file_id = fpv4.data_file_id and fpv4.partition_key_index = 3
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_table t
+ on df.table_id = t.table_id
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_file_column_stats fcs
+ on df.data_file_id = fcs.data_file_id
+ and df.table_id = fcs.table_id
+ join __ducklake_metadata_tailpipe_ducklake.ducklake_column c
+ on fcs.column_id = c.column_id
+ and fcs.table_id = c.table_id
+ where t.table_name = ?
+ and fpv1.partition_value = ?
+ and fpv2.partition_value = ?
+ and fpv3.partition_value = ?
+ and fpv4.partition_value = ?
+ and c.column_name = 'tp_timestamp'
+ and df.end_snapshot is null
+ and c.end_snapshot is null
+ order by df.data_file_id`
+
+ rows, err := db.QueryContext(ctx, query, pk.tpTable, pk.tpPartition, pk.tpIndex, pk.year, pk.month)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get file timestamp ranges: %w", err)
+ }
+ defer rows.Close()
+
+ var fileRanges []fileTimeRange
+ for rows.Next() {
+ var path string
+ var minTime, maxTime time.Time
+ var rowCount int64
+ if err := rows.Scan(&path, &minTime, &maxTime, &rowCount); err != nil {
+ return nil, fmt.Errorf("failed to scan file range: %w", err)
+ }
+ fileRanges = append(fileRanges, fileTimeRange{path: path, min: minTime, max: maxTime, rowCount: rowCount})
+ }
+
+ totalFiles := len(fileRanges)
+ if totalFiles <= 1 {
+ return nil, nil
+ }
+
+ return fileRanges, nil
+}
+
+type fileTimeRange struct {
+ path string
+ min time.Time
+ max time.Time
+ rowCount int64
+}
+
+// unorderedDataTimeRange represents a time range containing unordered data that needs reordering
+type unorderedDataTimeRange struct {
+ StartTime time.Time // start of the time range containing unordered data
+ EndTime time.Time // end of the time range containing unordered data
+ RowCount int64 // total row count in this time range
+}
+
+// newUnorderedDataTimeRange creates a single unorderedDataTimeRange from overlapping files
+func newUnorderedDataTimeRange(overlappingFiles []fileTimeRange) (unorderedDataTimeRange, error) {
+ var rowCount int64
+ var startTime, endTime time.Time
+
+ // Single loop to sum row counts and calculate time range
+ for i, file := range overlappingFiles {
+ rowCount += file.rowCount
+
+ // Calculate time range
+ if i == 0 {
+ startTime = file.min
+ endTime = file.max
+ } else {
+ if file.min.Before(startTime) {
+ startTime = file.min
+ }
+ if file.max.After(endTime) {
+ endTime = file.max
+ }
+ }
+ }
+
+ return unorderedDataTimeRange{
+ StartTime: startTime,
+ EndTime: endTime,
+ RowCount: rowCount,
+ }, nil
+}
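+
+// For example (illustrative values): merging overlapping files covering
+// [09:00, 09:20] with 100 rows and [09:10, 09:30] with 50 rows yields a single
+// unorderedDataTimeRange spanning [09:00, 09:30] with RowCount 150.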
+
+// rangesOverlap checks if two timestamp ranges overlap (excluding contiguous ranges)
+func rangesOverlap(r1, r2 fileTimeRange) bool {
+ // Two ranges overlap if one starts before the other ends AND they're not just touching
+ // Contiguous ranges (where one ends exactly when the other starts) are NOT considered overlapping
+ return r1.min.Before(r2.max) && r2.min.Before(r1.max)
+}
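+
+// For example, [09:00, 09:10] and [09:05, 09:15] overlap, whereas [09:00, 09:10]
+// and [09:10, 09:20] are merely contiguous and are NOT treated as overlapping.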
diff --git a/internal/parquet/conversion_error.go b/internal/database/conversion_error.go
similarity index 61%
rename from internal/parquet/conversion_error.go
rename to internal/database/conversion_error.go
index 16725f85..d4c8608a 100644
--- a/internal/parquet/conversion_error.go
+++ b/internal/database/conversion_error.go
@@ -1,4 +1,4 @@
-package parquet
+package database
import (
"bytes"
@@ -10,18 +10,18 @@ import (
"strings"
)
-// handleConversionError attempts to handle conversion errors by counting the number of lines in the file.
+// handleConversionError attempts to handle conversion errors by counting the number of lines in the files.
// if we fail, just return the raw error.
-func handleConversionError(err error, path string) error {
+func handleConversionError(message string, err error, paths ...string) error {
logArgs := []any{
"error",
err,
"path",
- path,
+ paths,
}
// try to count the number of rows in the file
- rows, countErr := countLines(path)
+ rows, countErr := countLinesForFiles(paths...)
if countErr == nil {
logArgs = append(logArgs, "rows_affected", rows)
}
@@ -33,9 +33,19 @@ func handleConversionError(err error, path string) error {
}
// return wrapped error
- return NewConversionError(err, rows, path)
+ return NewConversionError(fmt.Errorf("%s: %w", message, err), rows, paths...)
+}
+func countLinesForFiles(filenames ...string) (int64, error) {
+ total := 0
+ for _, filename := range filenames {
+ count, err := countLines(filename)
+ if err != nil {
+ return 0, fmt.Errorf("failed to count lines in %s: %w", filename, err)
+ }
+ total += int(count)
+ }
+ return int64(total), nil
}
-
func countLines(filename string) (int64, error) {
file, err := os.Open(filename)
if err != nil {
@@ -61,15 +71,19 @@ func countLines(filename string) (int64, error) {
}
type ConversionError struct {
- SourceFile string
+ SourceFiles []string
BaseError error
RowsAffected int64
displayError string
}
-func NewConversionError(err error, rowsAffected int64, path string) *ConversionError {
+func NewConversionError(err error, rowsAffected int64, paths ...string) *ConversionError {
+ sourceFiles := make([]string, len(paths))
+ for i, path := range paths {
+ sourceFiles[i] = filepath.Base(path)
+ }
return &ConversionError{
- SourceFile: filepath.Base(path),
+ SourceFiles: sourceFiles,
BaseError: err,
RowsAffected: rowsAffected,
displayError: strings.Split(err.Error(), "\n")[0],
@@ -77,7 +91,7 @@ func NewConversionError(err error, rowsAffected int64, path string) *ConversionE
}
func (c *ConversionError) Error() string {
- return fmt.Sprintf("%s: %s", c.SourceFile, c.displayError)
+ return fmt.Sprintf("%s: %s", strings.Join(c.SourceFiles, ", "), c.displayError)
}
// Merge adds a second error to the conversion error message.
diff --git a/internal/parquet/conversion_error_test.go b/internal/database/conversion_error_test.go
similarity index 97%
rename from internal/parquet/conversion_error_test.go
rename to internal/database/conversion_error_test.go
index be3766f2..04e022b2 100644
--- a/internal/parquet/conversion_error_test.go
+++ b/internal/database/conversion_error_test.go
@@ -1,4 +1,4 @@
-package parquet
+package database
import (
"errors"
diff --git a/internal/database/convertor.go b/internal/database/convertor.go
new file mode 100644
index 00000000..36e873f5
--- /dev/null
+++ b/internal/database/convertor.go
@@ -0,0 +1,296 @@
+package database
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "log/slog"
+ "sync"
+ "sync/atomic"
+
+ "github.com/turbot/pipe-fittings/v2/backend"
+ "github.com/turbot/tailpipe-plugin-sdk/schema"
+ "github.com/turbot/tailpipe/internal/config"
+)
+
+const chunkBufferLength = 1000
+
+// Converter struct executes all the conversions for a single collection
+// it therefore has a unique executionId, and will potentially convert multiple JSONL files
+// each file is assumed to have the filename format <executionId>_<chunkNumber>.jsonl,
+// so when new input files are available, we simply store the chunk number
+type Converter struct {
+	// the execution id
+ executionId string
+
+	// the chunk numbers scheduled for processing
+ scheduledChunks []int32
+
+ scheduleLock sync.Mutex
+ processLock sync.Mutex
+
+ // waitGroup to track job completion
+ // this is incremented when a file is scheduled and decremented when the file is processed
+ wg sync.WaitGroup
+
+ // the number of jsonl files processed so far
+ //fileCount int32
+
+ // the number of conversions executed
+ //conversionCount int32
+
+ // the number of rows written
+ rowCount int64
+ // the number of rows which were NOT converted due to conversion errors encountered
+ failedRowCount int64
+
+ // the source file location
+ sourceDir string
+ // the dest file location
+ destDir string
+
+	// the format string for the query to read the JSON chunks - this is reused for all chunks,
+ // with just the filename being added when the query is executed
+ readJsonQueryFormat string
+
+ // the table conversionSchema - populated when the first chunk arrives if the conversionSchema is not already complete
+ conversionSchema *schema.ConversionSchema
+ // the source schema - which may be partial - used to build the full conversionSchema
+ // we store separately for the purpose of change detection
+ tableSchema *schema.TableSchema
+
+ // viewQueryOnce ensures the schema inference only happens once for the first chunk,
+	// even if multiple chunks arrive concurrently. Combined with schemaWg, this ensures
+	// all subsequent chunks wait for the initial schema inference to complete before proceeding.
+ viewQueryOnce sync.Once
+	// schemaWg is used to block processing of subsequent chunks until the initial
+	// schema inference is complete. This ensures all chunks wait for the schema
+ // to be fully initialized before proceeding with their processing.
+ schemaWg sync.WaitGroup
+
+ // the partition being collected
+ Partition *config.Partition
+ // func which we call with updated row count
+ statusFunc func(int64, int64, ...error)
+
+ // the DuckDB database connection - this must have a ducklake attachment
+ db *DuckDb
+}
+
+func NewParquetConverter(ctx context.Context, cancel context.CancelFunc, executionId string, partition *config.Partition, sourceDir string, tableSchema *schema.TableSchema, statusFunc func(int64, int64, ...error), db *DuckDb) (*Converter, error) {
+ // get the data dir - this will already have been created by the config loader
+ destDir := config.GlobalWorkspaceProfile.GetDataDir()
+
+ // normalise the table schema to use lowercase column names
+ tableSchema.NormaliseColumnTypes()
+
+ w := &Converter{
+ executionId: executionId,
+ scheduledChunks: make([]int32, 0, chunkBufferLength), // Pre-allocate reasonable capacity
+ Partition: partition,
+ sourceDir: sourceDir,
+ destDir: destDir,
+ tableSchema: tableSchema,
+ statusFunc: statusFunc,
+ db: db,
+ }
+
+ // done
+ return w, nil
+}
+
+// AddChunk adds a new chunk to the list of chunks to be processed
+// if this is the first chunk, determine if we have a full conversionSchema yet and if not infer from the chunk
+// then signal the scheduler that chunks are available
+func (w *Converter) AddChunk(executionId string, chunk int32) error {
+ var err error
+
+ // wait on the schemaWg to ensure that schema inference is complete before processing the chunk
+ w.schemaWg.Wait()
+
+ // Execute schema inference exactly once for the first chunk.
+	// The WaitGroup ensures all subsequent chunks wait for this to complete.
+ // If schema inference fails, the error is captured and returned to the caller.
+ w.viewQueryOnce.Do(func() {
+ err = w.onFirstChunk(executionId, chunk)
+ })
+ if err != nil {
+ return fmt.Errorf("failed to infer schema: %w", err)
+ }
+
+	// lock the schedule lock to ensure that we can safely add to the scheduled chunks
+	w.scheduleLock.Lock()
+	// add to the scheduled chunks
+ w.scheduledChunks = append(w.scheduledChunks, chunk)
+ w.scheduleLock.Unlock()
+
+ // increment the wait group to track the scheduled chunk
+ w.wg.Add(1)
+
+ // ok try to lock the process lock - that will fail if another process is running
+ if w.processLock.TryLock() {
+		// we now have the process lock
+		// NOTE: processAllChunks will keep processing as long as there are chunks to process, including
+		// chunks that were scheduled while we were processing
+ go w.processAllChunks()
+ }
+
+ return nil
+}
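+
+// A minimal usage sketch (identifiers are illustrative; the real call sites are
+// in the collection orchestration code, outside this file):
+//
+//	c, err := NewParquetConverter(ctx, cancel, "exec-1", partition, sourceDir, tableSchema, statusFunc, db)
+//	if err != nil {
+//		return err
+//	}
+//	for chunk := int32(0); chunk < 3; chunk++ {
+//		if err := c.AddChunk("exec-1", chunk); err != nil {
+//			return err
+//		}
+//	}
+//	if err := c.WaitForConversions(ctx); err != nil {
+//		return err
+//	}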
+
+// getChunksToProcess returns the chunks to process, up to a maximum of maxChunksToProcess
+// it also trims the scheduledChunks to remove the processed chunks
+func (w *Converter) getChunksToProcess() []int32 {
+ // now determine if there are more chunks to process
+ w.scheduleLock.Lock()
+ defer w.scheduleLock.Unlock()
+
+ // provide a mechanism to limit the max chunks we process at once
+ // a high value for this seems fine (it's possible we do not actually need a limit at all)
+ const maxChunksToProcess = 2000
+ var chunksToProcess []int32
+ if len(w.scheduledChunks) > maxChunksToProcess {
+		slog.Debug("Converter.getChunksToProcess limiting chunks to process to max", "scheduledChunks", len(w.scheduledChunks), "maxChunksToProcess", maxChunksToProcess)
+ chunksToProcess = w.scheduledChunks[:maxChunksToProcess]
+ // trim the scheduled chunks to remove the processed chunks
+ w.scheduledChunks = w.scheduledChunks[maxChunksToProcess:]
+ } else {
+		slog.Debug("Converter.getChunksToProcess processing all scheduled chunks", "scheduledChunks", len(w.scheduledChunks))
+ chunksToProcess = w.scheduledChunks
+ // clear the scheduled chunks
+ w.scheduledChunks = nil
+ }
+ return chunksToProcess
+}
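+
+// For example, with 2500 scheduled chunks a single call returns the first 2000
+// and leaves the remaining 500 scheduled for a subsequent call.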
+
+// onFirstChunk is called when the first chunk is added to the converter
+// it is responsible for building the conversion schema if it does not already exist
+// (we must wait for the first chunk as we may need to infer the schema from the chunk data)
+// once the conversion schema is built, we can create the DuckDB table for this partition and build the
+// read query format string that we will use to read the JSON data from the file
+func (w *Converter) onFirstChunk(executionId string, chunk int32) error {
+ w.schemaWg.Add(1)
+ defer w.schemaWg.Done()
+ if err := w.buildConversionSchema(executionId, chunk); err != nil {
+ // err will be returned by the parent function
+ return err
+ }
+ // create the DuckDB table for this partition if it does not already exist
+ if err := EnsureDuckLakeTable(w.conversionSchema.Columns, w.db, w.Partition.TableName); err != nil {
+ return fmt.Errorf("failed to create DuckDB table: %w", err)
+ }
+ w.readJsonQueryFormat = buildReadJsonQueryFormat(w.conversionSchema, w.Partition)
+
+ return nil
+}
+
+// WaitForConversions waits for all jobs to be processed or for the context to be cancelled
+func (w *Converter) WaitForConversions(ctx context.Context) error {
+ slog.Info("Converter.WaitForConversions - waiting for all jobs to be processed or context to be cancelled.")
+ // wait for the wait group within a goroutine so we can also check the context
+ done := make(chan struct{})
+ go func() {
+ w.wg.Wait()
+ close(done)
+ }()
+
+ select {
+ case <-ctx.Done():
+ slog.Info("WaitForConversions - context cancelled.")
+ return ctx.Err()
+ case <-done:
+ slog.Info("WaitForConversions - all jobs processed.")
+ return nil
+ }
+}
+
+// addJobErrors calls the status func with any job errors, first summing the failed rows in any conversion errors
+func (w *Converter) addJobErrors(errorList ...error) {
+ var failedRowCount int64
+
+ for _, err := range errorList {
+ var conversionError = &ConversionError{}
+ if errors.As(err, &conversionError) {
+ failedRowCount = atomic.AddInt64(&w.failedRowCount, conversionError.RowsAffected)
+ }
+ slog.Error("conversion error", "error", err)
+ }
+
+ // update the status function with the new error count (no need to use atomic for errorList as we are already locked)
+ w.statusFunc(atomic.LoadInt64(&w.rowCount), failedRowCount, errorList...)
+}
+
+// updateRowCount atomically increments the row count and calls the statusFunc
+func (w *Converter) updateRowCount(count int64) {
+ atomic.AddInt64(&w.rowCount, count)
+ // call the status function with the new row count
+ w.statusFunc(atomic.LoadInt64(&w.rowCount), atomic.LoadInt64(&w.failedRowCount))
+}
+
+// CheckTableSchema checks if the specified table exists in the DuckDB database and compares its schema with the
+// provided schema.
+// it returns a TableSchemaStatus indicating whether the table exists, whether the schema matches, and any differences.
+// This is not used at present but will be used when we implement ducklake schema evolution handling
+func (w *Converter) CheckTableSchema(db *sql.DB, tableName string, conversionSchema schema.ConversionSchema) (TableSchemaStatus, error) {
+ // Check if table exists
+ exists, err := w.tableExists(db, tableName)
+ if err != nil {
+ return TableSchemaStatus{}, err
+ }
+
+ if !exists {
+ return TableSchemaStatus{}, nil
+ }
+
+ // Get existing schema
+ existingSchema, err := w.getTableSchema(db, tableName)
+ if err != nil {
+ return TableSchemaStatus{}, fmt.Errorf("failed to retrieve schema: %w", err)
+ }
+
+ // Use constructor to create status from comparison
+ diff := NewTableSchemaStatusFromComparison(existingSchema, conversionSchema)
+ return diff, nil
+}
+
+func (w *Converter) tableExists(db *sql.DB, tableName string) (bool, error) {
+ sanitizedTableName, err := backend.SanitizeDuckDBIdentifier(tableName)
+ if err != nil {
+ return false, fmt.Errorf("invalid table name %s: %w", tableName, err)
+ }
+ //nolint:gosec // table name is sanitized
+ query := fmt.Sprintf("select exists (select 1 from information_schema.tables where table_name = '%s')", sanitizedTableName)
+ var exists int
+ if err := db.QueryRow(query).Scan(&exists); err != nil {
+ return false, err
+ }
+ return exists == 1, nil
+}
+
+func (w *Converter) getTableSchema(db *sql.DB, tableName string) (map[string]schema.ColumnSchema, error) {
+ query := fmt.Sprintf("pragma table_info(%s);", tableName)
+ rows, err := db.Query(query)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ schemaMap := make(map[string]schema.ColumnSchema)
+ for rows.Next() {
+ var name, dataType string
+ var notNull, pk int
+ var dfltValue sql.NullString
+
+		if err := rows.Scan(&name, &dataType, &notNull, &dfltValue, &pk); err != nil {
+ return nil, err
+ }
+
+ schemaMap[name] = schema.ColumnSchema{
+ ColumnName: name,
+ Type: dataType,
+ }
+ }
+
+ return schemaMap, nil
+}
diff --git a/internal/database/convertor_convert.go b/internal/database/convertor_convert.go
new file mode 100644
index 00000000..3e53b389
--- /dev/null
+++ b/internal/database/convertor_convert.go
@@ -0,0 +1,275 @@
+package database
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/turbot/pipe-fittings/v2/utils"
+
+ "github.com/marcboeker/go-duckdb/v2"
+ "github.com/turbot/tailpipe-plugin-sdk/table"
+)
+
+// process all available chunks
+// this is called when a chunk is added but will continue processing any further chunks added while we were processing
+func (w *Converter) processAllChunks() {
+ // note we ALREADY HAVE THE PROCESS LOCK - be sure to release it when we are done
+ defer w.processLock.Unlock()
+
+ // so we have the process lock AND the schedule lock
+ // move the scheduled chunks to the chunks to process
+ // (scheduledChunks may be empty, in which case we will break out of the loop)
+ chunksToProcess := w.getChunksToProcess()
+ for len(chunksToProcess) > 0 {
+ err := w.processChunks(chunksToProcess)
+ if err != nil {
+ slog.Error("Error processing chunks", "error", err)
+ // call add job errors and carry on
+ w.addJobErrors(err)
+ }
+ //- get next batch of chunks
+ chunksToProcess = w.getChunksToProcess()
+ }
+
+	// if we get here, we have processed all scheduled chunks (but more may come later)
+	slog.Debug("Converter.processAllChunks: all scheduled chunks processed for execution")
+}
+
+// process a batch of chunks
+// Note: whether successful or not, this decrements w.wg by the chunk count on return
+func (w *Converter) processChunks(chunksToProcess []int32) error {
+ // decrement the wait group by the number of chunks processed
+ defer func() {
+ w.wg.Add(len(chunksToProcess) * -1)
+ }()
+
+ // build a list of filenames to process
+ filenamesToProcess, err := w.chunkNumbersToFilenames(chunksToProcess)
+ if err != nil {
+ slog.Error("chunkNumbersToFilenames failed")
+		// chunkNumbersToFilenames returns a ConversionError
+ return err
+ }
+
+ // execute conversion query for the chunks
+	// (insertBatchIntoDuckLake will return a ConversionError)
+ err = w.insertBatchIntoDuckLake(filenamesToProcess)
+ // delete the files after processing (successful or otherwise) - we will just return err
+ for _, filename := range filenamesToProcess {
+ if deleteErr := os.Remove(filename); deleteErr != nil {
+			slog.Error("Failed to delete file after processing", "file", filename, "error", deleteErr)
+ // give conversion error precedence
+ if err == nil {
+ err = deleteErr
+ }
+ }
+ }
+ // return error (if any)
+ return err
+}
+
+func (w *Converter) chunkNumbersToFilenames(chunks []int32) ([]string, error) {
+ var filenames = make([]string, len(chunks))
+ var missingFiles []string
+ for i, chunkNumber := range chunks {
+ // build the source filename
+ jsonlFilePath := filepath.Join(w.sourceDir, table.ExecutionIdToJsonlFileName(w.executionId, chunkNumber))
+ // verify file exists
+ if _, err := os.Stat(jsonlFilePath); os.IsNotExist(err) {
+ missingFiles = append(missingFiles, jsonlFilePath)
+ }
+ // remove single quotes from the file path to avoid issues with SQL queries
+ escapedPath := strings.ReplaceAll(jsonlFilePath, "'", "''")
+ filenames[i] = escapedPath
+ }
+ if len(missingFiles) > 0 {
+		// raise conversion error for the missing files - we do not know the row count so pass zero
+ return filenames, NewConversionError(fmt.Errorf("%s not found",
+ utils.Pluralize("file", len(missingFiles))),
+ 0,
+ missingFiles...)
+
+ }
+ return filenames, nil
+}
+
+func (w *Converter) insertBatchIntoDuckLake(filenames []string) (err error) {
+ t := time.Now()
+
+ // copy the data from the jsonl file to a temp table
+ if err := w.copyChunkToTempTable(filenames); err != nil {
+		// copyChunkToTempTable will already have called handleSchemaChangeError and handleConversionError
+ return err
+ }
+
+ tempTime := time.Now()
+
+ // now validate the data
+ validateRowsError := w.validateRows(filenames)
+ if validateRowsError != nil {
+ // if the error is NOT RowValidationError, just return it
+ // (if it is a validation error, we have special handling)
+ if !errors.Is(validateRowsError, &RowValidationError{}) {
+ return validateRowsError
+ }
+
+ // so it IS a row validation error - the invalid rows will have been removed from the temp table
+ // - process the rest of the chunk
+ // ensure that we return the row validation error, merged with any other error we receive
+ defer func() {
+ if err == nil {
+ err = validateRowsError
+ } else {
+				// so we have an error (aside from the validation error)
+				// convert the validation error to a conversion error (which will wrap the validation error)
+ var conversionError *ConversionError
+ // we expect this will always pass
+ if errors.As(validateRowsError, &conversionError) {
+ conversionError.Merge(err)
+ }
+ err = conversionError
+ }
+ }()
+ }
+
+ slog.Debug("about to insert rows into ducklake table")
+
+ rowCount, err := w.insertIntoDucklake(w.Partition.TableName)
+ if err != nil {
+ slog.Error("failed to insert into DuckLake table", "table", w.Partition.TableName, "error", err)
+ return err
+ }
+
+ td := tempTime.Sub(t)
+ cd := time.Since(tempTime)
+ total := time.Since(t)
+
+ // Update counters and advance to the next batch
+ // if we have an error, return it below
+ // update the row count
+ w.updateRowCount(rowCount)
+
+ slog.Debug("inserted rows into DuckLake table", "chunks", len(filenames), "row count", rowCount, "error", err, "temp time", td.Milliseconds(), "conversion time", cd.Milliseconds(), "total time ", total.Milliseconds())
+ return nil
+}
+
+func (w *Converter) copyChunkToTempTable(jsonlFilePaths []string) error {
+ var queryBuilder strings.Builder
+
+ // Check for empty file paths
+ if len(jsonlFilePaths) == 0 {
+ return fmt.Errorf("no file paths provided")
+ }
+
+ // Create SQL array of file paths
+ var fileSQL string
+ if len(jsonlFilePaths) == 1 {
+ fileSQL = fmt.Sprintf("'%s'", jsonlFilePaths[0])
+ } else {
+ // For multiple files, create a properly quoted array
+ var quotedPaths []string
+ for _, jsonFilePath := range jsonlFilePaths {
+ quotedPaths = append(quotedPaths, fmt.Sprintf("'%s'", jsonFilePath))
+ }
+ fileSQL = "[" + strings.Join(quotedPaths, ", ") + "]"
+ }
+
+ // render the read JSON query with the jsonl file path
+	// - this builds a select clause which selects the required data from the JSONL files (with column types specified)
+ selectQuery := fmt.Sprintf(w.readJsonQueryFormat, fileSQL)
+
+ // Step: Prepare the temp table from JSONL input
+ //
+ // - Drop the temp table if it exists
+	// - Create a new temp table by executing the select query
+ queryBuilder.WriteString(fmt.Sprintf(`
+drop table if exists temp_data;
+
+create temp table temp_data as
+ %s
+`, selectQuery))
+
+ _, err := w.db.Exec(queryBuilder.String())
+ if err != nil {
+ // if the error is a schema change error, determine whether the schema of these chunks is
+ // different to the inferred schema
+ // w.handleSchemaChangeError either returns a schema change error or the original error
+ return w.handleSchemaChangeError(err, jsonlFilePaths...)
+ }
+
+ return nil
+}
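+
+// For example (paths illustrative), a single input renders fileSQL as
+// 'chunk_1.jsonl', while multiple inputs render as ['chunk_1.jsonl', 'chunk_2.jsonl'];
+// the result is substituted into readJsonQueryFormat to build the select query.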
+
+// insertIntoDucklake writes the rows in the temp_data table to the specified target DuckLake table.
+//
+// To prevent schema mismatches, it explicitly lists columns in the insert statement based on the conversion schema.
+//
+// Returns the number of rows inserted and any error encountered.
+func (w *Converter) insertIntoDucklake(targetTable string) (int64, error) {
+ // quote the table name
+ targetTable = fmt.Sprintf(`"%s"`, targetTable)
+
+ // Build the final INSERT INTO ... SELECT statement using the fully qualified table name.
+ columns := w.conversionSchema.ColumnString
+ insertQuery := fmt.Sprintf(`
+ insert into %s (%s)
+ select %s from temp_data
+ `, targetTable, columns, columns)
+
+ // Execute the insert statement
+ result, err := w.db.Exec(insertQuery)
+ if err != nil {
+ slog.Error(fmt.Sprintf("failed to insert data into DuckLake table db %p", w.db.DB), "table", targetTable, "error", err, "db", w.db.DB)
+ // It's helpful to wrap the error with context about what failed.
+ return 0, fmt.Errorf("failed to insert data into %s: %w", targetTable, err)
+ }
+
+ // Get the number of rows that were actually inserted.
+ insertedRowCount, err := result.RowsAffected()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get number of affected rows: %w", err)
+ }
+
+ return insertedRowCount, nil
+}
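+
+// For example, for target table "aws_cloudtrail" with conversion schema columns
+// tp_timestamp and tp_index (both illustrative), the generated statement is roughly:
+//
+//	insert into "aws_cloudtrail" (tp_timestamp, tp_index)
+//	select tp_timestamp, tp_index from temp_data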
+
+// handleSchemaChangeError determines if the error is because the schema of this chunk is different to the inferred schema
+// infer the schema of this chunk and compare - if they are different, return that in an error
+func (w *Converter) handleSchemaChangeError(origError error, jsonlFilePaths ...string) error {
+ // check all files for a schema change error
+ for _, jsonlFilePath := range jsonlFilePaths {
+ err := w.detectSchemaChange(jsonlFilePath)
+ if err != nil {
+ // if the error returned from detectSchemaChange is a SchemaChangeError, return that instead of the original error
+ // (ignore any other error - we will fall through to return original error)
+ var schemaChangeError = &SchemaChangeError{}
+ if errors.As(err, &schemaChangeError) {
+				// return the schema change error in place of the original error
+ return schemaChangeError
+ }
+ }
+ }
+
+ // just return the original error
+ return origError
+}
+
+// conversionRanOutOfMemory checks if the error is an out-of-memory error from DuckDB
+func conversionRanOutOfMemory(err error) bool {
+ var duckDBErr = &duckdb.Error{}
+ if errors.As(err, &duckDBErr) {
+ return duckDBErr.Type == duckdb.ErrorTypeOutOfMemory
+ }
+ return false
+}
diff --git a/internal/parquet/convertor_infer.go b/internal/database/convertor_schema.go
similarity index 88%
rename from internal/parquet/convertor_infer.go
rename to internal/database/convertor_schema.go
index 058ebe86..929d0795 100644
--- a/internal/parquet/convertor_infer.go
+++ b/internal/database/convertor_schema.go
@@ -1,13 +1,12 @@
-package parquet
+package database
import (
"encoding/json"
"fmt"
+ "path/filepath"
+
"github.com/turbot/tailpipe-plugin-sdk/schema"
"github.com/turbot/tailpipe-plugin-sdk/table"
- "github.com/turbot/tailpipe/internal/database"
- "log"
- "path/filepath"
)
// populate the ConversionSchema
@@ -46,8 +45,8 @@ func (w *Converter) inferConversionSchema(executionId string, chunkNumber int32)
}
func (w *Converter) InferSchemaForJSONLFile(filePath string) (*schema.TableSchema, error) {
- // TODO figure out why we need this hack - trying 2 different methods
- inferredSchema, err := w.inferSchemaForJSONLFileWithDescribe(filePath)
+ // depending on the data we have observed that one of the two queries will work
+ inferredSchema, err := w.inferSchemaForJSONLFileWithDescribe(w.db, filePath)
if err != nil {
inferredSchema, err = w.inferSchemaForJSONLFileWithJSONStructure(filePath)
}
@@ -62,13 +61,6 @@ func (w *Converter) InferSchemaForJSONLFile(filePath string) (*schema.TableSchem
// it uses 2 different queries as depending on the data, one or the other has been observed to work
// (needs investigation)
func (w *Converter) inferSchemaForJSONLFileWithJSONStructure(filePath string) (*schema.TableSchema, error) {
- // Open DuckDB connection
- db, err := database.NewDuckDb()
- if err != nil {
- log.Fatalf("failed to open DuckDB connection: %v", err)
- }
- defer db.Close()
-
// Query to infer schema using json_structure
query := `
select json_structure(json)::varchar as schema
@@ -77,7 +69,7 @@ func (w *Converter) inferSchemaForJSONLFileWithJSONStructure(filePath string) (*
`
var schemaStr string
- err = db.QueryRow(query, filePath).Scan(&schemaStr)
+ err := w.db.QueryRow(query, filePath).Scan(&schemaStr)
if err != nil {
return nil, fmt.Errorf("failed to execute query: %w", err)
}
@@ -105,15 +97,7 @@ func (w *Converter) inferSchemaForJSONLFileWithJSONStructure(filePath string) (*
return res, nil
}
-func (w *Converter) inferSchemaForJSONLFileWithDescribe(filePath string) (*schema.TableSchema, error) {
-
- // Open DuckDB connection
- db, err := database.NewDuckDb()
- if err != nil {
- log.Fatalf("failed to open DuckDB connection: %v", err)
- }
- defer db.Close()
-
+func (w *Converter) inferSchemaForJSONLFileWithDescribe(db *DuckDb, filePath string) (*schema.TableSchema, error) {
// Use DuckDB to describe the schema of the JSONL file
query := `SELECT column_name, column_type FROM (DESCRIBE (SELECT * FROM read_json_auto(?)))`
diff --git a/internal/database/convertor_validate.go b/internal/database/convertor_validate.go
new file mode 100644
index 00000000..8c96d944
--- /dev/null
+++ b/internal/database/convertor_validate.go
@@ -0,0 +1,110 @@
+package database
+
+import (
+ "fmt"
+ "strings"
+)
+
+// validateRows validates required fields are non null
+// it also validates that the schema of the chunk is the same as the inferred schema and if it is not, reports a useful error
+// the validation query returns the count of invalid rows and a list of the columns containing nulls
+func (w *Converter) validateRows(jsonlFilePaths []string) error {
+ // build array of required columns to validate
+ var requiredColumns []string
+ for _, col := range w.conversionSchema.Columns {
+ if col.Required {
+ // if the column is required, add it to the list of columns to validate
+ requiredColumns = append(requiredColumns, col.ColumnName)
+ }
+ }
+
+	// build a validation query to return the number of invalid rows and the columns with nulls
+ validationQuery := w.buildValidationQuery(requiredColumns)
+
+ row := w.db.QueryRow(validationQuery)
+ var failedRowCount int64
+ var columnsWithNullsInterface []interface{}
+
+ err := row.Scan(&failedRowCount, &columnsWithNullsInterface)
+ if err != nil {
+ return handleConversionError("row validation query failed", err, jsonlFilePaths...)
+ }
+
+ if failedRowCount == 0 {
+ // no rows with nulls - we are done
+ return nil
+ }
+
+ // delete invalid rows from the temp table
+ if err := w.deleteInvalidRows(requiredColumns); err != nil {
+ // failed to delete invalid rows - return an error
+ err := handleConversionError("failed to delete invalid rows from temp table", err, jsonlFilePaths...)
+ return err
+ }
+
+ // Convert the interface slice to string slice
+ var columnsWithNulls []string
+ for _, col := range columnsWithNullsInterface {
+ if col != nil {
+ columnsWithNulls = append(columnsWithNulls, col.(string))
+ }
+ }
+
+ // we have a failure - return an error with details about which columns had nulls
+ // wrap a row validation error inside a conversion error
+ return NewConversionError(NewRowValidationError(failedRowCount, columnsWithNulls), failedRowCount, jsonlFilePaths...)
+}
+
+// buildValidationQuery builds a query to copy the data from the select query to a temp table
+// it then validates that the required columns are not null, removing invalid rows and returning
+// the count of invalid rows and the columns with nulls
+func (w *Converter) buildValidationQuery(requiredColumns []string) string {
+ var queryBuilder strings.Builder
+
+ // Build the validation query that:
+ // - Counts distinct rows that have null values in required columns
+ // - Lists all required columns that contain null values
+ queryBuilder.WriteString(`select
+ count(distinct rowid) as rows_with_required_nulls, -- Count unique rows with nulls in required columns
+ coalesce(list(distinct col), []) as required_columns_with_nulls -- List required columns that have null values, defaulting to empty list if NULL
+from (`)
+
+	// For each required column we need to validate:
+ // - Create a query that selects rows where this column is null
+ // - Include the column name so we know which column had the null
+ // - UNION ALL combines all these results (faster than UNION as we don't need to deduplicate)
+ for i, col := range requiredColumns {
+ if i > 0 {
+ queryBuilder.WriteString(" union all\n")
+ }
+ // For each required column, create a query that:
+ // - Selects the rowid (to count distinct rows)
+ // - Includes the column name (to list which columns had nulls)
+ // - Only includes rows where this column is null
+ queryBuilder.WriteString(fmt.Sprintf(" select rowid, '%s' as col from temp_data where %s is null\n", col, col))
+ }
+
+ queryBuilder.WriteString(");")
+
+ return queryBuilder.String()
+}
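+
+// For required columns tp_timestamp and tp_index (illustrative), the generated
+// query is roughly:
+//
+//	select
+//	    count(distinct rowid) as rows_with_required_nulls,
+//	    coalesce(list(distinct col), []) as required_columns_with_nulls
+//	from (
+//	    select rowid, 'tp_timestamp' as col from temp_data where tp_timestamp is null
+//	    union all
+//	    select rowid, 'tp_index' as col from temp_data where tp_index is null
+//	);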
+
+// buildNullCheckQuery builds a WHERE clause to check for null values in the specified columns
+func (w *Converter) buildNullCheckQuery(requiredColumns []string) string {
+
+ // build a slice of null check conditions
+ conditions := make([]string, len(requiredColumns))
+ for i, col := range requiredColumns {
+ conditions[i] = fmt.Sprintf("%s is null", col)
+ }
+ return strings.Join(conditions, " or ")
+}
+
+// deleteInvalidRows removes rows with null values in the specified columns from the temp table
+func (w *Converter) deleteInvalidRows(requiredColumns []string) error {
+ whereClause := w.buildNullCheckQuery(requiredColumns)
+ query := fmt.Sprintf("delete from temp_data where %s;", whereClause)
+
+ _, err := w.db.Exec(query)
+ return err
+}
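+
+// For the same illustrative required columns, deleteInvalidRows executes:
+//
+//	delete from temp_data where tp_timestamp is null or tp_index is null;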
diff --git a/internal/database/create.go b/internal/database/create.go
deleted file mode 100644
index 9c237152..00000000
--- a/internal/database/create.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package database
-
-import (
- "context"
- _ "github.com/marcboeker/go-duckdb/v2"
- filehelpers "github.com/turbot/go-kit/files"
- _ "github.com/turbot/go-kit/helpers"
- _ "github.com/turbot/pipe-fittings/v2/utils"
- "github.com/turbot/tailpipe/internal/filepaths"
-)
-
-func EnsureDatabaseFile(ctx context.Context) error {
- databaseFilePath := filepaths.TailpipeDbFilePath()
- if filehelpers.FileExists(databaseFilePath) {
- return nil
- }
-
- //
- // Open a DuckDB connection (creates the file if it doesn't exist)
- db, err := NewDuckDb(WithDbFile(databaseFilePath))
- if err != nil {
- return err
- }
- defer db.Close()
-
- return AddTableViews(ctx, db)
-}
diff --git a/internal/database/duck_db.go b/internal/database/duck_db.go
index adf2eb42..d3033dbb 100644
--- a/internal/database/duck_db.go
+++ b/internal/database/duck_db.go
@@ -4,10 +4,13 @@ import (
"context"
"database/sql"
"fmt"
+ "log/slog"
"os"
+ "strings"
+ pconstants "github.com/turbot/pipe-fittings/v2/constants"
pf "github.com/turbot/pipe-fittings/v2/filepaths"
- "github.com/turbot/tailpipe/internal/constants"
+ "github.com/turbot/tailpipe/internal/config"
"github.com/turbot/tailpipe/internal/filepaths"
)
@@ -18,59 +21,86 @@ import (
type DuckDb struct {
// duckDb connection
*sql.DB
- extensions []string
- dataSourceName string
- tempDir string
- maxMemoryMb int
+ extensions []string
+ dataSourceName string
+ tempDir string
+ maxMemoryMb int
+ ducklakeEnabled bool
+ // create a read only connection to ducklake
+ duckLakeReadOnly bool
+
+ // a list of view filters - if this is set, we create a set of views in the database, one per table,
+ // applying the specified filter
+ // NOTE: if view filters are specified, the connection is set to READ ONLY mode (even if read only option is not set)
+ viewFilters []string
}
-func NewDuckDb(opts ...DuckDbOpt) (*DuckDb, error) {
- w := &DuckDb{}
+func NewDuckDb(opts ...DuckDbOpt) (_ *DuckDb, err error) {
+ slog.Info("Initializing DuckDB connection")
+
+ d := &DuckDb{}
for _, opt := range opts {
- opt(w)
+ opt(d)
}
+ defer func() {
+ if err != nil {
+ // If an error occurs during initialization, close the DB connection if it was opened
+ if d.DB != nil {
+ _ = d.DB.Close()
+ }
+ d.DB = nil // ensure DB is nil to avoid further operations on a closed connection
+ }
+ }()
+
// Connect to DuckDB
- db, err := sql.Open("duckdb", w.dataSourceName)
+ db, err := sql.Open("duckdb", d.dataSourceName)
if err != nil {
return nil, fmt.Errorf("failed to open DuckDB connection: %w", err)
}
- w.DB = db
+ d.DB = db
+
+ // for duckdb, limit connections to 1 - DuckDB is designed for single-connection usage
+ d.SetMaxOpenConns(1)
+
+ // set the extension directory
+ if _, err := d.DB.Exec("set extension_directory = ?;", pf.EnsurePipesDuckDbExtensionsDir()); err != nil {
+ return nil, fmt.Errorf("failed to set extension_directory: %w", err)
+ }
- if len(w.extensions) > 0 {
- // install and load the JSON extension
- if err := w.installAndLoadExtensions(); err != nil {
+ if len(d.extensions) > 0 {
+ // set extension dir and install any specified extensions
+ if err := d.installAndLoadExtensions(); err != nil {
 			return nil, fmt.Errorf("failed to install and load extensions: %w", err)
}
}
+ if d.ducklakeEnabled {
+ if err := d.connectDucklake(context.Background()); err != nil {
+ return nil, fmt.Errorf("failed to connect to DuckLake: %w", err)
+ }
+ }
- // Configure DuckDB's temp directory:
- // - If WithTempDir option was provided, use that directory
- // - Otherwise, use the collection temp directory (a subdirectory in the user's home directory
- // where temporary files for data collection are stored)
- tempDir := w.tempDir
- if tempDir == "" {
- baseDir := filepaths.EnsureCollectionTempDir()
- // Create a unique subdirectory with 'duckdb-' prefix
- // it is important to use a unique directory for each DuckDB instance as otherwise temp files from
- // different instances can conflict with each other, causing memory swapping issues
- uniqueTempDir, err := os.MkdirTemp(baseDir, "duckdb-")
+ // view filters are used to create a database with a filtered set of data to query,
+ // used to support date filtering for the index command
+ if len(d.viewFilters) > 0 {
+ err = d.createFilteredViews(d.viewFilters)
if err != nil {
- return nil, fmt.Errorf("failed to create unique temp directory: %w", err)
+ return nil, fmt.Errorf("failed to create filtered views: %w", err)
}
- tempDir = uniqueTempDir
}
- if _, err := db.Exec("set temp_directory = ?;", tempDir); err != nil {
- _ = w.Close()
- return nil, fmt.Errorf("failed to set temp_directory: %w", err)
+ // Configure DuckDB's temp directory
+ if err := d.setTempDir(); err != nil {
+ return nil, fmt.Errorf("failed to set DuckDB temp directory: %w", err)
}
- if w.maxMemoryMb > 0 {
- if _, err := db.Exec("set max_memory = ? || 'MB';", w.maxMemoryMb); err != nil {
- _ = w.Close()
+ // set the max memory if specified
+ if d.maxMemoryMb > 0 {
+ if _, err := db.Exec("set max_memory = ? || 'MB';", d.maxMemoryMb); err != nil {
+ _ = d.Close()
return nil, fmt.Errorf("failed to set max_memory: %w", err)
}
}
- return w, nil
+
+ return d, nil
}
func (d *DuckDb) Query(query string, args ...any) (*sql.Rows, error) {
@@ -127,13 +157,8 @@ func (d *DuckDb) installAndLoadExtensions() error {
return nil
}
- // set the extension directory
- if _, err := d.DB.Exec("set extension_directory = ?;", pf.EnsurePipesDuckDbExtensionsDir()); err != nil {
- return fmt.Errorf("failed to set extension_directory: %w", err)
- }
-
// install and load the extensions
- for _, extension := range constants.DuckDbExtensions {
+ for _, extension := range pconstants.DuckDbExtensions {
if _, err := d.DB.Exec(fmt.Sprintf("INSTALL '%s'; LOAD '%s';", extension, extension)); err != nil {
return fmt.Errorf("failed to install and load extension %s: %s", extension, err.Error())
}
@@ -141,3 +166,126 @@ func (d *DuckDb) installAndLoadExtensions() error {
return nil
}
+
+// connectDucklake connects the given DuckDB connection to DuckLake
+func (d *DuckDb) connectDucklake(ctx context.Context) error {
+ // we share the same set of commands for tailpipe connection - get init commands and execute them
+ commands := GetDucklakeInitCommands(d.duckLakeReadOnly)
+ // if there are NO view filters, set the default catalog to ducklake
+ // if there are view filters, the views will be created in the default memory catalog so do not change the default
+ if len(d.viewFilters) == 0 {
+ commands = append(commands, SqlCommand{
+ Description: "set default catalog to ducklake",
+ Command: fmt.Sprintf("use %s", pconstants.DuckLakeCatalog),
+ })
+ }
+
+	// tactical: if read only mode is set and the ducklake database does not exist, create it
+ // (creating a read only connection will FAIL if the ducklake database has not been created yet
+ // - writeable connections will create the database if it does not exist)
+ if d.duckLakeReadOnly {
+ if err := ensureDucklakeDb(); err != nil {
+ return fmt.Errorf("failed to ensure ducklake database exists: %w", err)
+ }
+ }
+
+ for _, cmd := range commands {
+ slog.Info(cmd.Description, "command", cmd.Command)
+ _, err := d.ExecContext(ctx, cmd.Command)
+ if err != nil {
+ return fmt.Errorf("%s failed: %w", cmd.Description, err)
+ }
+ }
+
+ return nil
+}
+
+// ensureDucklakeDb checks if the ducklake database file exists, and if not, creates it by opening
+// and closing a duckdb connection with ducklake enabled
+// this is used if we are creating a readonly db connection to ducklake
+// - readonly connections will fail if the ducklake database does not exist
+func ensureDucklakeDb() error {
+ //check db file exists
+ _, err := os.Stat(config.GlobalWorkspaceProfile.GetDucklakeDbPath())
+ if err == nil {
+ // file exists - nothing to do
+ return nil
+ }
+ // create a duck db connection then close again
+ db, err := NewDuckDb(WithDuckLake())
+ if err != nil {
+ return err
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("failed to close duckdb connection: %w", err)
+ }
+ return nil
+}
+
+func (d *DuckDb) createFilteredViews(filters []string) error {
+ // get the sql to create the views based on the filters
+ viewSql, err := GetCreateViewsSql(context.Background(), d, d.viewFilters...)
+ if err != nil {
+ return fmt.Errorf("failed to get create views sql: %w", err)
+ }
+ // execute the commands to create the views
+ slog.Info("Creating views")
+ for _, cmd := range viewSql {
+ if _, err := d.Exec(cmd.Command); err != nil {
+ return fmt.Errorf("failed to create view: %w", err)
+ }
+ }
+ return nil
+}
+
+// Configure DuckDB's temp directory
+// - If WithTempDir option was provided, use that directory
+// - Otherwise, use the collection temp directory (a subdirectory in the user's home directory
+// where temporary files for data collection are stored)
+func (d *DuckDb) setTempDir() error {
+ tempDir := d.tempDir
+ if tempDir == "" {
+ baseDir := filepaths.EnsureCollectionTempDir()
+ // Create a unique subdirectory with 'duckdb-' prefix
+ // it is important to use a unique directory for each DuckDB instance as otherwise temp files from
+ // different instances can conflict with each other, causing memory swapping issues
+ uniqueTempDir, err := os.MkdirTemp(baseDir, "duckdb-")
+ if err != nil {
+ return fmt.Errorf("failed to create unique temp directory: %w", err)
+ }
+ tempDir = uniqueTempDir
+ }
+
+ if _, err := d.Exec("set temp_directory = ?;", tempDir); err != nil {
+ _ = d.Close()
+ return fmt.Errorf("failed to set temp_directory: %w", err)
+ }
+ return nil
+}
+
+// GetDucklakeInitCommands returns the set of SQL commands required to initialize and connect to DuckLake.
+// this is used both when tailpipe connects to ducklake and by 'tailpipe connect' to build the init script
+// It returns an ordered slice of SQL commands.
+func GetDucklakeInitCommands(readonly bool) []SqlCommand {
+ attachOptions := []string{
+ fmt.Sprintf("data_path '%s'", config.GlobalWorkspaceProfile.GetDataDir()),
+ "meta_journal_mode 'WAL'",
+ }
+ // if readonly mode is requested, add the option
+ if readonly {
+ attachOptions = append(attachOptions, "READ_ONLY")
+ }
+ attachQuery := fmt.Sprintf(`attach 'ducklake:sqlite:%s' AS %s (
+ %s)`,
+ config.GlobalWorkspaceProfile.GetDucklakeDbPath(),
+ pconstants.DuckLakeCatalog,
+ strings.Join(attachOptions, ",\n\t"))
+
+ commands := []SqlCommand{
+ {Description: "install sqlite extension", Command: "install sqlite"},
+ {Description: "install ducklake extension", Command: "install ducklake;"},
+ {Description: "attach to ducklake database", Command: attachQuery},
+ }
+ return commands
+}
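+
+// For a workspace whose metadata db lives at ~/.tailpipe/data/default/tailpipe.db
+// (path illustrative), and assuming pconstants.DuckLakeCatalog resolves to
+// "tailpipe_ducklake" (consistent with the __ducklake_metadata_tailpipe_ducklake
+// references elsewhere in this package), the readonly attach command is roughly:
+//
+//	attach 'ducklake:sqlite:~/.tailpipe/data/default/tailpipe.db' AS tailpipe_ducklake (
+//		data_path '~/.tailpipe/data/default',
+//		meta_journal_mode 'WAL',
+//		READ_ONLY)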
diff --git a/internal/database/duck_db_error.go b/internal/database/duck_db_error.go
index d03e0a80..693c48a5 100644
--- a/internal/database/duck_db_error.go
+++ b/internal/database/duck_db_error.go
@@ -7,6 +7,7 @@ import (
"os"
"regexp"
"sort"
+ "strconv"
"strings"
"time"
@@ -81,7 +82,6 @@ func handleDuckDbError(err error) error {
return newInvalidParquetError(updatedFilename)
}
// so we have no filename
- //TODO handle Invalid Error: TProtocolException: Invalid data
}
return err
@@ -162,21 +162,34 @@ func newInvalidParquetError(parquetFilePath string) error {
parquetFilePath: parquetFilePath,
}
+ var year, month int
+
// Extract table, partition and date from path components
parts := strings.Split(parquetFilePath, "/")
for _, part := range parts {
- if strings.HasPrefix(part, "tp_table=") {
+ switch {
+ case strings.HasPrefix(part, "tp_table="):
err.table = strings.TrimPrefix(part, "tp_table=")
- } else if strings.HasPrefix(part, "tp_partition=") {
+ case strings.HasPrefix(part, "tp_partition="):
err.partition = strings.TrimPrefix(part, "tp_partition=")
- } else if strings.HasPrefix(part, "tp_date=") {
- dateString := strings.TrimPrefix(part, "tp_date=")
- date, parseErr := time.Parse("2006-01-02", dateString)
+ case strings.HasPrefix(part, "year="):
+ yearString := strings.TrimPrefix(part, "year=")
+ y, parseErr := strconv.Atoi(yearString)
+ if parseErr == nil {
+ year = y
+ }
+ case strings.HasPrefix(part, "month="):
+ monthString := strings.TrimPrefix(part, "month=")
+ m, parseErr := strconv.Atoi(monthString)
if parseErr == nil {
- err.date = date
+ month = m
}
}
}
+ // if we have a year and month, set the error date
+ if year > 0 && month > 0 {
+ err.date = time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC)
+ }
return err
}
diff --git a/internal/database/duck_db_options.go b/internal/database/duck_db_options.go
index 1f39c46c..9b303915 100644
--- a/internal/database/duck_db_options.go
+++ b/internal/database/duck_db_options.go
@@ -38,3 +38,22 @@ func WithMaxMemoryMb(maxMemoryMb int) DuckDbOpt {
d.maxMemoryMb = maxMemoryMb
}
}
+
+// WithDuckLake enables the DuckLake extension for DuckDB.
+func WithDuckLake() DuckDbOpt {
+ return func(d *DuckDb) {
+ d.ducklakeEnabled = true
+ }
+}
+
+// WithDuckLakeReadonly enables the DuckLake extension in read-only mode.
+// filters is an optional list of SQL filter expressions - if specified, a view will be created for each table in the database
+// and the filters will be applied to the view.
+// If no filters are specified, the ducklake attachment will be set as the default catalog so the tables can be accessed directly
+func WithDuckLakeReadonly(filters ...string) DuckDbOpt {
+ return func(d *DuckDb) {
+ d.ducklakeEnabled = true
+ d.duckLakeReadOnly = true
+ d.viewFilters = filters
+ }
+}
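+
+// A usage sketch (filter expression illustrative):
+//
+//	db, err := NewDuckDb(WithDuckLakeReadonly("tp_timestamp >= date '2024-01-01'"))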
diff --git a/internal/database/duck_db_test.go b/internal/database/duck_db_test.go
index 9d7cdbc6..36c15bb2 100644
--- a/internal/database/duck_db_test.go
+++ b/internal/database/duck_db_test.go
@@ -110,8 +110,8 @@ func Test_executeWithParquetErrorRetry(t *testing.T) {
// Helper function to create a test file with proper path structure
mkTestFile := func(attempt int) string {
- // Create a path that matches the expected format: tp_table=aws_cloudtrail/tp_partition=cloudtrail/tp_date=2024-03-20/test.parquet.N
- path := filepath.Join(tmpDir, "tp_table=aws_cloudtrail", "tp_partition=cloudtrail", "tp_date=2024-03-20")
+ // Create a path that matches the expected format: tp_table=aws_cloudtrail/tp_partition=cloudtrail/year=2024/month=03/test.parquet
+ path := filepath.Join(tmpDir, "tp_table=aws_cloudtrail", "tp_partition=cloudtrail", "year=2024", "month=03")
if err := os.MkdirAll(path, 0755); err != nil {
t.Fatalf("failed to create test directory: %v", err)
}
@@ -206,6 +206,9 @@ func Test_executeWithParquetErrorRetry(t *testing.T) {
}
func TestDuckDb_WrapperMethods(t *testing.T) {
+ // TODO fix me
+ t.Skip("Skipping this test due to CI issues")
+
// Create a temporary directory for testing
tmpDir := t.TempDir()
@@ -217,7 +220,9 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test Query
t.Run("Query", func(t *testing.T) {
- rows, err := db.Query("select 1")
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ rows, err := db.QueryContext(ctx, "select 1")
if err != nil {
t.Errorf("Query failed: %v", err)
}
@@ -228,7 +233,8 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test QueryContext
t.Run("QueryContext", func(t *testing.T) {
- ctx := context.Background()
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
rows, err := db.QueryContext(ctx, "select 1")
if err != nil {
t.Errorf("QueryContext failed: %v", err)
@@ -240,7 +246,9 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test QueryRow
t.Run("QueryRow", func(t *testing.T) {
- row := db.QueryRow("select 1")
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ row := db.QueryRowContext(ctx, "select 1")
if row == nil {
t.Error("QueryRow returned nil")
}
@@ -248,7 +256,8 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test QueryRowContext
t.Run("QueryRowContext", func(t *testing.T) {
- ctx := context.Background()
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
row := db.QueryRowContext(ctx, "select 1")
if row == nil {
t.Error("QueryRowContext returned nil")
@@ -257,7 +266,9 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test Exec
t.Run("Exec", func(t *testing.T) {
- result, err := db.Exec("select 1")
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ result, err := db.ExecContext(ctx, "select 1")
if err != nil {
t.Errorf("Exec failed: %v", err)
}
@@ -268,7 +279,8 @@ func TestDuckDb_WrapperMethods(t *testing.T) {
// Test ExecContext
t.Run("ExecContext", func(t *testing.T) {
- ctx := context.Background()
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
result, err := db.ExecContext(ctx, "select 1")
if err != nil {
t.Errorf("ExecContext failed: %v", err)
diff --git a/internal/database/ducklake_table.go b/internal/database/ducklake_table.go
new file mode 100644
index 00000000..b774b17c
--- /dev/null
+++ b/internal/database/ducklake_table.go
@@ -0,0 +1,107 @@
+package database
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/turbot/tailpipe-plugin-sdk/constants"
+ "github.com/turbot/tailpipe-plugin-sdk/schema"
+)
+
+// EnsureDuckLakeTable determines whether we have a ducklake table for this table, and if so, whether it needs schema updating
+func EnsureDuckLakeTable(columns []*schema.ColumnSchema, db *DuckDb, tableName string) error {
+ query := fmt.Sprintf("select exists (select 1 from information_schema.tables where table_name = '%s')", tableName)
+ var exists bool
+ if err := db.QueryRow(query).Scan(&exists); err != nil {
+ return err
+ }
+ if !exists {
+ return createDuckLakeTable(columns, db, tableName)
+ }
+ return nil
+}
+
+// createDuckLakeTable creates a DuckLake table based on the ConversionSchema
+func createDuckLakeTable(columns []*schema.ColumnSchema, db *DuckDb, tableName string) error {
+
+ // Generate the CREATE TABLE SQL
+ createTableSQL := buildCreateDucklakeTableSQL(columns, tableName)
+
+ // Execute the CREATE TABLE statement
+ _, err := db.Exec(createTableSQL)
+ if err != nil {
+ return fmt.Errorf("failed to create table %s: %w", tableName, err)
+ }
+
+ // Set partitioning using ALTER TABLE
+ // partition by the partition, index, year and month
+ partitionColumns := []string{constants.TpPartition, constants.TpIndex, fmt.Sprintf("year(%s)", constants.TpTimestamp), fmt.Sprintf("month(%s)", constants.TpTimestamp)}
+ alterTableSQL := fmt.Sprintf(`alter table "%s" set partitioned by (%s);`,
+ tableName,
+ strings.Join(partitionColumns, ", "))
+
+ _, err = db.Exec(alterTableSQL)
+ if err != nil {
+ return fmt.Errorf("failed to set partitioning for table %s: %w", tableName, err)
+ }
+
+ return nil
+}
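+
+// Assuming the sdk constants resolve to the tp_* column names used elsewhere in
+// this package, the partitioning statement for a table "aws_cloudtrail"
+// (illustrative) is roughly:
+//
+//	alter table "aws_cloudtrail" set partitioned by (tp_partition, tp_index, year(tp_timestamp), month(tp_timestamp));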
+
+// buildCreateDucklakeTableSQL generates the CREATE TABLE SQL statement based on the ConversionSchema
+func buildCreateDucklakeTableSQL(columns []*schema.ColumnSchema, tableName string) string {
+ // Build column definitions in sorted order
+ var columnDefinitions []string
+ for _, column := range columns {
+ columnDef := buildColumnDefinition(column)
+ columnDefinitions = append(columnDefinitions, columnDef)
+ }
+
+ return fmt.Sprintf(`create table if not exists "%s" (
+%s
+);`,
+ tableName,
+ strings.Join(columnDefinitions, ",\n"))
+}
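+
+// For a schema with a varchar tp_index and a timestamp tp_timestamp
+// (illustrative), the generated statement is roughly:
+//
+//	create table if not exists "aws_cloudtrail" (
+//		"tp_index" varchar,
+//		"tp_timestamp" timestamp
+//	);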
+
+// buildColumnDefinition generates the SQL definition for a single column
+func buildColumnDefinition(column *schema.ColumnSchema) string {
+ columnName := fmt.Sprintf("\"%s\"", column.ColumnName)
+
+ // Handle different column types
+ switch column.Type {
+ case "struct":
+ // For struct types, we need to build the struct definition
+ structDef := buildStructDefinition(column)
+ return fmt.Sprintf("\t%s %s", columnName, structDef)
+ case "json":
+ // json type
+ return fmt.Sprintf("\t%s json", columnName)
+ default:
+ // For scalar types, just use the type directly (lower case)
+ return fmt.Sprintf("\t%s %s", columnName, strings.ToLower(column.Type))
+ }
+}
+
+// buildStructDefinition generates the SQL struct definition for a struct column
+func buildStructDefinition(column *schema.ColumnSchema) string {
+ if len(column.StructFields) == 0 {
+ return "struct"
+ }
+
+ var fieldDefinitions []string
+ for _, field := range column.StructFields {
+ fieldName := fmt.Sprintf("\"%s\"", field.ColumnName)
+ fieldType := strings.ToLower(field.Type)
+
+ if field.Type == "struct" {
+ // Recursively build nested struct definition
+ nestedStruct := buildStructDefinition(field)
+ fieldDefinitions = append(fieldDefinitions, fmt.Sprintf("%s %s", fieldName, nestedStruct))
+ } else {
+ fieldDefinitions = append(fieldDefinitions, fmt.Sprintf("%s %s", fieldName, fieldType))
+ }
+ }
+
+ return fmt.Sprintf("struct(%s)", strings.Join(fieldDefinitions, ", "))
+}
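+
+// For example, a struct column with fields id (varchar) and age (bigint) renders as:
+//
+//	struct("id" varchar, "age" bigint)
+//
+// and nested struct fields recurse, e.g. struct("user" struct("id" varchar)).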
diff --git a/internal/database/file_metadata.go b/internal/database/file_metadata.go
new file mode 100644
index 00000000..6d48665c
--- /dev/null
+++ b/internal/database/file_metadata.go
@@ -0,0 +1,102 @@
+package database
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/turbot/pipe-fittings/v2/constants"
+)
+
+// FileMetadata represents the result of a file metadata query
+type FileMetadata struct {
+ FileSize int64
+ FileCount int64
+ RowCount int64
+}
+
+// TableExists checks if a table exists in the DuckLake metadata tables
+func TableExists(ctx context.Context, tableName string, db *DuckDb) (bool, error) {
+ query := fmt.Sprintf(`select count(*) from %s.ducklake_table where table_name = ?`, constants.DuckLakeMetadataCatalog)
+
+ var count int64
+ err := db.QueryRowContext(ctx, query, tableName).Scan(&count)
+ if err != nil {
+ return false, fmt.Errorf("unable to check if table %s exists: %w", tableName, err)
+ }
+
+ return count > 0, nil
+}
+
+// GetTableFileMetadata gets file metadata for a specific table from DuckLake metadata tables
+func GetTableFileMetadata(ctx context.Context, tableName string, db *DuckDb) (*FileMetadata, error) {
+ // first see if the table exists
+ exists, err := TableExists(ctx, tableName, db)
+ if err != nil {
+ return nil, fmt.Errorf("unable to check if table %s exists: %w", tableName, err)
+ }
+ if !exists {
+ // leave everything at zero
+ return &FileMetadata{}, nil
+ }
+
+ query := fmt.Sprintf(`select
+ sum(f.file_size_bytes) as total_size,
+ count(*) as file_count,
+ sum(f.record_count) as row_count
+from %s.ducklake_data_file f
+ join %s.ducklake_partition_info p on f.partition_id = p.partition_id
+ join %s.ducklake_table tp on p.table_id = tp.table_id
+where tp.table_name = ? and f.end_snapshot is null`,
+ constants.DuckLakeMetadataCatalog,
+ constants.DuckLakeMetadataCatalog,
+ constants.DuckLakeMetadataCatalog)
+
+ var totalSize, fileCount, rowCount int64
+ err = db.QueryRowContext(ctx, query, tableName).Scan(&totalSize, &fileCount, &rowCount)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain file metadata for table %s: %w", tableName, err)
+ }
+
+ return &FileMetadata{
+ FileSize: totalSize,
+ FileCount: fileCount,
+ RowCount: rowCount,
+ }, nil
+}
+
+// GetPartitionFileMetadata gets file metadata for a specific partition from DuckLake metadata tables
+func GetPartitionFileMetadata(ctx context.Context, tableName, partitionName string, db *DuckDb) (*FileMetadata, error) {
+ // first see if the table exists
+ exists, err := TableExists(ctx, tableName, db)
+ if err != nil {
+ return nil, fmt.Errorf("unable to check if table %s exists: %w", tableName, err)
+ }
+ if !exists {
+ // leave everything at zero
+ return &FileMetadata{}, nil
+ }
+
+ query := fmt.Sprintf(`select
+ coalesce(sum(f.file_size_bytes), 0) as total_size,
+ coalesce(count(*), 0) as file_count,
+ coalesce(sum(f.record_count), 0) as row_count
+from %s.ducklake_data_file f
+ join %s.ducklake_file_partition_value fpv on f.data_file_id = fpv.data_file_id
+ join %s.ducklake_table tp on fpv.table_id = tp.table_id
+where tp.table_name = ? and fpv.partition_value = ? and f.end_snapshot is null`,
+ constants.DuckLakeMetadataCatalog,
+ constants.DuckLakeMetadataCatalog,
+ constants.DuckLakeMetadataCatalog)
+
+ var totalSize, fileCount, rowCount int64
+ err = db.QueryRowContext(ctx, query, tableName, partitionName).Scan(&totalSize, &fileCount, &rowCount)
+ if err != nil {
+ return nil, fmt.Errorf("unable to obtain file metadata for partition %s.%s: %w", tableName, partitionName, err)
+ }
+
+ return &FileMetadata{
+ FileSize: totalSize,
+ FileCount: fileCount,
+ RowCount: rowCount,
+ }, nil
+}
diff --git a/internal/database/partition_key.go b/internal/database/partition_key.go
new file mode 100644
index 00000000..053ad58d
--- /dev/null
+++ b/internal/database/partition_key.go
@@ -0,0 +1,175 @@
+package database
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	"github.com/turbot/tailpipe/internal/config"
+)
+
+// partitionKey uniquely identifies a combination of ducklake partition columns:
+// tp_table, tp_partition, tp_index, year(tp_timestamp), month(tp_timestamp).
+// It also stores the file count and partition config for that partition key.
+type partitionKey struct {
+ tpTable string
+ tpPartition string
+ tpIndex string
+ year string // year(tp_timestamp) from partition value
+ month string // month(tp_timestamp) from partition value
+ fileCount int // number of files for this partition key
+ partitionConfig *config.Partition
+}
+
+// getPartitionKeysMatchingPattern queries the ducklake_data_file metadata to get all partition key
+// combinations which satisfy the provided patterns, along with the file count for each combination
+func getPartitionKeysMatchingPattern(ctx context.Context, db *DuckDb, patterns []*PartitionPattern) ([]*partitionKey, error) {
+ // This query joins the DuckLake metadata tables to get partition key combinations:
+ // - ducklake_data_file: contains file metadata and links to tables
+ // - ducklake_file_partition_value: contains partition values for each file
+ // - ducklake_table: contains table names
+ //
+ // The partition key structure is:
+ // - fpv1 (index 0): tp_partition (e.g., "2024-07")
+ // - fpv2 (index 1): tp_index (e.g., "index1")
+ // - fpv3 (index 2): year(tp_timestamp) (e.g., "2024")
+ // - fpv4 (index 3): month(tp_timestamp) (e.g., "7")
+ //
+ // We group by these partition keys and count files per combination,
+ // filtering for active files (end_snapshot is null)
+ // NOTE: Assumes partitions are defined in order: tp_partition (0), tp_index (1), year(tp_timestamp) (2), month(tp_timestamp) (3)
+ query := `select
+ t.table_name as tp_table,
+ fpv1.partition_value as tp_partition,
+ fpv2.partition_value as tp_index,
+ fpv3.partition_value as year,
+ fpv4.partition_value as month,
+ count(*) as file_count
+from __ducklake_metadata_tailpipe_ducklake.ducklake_data_file df
+join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv1
+ on df.data_file_id = fpv1.data_file_id and fpv1.partition_key_index = 0
+join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv2
+ on df.data_file_id = fpv2.data_file_id and fpv2.partition_key_index = 1
+join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv3
+ on df.data_file_id = fpv3.data_file_id and fpv3.partition_key_index = 2
+join __ducklake_metadata_tailpipe_ducklake.ducklake_file_partition_value fpv4
+ on df.data_file_id = fpv4.data_file_id and fpv4.partition_key_index = 3
+join __ducklake_metadata_tailpipe_ducklake.ducklake_table t
+ on df.table_id = t.table_id
+where df.end_snapshot is null
+group by
+ t.table_name,
+ fpv1.partition_value,
+ fpv2.partition_value,
+ fpv3.partition_value,
+ fpv4.partition_value;`
+
+ rows, err := db.QueryContext(ctx, query)
+ if err != nil {
+		return nil, fmt.Errorf("failed to get partition keys matching patterns: %w", err)
+ }
+ defer rows.Close()
+
+ var partitionKeys []*partitionKey
+ for rows.Next() {
+ var pk = &partitionKey{}
+
+ if err := rows.Scan(&pk.tpTable, &pk.tpPartition, &pk.tpIndex, &pk.year, &pk.month, &pk.fileCount); err != nil {
+ return nil, fmt.Errorf("failed to scan partition key row: %w", err)
+ }
+
+		// retrieve the partition config for this key (which may not exist - that is ok)
+ partitionConfig, ok := config.GlobalConfig.Partitions[pk.partitionName()]
+ if ok {
+ pk.partitionConfig = partitionConfig
+ }
+
+ // check whether this partition key matches any of the provided patterns and whether there are any files
+ if pk.fileCount > 0 && PartitionMatchesPatterns(pk.tpTable, pk.tpPartition, patterns) {
+ partitionKeys = append(partitionKeys, pk)
+ }
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("error iterating partition key rows: %w", err)
+	}
+
+	return partitionKeys, nil
+}
+
+// findOverlappingFileRanges finds sets of files that have overlapping time ranges and converts them to unorderedDataTimeRange
+func (p *partitionKey) findOverlappingFileRanges(fileRanges []fileTimeRange) ([]unorderedDataTimeRange, error) {
+ if len(fileRanges) <= 1 {
+ return []unorderedDataTimeRange{}, nil
+ }
+
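+	// sweep-line approach: after sorting by start time, each unprocessed file seeds
+	// a set, and later files are merged in while their ranges overlap the set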
+ // Sort by start time - O(n log n)
+ sort.Slice(fileRanges, func(i, j int) bool {
+ return fileRanges[i].min.Before(fileRanges[j].min)
+ })
+
+ var unorderedRanges []unorderedDataTimeRange
+ processedFiles := make(map[string]struct{})
+
+ for i, currentFile := range fileRanges {
+ if _, processed := processedFiles[currentFile.path]; processed {
+ continue
+ }
+
+ // Find all files that overlap with this one
+ overlappingFiles := p.findFilesOverlappingWith(currentFile, fileRanges[i+1:], processedFiles)
+
+ // Only keep sets with multiple files (single files don't need compaction)
+ if len(overlappingFiles) > 1 {
+ // Convert overlapping files to unorderedDataTimeRange
+ timeRange, err := newUnorderedDataTimeRange(overlappingFiles)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create unordered time range: %w", err)
+ }
+ unorderedRanges = append(unorderedRanges, timeRange)
+ }
+ }
+
+ return unorderedRanges, nil
+}
+
+// findFilesOverlappingWith finds all files that overlap with the given file
+func (p *partitionKey) findFilesOverlappingWith(startFile fileTimeRange, remainingFiles []fileTimeRange, processedFiles map[string]struct{}) []fileTimeRange {
+ overlappingFileRanges := []fileTimeRange{startFile}
+ processedFiles[startFile.path] = struct{}{}
+ setMaxEnd := startFile.max
+
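+	// note: overlap is transitive through the set - a candidate may overlap a file
+	// added earlier without overlapping startFile itself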
+ for _, candidateFile := range remainingFiles {
+ if _, processed := processedFiles[candidateFile.path]; processed {
+ continue
+ }
+
+ // Early termination: if candidate starts after set ends, no more overlaps
+ if candidateFile.min.After(setMaxEnd) {
+ break
+ }
+
+ // Check if this file overlaps with any file in our set
+ if p.fileOverlapsWithSet(candidateFile, overlappingFileRanges) {
+ overlappingFileRanges = append(overlappingFileRanges, candidateFile)
+ processedFiles[candidateFile.path] = struct{}{}
+
+ // Update set's max end time
+ if candidateFile.max.After(setMaxEnd) {
+ setMaxEnd = candidateFile.max
+ }
+ }
+ }
+
+ return overlappingFileRanges
+}
+
+// fileOverlapsWithSet checks if a file overlaps with any file in the set
+func (p *partitionKey) fileOverlapsWithSet(candidateFile fileTimeRange, fileSet []fileTimeRange) bool {
+ for _, setFile := range fileSet {
+ if rangesOverlap(setFile, candidateFile) {
+ return true
+ }
+ }
+ return false
+}
+
+// return fully qualified partition name (table.partition)
+func (p *partitionKey) partitionName() string {
+ return fmt.Sprintf("%s.%s", p.tpTable, p.tpPartition)
+}
diff --git a/internal/database/partition_key_test.go b/internal/database/partition_key_test.go
new file mode 100644
index 00000000..f995aa55
--- /dev/null
+++ b/internal/database/partition_key_test.go
@@ -0,0 +1,383 @@
+package database
+
+import (
+ "testing"
+ "time"
+)
+
+// timeString is a helper function to create time.Time from string
+func timeString(timeStr string) time.Time {
+ t, err := time.Parse("2006-01-02 15:04:05", timeStr)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+func TestPartitionKeyRangeOperations(t *testing.T) {
+ pk := &partitionKey{}
+
+ tests := []struct {
+ name string
+ testType string // "rangesOverlap", "findOverlappingFileRanges", "newUnorderedDataTimeRange"
+ input interface{}
+ expected interface{}
+ }{
+ // Test cases for rangesOverlap function
+ {
+ name: "rangesOverlap - overlapping ranges",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00")},
+ },
+ expected: true,
+ },
+ {
+ name: "rangesOverlap - non-overlapping ranges",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-03 00:00:00"), max: timeString("2024-01-04 00:00:00")},
+ },
+ expected: false,
+ },
+ {
+ name: "rangesOverlap - touching ranges (contiguous, not overlapping)",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-02 00:00:00"), max: timeString("2024-01-03 00:00:00")},
+ },
+ expected: false,
+ },
+ {
+ name: "rangesOverlap - identical ranges",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ },
+ expected: true,
+ },
+ {
+ name: "rangesOverlap - partial overlap",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 12:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-02 00:00:00"), max: timeString("2024-01-03 00:00:00")},
+ },
+ expected: true,
+ },
+ {
+ name: "rangesOverlap - one range completely inside another",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-05 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-02 00:00:00"), max: timeString("2024-01-03 00:00:00")},
+ },
+ expected: true,
+ },
+ {
+ name: "rangesOverlap - ranges with same start time",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-03 00:00:00")},
+ },
+ expected: true,
+ },
+ {
+ name: "rangesOverlap - ranges with same end time",
+ testType: "rangesOverlap",
+ input: struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ }{
+ r1: fileTimeRange{min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00")},
+ r2: fileTimeRange{min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-02 00:00:00")},
+ },
+ expected: true,
+ },
+
+ // Test cases for findOverlappingFileRanges function
+ {
+ name: "findOverlappingFileRanges - no overlaps",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-03 00:00:00"), max: timeString("2024-01-04 00:00:00"), rowCount: 2000},
+ {path: "file3", min: timeString("2024-01-05 00:00:00"), max: timeString("2024-01-06 00:00:00"), rowCount: 1500},
+ },
+ expected: []unorderedDataTimeRange{},
+ },
+ {
+ name: "findOverlappingFileRanges - simple overlap",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 2000},
+ },
+ expected: []unorderedDataTimeRange{
+ {
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-03 00:00:00"),
+ RowCount: 3000,
+ },
+ },
+ },
+ {
+ name: "findOverlappingFileRanges - cross-overlapping sets",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 2000},
+ {path: "file3", min: timeString("2024-01-02 12:00:00"), max: timeString("2024-01-04 00:00:00"), rowCount: 1500},
+ {path: "file4", min: timeString("2024-01-03 12:00:00"), max: timeString("2024-01-05 00:00:00"), rowCount: 1800},
+ },
+ expected: []unorderedDataTimeRange{
+ {
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-05 00:00:00"),
+ RowCount: 6300,
+ },
+ },
+ },
+ {
+ name: "findOverlappingFileRanges - multiple separate groups",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 2000},
+ {path: "file3", min: timeString("2024-01-05 00:00:00"), max: timeString("2024-01-06 00:00:00"), rowCount: 1500},
+ {path: "file4", min: timeString("2024-01-05 12:00:00"), max: timeString("2024-01-07 00:00:00"), rowCount: 1800},
+ },
+ expected: []unorderedDataTimeRange{
+ {
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-03 00:00:00"),
+ RowCount: 3000,
+ },
+ {
+ StartTime: timeString("2024-01-05 00:00:00"),
+ EndTime: timeString("2024-01-07 00:00:00"),
+ RowCount: 3300,
+ },
+ },
+ },
+ {
+ name: "findOverlappingFileRanges - single file",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ },
+ expected: []unorderedDataTimeRange{},
+ },
+ {
+ name: "findOverlappingFileRanges - empty input",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{},
+ expected: []unorderedDataTimeRange{},
+ },
+ {
+ name: "findOverlappingFileRanges - three overlapping files",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-02 12:00:00"), rowCount: 2000},
+ {path: "file3", min: timeString("2024-01-02 00:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 1500},
+ },
+ expected: []unorderedDataTimeRange{
+ {
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-03 00:00:00"),
+ RowCount: 4500,
+ },
+ },
+ },
+ {
+ name: "findOverlappingFileRanges - files with identical time ranges",
+ testType: "findOverlappingFileRanges",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 2000},
+ },
+ expected: []unorderedDataTimeRange{
+ {
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-02 00:00:00"),
+ RowCount: 3000,
+ },
+ },
+ },
+
+ // Test cases for newUnorderedDataTimeRange function
+ {
+ name: "newUnorderedDataTimeRange - single file",
+ testType: "newUnorderedDataTimeRange",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ },
+ expected: unorderedDataTimeRange{
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-02 00:00:00"),
+ RowCount: 1000,
+ },
+ },
+ {
+ name: "newUnorderedDataTimeRange - multiple overlapping files",
+ testType: "newUnorderedDataTimeRange",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 2000},
+ {path: "file3", min: timeString("2024-01-02 00:00:00"), max: timeString("2024-01-04 00:00:00"), rowCount: 1500},
+ },
+ expected: unorderedDataTimeRange{
+ StartTime: timeString("2024-01-01 00:00:00"), // earliest start
+ EndTime: timeString("2024-01-04 00:00:00"), // latest end
+ RowCount: 4500, // sum of all row counts
+ },
+ },
+ {
+ name: "newUnorderedDataTimeRange - files with zero row counts",
+ testType: "newUnorderedDataTimeRange",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 0},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 1000},
+ },
+ expected: unorderedDataTimeRange{
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-03 00:00:00"),
+ RowCount: 1000,
+ },
+ },
+ {
+ name: "newUnorderedDataTimeRange - files with same start time",
+ testType: "newUnorderedDataTimeRange",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-03 00:00:00"), rowCount: 2000},
+ },
+ expected: unorderedDataTimeRange{
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-03 00:00:00"),
+ RowCount: 3000,
+ },
+ },
+ {
+ name: "newUnorderedDataTimeRange - files with same end time",
+ testType: "newUnorderedDataTimeRange",
+ input: []fileTimeRange{
+ {path: "file1", min: timeString("2024-01-01 00:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 1000},
+ {path: "file2", min: timeString("2024-01-01 12:00:00"), max: timeString("2024-01-02 00:00:00"), rowCount: 2000},
+ },
+ expected: unorderedDataTimeRange{
+ StartTime: timeString("2024-01-01 00:00:00"),
+ EndTime: timeString("2024-01-02 00:00:00"),
+ RowCount: 3000,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ switch tt.testType {
+ case "rangesOverlap":
+ input := tt.input.(struct {
+ r1 fileTimeRange
+ r2 fileTimeRange
+ })
+ result := rangesOverlap(input.r1, input.r2)
+ expected := tt.expected.(bool)
+ if result != expected {
+ t.Errorf("rangesOverlap() = %v, expected %v", result, expected)
+ }
+
+ case "findOverlappingFileRanges":
+ input := tt.input.([]fileTimeRange)
+ expected := tt.expected.([]unorderedDataTimeRange)
+ result, err := pk.findOverlappingFileRanges(input)
+ if err != nil {
+ t.Fatalf("findOverlappingFileRanges() error = %v", err)
+ }
+ if !compareUnorderedRangesets(result, expected) {
+ t.Errorf("findOverlappingFileRanges() = %v, expected %v", result, expected)
+ }
+
+ case "newUnorderedDataTimeRange":
+ input := tt.input.([]fileTimeRange)
+ expected := tt.expected.(unorderedDataTimeRange)
+ result, err := newUnorderedDataTimeRange(input)
+ if err != nil {
+ t.Fatalf("newUnorderedDataTimeRange() error = %v", err)
+ }
+ if !result.StartTime.Equal(expected.StartTime) {
+ t.Errorf("StartTime = %v, expected %v", result.StartTime, expected.StartTime)
+ }
+ if !result.EndTime.Equal(expected.EndTime) {
+ t.Errorf("EndTime = %v, expected %v", result.EndTime, expected.EndTime)
+ }
+ if result.RowCount != expected.RowCount {
+ t.Errorf("RowCount = %v, expected %v", result.RowCount, expected.RowCount)
+ }
+ }
+ })
+ }
+}
+
+// compareUnorderedRangesets compares two slices of unorderedDataTimeRange, ignoring order
+func compareUnorderedRangesets(actual []unorderedDataTimeRange, expected []unorderedDataTimeRange) bool {
+ if len(actual) != len(expected) {
+ return false
+ }
+
+ // Convert to sets for comparison using time range as key
+ actualSets := make(map[string]unorderedDataTimeRange)
+ expectedSets := make(map[string]unorderedDataTimeRange)
+
+ for _, set := range actual {
+ key := set.StartTime.Format("2006-01-02 15:04:05") + "-" + set.EndTime.Format("2006-01-02 15:04:05")
+ actualSets[key] = set
+ }
+
+ for _, set := range expected {
+ key := set.StartTime.Format("2006-01-02 15:04:05") + "-" + set.EndTime.Format("2006-01-02 15:04:05")
+ expectedSets[key] = set
+ }
+
+ // Check if each set in actual has a matching set in expected
+ for key, actualSet := range actualSets {
+ expectedSet, exists := expectedSets[key]
+ if !exists || !unorderedRangesetsEqual(actualSet, expectedSet) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// unorderedRangesetsEqual compares two unorderedDataTimeRange structs
+func unorderedRangesetsEqual(a, b unorderedDataTimeRange) bool {
+ return a.StartTime.Equal(b.StartTime) && a.EndTime.Equal(b.EndTime) && a.RowCount == b.RowCount
+}
diff --git a/internal/database/partition_pattern.go b/internal/database/partition_pattern.go
new file mode 100644
index 00000000..ddaae33d
--- /dev/null
+++ b/internal/database/partition_pattern.go
@@ -0,0 +1,123 @@
+package database
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/danwakefield/fnmatch"
+ "github.com/turbot/tailpipe/internal/config"
+ "golang.org/x/exp/maps"
+)
+
+// PartitionPattern represents a pattern used to match partitions.
+// It consists of a table pattern and a partition pattern, both of which are
+// used to match a given table and partition name.
+type PartitionPattern struct {
+ Table string
+ Partition string
+}
+
+func NewPartitionPattern(partition *config.Partition) PartitionPattern {
+ return PartitionPattern{
+ Table: partition.TableName,
+ Partition: partition.ShortName,
+ }
+}
+
+// PartitionMatchesPatterns checks if the given table and partition match any of the provided patterns.
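+// e.g. pattern {Table: "aws_*", Partition: "p?"} matches table "aws_s3_cloudtrail_log", partition "p1";
+// an empty pattern list matches everything.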
+func PartitionMatchesPatterns(table, partition string, patterns []*PartitionPattern) bool {
+ if len(patterns) == 0 {
+ return true
+ }
+	// do ANY patterns match
+	for _, pattern := range patterns {
+		if fnmatch.Match(pattern.Table, table, fnmatch.FNM_CASEFOLD) &&
+			fnmatch.Match(pattern.Partition, partition, fnmatch.FNM_CASEFOLD) {
+			return true
+		}
+	}
+	return false
+}
+
+// GetPartitionsForArg returns the actual partition names that match the given argument.
+// The partitionNames list is needed to determine whether a single-part argument refers to a table or partition.
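+// e.g. arg "aws_s3_cloudtrail_log" returns every partition of that table, while arg "p1"
+// returns the partition named p1 in every table.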
+func GetPartitionsForArg(partitionMap map[string]*config.Partition, arg string) ([]string, error) {
+ partitionNames := maps.Keys(partitionMap)
+	partitionPatterns, err := GetPartitionPatternsForArgs(partitionNames, arg)
+	if err != nil {
+		return nil, err
+	}
+	// now match the partition
+	var res []string
+	for _, partition := range partitionMap {
+		if PartitionMatchesPatterns(partition.TableName, partition.ShortName, partitionPatterns) {
+ res = append(res, partition.UnqualifiedName)
+ }
+ }
+ return res, nil
+}
+
+// GetPartitionPatternsForArgs returns the table and partition patterns for the given partition args.
+// The partitions list is needed to determine whether single-part arguments refer to tables or partitions.
+func GetPartitionPatternsForArgs(partitions []string, partitionArgs ...string) ([]*PartitionPattern, error) {
+ var res []*PartitionPattern
+ for _, arg := range partitionArgs {
+ partitionPattern, err := GetPartitionMatchPatternsForArg(partitions, arg)
+ if err != nil {
+ return nil, fmt.Errorf("error processing partition arg '%s': %w", arg, err)
+ }
+
+ res = append(res, partitionPattern)
+ }
+
+ return res, nil
+}
+
+// GetPartitionMatchPatternsForArg parses a single partition argument into a PartitionPattern.
+// The partitions list is needed to determine whether single-part arguments refer to tables or partitions.
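+// e.g. "aws_s3_cloudtrail_log.p1" parses to {Table: "aws_s3_cloudtrail_log", Partition: "p1"}.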
+func GetPartitionMatchPatternsForArg(partitions []string, arg string) (*PartitionPattern, error) {
+ var partitionPattern *PartitionPattern
+ parts := strings.Split(arg, ".")
+ switch len(parts) {
+ case 1:
+ var err error
+ partitionPattern, err = getPartitionPatternsForSinglePartName(partitions, arg)
+ if err != nil {
+ return nil, err
+ }
+ case 2:
+ // use the args as provided
+ partitionPattern = &PartitionPattern{Table: parts[0], Partition: parts[1]}
+ default:
+ return nil, fmt.Errorf("invalid partition name: %s", arg)
+ }
+ return partitionPattern, nil
+}
+
+// getPartitionPatternsForSinglePartName determines whether a single-part argument refers to a table or partition.
+// The partitions list is needed to check if the argument matches any existing table names.
+// e.g. if the arg is "aws*" and it matches table "aws_cloudtrail_log", it's treated as a table pattern.
+func getPartitionPatternsForSinglePartName(partitions []string, arg string) (*PartitionPattern, error) {
+ var tablePattern, partitionPattern string
+ // '*' is not valid for a single part arg
+ if arg == "*" {
+ return nil, fmt.Errorf("invalid partition name: %s", arg)
+ }
+	// check whether there is a table with this name
+	// partitions is a list of unqualified names, i.e. <table>.<partition>
+ for _, partition := range partitions {
+ table := strings.Split(partition, ".")[0]
+
+ // if the arg matches a table name, set table pattern to the arg and partition pattern to *
+ if fnmatch.Match(arg, table, fnmatch.FNM_CASEFOLD) {
+ tablePattern = arg
+ partitionPattern = "*"
+ return &PartitionPattern{Table: tablePattern, Partition: partitionPattern}, nil
+ }
+ }
+	// there is NOT a table with this name - set the table pattern to * and use the arg as the partition pattern
+ tablePattern = "*"
+ partitionPattern = arg
+ return &PartitionPattern{Table: tablePattern, Partition: partitionPattern}, nil
+}
diff --git a/internal/database/partition_pattern_test.go b/internal/database/partition_pattern_test.go
new file mode 100644
index 00000000..81158c1d
--- /dev/null
+++ b/internal/database/partition_pattern_test.go
@@ -0,0 +1,296 @@
+package database
+
+import (
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/turbot/pipe-fittings/v2/modconfig"
+	"github.com/turbot/tailpipe/internal/config"
+)
+
+func Test_getPartition(t *testing.T) {
+ type args struct {
+ partitions []string
+ name string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "Invalid partition name",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "*",
+ },
+ wantErr: true,
+ },
+ {
+ name: "Full partition name, exists",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log.p1",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1"},
+ },
+ {
+ name: "Full partition name, does not exist",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log.p3",
+ },
+ want: nil,
+ },
+ {
+ name: "Table name",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ },
+ {
+ name: "Table name (exists) with wildcard",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log.*",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ },
+ {
+ name: "Table name (exists) with ?",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log.p?",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ },
+ {
+			name: "Table name (exists) with non-matching partition wildcard",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "aws_s3_cloudtrail_log.d*?",
+ },
+ want: nil,
+ },
+ {
+			name: "Table name (does not exist) with wildcard",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ name: "foo.*",
+ },
+ want: nil,
+ },
+ {
+ name: "Partition short name, exists",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "p1",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ },
+ {
+ name: "Table wildcard, partition short name, exists",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "*.p1",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ },
+ {
+ name: "Partition short name, does not exist",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "p3",
+ },
+ want: nil,
+ },
+ {
+ name: "Table wildcard, partition short name, does not exist",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "*.p3",
+ },
+ want: nil,
+ },
+ {
+ name: "Table wildcard, no dot",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ name: "aws*",
+ },
+ want: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1", "aws_elb_access_log.p2"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var partitions = getPartitions(tt.args.partitions)
+
+ got, err := GetPartitionsForArg(partitions, tt.args.name)
+ if (err != nil) != tt.wantErr {
+				t.Errorf("GetPartitionsForArg() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ // sort the slices before comparing
+ sort.Strings(tt.want)
+ sort.Strings(got)
+
+ if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GetPartitionsForArg() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func getPartitions(partitions []string) map[string]*config.Partition {
+ var partitionMap = make(map[string]*config.Partition)
+ for _, p := range partitions {
+ parts := strings.SplitN(p, ".", 2)
+ if len(parts) != 2 {
+ continue
+ }
+ partitionMap[p] = &config.Partition{
+ HclResourceImpl: modconfig.HclResourceImpl{
+ UnqualifiedName: p,
+ ShortName: parts[1],
+ },
+ TableName: parts[0],
+ }
+ }
+ return partitionMap
+}
+
+func Test_getPartitionMatchPatternsForArg(t *testing.T) {
+ type args struct {
+ partitions []string
+ arg string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantTablePattern string
+ wantPartPattern string
+ wantErr bool
+ }{
+ {
+ name: "Valid table and partition pattern",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ arg: "aws_s3_cloudtrail_log.p1",
+ },
+ wantTablePattern: "aws_s3_cloudtrail_log",
+ wantPartPattern: "p1",
+ },
+ {
+ name: "Wildcard partition pattern",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1"},
+ arg: "aws_s3_cloudtrail_log.*",
+ },
+ wantTablePattern: "aws_s3_cloudtrail_log",
+ wantPartPattern: "*",
+ },
+ {
+ name: "Wildcard in table and partition both",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1"},
+ arg: "aws*.*",
+ },
+ wantTablePattern: "aws*",
+ wantPartPattern: "*",
+ },
+ {
+ name: "Wildcard table pattern",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ arg: "*.p1",
+ },
+ wantTablePattern: "*",
+ wantPartPattern: "p1",
+ },
+ {
+ name: "Invalid partition name",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ arg: "*",
+ },
+ wantErr: true,
+ },
+ {
+ name: "Table exists without partition",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"},
+ arg: "aws_s3_cloudtrail_log",
+ },
+ wantTablePattern: "aws_s3_cloudtrail_log",
+ wantPartPattern: "*",
+ },
+ {
+ name: "Partition only, multiple tables",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"},
+ arg: "p1",
+ },
+ wantTablePattern: "*",
+ wantPartPattern: "p1",
+ },
+ {
+ name: "Invalid argument with multiple dots",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1"},
+ arg: "aws.s3.cloudtrail",
+ },
+ wantErr: true,
+ },
+ {
+ name: "Non-existing table name",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1"},
+ arg: "non_existing_table.p1",
+ },
+ wantTablePattern: "non_existing_table",
+ wantPartPattern: "p1",
+ },
+ {
+ name: "Partition name does not exist",
+ args: args{
+ partitions: []string{"aws_s3_cloudtrail_log.p1"},
+ arg: "p2",
+ },
+ wantTablePattern: "*",
+ wantPartPattern: "p2",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ partitionPattern, err := GetPartitionMatchPatternsForArg(tt.args.partitions, tt.args.arg)
+
+			if (err != nil) != tt.wantErr {
+				t.Errorf("GetPartitionMatchPatternsForArg() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if err != nil {
+				// wanted error - nothing more to check
+				return
+			}
+
+ gotTablePattern := partitionPattern.Table
+ gotPartPattern := partitionPattern.Partition
+ if gotTablePattern != tt.wantTablePattern {
+ t.Errorf("GetPartitionMatchPatternsForArg() gotTablePattern = %v, want %v", gotTablePattern, tt.wantTablePattern)
+ }
+ if gotPartPattern != tt.wantPartPattern {
+ t.Errorf("GetPartitionMatchPatternsForArg() gotPartPattern = %v, want %v", gotPartPattern, tt.wantPartPattern)
+ }
+ })
+ }
+}
diff --git a/internal/database/partitions.go b/internal/database/partitions.go
deleted file mode 100644
index 9d6f25c4..00000000
--- a/internal/database/partitions.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package database
-
-import (
- "context"
- "fmt"
-
- "github.com/turbot/tailpipe/internal/config"
- "github.com/turbot/tailpipe/internal/filepaths"
-)
-
-// ListPartitions uses DuckDB to build a list of all partitions for all tables
-func ListPartitions(ctx context.Context) ([]string, error) {
- // Hive format is table, partition, index, date
-
- dataDir := config.GlobalWorkspaceProfile.GetDataDir()
- if dataDir == "" {
- return nil, fmt.Errorf("data directory is not set")
- }
- // TODO KAI handle no partitions
-
- // Build DuckDB query to get the names of all partitions underneath data dir
- parquetPath := filepaths.GetParquetFileGlobForTable(dataDir, "*", "")
- query := `select distinct tp_table || '.' || tp_partition from read_parquet('` + parquetPath + `', hive_partitioning=true)`
-
- // Open DuckDB in-memory database
- db, err := NewDuckDb()
- if err != nil {
- return nil, fmt.Errorf("failed to open DuckDB: %v", err)
- }
- defer db.Close()
-
- rows, err := db.QueryContext(ctx, query)
- if err != nil {
- return nil, fmt.Errorf("failed to execute query: %v", err)
- }
- defer rows.Close()
-
- var partitions []string
- for rows.Next() {
- var partition string
- if err := rows.Scan(&partition); err != nil {
- return nil, fmt.Errorf("failed to scan row: %v", err)
- }
- partitions = append(partitions, partition)
- }
-
- if err := rows.Err(); err != nil {
- return nil, fmt.Errorf("error iterating rows: %v", err)
- }
-
- return partitions, nil
-}
diff --git a/internal/parquet/convertor_schema.go b/internal/database/read_json_query.go
similarity index 74%
rename from internal/parquet/convertor_schema.go
rename to internal/database/read_json_query.go
index a8851ddb..0ff2e1cc 100644
--- a/internal/parquet/convertor_schema.go
+++ b/internal/database/read_json_query.go
@@ -1,4 +1,4 @@
-package parquet
+package database
import (
"fmt"
@@ -8,29 +8,28 @@ import (
"github.com/turbot/go-kit/helpers"
"github.com/turbot/tailpipe-plugin-sdk/constants"
"github.com/turbot/tailpipe-plugin-sdk/schema"
+ "github.com/turbot/tailpipe/internal/config"
)
-// buildViewQuery builds a format string used to construct the conversion query which reads from the source ndjson file
-/*
-select
- as