diff --git a/.github/workflows/11-test-acceptance.yaml b/.github/workflows/11-test-acceptance.yaml index 12fdd916..5f3919b9 100644 --- a/.github/workflows/11-test-acceptance.yaml +++ b/.github/workflows/11-test-acceptance.yaml @@ -29,7 +29,7 @@ jobs: with: repository: turbot/pipe-fittings path: pipe-fittings - ref: tp-pragma + ref: tp - name: Checkout Tailpipe plugin SDK repository uses: actions/checkout@v4 diff --git a/cmd/collect.go b/cmd/collect.go index 515ce526..73ad1404 100644 --- a/cmd/collect.go +++ b/cmd/collect.go @@ -189,38 +189,47 @@ func getPartitions(args []string) ([]*config.Partition, error) { } func getPartitionsForArg(partitions []string, arg string) ([]string, error) { + tablePattern, partitionPattern, err := getPartitionMatchPatternsForArg(partitions, arg) + if err != nil { + return nil, err + } + // now match the partition + var res []string + for _, partition := range partitions { + pattern := tablePattern + "." + partitionPattern + if fnmatch.Match(pattern, partition, fnmatch.FNM_CASEFOLD) { + res = append(res, partition) + } + } + return res, nil +} + +func getPartitionMatchPatternsForArg(partitions []string, arg string) (string, string, error) { var tablePattern, partitionPattern string parts := strings.Split(arg, ".") switch len(parts) { case 1: var err error - tablePattern, partitionPattern, err = getPartitionMatchPatterns(partitions, arg, parts) + tablePattern, partitionPattern, err = getPartitionMatchPatternsForSinglePartName(partitions, arg) if err != nil { - return nil, err + return "", "", err } case 2: // use the args as provided tablePattern = parts[0] partitionPattern = parts[1] default: - return nil, fmt.Errorf("invalid partition name: %s", arg) - } - - // now match the partition - var res []string - for _, partition := range partitions { - pattern := tablePattern + "." 
+ partitionPattern - if fnmatch.Match(pattern, partition, fnmatch.FNM_CASEFOLD) { - res = append(res, partition) - } + return "", "", fmt.Errorf("invalid partition name: %s", arg) } - return res, nil + return tablePattern, partitionPattern, nil } -func getPartitionMatchPatterns(partitions []string, arg string, parts []string) (string, string, error) { +// getPartitionMatchPatternsForSinglePartName returns the table and partition patterns for a single part name +// e.g. if the arg is "aws*" +func getPartitionMatchPatternsForSinglePartName(partitions []string, arg string) (string, string, error) { var tablePattern, partitionPattern string // '*' is not valid for a single part arg - if parts[0] == "*" { + if arg == "*" { return "", "", fmt.Errorf("invalid partition name: %s", arg) } // check whether there is table with this name @@ -237,7 +246,7 @@ func getPartitionMatchPatterns(partitions []string, arg string, parts []string) } // so there IS NOT a table with this name - set table pattern to * and user provided partition name tablePattern = "*" - partitionPattern = parts[0] + partitionPattern = arg return tablePattern, partitionPattern, nil } diff --git a/cmd/collect_test.go b/cmd/collect_test.go index 014b5232..725c27dc 100644 --- a/cmd/collect_test.go +++ b/cmd/collect_test.go @@ -134,3 +134,122 @@ func Test_getPartition(t *testing.T) { }) } } + +func Test_getPartitionMatchPatternsForArg(t *testing.T) { + type args struct { + partitions []string + arg string + } + tests := []struct { + name string + args args + wantTablePattern string + wantPartPattern string + wantErr bool + }{ + { + name: "Valid table and partition pattern", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"}, + arg: "aws_s3_cloudtrail_log.p1", + }, + wantTablePattern: "aws_s3_cloudtrail_log", + wantPartPattern: "p1", + }, + { + name: "Wildcard partition pattern", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", 
"aws_elb_access_log.p1"}, + arg: "aws_s3_cloudtrail_log.*", + }, + wantTablePattern: "aws_s3_cloudtrail_log", + wantPartPattern: "*", + }, + { + name: "Wildcard in table and partition both", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2", "aws_elb_access_log.p1"}, + arg: "aws*.*", + }, + wantTablePattern: "aws*", + wantPartPattern: "*", + }, + { + name: "Wildcard table pattern", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"}, + arg: "*.p1", + }, + wantTablePattern: "*", + wantPartPattern: "p1", + }, + { + name: "Invalid partition name", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"}, + arg: "*", + }, + wantErr: true, + }, + { + name: "Table exists without partition", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_s3_cloudtrail_log.p2"}, + arg: "aws_s3_cloudtrail_log", + }, + wantTablePattern: "aws_s3_cloudtrail_log", + wantPartPattern: "*", + }, + { + name: "Partition only, multiple tables", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1", "aws_elb_access_log.p1"}, + arg: "p1", + }, + wantTablePattern: "*", + wantPartPattern: "p1", + }, + { + name: "Invalid argument with multiple dots", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1"}, + arg: "aws.s3.cloudtrail", + }, + wantErr: true, + }, + { + name: "Non-existing table name", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1"}, + arg: "non_existing_table.p1", + }, + wantTablePattern: "non_existing_table", + wantPartPattern: "p1", + }, + { + name: "Partition name does not exist", + args: args{ + partitions: []string{"aws_s3_cloudtrail_log.p1"}, + arg: "p2", + }, + wantTablePattern: "*", + wantPartPattern: "p2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotTablePattern, gotPartPattern, err := getPartitionMatchPatternsForArg(tt.args.partitions, tt.args.arg) + if (err != nil) != 
tt.wantErr { + t.Errorf("getPartitionMatchPatternsForArg() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotTablePattern != tt.wantTablePattern { + t.Errorf("getPartitionMatchPatternsForArg() gotTablePattern = %v, want %v", gotTablePattern, tt.wantTablePattern) + } + if gotPartPattern != tt.wantPartPattern { + t.Errorf("getPartitionMatchPatternsForArg() gotPartPattern = %v, want %v", gotPartPattern, tt.wantPartPattern) + } + }) + } +} diff --git a/cmd/connect.go b/cmd/connect.go index 037c5741..f45ba2c8 100644 --- a/cmd/connect.go +++ b/cmd/connect.go @@ -26,6 +26,7 @@ import ( "github.com/turbot/tailpipe/internal/constants" "github.com/turbot/tailpipe/internal/database" "github.com/turbot/tailpipe/internal/filepaths" + "golang.org/x/exp/maps" ) // variable used to assign the output mode flag @@ -50,6 +51,8 @@ func connectCmd() *cobra.Command { cmdconfig.OnCmd(cmd). AddStringFlag(pconstants.ArgFrom, "", "Specify the start time"). AddStringFlag(pconstants.ArgTo, "", "Specify the end time"). + AddStringSliceFlag(pconstants.ArgIndex, nil, "Specify the index to use"). + AddStringSliceFlag(pconstants.ArgPartition, nil, "Specify the partition to use"). 
AddVarFlag(enumflag.New(&connectOutputMode, pconstants.ArgOutput, constants.ConnectOutputModeIds, enumflag.EnumCaseInsensitive), pconstants.ArgOutput, fmt.Sprintf("Output format; one of: %s", strings.Join(constants.FlagValues(constants.PluginOutputModeIds), ", "))) @@ -167,6 +170,26 @@ func getFilters() ([]string, error) { toTimestamp := t.Format(time.DateTime) result = append(result, fmt.Sprintf("tp_date <= DATE '%s' AND tp_timestamp <= TIMESTAMP '%s'", toDate, toTimestamp)) } + if viper.IsSet(pconstants.ArgPartition) { + // we have loaded tailpipe config by this time + availablePartitions := config.GlobalConfig.Partitions + partitionArgs := viper.GetStringSlice(pconstants.ArgPartition) + // get the SQL filters from the provided partition + sqlFilters, err := getPartitionSqlFilters(partitionArgs, maps.Keys(availablePartitions)) + if err != nil { + return nil, err + } + result = append(result, sqlFilters) + } + if viper.IsSet(pconstants.ArgIndex) { + indexArgs := viper.GetStringSlice(pconstants.ArgIndex) + // get the SQL filters from the provided index + sqlFilters, err := getIndexSqlFilters(indexArgs) + if err != nil { + return nil, err + } + result = append(result, sqlFilters) + } return result, nil } @@ -275,3 +298,122 @@ func cleanupOldDbFiles() error { } return nil } + +func getPartitionSqlFilters(partitionArgs []string, availablePartitions []string) (string, error) { + // Get table and partition patterns using getPartitionPatterns + tablePatterns, partitionPatterns, err := getPartitionPatterns(partitionArgs, availablePartitions) + if err != nil { + return "", fmt.Errorf("error processing partition args: %w", err) + } + + // Handle the case when patterns are empty + if len(tablePatterns) == 0 || len(partitionPatterns) == 0 { + return "", nil + } + + // Replace wildcards from '*' to '%' for SQL compatibility + updatedTables, updatedPartitions := replaceWildcards(tablePatterns, partitionPatterns) + + var conditions []string + + for i := 0; i < 
len(updatedTables); i++ { + table := updatedTables[i] + partition := updatedPartitions[i] + + var tableCondition, partitionCondition string + + // If there is no wildcard, use '=' instead of LIKE + if table == "%" { + // Skip table condition if full wildcard + tableCondition = "" + } else if strings.Contains(table, "%") { + tableCondition = fmt.Sprintf("tp_table LIKE '%s'", table) + } else { + tableCondition = fmt.Sprintf("tp_table = '%s'", table) + } + + if partition == "%" { + // Skip partition condition if full wildcard + partitionCondition = "" + } else if strings.Contains(partition, "%") { + partitionCondition = fmt.Sprintf("tp_partition LIKE '%s'", partition) + } else { + partitionCondition = fmt.Sprintf("tp_partition = '%s'", partition) + } + + // Remove empty conditions and combine valid ones + if tableCondition != "" && partitionCondition != "" { + conditions = append(conditions, fmt.Sprintf("(%s AND %s)", tableCondition, partitionCondition)) + } else if tableCondition != "" { + conditions = append(conditions, tableCondition) + } else if partitionCondition != "" { + conditions = append(conditions, partitionCondition) + } + } + + // Combine all conditions with OR + sqlFilters := strings.Join(conditions, " OR ") + + return sqlFilters, nil +} + +func getIndexSqlFilters(indexArgs []string) (string, error) { + // Return empty if no indexes provided + if len(indexArgs) == 0 { + return "", nil + } + + // Build SQL filter based on whether wildcards are present + var conditions []string + for _, index := range indexArgs { + if index == "*" { + // Skip index condition if full wildcard + conditions = append(conditions, "") + } else if strings.Contains(index, "*") { + // Replace '*' wildcard with '%' for SQL LIKE compatibility + index = strings.ReplaceAll(index, "*", "%") + conditions = append(conditions, fmt.Sprintf("CAST(tp_index AS VARCHAR) LIKE '%s'", index)) + } else { + // Exact match using '=' + conditions = append(conditions, fmt.Sprintf("tp_index = '%s'", 
index)) + } + } + + // Combine all conditions with OR + sqlFilter := strings.Join(conditions, " OR ") + + return sqlFilter, nil +} + +// getPartitionPatterns returns the table and partition patterns for the given partition args +func getPartitionPatterns(partitionArgs []string, partitions []string) ([]string, []string, error) { + var tablePatterns []string + var partitionPatterns []string + + for _, arg := range partitionArgs { + tablePattern, partitionPattern, err := getPartitionMatchPatternsForArg(partitions, arg) + if err != nil { + return nil, nil, fmt.Errorf("error processing partition arg '%s': %w", arg, err) + } + + tablePatterns = append(tablePatterns, tablePattern) + partitionPatterns = append(partitionPatterns, partitionPattern) + } + + return tablePatterns, partitionPatterns, nil +} + +func replaceWildcards(tablePatterns []string, partitionPatterns []string) ([]string, []string) { + updatedTables := make([]string, len(tablePatterns)) + updatedPartitions := make([]string, len(partitionPatterns)) + + for i, table := range tablePatterns { + updatedTables[i] = strings.ReplaceAll(table, "*", "%") + } + + for i, partition := range partitionPatterns { + updatedPartitions[i] = strings.ReplaceAll(partition, "*", "%") + } + + return updatedTables, updatedPartitions +} diff --git a/cmd/connect_test.go b/cmd/connect_test.go new file mode 100644 index 00000000..7c1138f0 --- /dev/null +++ b/cmd/connect_test.go @@ -0,0 +1,194 @@ +package cmd + +import ( + "testing" +) + +func Test_getPartitionSqlFilters(t *testing.T) { + tests := []struct { + name string + partitions []string + args []string + wantFilters string + wantErr bool + }{ + { + name: "Basic partition filters with wildcard", + partitions: []string{ + "aws_cloudtrail_log.p1", + "aws_cloudtrail_log.p2", + "github_audit_log.p1", + }, + args: []string{"aws_cloudtrail_log.*", "github_audit_log.p1"}, + wantFilters: "tp_table = 'aws_cloudtrail_log' OR " + + "(tp_table = 'github_audit_log' AND tp_partition = 'p1')", + 
wantErr: false, + }, + { + name: "Wildcard in table and exact partition", + partitions: []string{ + "aws_cloudtrail_log.p1", + "sys_logs.p2", + }, + args: []string{"aws*.p1", "sys_logs.*"}, + wantFilters: "(tp_table LIKE 'aws%' AND tp_partition = 'p1') OR " + + "tp_table = 'sys_logs'", + wantErr: false, + }, + { + name: "Exact table and partition", + partitions: []string{ + "aws_cloudtrail_log.p1", + }, + args: []string{"aws_cloudtrail_log.p1"}, + wantFilters: "(tp_table = 'aws_cloudtrail_log' AND tp_partition = 'p1')", + wantErr: false, + }, + { + name: "Partition with full wildcard", + partitions: []string{ + "aws_cloudtrail_log.p1", + }, + args: []string{"aws_cloudtrail_log.*"}, + wantFilters: "tp_table = 'aws_cloudtrail_log'", + wantErr: false, + }, + { + name: "Table with full wildcard", + partitions: []string{ + "aws_cloudtrail_log.p1", + }, + args: []string{"*.p1"}, + wantFilters: "tp_partition = 'p1'", + wantErr: false, + }, + { + name: "Both table and partition with full wildcards", + partitions: []string{ + "aws_cloudtrail_log.p1", + }, + args: []string{"*.*"}, + wantFilters: "", + wantErr: false, + }, + { + name: "Empty input", + partitions: []string{"aws_cloudtrail_log.p1"}, + args: []string{}, + wantFilters: "", + wantErr: false, + }, + { + name: "Multiple wildcards in table and partition", + partitions: []string{ + "aws_cloudtrail_log.p1", + "sys_logs.p2", + }, + args: []string{"aws*log.p*"}, + wantFilters: "(tp_table LIKE 'aws%log' AND tp_partition LIKE 'p%')", + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotFilters, err := getPartitionSqlFilters(tt.args, tt.partitions) + if (err != nil) != tt.wantErr { + t.Errorf("getPartitionSqlFilters() name = %s error = %v, wantErr %v", tt.name, err, tt.wantErr) + return + } + if gotFilters != tt.wantFilters { + t.Errorf("getPartitionSqlFilters() name = %s got = %v, want %v", tt.name, gotFilters, tt.wantFilters) + } + }) + } +} + +func Test_getIndexSqlFilters(t 
*testing.T) { + tests := []struct { + name string + indexArgs []string + wantFilters string + wantErr bool + }{ + { + name: "Multiple indexes with wildcards and exact values", + indexArgs: []string{"1234*", "456789012345", "98*76"}, + wantFilters: "CAST(tp_index AS VARCHAR) LIKE '1234%' OR " + + "tp_index = '456789012345' OR " + + "CAST(tp_index AS VARCHAR) LIKE '98%76'", + wantErr: false, + }, + { + name: "Single index with wildcard", + indexArgs: []string{"12345678*"}, + wantFilters: "CAST(tp_index AS VARCHAR) LIKE '12345678%'", + wantErr: false, + }, + { + name: "No input provided", + indexArgs: []string{}, + wantFilters: "", + wantErr: false, + }, + { + name: "Fully wildcarded index", + indexArgs: []string{"*"}, + wantFilters: "", + wantErr: false, + }, + { + name: "Exact numeric index", + indexArgs: []string{"123456789012"}, + wantFilters: "tp_index = '123456789012'", + wantErr: false, + }, + { + name: "Mixed patterns", + indexArgs: []string{"12*", "3456789", "9*76"}, + wantFilters: "CAST(tp_index AS VARCHAR) LIKE '12%' OR " + + "tp_index = '3456789' OR " + + "CAST(tp_index AS VARCHAR) LIKE '9%76'", + wantErr: false, + }, + { + name: "Multiple exact values", + indexArgs: []string{"123456789012", "987654321098"}, + wantFilters: "tp_index = '123456789012' OR tp_index = '987654321098'", + wantErr: false, + }, + { + name: "Leading and trailing spaces in exact value", + indexArgs: []string{" 123456789012 "}, + wantFilters: "tp_index = ' 123456789012 '", // Spaces preserved + wantErr: false, + }, + { + name: "Combination of wildcards and exact values", + indexArgs: []string{"*456*", "1234", "98*76"}, + wantFilters: "CAST(tp_index AS VARCHAR) LIKE '%456%' OR " + + "tp_index = '1234' OR " + + "CAST(tp_index AS VARCHAR) LIKE '98%76'", + wantErr: false, + }, + { + name: "Empty string as index", + indexArgs: []string{""}, + wantFilters: "tp_index = ''", + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotFilters, err := 
getIndexSqlFilters(tt.indexArgs) + if (err != nil) != tt.wantErr { + t.Errorf("getIndexSqlFilters() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotFilters != tt.wantFilters { + t.Errorf("getIndexSqlFilters() got = %v, want %v", gotFilters, tt.wantFilters) + } + }) + } +} diff --git a/cmd/query.go b/cmd/query.go index 36fc1a5a..fcf4d82a 100644 --- a/cmd/query.go +++ b/cmd/query.go @@ -43,6 +43,8 @@ func queryCmd() *cobra.Command { fmt.Sprintf("Output format; one of: %s", strings.Join(constants.FlagValues(constants.QueryOutputModeIds), ", "))). AddStringFlag(pconstants.ArgFrom, "", "Specify the start time"). AddStringFlag(pconstants.ArgTo, "", "Specify the end time"). + AddStringSliceFlag(pconstants.ArgIndex, nil, "Specify the index to use"). + AddStringSliceFlag(pconstants.ArgPartition, nil, "Specify the partition to use"). AddBoolFlag(pconstants.ArgHeader, true, "Include column headers csv and table output"). AddStringFlag(pconstants.ArgSeparator, ",", "Separator string for csv output") diff --git a/go.mod b/go.mod index be4c739f..b1808035 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/alecthomas/chroma v0.10.0 github.com/briandowns/spinner v1.23.0 github.com/c-bata/go-prompt v0.0.0-00010101000000-000000000000 - github.com/charmbracelet/bubbles v0.20.0 github.com/charmbracelet/bubbletea v1.2.4 github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 github.com/dustin/go-humanize v1.0.1 @@ -90,7 +89,6 @@ require ( github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/btubbs/datetime v0.1.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/charmbracelet/harmonica v0.2.0 // indirect github.com/charmbracelet/lipgloss v1.0.0 // indirect github.com/charmbracelet/x/ansi v0.4.5 // indirect github.com/charmbracelet/x/term v0.2.1 // indirect diff --git a/go.sum b/go.sum index 37d03719..d81890d0 100644 --- a/go.sum +++ b/go.sum @@ -267,12 +267,8 @@ github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= -github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE= github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM= -github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= -github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg= github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo= github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM= diff --git a/internal/collector/tui.go b/internal/collector/tui.go index 6482896c..35e8c6b1 100644 --- a/internal/collector/tui.go +++ b/internal/collector/tui.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/charmbracelet/bubbletea" + tea "github.com/charmbracelet/bubbletea" "github.com/dustin/go-humanize" "github.com/turbot/pipe-fittings/utils" diff --git a/internal/parquet/delete.go b/internal/parquet/delete.go index ed88703e..af136855 100644 --- a/internal/parquet/delete.go +++ b/internal/parquet/delete.go @@ -3,12 +3,13 @@ package parquet import ( "database/sql" "fmt" - "github.com/turbot/tailpipe/internal/config" - "github.com/turbot/tailpipe/internal/filepaths" "log/slog" "os" "strings" "time" + + "github.com/turbot/tailpipe/internal/filepaths" + "github.com/turbot/tailpipe/internal/config" ) func DeleteParquetFiles(partition 
*config.Partition, from time.Time) (int, error) { @@ -45,7 +46,7 @@ func DeleteParquetFiles(partition *config.Partition, from time.Time) (int, error func deletePartitionFrom(db *sql.DB, dataDir string, partition *config.Partition, from time.Time) (_ int, err error) { parquetGlobPath := filepaths.GetParquetFileGlobForPartition(dataDir, partition.TableName, partition.ShortName, "") - // TODO verify for SQL injection - c an we use params + //nolint:gosec // TODO verify for SQL injection - can we use params query := fmt.Sprintf(` SELECT DISTINCT '%s/tp_table=' || tp_table || '/tp_partition=' || tp_partition || '/tp_index=' || tp_index || '/tp_date=' || tp_date AS hive_path, diff --git a/tests/acceptance/test_files/from_and_to.bats b/tests/acceptance/test_files/from_and_to.bats index 0b89a988..6eeb4dc2 100644 --- a/tests/acceptance/test_files/from_and_to.bats +++ b/tests/acceptance/test_files/from_and_to.bats @@ -11,7 +11,7 @@ partition "chaos_date_time" "date_time_inc" { EOF # tailpipe collect - tailpipe collect chaos_date_time.date_time_inc + tailpipe collect chaos_date_time.date_time_inc --from="2006-01-02" # run tailpipe query with --from and verify the timestamps run tailpipe query "select tp_timestamp from chaos_date_time order by tp_timestamp asc" --output csv --from="2007-01-25" @@ -37,7 +37,7 @@ partition "chaos_date_time" "date_time_inc" { EOF # tailpipe collect - tailpipe collect chaos_date_time.date_time_inc + tailpipe collect chaos_date_time.date_time_inc --from="2006-01-02" # run tailpipe query with --from and verify the timestamps run tailpipe query "select tp_timestamp from chaos_date_time order by tp_timestamp asc" --output csv --from="2007-01-25T15:04:05" @@ -63,7 +63,7 @@ partition "chaos_date_time" "date_time_inc" { EOF # tailpipe collect - tailpipe collect chaos_date_time.date_time_inc + tailpipe collect chaos_date_time.date_time_inc --from="2006-01-02" # run tailpipe query with --from and verify the timestamps run tailpipe query "select 
tp_timestamp from chaos_date_time order by tp_timestamp asc" --output csv --from="2007-01-25T15:04:05.000" @@ -89,7 +89,7 @@ partition "chaos_date_time" "date_time_inc" { EOF # tailpipe collect - tailpipe collect chaos_date_time.date_time_inc + tailpipe collect chaos_date_time.date_time_inc --from="2006-01-02" # run tailpipe query with --from and verify the timestamps run tailpipe query "select tp_timestamp from chaos_date_time order by tp_timestamp asc" --output csv --from="2007-01-25T15:04:05Z" @@ -116,7 +116,7 @@ partition "chaos_date_time" "date_time_inc" { EOF # tailpipe collect - tailpipe collect chaos_date_time.date_time_inc + tailpipe collect chaos_date_time.date_time_inc --from="2006-01-02" # run tailpipe query with --from and verify the timestamps run tailpipe query "select tp_timestamp from chaos_date_time order by tp_timestamp asc" --output csv --from="T-18Y"