diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 100644 index 00000000..6ca240cb --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,4 @@ +[advisories] +ignore = [ + "RUSTSEC-2024-0437" # in protobuf via prometheus, but we're not using proto so it shouldn't be an issue +] diff --git a/.envrc b/.envrc index a8ff4b71..7a32a50f 100644 --- a/.envrc +++ b/.envrc @@ -1,3 +1,10 @@ # this line sources your `.envrc.local` file source_env_if_exists .envrc.local + +# Install nix-direnv which provides significantly faster Nix integration +if ! has nix_direnv_version || ! nix_direnv_version 3.0.5; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.5/direnvrc" "sha256-RuwIS+QKFj/T9M2TFXScjBsLR6V3A17YVoEW/Q6AZ1w=" +fi + +# Apply the devShell configured in flake.nix use flake diff --git a/.github/ISSUE_TEMPLATE/native-query.md b/.github/ISSUE_TEMPLATE/native-query.md new file mode 100644 index 00000000..2a425eb5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/native-query.md @@ -0,0 +1,47 @@ +--- +name: Native Query Support +about: Report problems generating native query configurations using the CLI +title: "[Native Query]" +labels: native query +--- + + + +### Connector version + + + +### What form of error did you see? + + + +- [ ] Type inference is not currently implemented for stage / query predicate operator / aggregation operator +- [ ] Cannot infer types for this pipeline +- [ ] Type mismatch +- [ ] Could not read aggregation pipeline +- [ ] other error +- [ ] I have feedback that does not relate to a specific error + +### Error or feedback details + + + +### What did you want to happen? + + + +### Command and pipeline + + + +### Schema + + + + + + + +### Other details + + diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index f5e939aa..22624963 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -7,42 +7,12 @@ on: - 'v*' jobs: - binary: - name: deploy::binary - runs-on: ubuntu-latest - steps: - - name: Checkout 🛎️ - uses: actions/checkout@v3 - - - name: Install Nix ❄ - uses: DeterminateSystems/nix-installer-action@v4 - - - name: Link Cachix 🔌 - uses: cachix/cachix-action@v12 - with: - name: '${{ vars.CACHIX_CACHE_NAME }}' - authToken: '${{ secrets.CACHIX_CACHE_AUTH_TOKEN }}' - - - name: Login to GitHub Container Registry 📦 - uses: docker/login-action@v1 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: build the crate using nix 🔨 - run: nix build --print-build-logs - - - name: Create release 🚀 - uses: actions/upload-artifact@v3 - with: - name: mongodb-connector - path: result/bin/mongodb-connector - docker: name: deploy::docker - needs: binary - runs-on: ubuntu-latest + + # This job doesn't work as written on ubuntu-24.04. 
The problem is described + # in this issue: https://github.com/actions/runner-images/issues/10443 + runs-on: ubuntu-22.04 steps: - name: Checkout 🛎️ uses: actions/checkout@v3 @@ -70,7 +40,7 @@ jobs: # For now, only run on tagged releases because main builds generate a Docker image tag name that # is not easily accessible here if: ${{ startsWith(github.ref, 'refs/tags/v') }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -88,16 +58,59 @@ jobs: path: ./connector-definition/dist/connector-definition.tgz compression-level: 0 # Already compressed + # Builds with nix for simplicity + build-connector-binaries: + name: build the connector binaries + strategy: + matrix: + include: + - target: x86_64-linux + - target: aarch64-linux + runs-on: ubuntu-24.04 + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v3 + + - name: Install Nix ❄ + uses: DeterminateSystems/nix-installer-action@v4 + + - name: Link Cachix 🔌 + uses: cachix/cachix-action@v12 + with: + name: '${{ vars.CACHIX_CACHE_NAME }}' + authToken: '${{ secrets.CACHIX_CACHE_AUTH_TOKEN }}' + + - name: Login to GitHub Container Registry 📦 + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build statically-linked binary 🔨 + run: | + nix build --print-build-logs .#mongodb-connector-${{ matrix.target }} + mkdir -p release + cp result/bin/mongodb-connector release/mongodb-connector-${{ matrix.target }} + + - name: Upload binaries to workflow artifacts 🚀 + uses: actions/upload-artifact@v4 + with: + name: mongodb-connector-${{ matrix.target }} + path: release + if-no-files-found: error + + # Builds without nix to get Windows binaries build-cli-binaries: name: build the CLI binaries strategy: matrix: include: - - runner: ubuntu-latest + - runner: ubuntu-24.04 target: x86_64-unknown-linux-musl rustflags: -C target-feature=+crt-static linux-packages: musl-tools - - runner: ubuntu-latest + - runner: ubuntu-24.04 target: aarch64-unknown-linux-musl rustflags: -C target-feature=+crt-static linux-packages: gcc-aarch64-linux-gnu musl-tools @@ -184,8 +197,9 @@ jobs: needs: - docker - connector-definition + - build-connector-binaries - build-cli-binaries - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: ${{ startsWith(github.ref, 'refs/tags/v') }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 08be8b15..3583317e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,7 +10,7 @@ on: jobs: tests: name: Tests - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout 🛎️ uses: actions/checkout@v3 @@ -30,8 +30,24 @@ jobs: - name: run linter checks with clippy 🔨 run: nix build .#checks.x86_64-linux.lint --print-build-logs - - name: audit for reported security problems 🔨 - run: nix build .#checks.x86_64-linux.audit --print-build-logs - - name: run integration tests 📋 run: nix develop --command just test-mongodb-versions + + audit: + name: Security Audit + runs-on: ubuntu-24.04 + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v3 + + - name: Install Nix ❄ + uses: DeterminateSystems/nix-installer-action@v4 + + - name: Link Cachix 🔌 + uses: cachix/cachix-action@v12 + with: + name: '${{ vars.CACHIX_CACHE_NAME }}' + authToken: '${{ secrets.CACHIX_CACHE_AUTH_TOKEN }}' + + - name: audit for reported security problems 🔨 + run: nix develop --command cargo audit diff --git a/.gitignore b/.gitignore index 9bbaa564..bd97b4fb 100644 --- 
a/.gitignore +++ b/.gitignore @@ -3,6 +3,9 @@ debug/ target/ +.cargo/* +!.cargo/audit.toml + # These are backup files generated by rustfmt **/*.rs.bk diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dde5ad7..3cca308d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,481 @@ This changelog documents the changes between release versions. ## [Unreleased] +### Added + +- You can now group documents for aggregation according to multiple grouping criteria ([#144](https://github.com/hasura/ndc-mongodb/pull/144), [#145](https://github.com/hasura/ndc-mongodb/pull/145)) + +### Changed + +- **BREAKING:** Update to ndc-spec v0.2 ([#139](https://github.com/hasura/ndc-mongodb/pull/139)) +- **BREAKING:** Remove custom count aggregation - use standard count instead ([#144](https://github.com/hasura/ndc-mongodb/pull/144)) +- Results for `avg` and `sum` aggregations are coerced to consistent result types ([#144](https://github.com/hasura/ndc-mongodb/pull/144)) + +#### ndc-spec v0.2 + +This database connector communicates with the GraphQL Engine using an IR +described by [ndc-spec](https://hasura.github.io/ndc-spec/). Version 0.2 makes +a number of improvements to the spec, and enables features that were previously +not possible. Highlights of those new features include: + +- relationships can use a nested object field on the target side as a join key +- grouping result documents, and aggregating on groups of documents +- queries on fields of nested collections (document fields that are arrays of objects) +- filtering on scalar values inside array document fields - previously it was possible to filter on fields of objects inside arrays, but not on scalars + +For more details on what has changed in the spec see [the +changelog](https://hasura.github.io/ndc-spec/specification/changelog.html#020). + +Use of the new spec requires a version of GraphQL Engine that supports ndc-spec +v0.2, and there are required metadata changes. + +#### Removed custom count aggregation + +Previously there were two options for getting document counts named `count` and +`_count`. These did the same thing. `count` has been removed - use `_count` +instead. + +#### Results for `avg` and `sum` aggregations are coerced to consistent result types + +This change is required for compliance with ndc-spec. + +Results for `avg` are always coerced to `double`. + +Results for `sum` are coerced to `double` if the summed inputs use a fractional +numeric type, or to `long` if inputs use an integral numeric type. + +### Changed + +## [1.8.1] - 2025-06-04 + +### Fixed + +- Include TLS root certificates in docker images to fix connections to otel collectors ([#167](https://github.com/hasura/ndc-mongodb/pull/167)) + +#### Root certificates + +Connections to MongoDB use the Rust MongoDB driver, which uses rust-tls, which bundles its own root certificate store. +So there was no problem connecting to MongoDB over TLS. But the connector's OpenTelemetry library uses openssl instead +of rust-tls, and openssl requires a separate certificate store to be installed. So this release fixes connections to +OpenTelemetry collectors over https. + +## [1.8.0] - 2025-04-25 + +### Added + +- Add option to skip rows on response type mismatch ([#162](https://github.com/hasura/ndc-mongodb/pull/162)) + +#### Option to skip rows on response type mismatch + +When sending response data for a query if we encounter a value that does not match the type declared in the connector +schema the default behavior is to respond with an error. 
That prevents the user from getting any data. This change adds
+an option to silently skip rows that contain type mismatches so that the user can get a partial set of result data.
+
+This can come up if, for example, you have database documents with a field that nearly always contains an `int` value,
+but in a handful of cases that field contains a `string`. Introspection may determine that the type of the field is
+`int` if the random document sampling does not happen to check one of the documents with a `string`. Then when you run
+a query that _does_ read one of those documents, the query fails because the connector refuses to return a value of an
+unexpected type.
+
+The new option, `onResponseTypeMismatch`, has two possible values: `fail` (the existing, default behavior), or `skipRow`
+(the new, opt-in behavior). If you set the option to `skipRow` in the example case above, the connector will silently
+exclude documents with unexpected `string` values in the response. This allows you to get access to the "good" data.
+This is opt-in because we don't want to exclude data if users are not aware that it might be happening.
+
+The option is set in connector configuration in `configuration.json`. Here is an example configuration:
+
+```json
+{
+  "introspectionOptions": {
+    "sampleSize": 1000,
+    "noValidatorSchema": false,
+    "allSchemaNullable": false
+  },
+  "serializationOptions": {
+    "extendedJsonMode": "relaxed",
+    "onResponseTypeMismatch": "skipRow"
+  }
+}
+```
+
+The `skipRow` behavior does not affect aggregations, or queries that do not request the field with the unexpected type.
+
+## [1.7.2] - 2025-04-16
+
+### Fixed
+
+- Database introspection no longer fails if any individual collection cannot be sampled ([#160](https://github.com/hasura/ndc-mongodb/pull/160))
+
+## [1.7.1] - 2025-03-12
+
+### Added
+
+- Add watch command while initializing metadata ([#157](https://github.com/hasura/ndc-mongodb/pull/157))
+
+## [1.7.0] - 2025-03-10
+
+### Added
+
+- Add uuid scalar type ([#148](https://github.com/hasura/ndc-mongodb/pull/148))
+
+### Changed
+
+- On database introspection newly-added collection fields will be added to existing schema configurations ([#152](https://github.com/hasura/ndc-mongodb/pull/152))
+
+### Fixed
+
+- Update dependencies to get fixes for reported security vulnerabilities ([#149](https://github.com/hasura/ndc-mongodb/pull/149))
+
+#### Changes to database introspection
+
+Previously running introspection would not update existing schema definitions; it would only add definitions for
+newly-added collections. This release changes that behavior to make conservative changes to existing definitions:
+
+- added fields, either top-level or nested, will be added to existing schema definitions
+- types for fields that are already configured will **not** be changed automatically
+- fields that appear to have been deleted from collections will **not** be removed from configurations
+
+We take such a conservative approach to schema configuration changes because we want to avoid accidental breaking API
+changes, and because schema configuration can be edited by hand, and we don't want to accidentally reverse such
+modifications.
+
+If you want to make type changes to fields that are already configured, or if you want to remove fields from schema
+configuration, you can either make those edits to schema configurations by hand, or you can delete schema files before
+running introspection.
+
+#### UUID scalar type
+
+Previously UUID values would show up in GraphQL as `BinData`.
BinData is a generalized BSON type for binary data. It
+doesn't provide a great interface for working with UUIDs because binary data must be given as a JSON object with binary
+data in base64-encoding (while UUIDs are usually given in a specific hex-encoded string format), and there is also
+a mandatory "subtype" field. For example, a BinData value representing a UUID fetched via GraphQL looks like this:
+
+```json
+{ "base64": "QKaT0MAKQl2vXFNeN/3+nA==", "subType":"04" }
+```
+
+With this change UUID fields can use the new `uuid` type instead of `binData`. Values of type `uuid` are represented in
+JSON as strings. The same value in a field with type `uuid` looks like this:
+
+```json
+"40a693d0-c00a-425d-af5c-535e37fdfe9c"
+```
+
+This means that you can now, for example, filter using string representations for UUIDs:
+
+```gql
+query {
+  posts(where: {id: {_eq: "40a693d0-c00a-425d-af5c-535e37fdfe9c"}}) {
+    title
+  }
+}
+```
+
+Introspection has been updated so that database fields containing UUIDs will use the `uuid` type when setting up new
+collections, or when re-introspecting after deleting the existing schema configuration. To migrate, you may delete and
+re-introspect, or edit schema files to change occurrences of `binData` to `uuid`.
+
+#### Security Fixes
+
+Rust dependencies have been updated to get fixes for these advisories:
+
+-
+-
+
+## [1.6.0] - 2025-01-17
+
+### Added
+
+- You can now aggregate values in nested object fields ([#136](https://github.com/hasura/ndc-mongodb/pull/136))
+
+### Changed
+
+- Result types for aggregation operations other than count are now nullable ([#136](https://github.com/hasura/ndc-mongodb/pull/136))
+
+### Fixed
+
+- Upgrade dependencies to get fix for RUSTSEC-2024-0421, a vulnerability in domain name comparisons ([#138](https://github.com/hasura/ndc-mongodb/pull/138))
+- Aggregations on empty document sets now produce `null` instead of failing with an error ([#136](https://github.com/hasura/ndc-mongodb/pull/136))
+- Handle collection validators with object fields that do not list properties ([#140](https://github.com/hasura/ndc-mongodb/pull/140))
+
+#### Fix for RUSTSEC-2024-0421 / CVE-2024-12224
+
+Updates dependencies to upgrade the library, idna, to get a version that is not
+affected by a vulnerability reported in [RUSTSEC-2024-0421][].
+
+[RUSTSEC-2024-0421]: https://rustsec.org/advisories/RUSTSEC-2024-0421
+
+The vulnerability allows an attacker to craft a domain name that older versions
+of idna interpret as identical to a legitimate domain name, but that is in fact
+a different name. We do not expect that this impacts the MongoDB connector since
+it uses the affected library exclusively to connect to MongoDB databases, and
+database URLs are supplied by trusted administrators. But better to be safe than
+sorry.
+
+#### Validators with object fields that do not list properties
+
+If a collection validator specifies a property of type `object`, but does not specify a list of nested properties for that object, then we will infer the `ExtendedJSON` type for that property. A collection created with this set of options would have the type `ExtendedJSON` for its `reactions` field:
+
+```json
+{
+  "validator": {
+    "$jsonSchema": {
+      "bsonType": "object",
+      "properties": {
+        "reactions": { "bsonType": "object" }
+      }
+    }
+  }
+}
+```
+
+If the validator specifies a map of nested properties, but that map is empty, then we interpret that as an empty object type.
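+
+As an illustrative sketch of that second case (the property name `metadata` here is hypothetical, not taken
+from a real configuration), a validator that declares an object property with an empty `properties` map would
+be interpreted as an empty object type for `metadata` rather than `ExtendedJSON`:
+
+```json
+{
+  "validator": {
+    "$jsonSchema": {
+      "bsonType": "object",
+      "properties": {
+        "metadata": { "bsonType": "object", "properties": {} }
+      }
+    }
+  }
+}
+```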
+
+## [1.5.0] - 2024-12-05
+
+### Added
+
+- Adds CLI command to manage native queries with automatic type inference ([#131](https://github.com/hasura/ndc-mongodb/pull/131))
+
+### Changed
+
+- Updates MongoDB Rust driver from v2.8 to v3.1.0 ([#124](https://github.com/hasura/ndc-mongodb/pull/124))
+
+### Fixed
+
+- The connector previously used Cloudflare's DNS resolver. Now it uses the locally-configured DNS resolver. ([#125](https://github.com/hasura/ndc-mongodb/pull/125))
+- Fixed connector not picking up configuration changes when running locally using the ddn CLI workflow. ([#133](https://github.com/hasura/ndc-mongodb/pull/133))
+
+#### Managing native queries with the CLI
+
+New in this release is a CLI plugin command to create, list, inspect, and delete
+native queries. A big advantage of using the command versus writing native query
+configurations by hand is that the command will type-check your query's
+aggregation pipeline, and will write type declarations automatically.
+
+This is a BETA feature - it is a work in progress, and will not work for all
+cases. It is safe to experiment with since it is limited to managing native
+query configuration files, and does not lock you into anything.
+
+You can run the new command like this:
+
+```sh
+ddn connector plugin --connector app/connector/my_connector/connector.yaml -- native-query
+```
+
+To create a native query, create a file with a `.json` extension that contains
+the aggregation pipeline for your query. For example, this pipeline in
+`title_word_frequency.json` outputs frequency counts for words appearing in
+movie titles in a given year:
+
+```json
+[
+  {
+    "$match": {
+      "year": "{{ year }}"
+    }
+  },
+  {
+    "$replaceWith": {
+      "title_words": { "$split": ["$title", " "] }
+    }
+  },
+  { "$unwind": { "path": "$title_words" } },
+  {
+    "$group": {
+      "_id": "$title_words",
+      "count": { "$count": {} }
+    }
+  }
+]
+```
+
+In your supergraph directory, run a command like this, using the path to the pipeline file as an argument:
+
+```sh
+ddn connector plugin --connector app/connector/my_connector/connector.yaml -- native-query create title_word_frequency.json --collection movies
+```
+
+You should see output like this:
+
+```
+Wrote native query configuration to your-project/connector/native_queries/title_word_frequency.json
+
+input collection: movies
+representation: collection
+
+## parameters
+
+year: int!
+
+## result type
+
+{
+  _id: string!,
+  count: int!
+}
+```
+
+For more details see the
+[documentation page](https://hasura.io/docs/3.0/connectors/mongodb/native-operations/native-queries/#manage-native-queries-with-the-ddn-cli).
+
+## [1.4.0] - 2024-11-14
+
+### Added
+
+- Adds `_in` and `_nin` operators ([#122](https://github.com/hasura/ndc-mongodb/pull/122))
+
+### Changed
+
+- **BREAKING:** If `configuration.json` cannot be parsed, the connector will fail to start. This change also prohibits unknown keys in that file. These changes will help to prevent typos in configuration from being silently ignored. ([#115](https://github.com/hasura/ndc-mongodb/pull/115))
+
+### Fixed
+
+- Fixes for filtering by complex predicate that references variables, or field names that require escaping ([#111](https://github.com/hasura/ndc-mongodb/pull/111))
+- Escape names if necessary instead of failing when joining relationship on field names with special characters ([#113](https://github.com/hasura/ndc-mongodb/pull/113))
+
+#### `_in` and `_nin`
+
+These operators compare document values for equality against a given set of
+options.
`_in` matches documents where one of the given values matches, `_nin` matches +documents where none of the given values matches. For example this query selects +movies that are rated either "G" or "TV-G": + +```graphql +query { + movies( + where: { rated: { _in: ["G", "TV-G"] } } + order_by: { id: Asc } + limit: 5 + ) { + title + rated + } +} +``` + +## [1.3.0] - 2024-10-01 + +### Fixed + +- Selecting nested fields with names that begin with a dollar sign ([#108](https://github.com/hasura/ndc-mongodb/pull/108)) +- Sorting by fields with names that begin with a dollar sign ([#109](https://github.com/hasura/ndc-mongodb/pull/109)) + +### Changed + +## [1.2.0] - 2024-09-12 + +### Added + +- Extended JSON fields now support all comparison and aggregation functions ([#99](https://github.com/hasura/ndc-mongodb/pull/99)) +- Update to ndc-spec v0.1.6 which allows filtering by object values in array fields ([#101](https://github.com/hasura/ndc-mongodb/pull/101)) + +#### Filtering by values in arrays + +In this update you can filter by making comparisons to object values inside +arrays. For example consider a MongoDB database with these three documents: + +```json +{ "institution": "Black Mesa", "staff": [{ "name": "Freeman" }, { "name": "Calhoun" }] } +{ "institution": "Aperture Science", "staff": [{ "name": "GLaDOS" }, { "name": "Chell" }] } +{ "institution": "City 17", "staff": [{ "name": "Alyx" }, { "name": "Freeman" }, { "name": "Breen" }] } +``` + +You can now write a GraphQL query with a `where` clause that checks individual +entries in the `staff` arrays: + +```graphql +query { + institutions(where: { staff: { name: { _eq: "Freeman" } } }) { + institution + } +} +``` + +Which produces the result: + +```json +{ "data": { "institutions": [ + { "institution": "Black Mesa" }, + { "institution": "City 17" } +] } } +``` + +The filter selects documents where **any** element in the array passes the +condition. If you want to select only documents where _every_ array element +passes then negate the comparison on array element values, and also negate the +entire predicate like this: + +```graphql +query EveryElementMustMatch { + institutions( + where: { _not: { staff: { name: { _neq: "Freeman" } } } } + ) { + institution + } +} +``` + +**Note:** It is currently only possible to filter on arrays that contain +objects. Filtering on arrays that contain scalar values or nested arrays will +come later. + +To configure DDN metadata to filter on array fields configure the +`BooleanExpressionType` for the containing document object type to use an +**object** boolean expression type for comparisons on the array field. The +GraphQL Engine will transparently distribute object comparisons over array +elements. For example the above example is configured with this boolean +expression type for documents: + +```yaml +--- +kind: BooleanExpressionType +version: v1 +definition: + name: InstitutionComparisonExp + operand: + object: + type: Institution + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdComparisonExp + - fieldName: institution + booleanExpressionType: StringComparisonExp + - fieldName: staff + booleanExpressionType: InstitutionStaffComparisonExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: InstitutionComparisonExp +``` + +`InstitutionStaffComparisonExp` is the boolean expression type for objects +inside the `staff` array. 
It looks like this: + +```yaml +--- +kind: BooleanExpressionType +version: v1 +definition: + name: InstitutionStaffComparisonExp + operand: + object: + type: InstitutionStaff + comparableFields: + - fieldName: name + booleanExpressionType: StringComparisonExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: InstitutionStaffComparisonExp +``` + ## [1.1.0] - 2024-08-16 - Accept predicate arguments in native mutations and native queries ([#92](https://github.com/hasura/ndc-mongodb/pull/92)) diff --git a/Cargo.lock b/Cargo.lock index 3f1ef987..bbf2d61b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -109,6 +109,12 @@ version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + [[package]] name = "assert_json" version = "0.1.0" @@ -120,6 +126,21 @@ dependencies = [ "thiserror", ] +[[package]] +name = "async-compression" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb939d66e4ae03cee6091612804ba446b12878410cfa17f785f4dd67d4014e8" +dependencies = [ + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -142,6 +163,15 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "async-tempfile" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acb90d9834a8015109afc79f1f548223a0614edcbab62fb35b62d4b707e975e7" +dependencies = [ + "tokio", +] + [[package]] name = "async-trait" version = "0.1.80" @@ -352,9 +382,14 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.0.99" +version = "1.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" +dependencies = [ + "jobserver", + "libc", + "shlex", +] [[package]] name = "cfg-if" @@ -394,7 +429,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] @@ -403,7 +438,7 @@ version = "4.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "syn 2.0.66", @@ -439,21 +474,25 @@ checksum = "97af0562545a7d7f3d9222fcf909963bec36dcb502afaacab98c6ffac8da47ce" [[package]] name = "configuration" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", + "async-tempfile", "futures", - "itertools", + "googletest 0.12.0", + "itertools 0.14.0", "mongodb", "mongodb-support", "ndc-models", "ndc-query-plan", + "ref-cast", "schemars", "serde", "serde_json", "serde_yaml", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -510,9 +549,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -533,38 +572,14 @@ dependencies = [ "typenum", ] -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ - "darling_core 0.20.9", - "darling_macro 0.20.9", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -577,28 +592,17 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.11.1", + "strsim", "syn 2.0.66", ] -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ - "darling_core 0.20.9", + "darling_core", "quote", "syn 2.0.66", ] @@ -639,7 +643,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn 1.0.109", ] @@ -649,6 +653,12 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.10.7" @@ -706,14 +716,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.4.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.66", ] [[package]] @@ -827,9 +837,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -837,9 +847,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" 
[[package]] name = "futures-executor" @@ -854,15 +864,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -871,21 +881,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -932,6 +942,51 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "googletest" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e38fa267f4db1a2fa51795ea4234eaadc3617a97486a9f158de9256672260e" +dependencies = [ + "googletest_macro 0.12.0", + "num-traits", + "regex", + "rustversion", +] + +[[package]] +name = "googletest" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce026f84cdd339bf71be01b24fe67470ee634282f68c1c4b563d00a9f002b05" +dependencies = [ + "googletest_macro 0.13.0", + "num-traits", + "regex", + "rustversion", +] + +[[package]] +name = "googletest_macro" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "171deab504ad43a9ea80324a3686a0cbe9436220d9d0b48ae4d7f7bd303b48a9" +dependencies = [ + "quote", + "syn 2.0.66", +] + +[[package]] +name = "googletest_macro" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5070fa86976044fe2b004d874c10af5d1aed6d8f6a72ff93a6eb29cc87048bc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "h2" version = "0.3.26" @@ -1006,12 +1061,6 @@ dependencies = [ "http 0.2.12", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1030,6 +1079,51 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hickory-proto" 
+version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1141,7 +1235,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -1222,7 +1316,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower", "tower-service", @@ -1376,17 +1470,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.0" @@ -1442,7 +1525,7 @@ dependencies = [ [[package]] name = "integration-tests" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", "assert_json", @@ -1462,7 +1545,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg 0.50.0", @@ -1489,12 +1572,30 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.69" @@ -1504,6 +1605,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-structural-diff" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e878e36a8a44c158505c2c818abdc1350413ad83dcb774a0459f6a7ef2b65cbf" +dependencies = [ + "difflib", + "regex", + "serde_json", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1580,12 +1692,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = 
"matchit" version = "0.7.3" @@ -1624,6 +1730,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.7.3" @@ -1646,14 +1758,13 @@ dependencies = [ [[package]] name = "mockall" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", "fragile", - "lazy_static", "mockall_derive", "predicates", "predicates-tree", @@ -1661,9 +1772,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", @@ -1673,8 +1784,9 @@ dependencies = [ [[package]] name = "mongodb" -version = "2.8.2" -source = "git+https://github.com/hasura/mongo-rust-driver.git?branch=upstream-time-series-fix#5df5e10153b043c3bf93748d53969fa4345b6250" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c857d71f918b38221baf2fdff7207fec9984b4504901544772b1edf0302d669f" dependencies = [ "async-trait", "base64 0.13.1", @@ -1688,10 +1800,13 @@ dependencies = [ "futures-io", "futures-util", "hex", + "hickory-proto", + "hickory-resolver", "hmac", - "lazy_static", "log", "md-5", + "mongodb-internal-macros", + "once_cell", "pbkdf2", "percent-encoding", "rand", @@ -1700,20 +1815,18 @@ dependencies = [ "rustls-pemfile 1.0.4", "serde", "serde_bytes", - "serde_with 1.14.0", + "serde_with", "sha-1", "sha2", - "socket2 0.4.10", + "socket2", "stringprep", - "strsim 0.10.0", + "strsim", "take_mut", "thiserror", "tokio", "tokio-rustls 0.24.1", "tokio-util", "tracing", - "trust-dns-proto", - "trust-dns-resolver", "typed-builder 0.10.0", "uuid", "webpki-roots", @@ -1721,7 +1834,7 @@ dependencies = [ [[package]] name = "mongodb-agent-common" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", "async-trait", @@ -1734,7 +1847,7 @@ dependencies = [ "http 0.2.12", "indent", "indexmap 2.2.6", - "itertools", + "itertools 0.14.0", "lazy_static", "mockall", "mongodb", @@ -1743,6 +1856,7 @@ dependencies = [ "ndc-models", "ndc-query-plan", "ndc-test-helpers", + "nonempty", "once_cell", "pretty_assertions", "proptest", @@ -1750,7 +1864,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "serde_with 3.8.1", + "serde_with", "test-helpers", "thiserror", "time", @@ -1760,29 +1874,42 @@ dependencies = [ [[package]] name = "mongodb-cli-plugin" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", + "async-tempfile", "clap", "configuration", + "enum-iterator", "futures-util", + "googletest 0.13.0", + "indent", "indexmap 2.2.6", - "itertools", + "itertools 0.14.0", + "json-structural-diff", "mongodb", "mongodb-agent-common", "mongodb-support", "ndc-models", + "ndc-test-helpers", + "nom", + "nonempty", + "pretty", + "pretty_assertions", "proptest", + "ref-cast", + "regex", "serde", "serde_json", "test-helpers", + "textwrap", "thiserror", "tokio", ] [[package]] name = "mongodb-connector" -version = "1.1.0" +version = "1.8.1" dependencies 
= [ "anyhow", "async-trait", @@ -1791,7 +1918,7 @@ dependencies = [ "futures", "http 0.2.12", "indexmap 2.2.6", - "itertools", + "itertools 0.14.0", "mongodb", "mongodb-agent-common", "mongodb-support", @@ -1807,9 +1934,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "mongodb-internal-macros" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6dbc533e93429a71c44a14c04547ac783b56d3f22e6c4f12b1b994cf93844e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "mongodb-support" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", "enum-iterator", @@ -1840,28 +1978,28 @@ dependencies = [ [[package]] name = "ndc-models" -version = "0.1.5" -source = "git+http://github.com/hasura/ndc-spec.git?tag=v0.1.5#78f52768bd02a8289194078a5abc2432c8e3a758" +version = "0.2.4" +source = "git+http://github.com/hasura/ndc-spec.git?tag=v0.2.4#df67fa6469431f9304aac9c237e9d2327d20da20" dependencies = [ "indexmap 2.2.6", "ref-cast", "schemars", "serde", "serde_json", - "serde_with 3.8.1", + "serde_with", "smol_str", ] [[package]] name = "ndc-query-plan" -version = "1.1.0" +version = "1.8.1" dependencies = [ "anyhow", "derivative", "enum-iterator", "indent", "indexmap 2.2.6", - "itertools", + "itertools 0.14.0", "lazy_static", "ndc-models", "ndc-test-helpers", @@ -1874,17 +2012,16 @@ dependencies = [ [[package]] name = "ndc-sdk" -version = "0.2.1" -source = "git+https://github.com/hasura/ndc-sdk-rs.git?tag=v0.2.1#83a906e8a744ee78d84aeee95f61bf3298a982ea" +version = "0.8.0" +source = "git+https://github.com/hasura/ndc-sdk-rs.git?rev=v0.8.0#0c93ded023767c8402ace015aff5023115d8dcb6" dependencies = [ "async-trait", "axum", "axum-extra", - "bytes", "clap", "http 0.2.12", - "mime", "ndc-models", + "ndc-sdk-core", "ndc-test", "opentelemetry", "opentelemetry-http", @@ -1894,7 +2031,7 @@ dependencies = [ "opentelemetry_sdk", "prometheus", "reqwest 0.11.27", - "serde", + "semver", "serde_json", "thiserror", "tokio", @@ -1905,22 +2042,42 @@ dependencies = [ "url", ] +[[package]] +name = "ndc-sdk-core" +version = "0.8.0" +source = "git+https://github.com/hasura/ndc-sdk-rs.git?rev=v0.8.0#0c93ded023767c8402ace015aff5023115d8dcb6" +dependencies = [ + "async-trait", + "axum", + "bytes", + "http 0.2.12", + "mime", + "ndc-models", + "ndc-test", + "prometheus", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "ndc-test" -version = "0.1.5" -source = "git+http://github.com/hasura/ndc-spec.git?tag=v0.1.5#78f52768bd02a8289194078a5abc2432c8e3a758" +version = "0.2.4" +source = "git+http://github.com/hasura/ndc-spec.git?tag=v0.2.4#df67fa6469431f9304aac9c237e9d2327d20da20" dependencies = [ "async-trait", "clap", "colorful", "indexmap 2.2.6", "ndc-models", + "pretty_assertions", "rand", - "reqwest 0.11.27", - "semver 1.0.23", + "reqwest 0.12.4", + "semver", "serde", "serde_json", - "smol_str", "thiserror", "tokio", "url", @@ -1928,20 +2085,30 @@ dependencies = [ [[package]] name = "ndc-test-helpers" -version = "1.1.0" +version = "1.8.1" dependencies = [ "indexmap 2.2.6", - "itertools", + "itertools 0.14.0", "ndc-models", "serde_json", "smol_str", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nonempty" -version = "0.10.0" +version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "303e8749c804ccd6ca3b428de7fe0d86cb86bc7606bc15291f100fd487960bb8" +checksum = "549e471b99ccaf2f89101bec68f4d244457d5a95a9c3d0672e9564124397741d" [[package]] name = "nu-ansi-term" @@ -1996,9 +2163,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.5.0", "cfg-if", @@ -2028,9 +2195,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -2277,11 +2444,23 @@ dependencies = [ "termtree", ] +[[package]] +name = "pretty" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55c4d17d994b637e2f4daf6e5dc5d660d209d5642377d675d7a1c3ab69fa579" +dependencies = [ + "arrayvec", + "termcolor", + "typed-arena", + "unicode-width", +] + [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -2325,7 +2504,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -2348,7 +2527,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.66", @@ -2451,14 +2630,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -2472,13 +2651,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -2489,9 +2668,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" 
[[package]] name = "reqwest" @@ -2513,7 +2692,6 @@ dependencies = [ "js-sys", "log", "mime", - "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -2556,6 +2734,7 @@ dependencies = [ "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", @@ -2588,15 +2767,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" dependencies = [ "cc", "cfg-if", "getrandom", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -2607,32 +2785,23 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.23", + "semver", ] [[package]] name = "rustc_version_runtime" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d31b7153270ebf48bf91c65ae5b0c00e749c4cfad505f66530ac74950249582f" +checksum = "2dd18cd2bae1820af0b6ad5e54f4a51d0f3fcc53b05f845675074efcc7af071d" dependencies = [ - "rustc_version 0.2.3", - "semver 0.9.0", + "rustc_version", + "semver", ] [[package]] @@ -2832,32 +3001,17 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -2873,9 +3027,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -2895,12 +3049,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "indexmap 
2.2.6", "itoa", + "memchr", "ryu", "serde", ] @@ -2927,16 +3082,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.8.1" @@ -2951,29 +3096,17 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "serde_with_macros 3.8.1", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ - "darling 0.20.9", + "darling", "proc-macro2", "quote", "syn 2.0.66", @@ -3034,6 +3167,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -3064,6 +3203,12 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "smawk" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" + [[package]] name = "smol_str" version = "0.1.24" @@ -3073,16 +3218,6 @@ dependencies = [ "serde", ] -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -3093,12 +3228,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -3116,12 +3245,6 @@ dependencies = [ "unicode-properties", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -3235,7 +3358,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-helpers" -version = "1.1.0" +version = "1.8.1" dependencies = [ "configuration", "enum-iterator", @@ -3247,6 +3370,17 @@ dependencies = [ "proptest", ] +[[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" +dependencies = [ + "smawk", + "unicode-linebreak", + "unicode-width", +] + [[package]] name = "thiserror" version = "1.0.61" @@ -3347,7 +3481,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + 
"socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -3487,6 +3621,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ + "async-compression", "bitflags 2.5.0", "bytes", "futures-core", @@ -3496,6 +3631,8 @@ dependencies = [ "http-range-header", "mime", "pin-project-lite", + "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -3604,57 +3741,18 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trust-dns-proto" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "log", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "trust-dns-proto", -] - [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "typed-arena" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" + [[package]] name = "typed-builder" version = "0.10.0" @@ -3719,6 +3817,12 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-linebreak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" + [[package]] name = "unicode-normalization" version = "0.1.23" @@ -3759,7 +3863,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" dependencies = [ "form_urlencoded", - "idna 1.0.0", + "idna", "percent-encoding", ] @@ -4159,9 +4263,9 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" @@ -4255,3 +4359,31 @@ dependencies = [ "quote", "syn 2.0.66", ] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.15+zstd.1.5.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index dc7a9e4b..6300b317 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.0" +version = "1.8.1" [workspace] members = [ @@ -18,27 +18,20 @@ resolver = "2" # The tag or rev of ndc-models must match the locked tag or rev of the # ndc-models dependency of ndc-sdk [workspace.dependencies] -ndc-sdk = { git = "https://github.com/hasura/ndc-sdk-rs.git", tag = "v0.2.1" } -ndc-models = { git = "http://github.com/hasura/ndc-spec.git", tag = "v0.1.5" } +ndc-sdk = { git = "https://github.com/hasura/ndc-sdk-rs.git", rev = "v0.8.0" } +ndc-models = { git = "http://github.com/hasura/ndc-spec.git", tag = "v0.2.4" } indexmap = { version = "2", features = [ "serde", ] } # should match the version that ndc-models uses -itertools = "^0.12.1" -mongodb = { version = "2.8", features = ["tracing-unstable"] } +itertools = "^0.14.0" +mongodb = { version = "^3.1.0", features = ["tracing-unstable"] } +nonempty = "^0.11.0" schemars = "^0.8.12" +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] } ref-cast = "1.0.23" -# Connecting to MongoDB Atlas database with time series collections fails in the -# latest released version of the MongoDB Rust driver. A fix has been merged, but -# it has not been released yet: https://github.com/mongodb/mongo-rust-driver/pull/1077 -# -# We are using a branch of the driver that cherry-picks that fix onto the v2.8.2 -# release. -[patch.crates-io.mongodb] -git = "https://github.com/hasura/mongo-rust-driver.git" -branch = "upstream-time-series-fix" - # Set opt levels according to recommendations in insta documentation [profile.dev.package] insta.opt-level = 3 diff --git a/DEVELOPING.md b/DEVELOPING.md deleted file mode 100644 index e44d470d..00000000 --- a/DEVELOPING.md +++ /dev/null @@ -1,56 +0,0 @@ -# Developing - -## Project Maintenance Notes - -### Updating GraphQL Engine for integration tests - -It's important to keep the GraphQL Engine version updated to make sure that the -connector is working with the latest engine version. To update run, - -```sh -$ nix flake lock --update-input graphql-engine-source -``` - -Then commit the changes to `flake.lock` to version control. - -A specific engine version can be specified by editing `flake.lock` instead of -running the above command like this: - -```diff - graphql-engine-source = { -- url = "github:hasura/graphql-engine"; -+ url = "github:hasura/graphql-engine/"; - flake = false; - }; -``` - -### Updating Rust version - -Updating the Rust version used in the Nix build system requires two steps (in -any order): - -- update `rust-overlay` which provides Rust toolchains -- edit `rust-toolchain.toml` to specify the desired toolchain version - -To update `rust-overlay` run, - -```sh -$ nix flake lock --update-input rust-overlay -``` - -If you are using direnv to automatically apply the nix dev environment note that -edits to `rust-toolchain.toml` will not automatically update your environment. -You can make a temporary edit to `flake.nix` (like adding a space somewhere) -which will trigger an update, and then you can revert the change. 
- -### Updating other project dependencies - -You can update all dependencies declared in `flake.nix` at once by running, - -```sh -$ nix flake update -``` - -This will update `graphql-engine-source` and `rust-overlay` as described above, -and will also update `advisory-db` to get updated security notices for cargo -dependencies, `nixpkgs` to get updates to openssl. diff --git a/README.md b/README.md index a437d162..49cfa111 100644 --- a/README.md +++ b/README.md @@ -1,124 +1,190 @@ -# Hasura MongoDB Connector +# Hasura MongoDB Data Connector -## Requirements +[![Docs](https://img.shields.io/badge/docs-v3.x-brightgreen.svg?style=flat)](https://hasura.io/docs/3.0/connectors/mongodb/) +[![ndc-hub](https://img.shields.io/badge/ndc--hub-postgres-blue.svg?style=flat)](https://hasura.io/connectors/mongodb) +[![License](https://img.shields.io/badge/license-Apache--2.0-purple.svg?style=flat)](LICENSE.txt) -* Rust via Rustup -* MongoDB `>= 6` -* OpenSSL development files +This Hasura data connector connects MongoDB to your data graph giving you an +instant GraphQL API to access your MongoDB data. Supports MongoDB 6 or later. -or get dependencies automatically with Nix +This connector is built using the [Rust Data Connector SDK](https://github.com/hasura/ndc-hub#rusk-sdk) and implements the [Data Connector Spec](https://github.com/hasura/ndc-spec). -Some of the build instructions require Nix. To set that up [install Nix][], and -configure it to [enable flakes][]. +- [See the listing in the Hasura Hub](https://hasura.io/connectors/mongodb) +- [Hasura V3 Documentation](https://hasura.io/docs/3.0/) -[install Nix]: https://nixos.org/download.html -[enable flakes]: https://nixos.wiki/wiki/Flakes +Docs for the MongoDB data connector: -## Build & Run +- [Usage](https://hasura.io/docs/3.0/connectors/mongodb/) +- [Building](./docs/building.md) +- [Development](./docs/development.md) +- [Docker Images](./docs/docker-images.md) +- [Code of Conduct](./docs/code-of-conduct.md) +- [Contributing](./docs/contributing.md) +- [Limitations](./docs/limitations.md) +- [Support](./docs/support.md) +- [Security](./docs/security.md) -To build a statically-linked binary run, +## Features -```sh -$ nix build --print-build-logs && cp result/bin/mongodb-connector -``` +Below, you'll find a matrix of all supported features for the MongoDB data connector: -To cross-compile a statically-linked ARM build for Linux run, +| Feature | Supported | Notes | +| ----------------------------------------------- | --------- | ----- | +| Native Queries + Logical Models | ✅ | | +| Simple Object Query | ✅ | | +| Filter / Search | ✅ | | +| Filter by fields of Nested Objects | ✅ | | +| Filter by values in Nested Arrays | ✅ | | +| Simple Aggregation | ✅ | | +| Aggregate fields of Nested Objects | ❌ | | +| Aggregate values of Nested Arrays | ❌ | | +| Sort | ✅ | | +| Sorty by fields of Nested Objects | ❌ | | +| Paginate | ✅ | | +| Collection Relationships | ✅ | | +| Remote Relationships | ✅ | | +| Relationships Keyed by Fields of Nested Objects | ❌ | | +| Mutations | ✅ | Provided by custom [Native Mutations][] - predefined basic mutations are also planned | -```sh -$ nix build .#mongo-connector-aarch64-linux --print-build-logs && cp result/bin/mongodb-connector -``` +[Native Mutations]: https://hasura.io/docs/3.0/connectors/mongodb/native-operations/native-mutations + +## Before you get Started + +1. The [DDN CLI](https://hasura.io/docs/3.0/cli/installation) and [Docker](https://docs.docker.com/engine/install/) installed +2. 
A [supergraph](https://hasura.io/docs/3.0/getting-started/init-supergraph) +3. A [subgraph](https://hasura.io/docs/3.0/getting-started/init-subgraph) -The Nix configuration outputs Docker images in `.tar.gz` files. You can use -`docker load -i` to install these to the local machine's docker daemon. But it -may be more helpful to use `skopeo` for this purpose so that you can apply -a chosen tag, or override the image name. +The steps below explain how to initialize and configure a connector for local +development on your data graph. You can learn how to deploy a connector — after +it's been configured +— [here](https://hasura.io/docs/3.0/getting-started/deployment/deploy-a-connector). -To build and install a Docker image locally (you can change -`mongodb-connector:1.2.3` to whatever image name and tag you prefer), +For instructions on local development on the MongoDB connector itself see +[development.md](development.md). -```sh -$ nix build .#docker --print-build-logs \ - && skopeo --insecure-policy copy docker-archive:result docker-daemon:mongo-connector:1.2.3 +## Using the MongoDB connector + +### Step 1: Authenticate your CLI session + +```bash +ddn auth login ``` -To build a Docker image with a cross-compiled ARM binary, +### Step 2: Configure the connector -```sh -$ nix build .#docker-aarch64-linux --print-build-logs \ - && skopeo --insecure-policy copy docker-archive:result docker-daemon:mongo-connector:1.2.3 +Once you have an initialized supergraph and subgraph, run the initialization command in interactive mode while +providing a name for the connector in the prompt: + +```bash +ddn connector init -i ``` -If you don't want to install `skopeo` you can run it through Nix, `nix run -nixpkgs#skopeo -- --insecure-policy copy docker-archive:result docker-daemon:mongo-connector:1.2.3` +`` may be any name you choose for your particular project. + +#### Step 2.1: Choose the hasura/mongodb from the list +#### Step 2.2: Choose a port for the connector -## Developing +The CLI will ask for a specific port to run the connector on. Choose a port that is not already in use or use the +default suggested port. -This project uses a devShell configuration in `flake.nix` that automatically -loads specific version of Rust, mongosh, and other utilities. The easiest way to -make use of the devShell is to install nix, direnv and nix-direnv. See -https://github.com/nix-community/nix-direnv +#### Step 2.3: Provide env vars for the connector -Direnv will source `.envrc`, install the appropriate Nix packages automatically -(isolated from the rest of your system packages), and configure your shell to -use the project dependencies when you cd into the project directory. All shell -modifications are reversed when you navigate to another directory. +| Name | Description | +|------------------------|----------------------------------------------------------------------| +| `MONGODB_DATABASE_URI` | Connection URI for the MongoDB database to connect - see notes below | -### Running the Connector During Development +`MONGODB_DATABASE_URI` is a string with your database' hostname, login +credentials, and database name. A simple example is +`mongodb://admin@pass:localhost/my_database`. If you are using a hosted database +on MongoDB Atlas you can get the URI from the "Data Services" tab in the project +dashboard: -If you have set up nix and direnv then you can use arion to run the agent with -all of the services that it needs to function. 
Arion is a frontend for -docker-compose that adds a layer of convenience where it can easily load agent -code changes. It is automatically included with the project's devShell. +- open the "Data Services" tab +- click "Get connection string" +- you will see a 3-step dialog - ignore all 3 steps, you don't need to change anything +- copy the string that begins with `mongodb+srv://` + +## Step 3: Introspect the connector -To start all services run: +Set up configuration for the connector with this command. This will introspect +your database to infer a schema with types for your data. - $ arion up -d +```bash +ddn connector introspect +``` + +Remember to use the same value for `` That you used in step 2. -To recompile and restart the agent after code changes run: +This will create a tree of files that looks like this (this example is based on the +[sample_mflix][] sample database): - $ arion up -d connector +[sample_mflix]: https://www.mongodb.com/docs/atlas/sample-data/sample-mflix/ + +``` +app/connector +└── + ├── compose.yaml -- defines a docker service for the connector + ├── connector.yaml -- defines connector version to fetch from hub, subgraph, env var mapping + ├── configuration.json -- options for configuring the connector + ├── schema -- inferred types for collection documents - one file per collection + │ ├── comments.json + │ ├── movies.json + │ ├── sessions.json + │ ├── theaters.json + │ └── users.json + ├── native_mutations -- custom mongodb commands to appear in your data graph + │ └── your_mutation.json + └── native_queries -- custom mongodb aggregation pipelines to appear in your data graph + └── your_query.json +``` -Arion delegates to docker-compose so it uses the same subcommands with the same -flags. Note that the PostgreSQL and MongoDB services use persistent volumes so -if you want to completely reset the state of those services you will need to -remove volumes using the `docker volume rm` command. +The `native_mutations` and `native_queries` directories will not be created +automatically - create those directories as needed. -The arion configuration runs these services: +Feel free to edit these files to change options, or to make manual tweaks to +inferred schema types. If inferred types do not look accurate you can edit +`configuration.json`, change `sampleSize` to a larger number to randomly sample +more collection documents, and run the `introspect` command again. -- connector: the MongoDB data connector agent defined in this repo (port 7130) -- mongodb -- Hasura GraphQL Engine -- a stubbed authentication server -- jaeger to collect logs (see UI at http://localhost:16686/) +## Step 4: Add your resources -Connect to the HGE GraphiQL UI at http://localhost:7100/ +This command will query the MongoDB connector to produce DDN metadata that +declares resources provided by the connector in your data graph. -Instead of a `docker-compose.yaml` configuration is found in `arion-compose.nix`. +```bash +ddn connector-link add-resources +``` -### Working with Test Data +The connector must be running before you run this command! If you have not +already done so you can run the connector with `ddn run docker-start`. -The arion configuration in the previous section preloads MongoDB with test data. -There is corresponding OpenDDN configuration in the `fixtures/` directory. +If you have changed the configuration described in Step 3 it is important to +restart the connector. Running `ddn run docker-start` again will restart the +connector if configuration has changed. 
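Putting those two commands together, a typical sequence for this step might look like the sketch below. Here `<connector-name>` is a placeholder for whatever name you chose in step 2 — substitute your own value.

```bash
# Start (or restart) the connector; re-running this also picks up any
# configuration changes made in step 3.
ddn run docker-start

# With the connector running, declare its resources in your DDN metadata.
ddn connector-link add-resources <connector-name>
```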
-The preloaded data is in the form of scripts in `fixtures/mongodb/`. Any `.js` -or `.sh` scripts added to this directory will be run when the mongodb service is -run from a fresh state. Note that you will have to remove any existing docker -volume to get to a fresh state. Using arion you can remove volumes by running -`arion down`. +This will create and update DDN metadata files. Once again this example is based +on the [sample_mflix][] data set: -### Running with a different MongoDB version +``` +app/metadata +├── mongodb.hml -- DataConnectorLink has connector connection details & database schema +├── mongodb-types.hml -- maps connector scalar types to GraphQL scalar types +├── Comments.hml -- The remaining files map database collections to GraphQL object types +├── Movies.hml +├── Sessions.hml +├── Theaters.hml +└── Users.hml +``` -Override the MongoDB version that arion runs by assigning a Docker image name to -the environment variable `MONGODB_IMAGE`. For example, +## Documentation - $ arion down --volumes # delete potentially-incompatible MongoDB data - $ MONGODB_IMAGE=mongo:4 arion up -d +View the full documentation for the MongoDB connector [here](https://hasura.io/docs/3.0/connectors/mongodb/). -Or run integration tests against a specific MongoDB version, +## Contributing - $ MONGODB_IMAGE=mongo:4 just test-integration +Check out our [contributing guide](./docs/contributing.md) for more details. ## License -The Hasura MongoDB Connector is available under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) (Apache-2.0). +The MongoDB connector is available under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/arion-compose/e2e-testing.nix b/arion-compose/e2e-testing.nix index 2c2822c2..80254f93 100644 --- a/arion-compose/e2e-testing.nix +++ b/arion-compose/e2e-testing.nix @@ -20,7 +20,7 @@ in connector = import ./services/connector.nix { inherit pkgs; - configuration-dir = ../fixtures/hasura/chinook/connector/chinook; + configuration-dir = ../fixtures/hasura/app/connector/chinook; database-uri = "mongodb://mongodb/chinook"; port = connector-port; service.depends_on.mongodb.condition = "service_healthy"; @@ -38,7 +38,7 @@ in inherit pkgs; port = engine-port; connectors.chinook = "http://connector:${connector-port}"; - ddn-dirs = [ ../fixtures/hasura/chinook/metadata ]; + ddn-dirs = [ ../fixtures/hasura/app/metadata ]; service.depends_on = { auth-hook.condition = "service_started"; }; diff --git a/arion-compose/integration-test-services.nix b/arion-compose/integration-test-services.nix index 1d6b7921..a1fd50a8 100644 --- a/arion-compose/integration-test-services.nix +++ b/arion-compose/integration-test-services.nix @@ -12,6 +12,7 @@ , otlp-endpoint ? null , connector-port ? "7130" , connector-chinook-port ? "7131" +, connector-test-cases-port ? "7132" , engine-port ? "7100" , mongodb-port ? 
"27017" }: @@ -21,7 +22,7 @@ in { connector = import ./services/connector.nix { inherit pkgs otlp-endpoint; - configuration-dir = ../fixtures/hasura/sample_mflix/connector/sample_mflix; + configuration-dir = ../fixtures/hasura/app/connector/sample_mflix; database-uri = "mongodb://mongodb/sample_mflix"; port = connector-port; hostPort = hostPort connector-port; @@ -32,7 +33,7 @@ in connector-chinook = import ./services/connector.nix { inherit pkgs otlp-endpoint; - configuration-dir = ../fixtures/hasura/chinook/connector/chinook; + configuration-dir = ../fixtures/hasura/app/connector/chinook; database-uri = "mongodb://mongodb/chinook"; port = connector-chinook-port; hostPort = hostPort connector-chinook-port; @@ -41,6 +42,17 @@ in }; }; + connector-test-cases = import ./services/connector.nix { + inherit pkgs otlp-endpoint; + configuration-dir = ../fixtures/hasura/app/connector/test_cases; + database-uri = "mongodb://mongodb/test_cases"; + port = connector-test-cases-port; + hostPort = hostPort connector-test-cases-port; + service.depends_on = { + mongodb.condition = "service_healthy"; + }; + }; + mongodb = import ./services/mongodb.nix { inherit pkgs; port = mongodb-port; @@ -60,11 +72,10 @@ in connectors = { chinook = "http://connector-chinook:${connector-chinook-port}"; sample_mflix = "http://connector:${connector-port}"; + test_cases = "http://connector-test-cases:${connector-test-cases-port}"; }; ddn-dirs = [ - ../fixtures/hasura/chinook/metadata - ../fixtures/hasura/sample_mflix/metadata - ../fixtures/hasura/common/metadata + ../fixtures/hasura/app/metadata ]; service.depends_on = { auth-hook.condition = "service_started"; diff --git a/arion-compose/integration-tests.nix b/arion-compose/integration-tests.nix index 6e45df8d..5ef5ec56 100644 --- a/arion-compose/integration-tests.nix +++ b/arion-compose/integration-tests.nix @@ -11,6 +11,7 @@ let connector-port = "7130"; connector-chinook-port = "7131"; + connector-test-cases-port = "7132"; engine-port = "7100"; services = import ./integration-test-services.nix { @@ -26,10 +27,12 @@ in inherit pkgs; connector-url = "http://connector:${connector-port}/"; connector-chinook-url = "http://connector-chinook:${connector-chinook-port}/"; + connector-test-cases-url = "http://connector-test-cases:${connector-test-cases-port}/"; engine-graphql-url = "http://engine:${engine-port}/graphql"; service.depends_on = { connector.condition = "service_healthy"; connector-chinook.condition = "service_healthy"; + connector-test-cases.condition = "service_healthy"; engine.condition = "service_healthy"; }; # Run the container as the current user so when it writes to the snapshots diff --git a/arion-compose/ndc-test.nix b/arion-compose/ndc-test.nix index 4f39e3b7..12daabc1 100644 --- a/arion-compose/ndc-test.nix +++ b/arion-compose/ndc-test.nix @@ -14,7 +14,7 @@ in # command = ["test" "--snapshots-dir" "/snapshots" "--seed" "1337_1337_1337_1337_1337_1337_13"]; # Replay and test the recorded snapshots # command = ["replay" "--snapshots-dir" "/snapshots"]; - configuration-dir = ../fixtures/hasura/chinook/connector/chinook; + configuration-dir = ../fixtures/hasura/app/connector/chinook; database-uri = "mongodb://mongodb:${mongodb-port}/chinook"; service.depends_on.mongodb.condition = "service_healthy"; # Run the container as the current user so when it writes to the snapshots directory it doesn't write as root diff --git a/arion-compose/services/connector.nix b/arion-compose/services/connector.nix index a65e2c7e..ed820931 100644 --- 
a/arion-compose/services/connector.nix +++ b/arion-compose/services/connector.nix @@ -12,7 +12,7 @@ , profile ? "dev" # Rust crate profile, usually either "dev" or "release" , hostPort ? null , command ? ["serve"] -, configuration-dir ? ../../fixtures/hasura/sample_mflix/connector/sample_mflix +, configuration-dir ? ../../fixtures/hasura/app/connector/sample_mflix , database-uri ? "mongodb://mongodb/sample_mflix" , service ? { } # additional options to customize this service configuration , otlp-endpoint ? null @@ -32,16 +32,14 @@ let "${hostPort}:${port}" # host:container ]; environment = pkgs.lib.filterAttrs (_: v: v != null) { - HASURA_CONFIGURATION_DIRECTORY = "/configuration"; + HASURA_CONFIGURATION_DIRECTORY = (pkgs.lib.sources.cleanSource configuration-dir).outPath; HASURA_CONNECTOR_PORT = port; MONGODB_DATABASE_URI = database-uri; OTEL_SERVICE_NAME = "mongodb-connector"; OTEL_EXPORTER_OTLP_ENDPOINT = otlp-endpoint; - RUST_LOG = "mongodb-connector=debug,dc_api=debug"; + RUST_LOG = "configuration=debug,mongodb_agent_common=debug,mongodb_connector=debug,mongodb_support=debug,ndc_query_plan=debug"; }; - volumes = [ - "${configuration-dir}:/configuration:ro" - ] ++ extra-volumes; + volumes = extra-volumes; healthcheck = { test = [ "CMD" "${pkgs.pkgsCross.linux.curl}/bin/curl" "-f" "http://localhost:${port}/health" ]; start_period = "5s"; diff --git a/arion-compose/services/engine.nix b/arion-compose/services/engine.nix index 34f2f004..1d30bc2f 100644 --- a/arion-compose/services/engine.nix +++ b/arion-compose/services/engine.nix @@ -6,7 +6,7 @@ # a `DataConnectorLink.definition.name` value in one of the given `ddn-dirs` # to correctly match up configuration to connector instances. , connectors ? { sample_mflix = "http://connector:7130"; } -, ddn-dirs ? [ ../../fixtures/hasura/sample_mflix/metadata ] +, ddn-dirs ? [ ../../fixtures/hasura/app/metadata ] , auth-webhook ? { url = "http://auth-hook:3050/validate-request"; } , otlp-endpoint ? "http://jaeger:4317" , service ? { } # additional options to customize this service configuration @@ -88,6 +88,7 @@ in "--port=${port}" "--metadata-path=${metadata}" "--authn-config-path=${auth-config}" + "--expose-internal-errors" ] ++ (pkgs.lib.optionals (otlp-endpoint != null) [ "--otlp-endpoint=${otlp-endpoint}" ]); @@ -95,7 +96,7 @@ in "${hostPort}:${port}" ]; environment = { - RUST_LOG = "engine=debug,hasura-authn-core=debug"; + RUST_LOG = "engine=debug,hasura_authn_core=debug,hasura_authn_jwt=debug,hasura_authn_noauth=debug,hasura_authn_webhook=debug,lang_graphql=debug,open_dds=debug,schema=debug,metadata-resolve=debug"; }; healthcheck = { test = [ "CMD" "curl" "-f" "http://localhost:${port}/" ]; diff --git a/arion-compose/services/integration-tests.nix b/arion-compose/services/integration-tests.nix index e25d3770..00d55c4e 100644 --- a/arion-compose/services/integration-tests.nix +++ b/arion-compose/services/integration-tests.nix @@ -1,6 +1,7 @@ { pkgs , connector-url , connector-chinook-url +, connector-test-cases-url , engine-graphql-url , service ? 
{ } # additional options to customize this service configuration }: @@ -16,6 +17,7 @@ let environment = { CONNECTOR_URL = connector-url; CONNECTOR_CHINOOK_URL = connector-chinook-url; + CONNECTOR_TEST_CASES_URL = connector-test-cases-url; ENGINE_GRAPHQL_URL = engine-graphql-url; INSTA_WORKSPACE_ROOT = repo-source-mount-point; MONGODB_IMAGE = builtins.getEnv "MONGODB_IMAGE"; diff --git a/connector-definition/connector-metadata.yaml b/connector-definition/connector-metadata.yaml index 49d06552..c05bbe82 100644 --- a/connector-definition/connector-metadata.yaml +++ b/connector-definition/connector-metadata.yaml @@ -1,15 +1,47 @@ +version: v2 +ndcSpecGeneration: v0.2 packagingDefinition: type: PrebuiltDockerImage dockerImage: supportedEnvironmentVariables: - name: MONGODB_DATABASE_URI description: The URI for the MongoDB database +nativeToolchainDefinition: + commands: + start: + type: ShellScript + bash: | + #!/usr/bin/env bash + set -eu -o pipefail + HASURA_CONFIGURATION_DIRECTORY="$HASURA_PLUGIN_CONNECTOR_CONTEXT_PATH" "$HASURA_DDN_NATIVE_CONNECTOR_DIR/mongodb-connector" serve + powershell: | + $ErrorActionPreference = "Stop" + $env:HASURA_CONFIGURATION_DIRECTORY="$env:HASURA_PLUGIN_CONNECTOR_CONTEXT_PATH"; & "$env:HASURA_DDN_NATIVE_CONNECTOR_DIR\mongodb-connector.exe" serve + update: + type: ShellScript + bash: | + #!/usr/bin/env bash + set -eu -o pipefail + "$HASURA_DDN_NATIVE_CONNECTOR_PLUGIN_DIR/hasura-ndc-mongodb" update + powershell: | + $ErrorActionPreference = "Stop" + & "$env:HASURA_DDN_NATIVE_CONNECTOR_PLUGIN_DIR\hasura-ndc-mongodb.exe" update + watch: + type: ShellScript + bash: | + #!/usr/bin/env bash + echo "Watch is not supported for this connector" + exit 1 + powershell: | + Write-Output "Watch is not supported for this connector" + exit 1 commands: update: hasura-ndc-mongodb update cliPlugin: name: ndc-mongodb - version: + version: dockerComposeWatch: - path: ./ target: /etc/connector - action: sync+restart \ No newline at end of file + action: sync+restart +documentationPage: "https://hasura.info/mongodb-getting-started" diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 031d7891..64d1b3ce 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -3,6 +3,10 @@ name = "mongodb-cli-plugin" edition = "2021" version.workspace = true +[features] +default = ["native-query-subcommand"] +native-query-subcommand = ["dep:pretty", "dep:nom", "dep:textwrap"] + [dependencies] configuration = { path = "../configuration" } mongodb-agent-common = { path = "../mongodb-agent-common" } @@ -11,16 +15,30 @@ mongodb-support = { path = "../mongodb-support" } anyhow = "1.0.80" clap = { version = "4.5.1", features = ["derive", "env"] } +enum-iterator = "^2.0.0" futures-util = "0.3.28" +indent = "^0.1.1" indexmap = { workspace = true } itertools = { workspace = true } +json-structural-diff = "^0.2.0" ndc-models = { workspace = true } -serde = { version = "1.0", features = ["derive"] } -serde_json = { version = "1.0.113", features = ["raw_value"] } +nom = { version = "^7.1.3", optional = true } +nonempty = { workspace = true } +pretty = { version = "^0.12.3", features = ["termcolor"], optional = true } +ref-cast = { workspace = true } +regex = "^1.11.1" +serde = { workspace = true } +serde_json = { workspace = true } +textwrap = { version = "^0.16.1", optional = true } thiserror = "1.0.57" tokio = { version = "1.36.0", features = ["full"] } [dev-dependencies] -test-helpers = { path = "../test-helpers" } +mongodb-agent-common = { path = "../mongodb-agent-common", features = 
["test-helpers"] } +async-tempfile = "^0.6.0" +googletest = "^0.13.0" +pretty_assertions = "1.4" proptest = "1" +ndc-test-helpers = { path = "../ndc-test-helpers" } +test-helpers = { path = "../test-helpers" } diff --git a/crates/cli/proptest-regressions/introspection/type_unification.txt b/crates/cli/proptest-regressions/introspection/type_unification.txt index 77460802..1dc172d2 100644 --- a/crates/cli/proptest-regressions/introspection/type_unification.txt +++ b/crates/cli/proptest-regressions/introspection/type_unification.txt @@ -9,3 +9,4 @@ cc e7368f0503761c52e2ce47fa2e64454ecd063f2e019c511759162d0be049e665 # shrinks to cc bd6f440b7ea7e51d8c369e802b8cbfbc0c3f140c01cd6b54d9c61e6d84d7e77d # shrinks to c = TypeUnificationContext { object_type_name: "", field_name: "" }, t = Nullable(Scalar(Null)) cc d16279848ea51c4be376436423d342afd077a737efcab03ba2d29d5a0dee9df2 # shrinks to left = {"": Scalar(Double)}, right = {"": Scalar(Decimal)}, shared = {} cc fc85c97eeccb12e144f548fe65fd262d4e7b1ec9c799be69fd30535aa032e26d # shrinks to ta = Nullable(Scalar(Null)), tb = Nullable(Scalar(Undefined)) +cc 57b3015ca6d70f8e1975e21132e7624132bfe3bf958475473e5d1027c59dc7d9 # shrinks to t = Predicate { object_type_name: ObjectTypeName(TypeName("A")) } diff --git a/crates/cli/proptest-regressions/native_query/type_annotation.txt b/crates/cli/proptest-regressions/native_query/type_annotation.txt new file mode 100644 index 00000000..f2148756 --- /dev/null +++ b/crates/cli/proptest-regressions/native_query/type_annotation.txt @@ -0,0 +1,10 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc 525ecaf39caf362837e1addccbf4e0f4301e7e0ad1f84047a952b6ac710f795f # shrinks to t = Scalar(Double) +cc 893face3f71cf906a1a089e94527e12d36882624d651797754b0d622f7af7680 # shrinks to t = Scalar(JavascriptWithScope) +cc 6500920ee0ab41ac265301e4afdc05438df74f2b92112a7c0c1ccb59f056071c # shrinks to t = ArrayOf(Scalar(Double)) +cc adf516fe79b0dc9248c54a23f8b301ad1e2a3280081cde3f89586e4b5ade1065 # shrinks to t = Nullable(Nullable(Scalar(Double))) diff --git a/crates/cli/src/exit_codes.rs b/crates/cli/src/exit_codes.rs new file mode 100644 index 00000000..a8d7c246 --- /dev/null +++ b/crates/cli/src/exit_codes.rs @@ -0,0 +1,24 @@ +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ExitCode { + CouldNotReadAggregationPipeline, + CouldNotReadConfiguration, + CouldNotProcessAggregationPipeline, + ErrorWriting, + InvalidArguments, + RefusedToOverwrite, + ResourceNotFound, +} + +impl From for i32 { + fn from(value: ExitCode) -> Self { + match value { + ExitCode::CouldNotReadAggregationPipeline => 201, + ExitCode::CouldNotReadConfiguration => 202, + ExitCode::CouldNotProcessAggregationPipeline => 205, + ExitCode::ErrorWriting => 204, + ExitCode::InvalidArguments => 400, + ExitCode::RefusedToOverwrite => 203, + ExitCode::ResourceNotFound => 404, + } + } +} diff --git a/crates/cli/src/introspection/sampling.rs b/crates/cli/src/introspection/sampling.rs index c01360ca..78df3302 100644 --- a/crates/cli/src/introspection/sampling.rs +++ b/crates/cli/src/introspection/sampling.rs @@ -1,20 +1,76 @@ -use std::collections::{BTreeMap, HashSet}; +mod keep_backward_compatible_changes; + +use std::collections::BTreeMap; use crate::log_warning; use super::type_unification::{make_nullable_field, unify_object_types, unify_type}; use configuration::{ - schema::{self, Type}, + schema::{self, Collection, CollectionSchema, ObjectTypes, Type}, Schema, WithName, }; use futures_util::TryStreamExt; -use mongodb::bson::{doc, Bson, Document}; -use mongodb_agent_common::state::ConnectorState; -use mongodb_support::BsonScalarType::{self, *}; +use json_structural_diff::JsonDiff; +use mongodb::bson::{doc, spec::BinarySubtype, Binary, Bson, Document}; +use mongodb_agent_common::mongodb::{CollectionTrait as _, DatabaseTrait}; +use mongodb_support::{ + aggregate::{Pipeline, Stage}, + BsonScalarType::{self, self as S}, +}; +use ndc_models::{CollectionName, ObjectTypeName}; + +use self::keep_backward_compatible_changes::keep_backward_compatible_changes; type ObjectField = WithName; type ObjectType = WithName; +#[derive(Default)] +pub struct SampledSchema { + pub schemas: BTreeMap, + + /// Updates to existing schema changes are made conservatively. These diffs show the difference + /// between each new configuration to be written to disk on the left, and the schema that would + /// have been written if starting from scratch on the right. 
+ pub ignored_changes: BTreeMap, +} + +impl SampledSchema { + pub fn insert_collection( + &mut self, + name: impl std::fmt::Display, + collection: CollectionSchema, + ) { + self.schemas.insert( + name.to_string(), + Self::schema_from_collection(name, collection), + ); + } + + pub fn record_ignored_collection_changes( + &mut self, + name: impl std::fmt::Display, + before: &CollectionSchema, + after: &CollectionSchema, + ) -> Result<(), serde_json::error::Error> { + let a = serde_json::to_value(Self::schema_from_collection(&name, before.clone()))?; + let b = serde_json::to_value(Self::schema_from_collection(&name, after.clone()))?; + if let Some(diff) = JsonDiff::diff_string(&a, &b, false) { + self.ignored_changes.insert(name.to_string(), diff); + } + Ok(()) + } + + fn schema_from_collection( + name: impl std::fmt::Display, + collection: CollectionSchema, + ) -> Schema { + Schema { + collections: [(name.to_string().into(), collection.collection)].into(), + object_types: collection.object_types, + } + } +} + /// Sample from all collections in the database and return a Schema. /// Return an error if there are any errors accessing the database /// or if the types derived from the sample documents for a collection @@ -22,51 +78,110 @@ type ObjectType = WithName; pub async fn sample_schema_from_db( sample_size: u32, all_schema_nullable: bool, - config_file_changed: bool, - state: &ConnectorState, - existing_schemas: &HashSet, -) -> anyhow::Result> { - let mut schemas = BTreeMap::new(); - let db = state.database(); - let mut collections_cursor = db.list_collections(None, None).await?; + db: &impl DatabaseTrait, + mut previously_defined_collections: BTreeMap, +) -> anyhow::Result { + let mut sampled_schema: SampledSchema = Default::default(); + let mut collections_cursor = db.list_collections().await?; while let Some(collection_spec) = collections_cursor.try_next().await? { let collection_name = collection_spec.name; - if !existing_schemas.contains(&collection_name) || config_file_changed { - let collection_schema = sample_schema_from_collection( - &collection_name, - sample_size, - all_schema_nullable, - state, - ) - .await?; - if let Some(collection_schema) = collection_schema { - schemas.insert(collection_name, collection_schema); - } else { - log_warning!("could not find any documents to sample from collection, {collection_name} - skipping"); - } + + // The `system.*` namespace is reserved for internal use. In some deployments, such as + // MongoDB v6 running on Atlas, aggregate permissions are denied for `system.views` which + // causes introspection to fail. So we skip those collections. 
+ if collection_name.starts_with("system.") { + log_warning!("collection {collection_name} is under the system namespace which is reserved for internal use - skipping"); + continue; } + + let previously_defined_collection = + previously_defined_collections.remove(collection_name.as_str()); + + // Use previously-defined type name in case user has customized it + let collection_type_name = previously_defined_collection + .as_ref() + .map(|c| c.collection.r#type.clone()) + .unwrap_or_else(|| collection_name.clone().into()); + + let sample_result = match sample_schema_from_collection( + &collection_name, + collection_type_name.clone(), + sample_size, + all_schema_nullable, + db, + ) + .await + { + Ok(schema) => schema, + Err(err) => { + let indented_error = indent::indent_all_by(2, err.to_string()); + log_warning!( + "an error occurred attempting to sample collection, {collection_name} - skipping\n{indented_error}" + ); + continue; + } + }; + + let Some(collection_schema) = sample_result else { + log_warning!("could not find any documents to sample from collection, {collection_name} - skipping"); + continue; + }; + + let collection_schema = match previously_defined_collection { + Some(previously_defined_collection) => { + let backward_compatible_schema = keep_backward_compatible_changes( + previously_defined_collection, + collection_schema.object_types.clone(), + ); + let _ = sampled_schema.record_ignored_collection_changes( + &collection_name, + &backward_compatible_schema, + &collection_schema, + ); + let updated_collection = Collection { + r#type: collection_type_name, + description: collection_schema + .collection + .description + .or(backward_compatible_schema.collection.description), + }; + CollectionSchema { + collection: updated_collection, + object_types: backward_compatible_schema.object_types, + } + } + None => collection_schema, + }; + + sampled_schema.insert_collection(collection_name, collection_schema); } - Ok(schemas) + + Ok(sampled_schema) } async fn sample_schema_from_collection( collection_name: &str, + collection_type_name: ObjectTypeName, sample_size: u32, all_schema_nullable: bool, - state: &ConnectorState, -) -> anyhow::Result> { - let db = state.database(); + db: &impl DatabaseTrait, +) -> anyhow::Result> { let options = None; let mut cursor = db - .collection::(collection_name) - .aggregate(vec![doc! {"$sample": { "size": sample_size }}], options) + .collection(collection_name) + .aggregate( + Pipeline::new(vec![Stage::Other(doc! { + "$sample": { "size": sample_size } + })]), + options, + ) .await?; let mut collected_object_types = vec![]; let is_collection_type = true; while let Some(document) = cursor.try_next().await? 
{ let object_types = make_object_type( - &collection_name.into(), + &collection_type_name, &document, is_collection_type, all_schema_nullable, @@ -80,21 +195,18 @@ async fn sample_schema_from_collection( if collected_object_types.is_empty() { Ok(None) } else { - let collection_info = WithName::named( - collection_name.into(), - schema::Collection { - description: None, - r#type: collection_name.into(), - }, - ); - Ok(Some(Schema { - collections: WithName::into_map([collection_info]), + let collection_info = schema::Collection { + description: None, + r#type: collection_type_name, + }; + Ok(Some(CollectionSchema { + collection: collection_info, object_types: WithName::into_map(collected_object_types), })) } } -fn make_object_type( +pub fn make_object_type( object_type_name: &ndc_models::ObjectTypeName, document: &Document, is_collection_type: bool, @@ -178,12 +290,12 @@ fn make_field_type( (vec![], Type::Scalar(t)) } match field_value { - Bson::Double(_) => scalar(Double), - Bson::String(_) => scalar(String), + Bson::Double(_) => scalar(S::Double), + Bson::String(_) => scalar(S::String), Bson::Array(arr) => { // Examine all elements of the array and take the union of the resulting types. let mut collected_otds = vec![]; - let mut result_type = Type::Scalar(Undefined); + let mut result_type = Type::Scalar(S::Undefined); for elem in arr { let (elem_collected_otds, elem_type) = make_field_type(object_type_name, elem, all_schema_nullable); @@ -206,23 +318,29 @@ fn make_field_type( ); (collected_otds, Type::Object(object_type_name.to_owned())) } - Bson::Boolean(_) => scalar(Bool), - Bson::Null => scalar(Null), - Bson::RegularExpression(_) => scalar(Regex), - Bson::JavaScriptCode(_) => scalar(Javascript), - Bson::JavaScriptCodeWithScope(_) => scalar(JavascriptWithScope), - Bson::Int32(_) => scalar(Int), - Bson::Int64(_) => scalar(Long), - Bson::Timestamp(_) => scalar(Timestamp), - Bson::Binary(_) => scalar(BinData), - Bson::ObjectId(_) => scalar(ObjectId), - Bson::DateTime(_) => scalar(Date), - Bson::Symbol(_) => scalar(Symbol), - Bson::Decimal128(_) => scalar(Decimal), - Bson::Undefined => scalar(Undefined), - Bson::MaxKey => scalar(MaxKey), - Bson::MinKey => scalar(MinKey), - Bson::DbPointer(_) => scalar(DbPointer), + Bson::Boolean(_) => scalar(S::Bool), + Bson::Null => scalar(S::Null), + Bson::RegularExpression(_) => scalar(S::Regex), + Bson::JavaScriptCode(_) => scalar(S::Javascript), + Bson::JavaScriptCodeWithScope(_) => scalar(S::JavascriptWithScope), + Bson::Int32(_) => scalar(S::Int), + Bson::Int64(_) => scalar(S::Long), + Bson::Timestamp(_) => scalar(S::Timestamp), + Bson::Binary(Binary { subtype, .. 
}) => { + if *subtype == BinarySubtype::Uuid { + scalar(S::UUID) + } else { + scalar(S::BinData) + } + } + Bson::ObjectId(_) => scalar(S::ObjectId), + Bson::DateTime(_) => scalar(S::Date), + Bson::Symbol(_) => scalar(S::Symbol), + Bson::Decimal128(_) => scalar(S::Decimal), + Bson::Undefined => scalar(S::Undefined), + Bson::MaxKey => scalar(S::MaxKey), + Bson::MinKey => scalar(S::MinKey), + Bson::DbPointer(_) => scalar(S::DbPointer), } } diff --git a/crates/cli/src/introspection/sampling/keep_backward_compatible_changes.rs b/crates/cli/src/introspection/sampling/keep_backward_compatible_changes.rs new file mode 100644 index 00000000..6f710cad --- /dev/null +++ b/crates/cli/src/introspection/sampling/keep_backward_compatible_changes.rs @@ -0,0 +1,156 @@ +use std::collections::BTreeMap; + +use configuration::schema::{CollectionSchema, ObjectField, ObjectType, Type}; +use itertools::Itertools as _; +use ndc_models::ObjectTypeName; + +use super::ObjectTypes; + +pub fn keep_backward_compatible_changes( + existing_collection: CollectionSchema, + mut updated_object_types: ObjectTypes, +) -> CollectionSchema { + let mut accumulated_new_object_types = Default::default(); + let CollectionSchema { + collection, + object_types: mut previously_defined_object_types, + } = existing_collection; + backward_compatible_helper( + &mut previously_defined_object_types, + &mut updated_object_types, + &mut accumulated_new_object_types, + collection.r#type.clone(), + ); + CollectionSchema { + collection, + object_types: accumulated_new_object_types, + } +} + +fn backward_compatible_helper( + previously_defined_object_types: &mut ObjectTypes, + updated_object_types: &mut ObjectTypes, + accumulated_new_object_types: &mut ObjectTypes, + type_name: ObjectTypeName, +) { + if accumulated_new_object_types.contains_key(&type_name) { + return; + } + let existing = previously_defined_object_types.remove(&type_name); + let updated = updated_object_types.remove(&type_name); + match (existing, updated) { + (Some(existing), Some(updated)) => { + let object_type = backward_compatible_object_type( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + existing, + updated, + ); + accumulated_new_object_types.insert(type_name, object_type); + } + (Some(existing), None) => { + accumulated_new_object_types.insert(type_name, existing.clone()); + } + (None, Some(updated)) => { + accumulated_new_object_types.insert(type_name, updated); + } + // shouldn't be reachable + (None, None) => (), + } +} + +fn backward_compatible_object_type( + previously_defined_object_types: &mut ObjectTypes, + updated_object_types: &mut ObjectTypes, + accumulated_new_object_types: &mut ObjectTypes, + existing: ObjectType, + mut updated: ObjectType, +) -> ObjectType { + let field_names = updated + .fields + .keys() + .chain(existing.fields.keys()) + .unique() + .cloned() + .collect_vec(); + let fields = field_names + .into_iter() + .map(|name| { + let existing_field = existing.fields.get(&name); + let updated_field = updated.fields.remove(&name); + let field = match (existing_field, updated_field) { + (Some(existing_field), Some(updated_field)) => { + let r#type = reconcile_types( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + existing_field.r#type.clone(), + updated_field.r#type, + ); + ObjectField { + description: existing.description.clone().or(updated_field.description), + r#type, + } + } + (Some(existing_field), None) => existing_field.clone(), + (None, Some(updated_field)) 
=> updated_field, + (None, None) => unreachable!(), + }; + (name.clone(), field) + }) + .collect(); + ObjectType { + description: existing.description.clone().or(updated.description), + fields, + } +} + +fn reconcile_types( + previously_defined_object_types: &mut BTreeMap, + updated_object_types: &mut BTreeMap, + accumulated_new_object_types: &mut BTreeMap, + existing_type: Type, + updated_type: Type, +) -> Type { + match (existing_type, updated_type) { + (Type::Nullable(a), Type::Nullable(b)) => Type::Nullable(Box::new(reconcile_types( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + *a, + *b, + ))), + (Type::Nullable(a), b) => Type::Nullable(Box::new(reconcile_types( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + *a, + b, + ))), + (a, Type::Nullable(b)) => reconcile_types( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + a, + *b, + ), + (Type::ArrayOf(a), Type::ArrayOf(b)) => Type::ArrayOf(Box::new(reconcile_types( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + *a, + *b, + ))), + (Type::Object(_), Type::Object(b)) => { + backward_compatible_helper( + previously_defined_object_types, + updated_object_types, + accumulated_new_object_types, + b.clone().into(), + ); + Type::Object(b) + } + (a, _) => a, + } +} diff --git a/crates/cli/src/introspection/type_unification.rs b/crates/cli/src/introspection/type_unification.rs index dd813f3c..fc4216be 100644 --- a/crates/cli/src/introspection/type_unification.rs +++ b/crates/cli/src/introspection/type_unification.rs @@ -48,13 +48,9 @@ pub fn unify_type(type_a: Type, type_b: Type) -> Type { // Scalar types unify if they are the same type, or if one is a superset of the other. // If they are diffferent then the union is ExtendedJSON. (Type::Scalar(scalar_a), Type::Scalar(scalar_b)) => { - if scalar_a == scalar_b || is_supertype(&scalar_a, &scalar_b) { - Type::Scalar(scalar_a) - } else if is_supertype(&scalar_b, &scalar_a) { - Type::Scalar(scalar_b) - } else { - Type::ExtendedJSON - } + BsonScalarType::common_supertype(scalar_a, scalar_b) + .map(Type::Scalar) + .unwrap_or(Type::ExtendedJSON) } // Object types unify if they have the same name. @@ -67,6 +63,25 @@ pub fn unify_type(type_a: Type, type_b: Type) -> Type { } } + // Predicate types unify if they have the same name. + // If they are diffferent then the union is ExtendedJSON. + ( + Type::Predicate { + object_type_name: object_a, + }, + Type::Predicate { + object_type_name: object_b, + }, + ) => { + if object_a == object_b { + Type::Predicate { + object_type_name: object_a, + } + } else { + Type::ExtendedJSON + } + } + // Array types unify iff their element types unify. (Type::ArrayOf(elem_type_a), Type::ArrayOf(elem_type_b)) => { let elem_type = unify_type(*elem_type_a, *elem_type_b); @@ -173,20 +188,6 @@ pub fn unify_object_types( merged_type_map.into_values().collect() } -/// True iff we consider a to be a supertype of b. -/// -/// Note that if you add more supertypes here then it is important to also update the custom -/// equality check in our tests in mongodb_agent_common::query::serialization::tests. Equality -/// needs to be transitive over supertypes, so for example if we have, -/// -/// (Double, Int), (Decimal, Double) -/// -/// then in addition to comparing ints to doubles, and doubles to decimals, we also need to compare -/// decimals to ints. 
-fn is_supertype(a: &BsonScalarType, b: &BsonScalarType) -> bool { - matches!((a, b), (Double, Int)) -} - #[cfg(test)] mod tests { use std::collections::{HashMap, HashSet}; diff --git a/crates/cli/src/introspection/validation_schema.rs b/crates/cli/src/introspection/validation_schema.rs index 78ee7d25..f90b0122 100644 --- a/crates/cli/src/introspection/validation_schema.rs +++ b/crates/cli/src/introspection/validation_schema.rs @@ -7,8 +7,8 @@ use configuration::{ use futures_util::TryStreamExt; use mongodb::bson::from_bson; use mongodb_agent_common::{ + mongodb::DatabaseTrait, schema::{get_property_description, Property, ValidatorSchema}, - state::ConnectorState, }; use mongodb_support::BsonScalarType; @@ -19,10 +19,9 @@ type ObjectType = WithName; type ObjectField = WithName; pub async fn get_metadata_from_validation_schema( - state: &ConnectorState, + db: &impl DatabaseTrait, ) -> Result, MongoAgentError> { - let db = state.database(); - let mut collections_cursor = db.list_collections(None, None).await?; + let mut collections_cursor = db.list_collections().await?; let mut schemas: Vec> = vec![]; @@ -37,7 +36,11 @@ pub async fn get_metadata_from_validation_schema( if let Some(schema_bson) = schema_bson_option { let validator_schema = from_bson::(schema_bson.clone()).map_err(|err| { - MongoAgentError::BadCollectionSchema(name.to_owned(), schema_bson.clone(), err) + MongoAgentError::BadCollectionSchema(Box::new(( + name.to_owned(), + schema_bson.clone(), + err, + ))) })?; let collection_schema = make_collection_schema(name, &validator_schema); schemas.push(collection_schema); @@ -148,10 +151,12 @@ fn make_field_type(object_type_name: &str, prop_schema: &Property) -> (Vec (vec![], Type::ExtendedJSON), + Property::Object { description: _, required, - properties, + properties: Some(properties), } => { let type_prefix = format!("{object_type_name}_"); let (otds, otd_fields): (Vec>, Vec) = properties @@ -173,7 +178,6 @@ fn make_field_type(object_type_name: &str, prop_schema: &Property) -> (Vec { diff --git a/crates/cli/src/lib.rs b/crates/cli/src/lib.rs index 1baef324..95f90e13 100644 --- a/crates/cli/src/lib.rs +++ b/crates/cli/src/lib.rs @@ -1,15 +1,25 @@ //! The interpretation of the commands that the CLI can handle. +mod exit_codes; mod introspection; mod logging; +#[cfg(test)] +mod tests; + +#[cfg(feature = "native-query-subcommand")] +mod native_query; use std::path::PathBuf; use clap::{Parser, Subcommand}; +use configuration::SCHEMA_DIRNAME; +use introspection::sampling::SampledSchema; // Exported for use in tests pub use introspection::type_from_bson; -use mongodb_agent_common::state::ConnectorState; +use mongodb_agent_common::{mongodb::DatabaseTrait, state::try_init_state_from_uri}; +#[cfg(feature = "native-query-subcommand")] +pub use native_query::native_query_from_pipeline; #[derive(Debug, Clone, Parser)] pub struct UpdateArgs { @@ -28,26 +38,41 @@ pub struct UpdateArgs { pub enum Command { /// Update the configuration by introspecting the database, using the configuration options. Update(UpdateArgs), + + #[cfg(feature = "native-query-subcommand")] + #[command(subcommand)] + NativeQuery(native_query::Command), } pub struct Context { pub path: PathBuf, - pub connector_state: ConnectorState, + pub connection_uri: Option, + pub display_color: bool, } /// Run a command in a given directory. 
pub async fn run(command: Command, context: &Context) -> anyhow::Result<()> { match command { - Command::Update(args) => update(context, &args).await?, + Command::Update(args) => { + let connector_state = try_init_state_from_uri(context.connection_uri.as_ref()).await?; + update(context, &args, &connector_state.database()).await? + } + + #[cfg(feature = "native-query-subcommand")] + Command::NativeQuery(command) => native_query::run(context, command).await?, }; Ok(()) } /// Update the configuration in the current directory by introspecting the database. -async fn update(context: &Context, args: &UpdateArgs) -> anyhow::Result<()> { +async fn update( + context: &Context, + args: &UpdateArgs, + database: &impl DatabaseTrait, +) -> anyhow::Result<()> { let configuration_options = - configuration::parse_configuration_options_file(&context.path).await; - // Prefer arguments passed to cli, and fallback to the configuration file + configuration::parse_configuration_options_file(&context.path).await?; + // Prefer arguments passed to cli, and fall back to the configuration file let sample_size = match args.sample_size { Some(size) => size, None => configuration_options.introspection_options.sample_size, @@ -68,22 +93,42 @@ async fn update(context: &Context, args: &UpdateArgs) -> anyhow::Result<()> { .all_schema_nullable } }; - let config_file_changed = configuration::get_config_file_changed(&context.path).await?; if !no_validator_schema { let schemas_from_json_validation = - introspection::get_metadata_from_validation_schema(&context.connector_state).await?; + introspection::get_metadata_from_validation_schema(database).await?; configuration::write_schema_directory(&context.path, schemas_from_json_validation).await?; } - let existing_schemas = configuration::list_existing_schemas(&context.path).await?; - let schemas_from_sampling = introspection::sample_schema_from_db( + let existing_schemas = configuration::read_existing_schemas(&context.path).await?; + let SampledSchema { + schemas: schemas_from_sampling, + ignored_changes, + } = introspection::sample_schema_from_db( sample_size, all_schema_nullable, - config_file_changed, - &context.connector_state, - &existing_schemas, + database, + existing_schemas, ) .await?; - configuration::write_schema_directory(&context.path, schemas_from_sampling).await + configuration::write_schema_directory(&context.path, schemas_from_sampling).await?; + + if !ignored_changes.is_empty() { + eprintln!("Warning: introspection detected some changes to to database that were **not** applied to existing +schema configurations. To avoid accidental breaking changes the introspection system is +conservative about what changes are applied automatically."); + eprintln!(); + eprintln!("To apply changes delete the schema configuration files you want updated, and run introspection +again; or edit the files directly."); + eprintln!(); + eprintln!("These database changes were **not** applied:"); + } + for (collection_name, changes) in ignored_changes { + let mut config_path = context.path.join(SCHEMA_DIRNAME).join(collection_name); + config_path.set_extension("json"); + eprintln!(); + eprintln!("{}:", config_path.to_string_lossy()); + eprintln!("{}", changes) + } + Ok(()) } diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index 9b1752e4..c358be99 100644 --- a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -3,12 +3,11 @@ //! This is intended to be automatically downloaded and invoked via the Hasura CLI, as a plugin. //! 
It is unlikely that end-users will use it directly. -use anyhow::anyhow; use std::env; use std::path::PathBuf; use clap::{Parser, ValueHint}; -use mongodb_agent_common::state::{try_init_state_from_uri, DATABASE_URI_ENV_VAR}; +use mongodb_agent_common::state::DATABASE_URI_ENV_VAR; use mongodb_cli_plugin::{run, Command, Context}; /// The command-line arguments. @@ -17,6 +16,7 @@ pub struct Args { /// The path to the configuration. Defaults to the current directory. #[arg( long = "context-path", + short = 'p', env = "HASURA_PLUGIN_CONNECTOR_CONTEXT_PATH", value_name = "DIRECTORY", value_hint = ValueHint::DirPath @@ -31,6 +31,10 @@ pub struct Args { )] pub connection_uri: Option, + /// Disable color in command output. + #[arg(long = "no-color", short = 'C')] + pub no_color: bool, + /// The command to invoke. #[command(subcommand)] pub subcommand: Command, @@ -46,16 +50,10 @@ pub async fn main() -> anyhow::Result<()> { Some(path) => path, None => env::current_dir()?, }; - let connection_uri = args.connection_uri.ok_or(anyhow!( - "Missing environment variable {}", - DATABASE_URI_ENV_VAR - ))?; - let connector_state = try_init_state_from_uri(&connection_uri) - .await - .map_err(|e| anyhow!("Error initializing MongoDB state {}", e))?; let context = Context { path, - connector_state, + connection_uri: args.connection_uri, + display_color: !args.no_color, }; run(args.subcommand, &context).await?; Ok(()) diff --git a/crates/cli/src/native_query/aggregation-operator-progress.md b/crates/cli/src/native_query/aggregation-operator-progress.md new file mode 100644 index 00000000..16a4ef8d --- /dev/null +++ b/crates/cli/src/native_query/aggregation-operator-progress.md @@ -0,0 +1,280 @@ +Arithmetic Expression Operators + +- [x] $abs - Returns the absolute value of a number. +- [x] $add - Adds numbers to return the sum, or adds numbers and a date to return a new date. If adding numbers and a date, treats the numbers as milliseconds. Accepts any number of argument expressions, but at most, one expression can resolve to a date. +- [ ] $ceil - Returns the smallest integer greater than or equal to the specified number. +- [x] $divide - Returns the result of dividing the first number by the second. Accepts two argument expressions. +- [ ] $exp - Raises e to the specified exponent. +- [ ] $floor - Returns the largest integer less than or equal to the specified number. +- [ ] $ln - Calculates the natural log of a number. +- [ ] $log - Calculates the log of a number in the specified base. +- [ ] $log10 - Calculates the log base 10 of a number. +- [ ] $mod - Returns the remainder of the first number divided by the second. Accepts two argument expressions. +- [x] $multiply - Multiplies numbers to return the product. Accepts any number of argument expressions. +- [ ] $pow - Raises a number to the specified exponent. +- [ ] $round - Rounds a number to to a whole integer or to a specified decimal place. +- [ ] $sqrt - Calculates the square root. +- [x] $subtract - Returns the result of subtracting the second value from the first. If the two values are numbers, return the difference. If the two values are dates, return the difference in milliseconds. If the two values are a date and a number in milliseconds, return the resulting date. Accepts two argument expressions. If the two values are a date and a number, specify the date argument first as it is not meaningful to subtract a date from a number. +- [ ] $trunc - Truncates a number to a whole integer or to a specified decimal place. 
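For reference, here is a small illustrative aggregation stage (hypothetical collection and field names) that sticks to arithmetic operators checked as implemented above — `$multiply`, `$add`, and `$abs` — assuming the surrounding stage is one the CLI can already infer types for:

```json
{
  "$project": {
    "orderTotal": { "$add": [{ "$multiply": ["$price", "$quantity"] }, "$shipping"] },
    "absoluteAdjustment": { "$abs": "$adjustment" }
  }
}
```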
+ +Array Expression Operators + +- [x] $arrayElemAt - Returns the element at the specified array index. +- [ ] $arrayToObject - Converts an array of key value pairs to a document. +- [ ] $concatArrays - Concatenates arrays to return the concatenated array. +- [ ] $filter - Selects a subset of the array to return an array with only the elements that match the filter condition. +- [ ] $firstN - Returns a specified number of elements from the beginning of an array. Distinct from the $firstN accumulator. +- [ ] $in - Returns a boolean indicating whether a specified value is in an array. +- [ ] $indexOfArray - Searches an array for an occurrence of a specified value and returns the array index of the first occurrence. Array indexes start at zero. +- [ ] $isArray - Determines if the operand is an array. Returns a boolean. +- [ ] $lastN - Returns a specified number of elements from the end of an array. Distinct from the $lastN accumulator. +- [ ] $map - Applies a subexpression to each element of an array and returns the array of resulting values in order. Accepts named parameters. +- [ ] $maxN - Returns the n largest values in an array. Distinct from the $maxN accumulator. +- [ ] $minN - Returns the n smallest values in an array. Distinct from the $minN accumulator. +- [ ] $objectToArray - Converts a document to an array of documents representing key-value pairs. +- [ ] $range - Outputs an array containing a sequence of integers according to user-defined inputs. +- [ ] $reduce - Applies an expression to each element in an array and combines them into a single value. +- [ ] $reverseArray - Returns an array with the elements in reverse order. +- [ ] $size - Returns the number of elements in the array. Accepts a single expression as argument. +- [ ] $slice - Returns a subset of an array. +- [ ] $sortArray - Sorts the elements of an array. +- [ ] $zip - Merge two arrays together. + +Bitwise Operators + +- [ ] $bitAnd - Returns the result of a bitwise and operation on an array of int or long values. +- [ ] $bitNot - Returns the result of a bitwise not operation on a single argument or an array that contains a single int or long value. +- [ ] $bitOr - Returns the result of a bitwise or operation on an array of int or long values. +- [ ] $bitXor - Returns the result of a bitwise xor (exclusive or) operation on an array of int and long values. + +Boolean Expression Operators + +- [x] $and - Returns true only when all its expressions evaluate to true. Accepts any number of argument expressions. +- [x] $not - Returns the boolean value that is the opposite of its argument expression. Accepts a single argument expression. +- [x] $or - Returns true when any of its expressions evaluates to true. Accepts any number of argument expressions. + +Comparison Expression Operators + +- [ ] $cmp - Returns 0 if the two values are equivalent, 1 if the first value is greater than the second, and -1 if the first value is less than the second. +- [x] $eq - Returns true if the values are equivalent. +- [x] $gt - Returns true if the first value is greater than the second. +- [x] $gte - Returns true if the first value is greater than or equal to the second. +- [x] $lt - Returns true if the first value is less than the second. +- [x] $lte - Returns true if the first value is less than or equal to the second. +- [x] $ne - Returns true if the values are not equivalent. 
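+
+All of the comparison operators above are implemented, and they are the main way native query
+parameters pick up types: when a pipeline compares a `{{ placeholder }}` with a field or literal,
+the parameter is constrained to the type on the other side of the comparison. A rough sketch (the
+`runtime` field and `min_runtime` parameter names are made up, and the `doc!` form is used only
+for illustration - the CLI actually reads the pipeline as JSON from a file):
+
+```rust
+use mongodb::bson::{doc, Document};
+
+// Comparing the input document's `runtime` field with the `{{ min_runtime }}`
+// placeholder lets type inference give the parameter the same type as `runtime`.
+fn example_match_stage() -> Document {
+    doc! {
+        "$match": {
+            "$expr": { "$gte": ["$runtime", "{{ min_runtime }}"] }
+        }
+    }
+}
+```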
+ +Conditional Expression Operators + +- [ ] $cond - A ternary operator that evaluates one expression, and depending on the result, returns the value of one of the other two expressions. Accepts either three expressions in an ordered list or three named parameters. +- [ ] $ifNull - Returns either the non-null result of the first expression or the result of the second expression if the first expression results in a null result. Null result encompasses instances of undefined values or missing fields. Accepts two expressions as arguments. The result of the second expression can be null. +- [ ] $switch - Evaluates a series of case expressions. When it finds an expression which evaluates to true, $switch executes a specified expression and breaks out of the control flow. + +Custom Aggregation Expression Operators + +- [ ] $accumulator - Defines a custom accumulator function. +- [ ] $function - Defines a custom function. + +Data Size Operators + +- [ ] $binarySize - Returns the size of a given string or binary data value's content in bytes. +- [ ] $bsonSize - Returns the size in bytes of a given document (i.e. bsontype Object) when encoded as BSON. + +Date Expression Operators + +- [ ] $dateAdd - Adds a number of time units to a date object. +- [ ] $dateDiff - Returns the difference between two dates. +- [ ] $dateFromParts - Constructs a BSON Date object given the date's constituent parts. +- [ ] $dateFromString - Converts a date/time string to a date object. +- [ ] $dateSubtract - Subtracts a number of time units from a date object. +- [ ] $dateToParts - Returns a document containing the constituent parts of a date. +- [ ] $dateToString - Returns the date as a formatted string. +- [ ] $dateTrunc - Truncates a date. +- [ ] $dayOfMonth - Returns the day of the month for a date as a number between 1 and 31. +- [ ] $dayOfWeek - Returns the day of the week for a date as a number between 1 (Sunday) and 7 (Saturday). +- [ ] $dayOfYear - Returns the day of the year for a date as a number between 1 and 366 (leap year). +- [ ] $hour - Returns the hour for a date as a number between 0 and 23. +- [ ] $isoDayOfWeek - Returns the weekday number in ISO 8601 format, ranging from 1 (for Monday) to 7 (for Sunday). +- [ ] $isoWeek - Returns the week number in ISO 8601 format, ranging from 1 to 53. Week numbers start at 1 with the week (Monday through Sunday) that contains the year's first Thursday. +- [ ] $isoWeekYear - Returns the year number in ISO 8601 format. The year starts with the Monday of week 1 (ISO 8601) and ends with the Sunday of the last week (ISO 8601). +- [ ] $millisecond - Returns the milliseconds of a date as a number between 0 and 999. +- [ ] $minute - Returns the minute for a date as a number between 0 and 59. +- [ ] $month - Returns the month for a date as a number between 1 (January) and 12 (December). +- [ ] $second - Returns the seconds for a date as a number between 0 and 60 (leap seconds). +- [ ] $toDate - Converts value to a Date. +- [ ] $week - Returns the week number for a date as a number between 0 (the partial week that precedes the first Sunday of the year) and 53 (leap year). +- [ ] $year - Returns the year for a date as a number (e.g. 2014). + +The following arithmetic operators can take date operands: + +- [ ] $add - Adds numbers and a date to return a new date. If adding numbers and a date, treats the numbers as milliseconds. Accepts any number of argument expressions, but at most, one expression can resolve to a date. 
+- [ ] $subtract - Returns the result of subtracting the second value from the first. If the two values are dates, return the difference in milliseconds. If the two values are a date and a number in milliseconds, return the resulting date. Accepts two argument expressions. If the two values are a date and a number, specify the date argument first as it is not meaningful to subtract a date from a number. + +Literal Expression Operator + +- [ ] $literal - Return a value without parsing. Use for values that the aggregation pipeline may interpret as an expression. For example, use a $literal expression to a string that starts with a dollar sign ($) to avoid parsing as a field path. + +Miscellaneous Operators + +- [ ] $getField - Returns the value of a specified field from a document. You can use $getField to retrieve the value of fields with names that contain periods (.) or start with dollar signs ($). +- [ ] $rand - Returns a random float between 0 and 1 +- [ ] $sampleRate - Randomly select documents at a given rate. Although the exact number of documents selected varies on each run, the quantity chosen approximates the sample rate expressed as a percentage of the total number of documents. +- [ ] $toHashedIndexKey - Computes and returns the hash of the input expression using the same hash function that MongoDB uses to create a hashed index. + +Object Expression Operators + +- [ ] $mergeObjects - Combines multiple documents into a single document. +- [ ] $objectToArray - Converts a document to an array of documents representing key-value pairs. +- [ ] $setField - Adds, updates, or removes a specified field in a document. You can use $setField to add, update, or remove fields with names that contain periods (.) or start with dollar signs ($). + +Set Expression Operators + +- [x] $allElementsTrue - Returns true if no element of a set evaluates to false, otherwise, returns false. Accepts a single argument expression. +- [x] $anyElementTrue - Returns true if any elements of a set evaluate to true; otherwise, returns false. Accepts a single argument expression. +- [ ] $setDifference - Returns a set with elements that appear in the first set but not in the second set; i.e. performs a relative complement of the second set relative to the first. Accepts exactly two argument expressions. +- [ ] $setEquals - Returns true if the input sets have the same distinct elements. Accepts two or more argument expressions. +- [ ] $setIntersection - Returns a set with elements that appear in all of the input sets. Accepts any number of argument expressions. +- [ ] $setIsSubset - Returns true if all elements of the first set appear in the second set, including when the first set equals the second set; i.e. not a strict subset. Accepts exactly two argument expressions. +- [ ] $setUnion - Returns a set with elements that appear in any of the input sets. + +String Expression Operators + +- [ ] $concat - Concatenates any number of strings. +- [ ] $dateFromString - Converts a date/time string to a date object. +- [ ] $dateToString - Returns the date as a formatted string. +- [ ] $indexOfBytes - Searches a string for an occurrence of a substring and returns the UTF-8 byte index of the first occurrence. If the substring is not found, returns -1. +- [ ] $indexOfCP - Searches a string for an occurrence of a substring and returns the UTF-8 code point index of the first occurrence. If the substring is not found, returns -1 +- [ ] $ltrim - Removes whitespace or the specified characters from the beginning of a string. 
+- [ ] $regexFind - Applies a regular expression (regex) to a string and returns information on the first matched substring. +- [ ] $regexFindAll - Applies a regular expression (regex) to a string and returns information on the all matched substrings. +- [ ] $regexMatch - Applies a regular expression (regex) to a string and returns a boolean that indicates if a match is found or not. +- [ ] $replaceOne - Replaces the first instance of a matched string in a given input. +- [ ] $replaceAll - Replaces all instances of a matched string in a given input. +- [ ] $rtrim - Removes whitespace or the specified characters from the end of a string. +- [x] $split - Splits a string into substrings based on a delimiter. Returns an array of substrings. If the delimiter is not found within the string, returns an array containing the original string. +- [ ] $strLenBytes - Returns the number of UTF-8 encoded bytes in a string. +- [ ] $strLenCP - Returns the number of UTF-8 code points in a string. +- [ ] $strcasecmp - Performs case-insensitive string comparison and returns: 0 if two strings are equivalent, 1 if the first string is greater than the second, and -1 if the first string is less than the second. +- [ ] $substr - Deprecated. Use $substrBytes or $substrCP. +- [ ] $substrBytes - Returns the substring of a string. Starts with the character at the specified UTF-8 byte index (zero-based) in the string and continues for the specified number of bytes. +- [ ] $substrCP - Returns the substring of a string. Starts with the character at the specified UTF-8 code point (CP) +index (zero-based) in the string and continues for the number of code points specified. +- [ ] $toLower - Converts a string to lowercase. Accepts a single argument expression. +- [ ] $toString - Converts value to a string. +- [ ] $trim - Removes whitespace or the specified characters from the beginning and end of a string. +- [ ] $toUpper - Converts a string to uppercase. Accepts a single argument expression. + +Text Expression Operator + +- [ ] $meta - Access available per-document metadata related to the aggregation operation. + +Timestamp Expression Operators + +- [ ] $tsIncrement - Returns the incrementing ordinal from a timestamp as a long. +- [ ] $tsSecond - Returns the seconds from a timestamp as a long. + +Trigonometry Expression Operators + +- [x] $sin - Returns the sine of a value that is measured in radians. +- [x] $cos - Returns the cosine of a value that is measured in radians. +- [x] $tan - Returns the tangent of a value that is measured in radians. +- [x] $asin - Returns the inverse sin (arc sine) of a value in radians. +- [x] $acos - Returns the inverse cosine (arc cosine) of a value in radians. +- [x] $atan - Returns the inverse tangent (arc tangent) of a value in radians. +- [ ] $atan2 - Returns the inverse tangent (arc tangent) of y / x in radians, where y and x are the first and second values passed to the expression respectively. +- [x] $asinh - Returns the inverse hyperbolic sine (hyperbolic arc sine) of a value in radians. +- [x] $acosh - Returns the inverse hyperbolic cosine (hyperbolic arc cosine) of a value in radians. +- [x] $atanh - Returns the inverse hyperbolic tangent (hyperbolic arc tangent) of a value in radians. +- [x] $sinh - Returns the hyperbolic sine of a value that is measured in radians. +- [x] $cosh - Returns the hyperbolic cosine of a value that is measured in radians. +- [x] $tanh - Returns the hyperbolic tangent of a value that is measured in radians. 
+- [ ] $degreesToRadians - Converts a value from degrees to radians. +- [ ] $radiansToDegrees - Converts a value from radians to degrees. + +Type Expression Operators + +- [ ] $convert - Converts a value to a specified type. +- [ ] $isNumber - Returns boolean true if the specified expression resolves to an integer, decimal, double, or long. +- [ ] $toBool - Converts value to a boolean. +- [ ] $toDate - Converts value to a Date. +- [ ] $toDecimal - Converts value to a Decimal128. +- [ ] $toDouble - Converts value to a double. +- [ ] $toInt - Converts value to an integer. +- [ ] $toLong - Converts value to a long. +- [ ] $toObjectId - Converts value to an ObjectId. +- [ ] $toString - Converts value to a string. +- [ ] $type - Return the BSON data type of the field. +- [ ] $toUUID - Converts a string to a UUID. + +Accumulators ($group, $bucket, $bucketAuto, $setWindowFields) + +- [ ] $accumulator - Returns the result of a user-defined accumulator function. +- [ ] $addToSet - Returns an array of unique expression values for each group. Order of the array elements is undefined. +- [x] $avg - Returns an average of numerical values. Ignores non-numeric values. +- [ ] $bottom - Returns the bottom element within a group according to the specified sort order. +- [ ] $bottomN - Returns an aggregation of the bottom n fields within a group, according to the specified sort order. +- [x] $count - Returns the number of documents in a group. +- [ ] $first - Returns the result of an expression for the first document in a group. +- [ ] $firstN - Returns an aggregation of the first n elements within a group. Only meaningful when documents are in a defined order. Distinct from the $firstN array operator. +- [ ] $last - Returns the result of an expression for the last document in a group. +- [ ] $lastN - Returns an aggregation of the last n elements within a group. Only meaningful when documents are in a defined order. Distinct from the $lastN array operator. +- [x] $max - Returns the highest expression value for each group. +- [ ] $maxN - Returns an aggregation of the n maximum valued elements in a group. Distinct from the $maxN array operator. +- [ ] $median - Returns an approximation of the median, the 50th percentile, as a scalar value. +- [ ] $mergeObjects - Returns a document created by combining the input documents for each group. +- [x] $min - Returns the lowest expression value for each group. +- [ ] $minN - Returns an aggregation of the n minimum valued elements in a group. Distinct from the $minN array operator. +- [ ] $percentile - Returns an array of scalar values that correspond to specified percentile values. +- [x] $push - Returns an array of expression values for documents in each group. +- [ ] $stdDevPop - Returns the population standard deviation of the input values. +- [ ] $stdDevSamp - Returns the sample standard deviation of the input values. +- [x] $sum - Returns a sum of numerical values. Ignores non-numeric values. +- [ ] $top - Returns the top element within a group according to the specified sort order. +- [ ] $topN - Returns an aggregation of the top n fields within a group, according to the specified sort order. + +Accumulators (in Other Stages) + +- [ ] $avg - Returns an average of the specified expression or list of expressions for each document. Ignores non-numeric values. +- [ ] $first - Returns the result of an expression for the first document in a group. +- [ ] $last - Returns the result of an expression for the last document in a group. 
+- [ ] $max - Returns the maximum of the specified expression or list of expressions for each document +- [ ] $median - Returns an approximation of the median, the 50th percentile, as a scalar value. +- [ ] $min - Returns the minimum of the specified expression or list of expressions for each document +- [ ] $percentile - Returns an array of scalar values that correspond to specified percentile values. +- [ ] $stdDevPop - Returns the population standard deviation of the input values. +- [ ] $stdDevSamp - Returns the sample standard deviation of the input values. +- [ ] $sum - Returns a sum of numerical values. Ignores non-numeric values. + +Variable Expression Operators + +- [ ] $let - Defines variables for use within the scope of a subexpression and returns the result of the subexpression. Accepts named parameters. + +Window Operators + +- [ ] $addToSet - Returns an array of all unique values that results from applying an expression to each document. +- [ ] $avg - Returns the average for the specified expression. Ignores non-numeric values. +- [ ] $bottom - Returns the bottom element within a group according to the specified sort order. +- [ ] $bottomN - Returns an aggregation of the bottom n fields within a group, according to the specified sort order. +- [ ] $count - Returns the number of documents in the group or window. +- [ ] $covariancePop - Returns the population covariance of two numeric expressions. +- [ ] $covarianceSamp - Returns the sample covariance of two numeric expressions. +- [ ] $denseRank - Returns the document position (known as the rank) relative to other documents in the $setWindowFields stage partition. There are no gaps in the ranks. Ties receive the same rank. +- [ ] $derivative - Returns the average rate of change within the specified window. +- [ ] $documentNumber - Returns the position of a document (known as the document number) in the $setWindowFields stage partition. Ties result in different adjacent document numbers. +- [ ] $expMovingAvg - Returns the exponential moving average for the numeric expression. +- [ ] $first - Returns the result of an expression for the first document in a group or window. +- [ ] $integral - Returns the approximation of the area under a curve. +- [ ] $last - Returns the result of an expression for the last document in a group or window. +- [ ] $linearFill - Fills null and missing fields in a window using linear interpolation +- [ ] $locf - Last observation carried forward. Sets values for null and missing fields in a window to the last non-null value for the field. +- [ ] $max - Returns the maximum value that results from applying an expression to each document. +- [ ] $min - Returns the minimum value that results from applying an expression to each document. +- [ ] $minN - Returns an aggregation of the n minimum valued elements in a group. Distinct from the $minN array operator. +- [ ] $push - Returns an array of values that result from applying an expression to each document. +- [ ] $rank - Returns the document position (known as the rank) relative to other documents in the $setWindowFields stage partition. +- [ ] $shift - Returns the value from an expression applied to a document in a specified position relative to the current document in the $setWindowFields stage partition. +- [ ] $stdDevPop - Returns the population standard deviation that results from applying a numeric expression to each document. +- [ ] $stdDevSamp - Returns the sample standard deviation that results from applying a numeric expression to each document. 
+- [ ] $sum - Returns the sum that results from applying a numeric expression to each document. +- [ ] $top - Returns the top element within a group according to the specified sort order. +- [ ] $topN - Returns an aggregation of the top n fields within a group, according to the specified sort order. + diff --git a/crates/cli/src/native_query/aggregation_expression.rs b/crates/cli/src/native_query/aggregation_expression.rs new file mode 100644 index 00000000..0941249e --- /dev/null +++ b/crates/cli/src/native_query/aggregation_expression.rs @@ -0,0 +1,419 @@ +use std::collections::BTreeMap; + +use itertools::Itertools as _; +use mongodb::bson::{Bson, Document}; +use mongodb_support::BsonScalarType; +use nonempty::NonEmpty; + +use super::pipeline_type_context::PipelineTypeContext; + +use super::error::{Error, Result}; +use super::reference_shorthand::{parse_reference_shorthand, Reference}; +use super::type_constraint::{ObjectTypeConstraint, TypeConstraint, Variance}; + +use TypeConstraint as C; + +pub fn infer_type_from_aggregation_expression( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + type_hint: Option<&TypeConstraint>, + expression: Bson, +) -> Result { + let t = match expression { + Bson::Double(_) => C::Scalar(BsonScalarType::Double), + Bson::String(string) => infer_type_from_reference_shorthand(context, type_hint, &string)?, + Bson::Array(elems) => { + infer_type_from_array(context, desired_object_type_name, type_hint, elems)? + } + Bson::Document(doc) => infer_type_from_aggregation_expression_document( + context, + desired_object_type_name, + type_hint, + doc, + )?, + Bson::Boolean(_) => C::Scalar(BsonScalarType::Bool), + Bson::Null | Bson::Undefined => C::Scalar(BsonScalarType::Null), + Bson::RegularExpression(_) => C::Scalar(BsonScalarType::Regex), + Bson::JavaScriptCode(_) => C::Scalar(BsonScalarType::Javascript), + Bson::JavaScriptCodeWithScope(_) => C::Scalar(BsonScalarType::JavascriptWithScope), + Bson::Int32(_) => C::Scalar(BsonScalarType::Int), + Bson::Int64(_) => C::Scalar(BsonScalarType::Long), + Bson::Timestamp(_) => C::Scalar(BsonScalarType::Timestamp), + Bson::Binary(_) => C::Scalar(BsonScalarType::BinData), + Bson::ObjectId(_) => C::Scalar(BsonScalarType::ObjectId), + Bson::DateTime(_) => C::Scalar(BsonScalarType::Date), + Bson::Symbol(_) => C::Scalar(BsonScalarType::Symbol), + Bson::Decimal128(_) => C::Scalar(BsonScalarType::Decimal), + Bson::MaxKey => C::Scalar(BsonScalarType::MaxKey), + Bson::MinKey => C::Scalar(BsonScalarType::MinKey), + Bson::DbPointer(_) => C::Scalar(BsonScalarType::DbPointer), + }; + Ok(t) +} + +pub fn infer_types_from_aggregation_expression_tuple( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + type_hint_for_elements: Option<&TypeConstraint>, + bson: Bson, +) -> Result> { + let tuple = match bson { + Bson::Array(exprs) => exprs + .into_iter() + .map(|expr| { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + type_hint_for_elements, + expr, + ) + }) + .collect::>>()?, + expr => Err(Error::Other(format!("expected array, but got {expr}")))?, + }; + Ok(tuple) +} + +fn infer_type_from_array( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + type_hint_for_entire_array: Option<&TypeConstraint>, + elements: Vec, +) -> Result { + let elem_type_hint = type_hint_for_entire_array.map(|hint| match hint { + C::ArrayOf(t) => *t.clone(), + t => C::ElementOf(Box::new(t.clone())), + }); + Ok(C::Union( + elements + .into_iter() + .map(|elem| { + 
infer_type_from_aggregation_expression( + context, + desired_object_type_name, + elem_type_hint.as_ref(), + elem, + ) + }) + .collect::>()?, + )) +} + +fn infer_type_from_aggregation_expression_document( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + type_hint_for_entire_object: Option<&TypeConstraint>, + mut document: Document, +) -> Result { + let mut expression_operators = document + .keys() + .filter(|key| key.starts_with("$")) + .collect_vec(); + let expression_operator = expression_operators.pop().map(ToString::to_string); + let is_empty = expression_operators.is_empty(); + match (expression_operator, is_empty) { + (_, false) => Err(Error::MultipleExpressionOperators(document)), + (Some(operator), _) => { + let operands = document.remove(&operator).unwrap(); + infer_type_from_operator_expression( + context, + desired_object_type_name, + type_hint_for_entire_object, + &operator, + operands, + ) + } + (None, _) => infer_type_from_document(context, desired_object_type_name, document), + } +} + +fn infer_type_from_operator_expression( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + type_hint: Option<&TypeConstraint>, + operator: &str, + operand: Bson, +) -> Result { + // NOTE: It is important to run inference on `operand` in every match arm even if we don't read + // the result because we need to check for uses of parameters. + let t = match operator { + // technically $abs returns the same *numeric* type as its input, and fails on other types + "$abs" => infer_type_from_aggregation_expression( + context, + desired_object_type_name, + type_hint.or(Some(&C::numeric())), + operand, + )?, + "$sin" | "$cos" | "$tan" | "$asin" | "$acos" | "$atan" | "$asinh" | "$acosh" | "$atanh" + | "$sinh" | "$cosh" | "$tanh" => { + type_for_trig_operator(infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::numeric()), + operand, + )?) 
+ } + "$add" | "$divide" | "$multiply" | "$subtract" => homogeneous_binary_operator_operand_type( + context, + desired_object_type_name, + Some(C::numeric()), + operator, + operand, + )?, + "$and" | "$or" => { + infer_types_from_aggregation_expression_tuple( + context, + desired_object_type_name, + None, + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$not" => { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::Scalar(BsonScalarType::Bool)), + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$eq" | "$ne" => { + homogeneous_binary_operator_operand_type( + context, + desired_object_type_name, + None, + operator, + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$gt" | "$gte" | "$lt" | "$lte" => { + homogeneous_binary_operator_operand_type( + context, + desired_object_type_name, + Some(C::comparable()), + operator, + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$allElementsTrue" => { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::ArrayOf(Box::new(C::Scalar(BsonScalarType::Bool)))), + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$anyElementTrue" => { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::ArrayOf(Box::new(C::Scalar(BsonScalarType::Bool)))), + operand, + )?; + C::Scalar(BsonScalarType::Bool) + } + "$arrayElemAt" => { + let (array_ref, idx) = two_parameter_operand(operator, operand)?; + let array_type = infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_arrayElemAt_array"), + type_hint.map(|t| C::ArrayOf(Box::new(t.clone()))).as_ref(), + array_ref, + )?; + infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_arrayElemAt_idx"), + Some(&C::Scalar(BsonScalarType::Int)), + idx, + )?; + type_hint + .cloned() + .unwrap_or_else(|| C::ElementOf(Box::new(array_type))) + .make_nullable() + } + "$split" => { + infer_types_from_aggregation_expression_tuple( + context, + desired_object_type_name, + Some(&C::Scalar(BsonScalarType::String)), + operand, + )?; + C::ArrayOf(Box::new(C::Scalar(BsonScalarType::String))) + } + op => Err(Error::UnknownAggregationOperator(op.to_string()))?, + }; + Ok(t) +} + +fn two_parameter_operand(operator: &str, operand: Bson) -> Result<(Bson, Bson)> { + match operand { + Bson::Array(operands) => { + if operands.len() != 2 { + return Err(Error::Other(format!( + "argument to {operator} must be a two-element array" + ))); + } + let mut operands = operands.into_iter(); + let a = operands.next().unwrap(); + let b = operands.next().unwrap(); + Ok((a, b)) + } + other_bson => Err(Error::ExpectedArrayExpressionArgument { + actual_argument: other_bson, + })?, + } +} + +fn homogeneous_binary_operator_operand_type( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + operand_type_hint: Option, + operator: &str, + operand: Bson, +) -> Result { + let (a, b) = two_parameter_operand(operator, operand)?; + let variable = context.new_type_variable(Variance::Invariant, operand_type_hint); + let type_a = infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::Variable(variable)), + a, + )?; + let type_b = infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&C::Variable(variable)), + b, + )?; + for t in [type_a, type_b] { + // Avoid cycles of type variable references + if !context.constraint_references_variable(&t, variable) { + 
context.set_type_variable_constraint(variable, t); + } + } + Ok(C::Variable(variable)) +} + +pub fn type_for_trig_operator(operand_type: TypeConstraint) -> TypeConstraint { + operand_type.map_nullable(|t| match t { + t @ C::Scalar(BsonScalarType::Decimal) => t, + _ => C::Scalar(BsonScalarType::Double), + }) +} + +/// This is a document that is not evaluated as a plain value, not as an aggregation expression. +fn infer_type_from_document( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + document: Document, +) -> Result { + let object_type_name = context.unique_type_name(desired_object_type_name); + let fields = document + .into_iter() + .map(|(field_name, bson)| { + let field_object_type_name = format!("{desired_object_type_name}_{field_name}"); + let object_field_type = infer_type_from_aggregation_expression( + context, + &field_object_type_name, + None, + bson, + )?; + Ok((field_name.into(), object_field_type)) + }) + .collect::>>()?; + let object_type = ObjectTypeConstraint { fields }; + context.insert_object_type(object_type_name.clone(), object_type); + Ok(C::Object(object_type_name)) +} + +pub fn infer_type_from_reference_shorthand( + context: &mut PipelineTypeContext<'_>, + type_hint: Option<&TypeConstraint>, + input: &str, +) -> Result { + let reference = parse_reference_shorthand(input)?; + let t = match reference { + Reference::NativeQueryVariable { + name, + type_annotation, + } => { + let constraints = type_hint + .into_iter() + .cloned() + .chain(type_annotation.map(TypeConstraint::from)); + context.register_parameter(name.into(), constraints) + } + Reference::PipelineVariable { name, .. } => Err(Error::Other(format!("Encountered a pipeline variable, $${name}. Pipeline variables are currently not supported.")))?, + Reference::InputDocumentField { name, nested_path } => { + let doc_type = context.get_input_document_type()?; + let path = NonEmpty { + head: name, + tail: nested_path, + }; + C::FieldOf { + target_type: Box::new(doc_type.clone()), + path, + } + } + Reference::String { + native_query_variables, + } => { + for variable in native_query_variables { + context.register_parameter(variable.into(), [C::Scalar(BsonScalarType::String)]); + } + C::Scalar(BsonScalarType::String) + } + }; + Ok(t) +} + +#[cfg(test)] +mod tests { + use googletest::prelude::*; + use mongodb::bson::bson; + use mongodb_support::BsonScalarType; + use test_helpers::configuration::mflix_config; + + use crate::native_query::{ + pipeline_type_context::PipelineTypeContext, + type_constraint::{TypeConstraint, TypeVariable, Variance}, + }; + + use super::infer_type_from_operator_expression; + + use TypeConstraint as C; + + #[googletest::test] + fn infers_constrants_on_equality() -> Result<()> { + let config = mflix_config(); + let mut context = PipelineTypeContext::new(&config, None); + + let (var0, var1) = ( + TypeVariable::new(0, Variance::Invariant), + TypeVariable::new(1, Variance::Contravariant), + ); + + infer_type_from_operator_expression( + &mut context, + "test", + None, + "$eq", + bson!(["{{ parameter }}", 1]), + )?; + + expect_eq!( + context.type_variables(), + &[ + (var0, [C::Scalar(BsonScalarType::Int)].into()), + (var1, [C::Variable(var0)].into()) + ] + .into() + ); + + Ok(()) + } +} diff --git a/crates/cli/src/native_query/error.rs b/crates/cli/src/native_query/error.rs new file mode 100644 index 00000000..80a02ee9 --- /dev/null +++ b/crates/cli/src/native_query/error.rs @@ -0,0 +1,141 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use 
configuration::schema::Type; +use mongodb::bson::{Bson, Document}; +use ndc_models::{ArgumentName, FieldName, ObjectTypeName}; +use thiserror::Error; + +use super::type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable}; + +pub type Result = std::result::Result; + +// The URL for native query issues will be visible due to a wrapper around this error message in +// [crate::native_query::create]. +const UNSUPPORTED_FEATURE_MESSAGE: &str = r#"For a list of currently-supported features see https://hasura.io/docs/3.0/connectors/mongodb/native-operations/supported-aggregation-pipeline-features/. Please file a bug report, and declare types for your native query by hand for the time being."#; + +#[derive(Clone, Debug, Error, PartialEq)] +pub enum Error { + #[error("Cannot infer a result type for an empty pipeline")] + EmptyPipeline, + + #[error( + "Expected {reference} to reference an array, but instead it references a {referenced_type:?}" + )] + ExpectedArrayReference { + reference: Bson, + referenced_type: Type, + }, + + #[error("Expected an array type, but got: {actual_type:?}")] + ExpectedArray { actual_type: Type }, + + #[error("Expected an array, but got: {actual_argument}")] + ExpectedArrayExpressionArgument { actual_argument: Bson }, + + #[error("Expected an object type, but got: {actual_type:?}")] + ExpectedObject { actual_type: Type }, + + #[error("Expected a path for the $unwind stage")] + ExpectedStringPath(Bson), + + // This variant is not intended to be returned to the user - it is transformed with more + // context in [super::PipelineTypeContext::into_types]. + #[error("Failed to unify: {unsolved_variables:?}")] + FailedToUnify { + unsolved_variables: Vec, + }, + + #[error("Cannot infer a result document type for pipeline because it does not produce documents. You might need to add a --collection flag to your command to specify an input collection for the query.")] + IncompletePipeline, + + #[error("An object representing an expression must have exactly one field: {0}")] + MultipleExpressionOperators(Document), + + #[error("Object type, {object_type}, does not have a field named {field_name}")] + ObjectMissingField { + object_type: ObjectTypeName, + field_name: FieldName, + }, + + #[error("Type mismatch{}: {a} is not compatible with {b}", match context { + Some(context) => format!(" in {}", context), + None => String::new(), + })] + TypeMismatch { + context: Option, + a: TypeConstraint, + b: TypeConstraint, + }, + + #[error( + "{}", + unable_to_infer_types_message(*could_not_infer_return_type, problem_parameter_types) + )] + UnableToInferTypes { + problem_parameter_types: Vec, + could_not_infer_return_type: bool, + + // These fields are included here for internal debugging + type_variables: HashMap>, + object_type_constraints: BTreeMap, + }, + + #[error("Error parsing a string in the aggregation pipeline: {0}")] + UnableToParseReferenceShorthand(String), + + #[error("Type inference is not currently implemented for the query predicate operator, {0}. {UNSUPPORTED_FEATURE_MESSAGE}")] + UnknownMatchDocumentOperator(String), + + #[error("Type inference is not currently implemented for the aggregation expression operator, {0}. {UNSUPPORTED_FEATURE_MESSAGE}")] + UnknownAggregationOperator(String), + + #[error("Type inference is not currently implemented for{} stage number {} in your aggregation pipeline. 
{UNSUPPORTED_FEATURE_MESSAGE}", match stage_name { Some(name) => format!(" {name},"), None => "".to_string() }, stage_index + 1)] + UnknownAggregationStage { + stage_index: usize, + stage_name: Option<&'static str>, + }, + + #[error("Native query input collection, \"{0}\", is not defined in the connector schema")] + UnknownCollection(String), + + #[error("Unknown object type, \"{0}\"")] + UnknownObjectType(String), + + #[error("{0}")] + Other(String), + + #[error("Errors processing pipeline:\n\n{}", multiple_errors(.0))] + Multiple(Vec), +} + +fn unable_to_infer_types_message( + could_not_infer_return_type: bool, + problem_parameter_types: &[ArgumentName], +) -> String { + let mut message = String::new(); + message += "Cannot infer types for this pipeline.\n"; + if !problem_parameter_types.is_empty() { + message += "\nCould not infer types for these parameters:\n"; + for name in problem_parameter_types { + message += &format!("- {name}\n"); + } + message += "\nTry adding type annotations of the form: {{ parameter_name | [int!]! }}\n"; + message += "\nIf you added an annotation, and you are still seeing this error then the type you gave may not be compatible with the context where the parameter is used.\n"; + } + if could_not_infer_return_type { + message += "\nUnable to infer return type."; + if !problem_parameter_types.is_empty() { + message += " Adding type annotations to parameters may help."; + } + message += "\n"; + } + message +} + +fn multiple_errors(errors: &[Error]) -> String { + let mut output = String::new(); + for error in errors { + output += &format!("- {}\n", error); + } + output +} diff --git a/crates/cli/src/native_query/helpers.rs b/crates/cli/src/native_query/helpers.rs new file mode 100644 index 00000000..d39ff44e --- /dev/null +++ b/crates/cli/src/native_query/helpers.rs @@ -0,0 +1,94 @@ +use std::{borrow::Cow, collections::BTreeMap}; + +use configuration::Configuration; +use ndc_models::{CollectionInfo, CollectionName, FieldName, ObjectTypeName}; +use nonempty::NonEmpty; +use regex::Regex; + +use super::error::{Error, Result}; + +fn find_collection<'a>( + configuration: &'a Configuration, + collection_name: &CollectionName, +) -> Result<&'a CollectionInfo> { + if let Some(collection) = configuration.collections.get(collection_name) { + return Ok(collection); + } + if let Some((_, function)) = configuration.functions.get(collection_name) { + return Ok(function); + } + + Err(Error::UnknownCollection(collection_name.to_string())) +} + +pub fn find_collection_object_type( + configuration: &Configuration, + collection_name: &CollectionName, +) -> Result { + let collection = find_collection(configuration, collection_name)?; + Ok(collection.collection_type.clone()) +} + +pub fn unique_type_name( + object_types: &BTreeMap, + added_object_types: &BTreeMap, + desired_type_name: &str, +) -> ObjectTypeName { + let (name, mut counter) = parse_counter_suffix(desired_type_name); + let mut type_name: ObjectTypeName = name.as_ref().into(); + while object_types.contains_key(&type_name) || added_object_types.contains_key(&type_name) { + counter += 1; + type_name = format!("{desired_type_name}_{counter}").into(); + } + type_name +} + +/// [unique_type_name] adds a `_n` numeric suffix where necessary. There are cases where we go +/// through multiple layers of unique names. Instead of accumulating multiple suffixes, we can +/// increment the existing suffix. If there is no suffix then the count starts at zero. 
+pub fn parse_counter_suffix(name: &str) -> (Cow<'_, str>, u32) { + let re = Regex::new(r"^(.*?)_(\d+)$").unwrap(); + let Some(captures) = re.captures(name) else { + return (Cow::Borrowed(name), 0); + }; + let prefix = captures.get(1).unwrap().as_str(); + let Some(count) = captures.get(2).and_then(|s| s.as_str().parse().ok()) else { + return (Cow::Borrowed(name), 0); + }; + (Cow::Owned(prefix.to_string()), count) +} + +pub fn get_object_field_type<'a>( + object_types: &'a BTreeMap, + object_type_name: &ObjectTypeName, + object_type: &'a ndc_models::ObjectType, + path: NonEmpty, +) -> Result<&'a ndc_models::Type> { + let field_name = path.head; + let rest = NonEmpty::from_vec(path.tail); + + let field = object_type + .fields + .get(&field_name) + .ok_or_else(|| Error::ObjectMissingField { + object_type: object_type_name.clone(), + field_name: field_name.clone(), + })?; + + match rest { + None => Ok(&field.r#type), + Some(rest) => match &field.r#type { + ndc_models::Type::Named { name } => { + let type_name: ObjectTypeName = name.clone().into(); + let inner_object_type = object_types + .get(&type_name) + .ok_or_else(|| Error::UnknownObjectType(type_name.to_string()))?; + get_object_field_type(object_types, &type_name, inner_object_type, rest) + } + _ => Err(Error::ObjectMissingField { + object_type: object_type_name.clone(), + field_name: field_name.clone(), + }), + }, + } +} diff --git a/crates/cli/src/native_query/mod.rs b/crates/cli/src/native_query/mod.rs new file mode 100644 index 00000000..72c33450 --- /dev/null +++ b/crates/cli/src/native_query/mod.rs @@ -0,0 +1,308 @@ +mod aggregation_expression; +pub mod error; +mod helpers; +mod pipeline; +mod pipeline_type_context; +mod pretty_printing; +mod prune_object_types; +mod reference_shorthand; +mod type_annotation; +mod type_constraint; +mod type_solver; + +#[cfg(test)] +mod tests; + +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; +use std::process::exit; + +use clap::Subcommand; +use configuration::schema::ObjectField; +use configuration::{ + native_query::NativeQueryRepresentation::Collection, serialized::NativeQuery, Configuration, +}; +use configuration::{read_directory_with_ignored_configs, read_native_query_directory, WithName}; +use mongodb_support::aggregate::Pipeline; +use ndc_models::{CollectionName, FunctionName}; +use pretty::termcolor::{ColorChoice, StandardStream}; +use pretty_printing::pretty_print_native_query; +use tokio::fs; + +use crate::exit_codes::ExitCode; +use crate::Context; + +use self::error::Result; +use self::pipeline::infer_pipeline_types; +use self::pretty_printing::pretty_print_native_query_info; + +/// [BETA] Create or manage native queries - custom MongoDB queries that integrate into your data graph +#[derive(Clone, Debug, Subcommand)] +pub enum Command { + /// Create a native query from a JSON file containing an aggregation pipeline + Create { + /// Name that will identify the query in your data graph (defaults to base name of pipeline file) + #[arg(long, short = 'n')] + name: Option, + + /// Name of the collection that acts as input for the pipeline - omit for a pipeline that does not require input + #[arg(long, short = 'c')] + collection: Option, + + /// Overwrite any existing native query configuration with the same name + #[arg(long, short = 'f')] + force: bool, + + /// Path to a JSON file with an aggregation pipeline that specifies your custom query. This + /// is a value that could be given to the MongoDB command db..aggregate(). 
+ pipeline_path: PathBuf, + }, + + /// Delete a native query identified by name. Use the list subcommand to see native query + /// names. + Delete { native_query_name: String }, + + /// List all configured native queries + List, + + /// Print details of a native query identified by name. Use the list subcommand to see native + /// query names. + Show { native_query_name: String }, +} + +pub async fn run(context: &Context, command: Command) -> anyhow::Result<()> { + match command { + Command::Create { + name, + collection, + force, + pipeline_path, + } => create(context, name, collection, force, &pipeline_path).await, + Command::Delete { native_query_name } => delete(context, &native_query_name).await, + Command::List => list(context).await, + Command::Show { native_query_name } => show(context, &native_query_name).await, + } +} + +async fn list(context: &Context) -> anyhow::Result<()> { + let native_queries = read_native_queries(context).await?; + for (name, _) in native_queries { + println!("{}", name); + } + Ok(()) +} + +async fn delete(context: &Context, native_query_name: &str) -> anyhow::Result<()> { + let (_, path) = find_native_query(context, native_query_name).await?; + fs::remove_file(&path).await?; + eprintln!( + "Deleted native query configuration at {}", + path.to_string_lossy() + ); + Ok(()) +} + +async fn show(context: &Context, native_query_name: &str) -> anyhow::Result<()> { + let (native_query, path) = find_native_query(context, native_query_name).await?; + pretty_print_native_query(&mut stdout(context), &native_query, &path).await?; + println!(); // blank line to avoid unterminated output indicator + Ok(()) +} + +async fn create( + context: &Context, + name: Option, + collection: Option, + force: bool, + pipeline_path: &Path, +) -> anyhow::Result<()> { + let name = match name.or_else(|| { + pipeline_path + .file_stem() + .map(|os_str| os_str.to_string_lossy().to_string()) + }) { + Some(name) => name, + None => { + eprintln!("Could not determine name for native query."); + exit(ExitCode::InvalidArguments.into()) + } + }; + + let native_query_path = { + let path = get_native_query_path(context, &name); + if !force && fs::try_exists(&path).await? { + eprintln!( + "A native query named {name} already exists at {}.", + path.to_string_lossy() + ); + eprintln!("Re-run with --force to overwrite."); + exit(ExitCode::RefusedToOverwrite.into()) + } + path + }; + + let configuration = read_configuration(context, &[native_query_path.clone()]).await?; + + let pipeline = match read_pipeline(pipeline_path).await { + Ok(p) => p, + Err(err) => { + write_stderr(&format!("Could not read aggregation pipeline.\n\n{err}")); + exit(ExitCode::CouldNotReadAggregationPipeline.into()) + } + }; + let native_query = match native_query_from_pipeline(&configuration, &name, collection, pipeline) + { + Ok(q) => WithName::named(name, q), + Err(err) => { + eprintln!(); + write_stderr(&err.to_string()); + eprintln!(); + write_stderr(&format!("If you are not able to resolve this error you can add the native query by writing the configuration file directly in {}. 
See https://hasura.io/docs/3.0/connectors/mongodb/native-operations/native-queries/#write-native-query-configurations-directly", native_query_path.to_string_lossy())); + // eprintln!("See https://hasura.io/docs/3.0/connectors/mongodb/native-operations/native-queries/#write-native-query-configurations-directly"); + eprintln!(); + write_stderr("If you want to request support for a currently unsupported query feature, report a bug, or get support please file an issue at https://github.com/hasura/ndc-mongodb/issues/new?template=native-query.md"); + exit(ExitCode::CouldNotReadAggregationPipeline.into()) + } + }; + + let native_query_dir = native_query_path + .parent() + .expect("parent directory of native query configuration path"); + if !(fs::try_exists(&native_query_dir).await?) { + fs::create_dir(&native_query_dir).await?; + } + + if let Err(err) = fs::write( + &native_query_path, + serde_json::to_string_pretty(&native_query)?, + ) + .await + { + write_stderr(&format!("Error writing native query configuration: {err}")); + exit(ExitCode::ErrorWriting.into()) + }; + eprintln!( + "\nWrote native query configuration to {}", + native_query_path.to_string_lossy() + ); + eprintln!(); + pretty_print_native_query_info(&mut stdout(context), &native_query.value).await?; + println!(); // blank line to avoid unterminated output indicator + Ok(()) +} + +/// Reads configuration, or exits with specific error code on error +async fn read_configuration( + context: &Context, + ignored_configs: &[PathBuf], +) -> anyhow::Result { + let configuration = match read_directory_with_ignored_configs(&context.path, ignored_configs) + .await + { + Ok(c) => c, + Err(err) => { + write_stderr(&format!("Could not read connector configuration - configuration must be initialized before creating native queries.\n\n{err:#}")); + exit(ExitCode::CouldNotReadConfiguration.into()) + } + }; + eprintln!( + "Read configuration from {}", + &context.path.to_string_lossy() + ); + Ok(configuration) +} + +/// Reads native queries skipping configuration processing, or exits with specific error code on error +async fn read_native_queries( + context: &Context, +) -> anyhow::Result> { + let native_queries = match read_native_query_directory(&context.path, &[]).await { + Ok(native_queries) => native_queries, + Err(err) => { + write_stderr(&format!("Could not read native queries.\n\n{err}")); + exit(ExitCode::CouldNotReadConfiguration.into()) + } + }; + Ok(native_queries) +} + +async fn find_native_query( + context: &Context, + name: &str, +) -> anyhow::Result<(NativeQuery, PathBuf)> { + let mut native_queries = read_native_queries(context).await?; + let (_, definition_and_path) = match native_queries.remove_entry(name) { + Some(native_query) => native_query, + None => { + eprintln!("No native query named {name} found."); + exit(ExitCode::ResourceNotFound.into()) + } + }; + Ok(definition_and_path) +} + +async fn read_pipeline(pipeline_path: &Path) -> anyhow::Result { + let input = fs::read(pipeline_path).await?; + let pipeline = serde_json::from_slice(&input)?; + Ok(pipeline) +} + +fn get_native_query_path(context: &Context, name: &str) -> PathBuf { + context + .path + .join(configuration::NATIVE_QUERIES_DIRNAME) + .join(name) + .with_extension("json") +} + +pub fn native_query_from_pipeline( + configuration: &Configuration, + name: &str, + input_collection: Option, + pipeline: Pipeline, +) -> Result { + let pipeline_types = + infer_pipeline_types(configuration, name, input_collection.as_ref(), &pipeline)?; + + let arguments = pipeline_types + 
.parameter_types + .into_iter() + .map(|(name, parameter_type)| { + ( + name, + ObjectField { + r#type: parameter_type, + description: None, + }, + ) + }) + .collect(); + + // TODO: move warnings to `run` function + for warning in pipeline_types.warnings { + println!("warning: {warning}"); + } + Ok(NativeQuery { + representation: Collection, + input_collection, + arguments, + result_document_type: pipeline_types.result_document_type, + object_types: pipeline_types.object_types, + pipeline: pipeline.into(), + description: None, + }) +} + +fn stdout(context: &Context) -> StandardStream { + if context.display_color { + StandardStream::stdout(ColorChoice::Auto) + } else { + StandardStream::stdout(ColorChoice::Never) + } +} + +/// Write a message to sdterr with automatic line wrapping +fn write_stderr(message: &str) { + let wrap_options = 120; + eprintln!("{}", textwrap::fill(message, wrap_options)) +} diff --git a/crates/cli/src/native_query/pipeline/match_stage.rs b/crates/cli/src/native_query/pipeline/match_stage.rs new file mode 100644 index 00000000..101c30c9 --- /dev/null +++ b/crates/cli/src/native_query/pipeline/match_stage.rs @@ -0,0 +1,287 @@ +use mongodb::bson::{Bson, Document}; +use mongodb_support::BsonScalarType; +use nonempty::NonEmpty; + +use crate::native_query::{ + aggregation_expression::infer_type_from_aggregation_expression, + error::{Error, Result}, + pipeline_type_context::PipelineTypeContext, + reference_shorthand::{parse_reference_shorthand, Reference}, + type_constraint::TypeConstraint, +}; + +pub fn check_match_doc_for_parameters( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + mut match_doc: Document, +) -> Result<()> { + let input_document_type = context.get_input_document_type()?; + if let Some(expression) = match_doc.remove("$expr") { + let type_hint = TypeConstraint::Scalar(BsonScalarType::Bool); + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&type_hint), + expression, + )?; + Ok(()) + } else { + check_match_doc_for_parameters_helper( + context, + desired_object_type_name, + &input_document_type, + match_doc, + ) + } +} + +fn check_match_doc_for_parameters_helper( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + input_document_type: &TypeConstraint, + match_doc: Document, +) -> Result<()> { + for (key, value) in match_doc { + if key.starts_with("$") { + analyze_match_operator( + context, + desired_object_type_name, + input_document_type, + key, + value, + )?; + } else { + analyze_input_doc_field( + context, + desired_object_type_name, + input_document_type, + key, + value, + )?; + } + } + Ok(()) +} + +fn analyze_input_doc_field( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + input_document_type: &TypeConstraint, + field_name: String, + match_expression: Bson, +) -> Result<()> { + let field_type = TypeConstraint::FieldOf { + target_type: Box::new(input_document_type.clone()), + path: NonEmpty::from_vec(field_name.split(".").map(Into::into).collect()) + .ok_or_else(|| Error::Other("object field reference is an empty string".to_string()))?, + }; + analyze_match_expression( + context, + desired_object_type_name, + &field_type, + match_expression, + ) +} + +fn analyze_match_operator( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + field_type: &TypeConstraint, + operator: String, + match_expression: Bson, +) -> Result<()> { + match operator.as_ref() { + "$and" | "$or" | "$nor" => { + if let Bson::Array(array) = 
match_expression { + for expression in array { + check_match_doc_for_parameters_helper( + context, + desired_object_type_name, + field_type, + expression + .as_document() + .ok_or_else(|| { + Error::Other(format!( + "expected argument to {operator} to be an array of objects" + )) + })? + .clone(), + )?; + } + } else { + Err(Error::Other(format!( + "expected argument to {operator} to be an array of objects" + )))?; + } + } + "$not" => { + match match_expression { + Bson::Document(match_doc) => check_match_doc_for_parameters_helper( + context, + desired_object_type_name, + field_type, + match_doc, + )?, + _ => Err(Error::Other(format!( + "{operator} operator requires a document", + )))?, + }; + } + "$elemMatch" => { + let element_type = field_type.clone().map_nullable(|ft| match ft { + TypeConstraint::ArrayOf(t) => *t, + other => TypeConstraint::ElementOf(Box::new(other)), + }); + match match_expression { + Bson::Document(match_doc) => check_match_doc_for_parameters_helper( + context, + desired_object_type_name, + &element_type, + match_doc, + )?, + _ => Err(Error::Other(format!( + "{operator} operator requires a document", + )))?, + }; + } + "$eq" | "$ne" | "$gt" | "$lt" | "$gte" | "$lte" => analyze_match_expression( + context, + desired_object_type_name, + field_type, + match_expression, + )?, + "$in" | "$nin" => analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::ArrayOf(Box::new(field_type.clone())), + match_expression, + )?, + "$exists" => analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::Scalar(BsonScalarType::Bool), + match_expression, + )?, + // In MongoDB $type accepts either a number, a string, an array of numbers, or an array of + // strings - for simplicity we're only accepting an array of strings since this form can + // express all comparisons that can be expressed with the other forms. + "$type" => analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::ArrayOf(Box::new(TypeConstraint::Scalar(BsonScalarType::String))), + match_expression, + )?, + "$mod" => match match_expression { + Bson::Array(xs) => { + if xs.len() != 2 { + Err(Error::Other(format!( + "{operator} operator requires exactly two arguments", + operator = operator + )))?; + } + for divisor_or_remainder in xs { + analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::Scalar(BsonScalarType::Int), + divisor_or_remainder, + )?; + } + } + _ => Err(Error::Other(format!( + "{operator} operator requires an array of two elements", + )))?, + }, + "$regex" => analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::Scalar(BsonScalarType::Regex), + match_expression, + )?, + "$all" => { + let element_type = field_type.clone().map_nullable(|ft| match ft { + TypeConstraint::ArrayOf(t) => *t, + other => TypeConstraint::ElementOf(Box::new(other)), + }); + // It's like passing field_type through directly, except that we move out of + // a possible nullable type, and we enforce an array type. 
+ let argument_type = TypeConstraint::ArrayOf(Box::new(element_type)); + analyze_match_expression( + context, + desired_object_type_name, + &argument_type, + match_expression, + )?; + } + "$size" => analyze_match_expression( + context, + desired_object_type_name, + &TypeConstraint::Scalar(BsonScalarType::Int), + match_expression, + )?, + _ => Err(Error::UnknownMatchDocumentOperator(operator))?, + } + Ok(()) +} + +fn analyze_match_expression( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + field_type: &TypeConstraint, + match_expression: Bson, +) -> Result<()> { + match match_expression { + Bson::String(s) => analyze_match_expression_string(context, field_type, s), + Bson::Document(match_doc) => check_match_doc_for_parameters_helper( + context, + desired_object_type_name, + field_type, + match_doc, + ), + Bson::Array(xs) => { + let element_type = field_type.clone().map_nullable(|ft| match ft { + TypeConstraint::ArrayOf(t) => *t, + other => TypeConstraint::ElementOf(Box::new(other)), + }); + for x in xs { + analyze_match_expression(context, desired_object_type_name, &element_type, x)?; + } + Ok(()) + } + _ => Ok(()), + } +} + +fn analyze_match_expression_string( + context: &mut PipelineTypeContext<'_>, + field_type: &TypeConstraint, + match_expression: String, +) -> Result<()> { + // A match expression is not an aggregation expression shorthand string. But we only care about + // variable references, and the shorthand parser gets those for us. + match parse_reference_shorthand(&match_expression)? { + Reference::NativeQueryVariable { + name, + type_annotation, + } => { + let constraints = std::iter::once(field_type.clone()) + .chain(type_annotation.map(TypeConstraint::from)); + context.register_parameter(name.into(), constraints); + } + Reference::String { + native_query_variables, + } => { + for variable in native_query_variables { + context.register_parameter( + variable.into(), + [TypeConstraint::Scalar( + mongodb_support::BsonScalarType::String, + )], + ); + } + } + Reference::PipelineVariable { .. } => (), + Reference::InputDocumentField { .. 
} => (), + }; + Ok(()) +} diff --git a/crates/cli/src/native_query/pipeline/mod.rs b/crates/cli/src/native_query/pipeline/mod.rs new file mode 100644 index 00000000..9f14d085 --- /dev/null +++ b/crates/cli/src/native_query/pipeline/mod.rs @@ -0,0 +1,475 @@ +mod match_stage; +mod project_stage; + +use std::{collections::BTreeMap, iter::once}; + +use configuration::Configuration; +use mongodb::bson::{Bson, Document}; +use mongodb_support::{ + aggregate::{Accumulator, Pipeline, Stage}, + BsonScalarType, +}; +use ndc_models::{CollectionName, FieldName, ObjectTypeName}; + +use super::{ + aggregation_expression::{ + self, infer_type_from_aggregation_expression, infer_type_from_reference_shorthand, + type_for_trig_operator, + }, + error::{Error, Result}, + helpers::find_collection_object_type, + pipeline_type_context::{PipelineTypeContext, PipelineTypes}, + reference_shorthand::{parse_reference_shorthand, Reference}, + type_constraint::{ObjectTypeConstraint, TypeConstraint, Variance}, +}; + +pub fn infer_pipeline_types( + configuration: &Configuration, + // If we have to define a new object type, use this name + desired_object_type_name: &str, + input_collection: Option<&CollectionName>, + pipeline: &Pipeline, +) -> Result { + if pipeline.is_empty() { + return Err(Error::EmptyPipeline); + } + + let collection_doc_type = input_collection + .map(|collection_name| find_collection_object_type(configuration, collection_name)) + .transpose()?; + + let mut context = PipelineTypeContext::new(configuration, collection_doc_type); + + let object_type_name = context.unique_type_name(desired_object_type_name); + + for (stage_index, stage) in pipeline.iter().enumerate() { + if let Some(output_type) = + infer_stage_output_type(&mut context, desired_object_type_name, stage_index, stage)? 
+ { + context.set_stage_doc_type(output_type); + }; + } + + // Try to set the desired type name for the overall pipeline output + let last_stage_type = context.get_input_document_type()?; + if let TypeConstraint::Object(stage_type_name) = last_stage_type { + if let Some(object_type) = context.get_object_type(&stage_type_name) { + context.insert_object_type(object_type_name.clone(), object_type.into_owned()); + context.set_stage_doc_type(TypeConstraint::Object(object_type_name)); + } + } + + context.into_types() +} + +fn infer_stage_output_type( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + stage_index: usize, + stage: &Stage, +) -> Result> { + let output_type = match stage { + Stage::AddFields(_) => Err(Error::UnknownAggregationStage { + stage_index, + stage_name: Some("$addFields"), + })?, + Stage::Documents(docs) => { + let doc_constraints = docs + .iter() + .map(|doc| { + infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_documents"), + None, + doc.into(), + ) + }) + .collect::>>()?; + let type_variable = context.new_type_variable(Variance::Covariant, doc_constraints); + Some(TypeConstraint::Variable(type_variable)) + } + Stage::Match(match_doc) => { + match_stage::check_match_doc_for_parameters( + context, + &format!("{desired_object_type_name}_match"), + match_doc.clone(), + )?; + None + } + Stage::Sort(_) => None, + Stage::Skip(expression) => { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&TypeConstraint::Scalar(BsonScalarType::Int)), + expression.clone(), + )?; + None + } + Stage::Limit(expression) => { + infer_type_from_aggregation_expression( + context, + desired_object_type_name, + Some(&TypeConstraint::Scalar(BsonScalarType::Int)), + expression.clone(), + )?; + None + } + Stage::Lookup { .. 
} => Err(Error::UnknownAggregationStage { + stage_index, + stage_name: Some("$lookup"), + })?, + Stage::Group { + key_expression, + accumulators, + } => { + let object_type_name = infer_type_from_group_stage( + context, + &format!("{desired_object_type_name}_group"), + key_expression, + accumulators, + )?; + Some(TypeConstraint::Object(object_type_name)) + } + Stage::Facet(_) => Err(Error::UnknownAggregationStage { + stage_index, + stage_name: Some("$facet"), + })?, + Stage::Count(_) => Err(Error::UnknownAggregationStage { + stage_index, + stage_name: Some("$count"), + })?, + Stage::Project(doc) => { + let augmented_type = project_stage::infer_type_from_project_stage( + context, + &format!("{desired_object_type_name}_project"), + doc, + )?; + Some(augmented_type) + } + Stage::ReplaceRoot { + new_root: selection, + } + | Stage::ReplaceWith(selection) => { + let selection: &Document = selection.into(); + Some( + aggregation_expression::infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_replaceWith"), + None, + selection.clone().into(), + )?, + ) + } + Stage::Unwind { + path, + include_array_index, + preserve_null_and_empty_arrays, + } => Some(infer_type_from_unwind_stage( + context, + &format!("{desired_object_type_name}_unwind"), + path, + include_array_index.as_deref(), + *preserve_null_and_empty_arrays, + )?), + Stage::Other(_) => Err(Error::UnknownAggregationStage { + stage_index, + stage_name: None, + })?, + }; + Ok(output_type) +} + +fn infer_type_from_group_stage( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + key_expression: &Bson, + accumulators: &BTreeMap, +) -> Result { + let group_key_expression_type = infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_id"), + None, + key_expression.clone(), + )?; + + let group_expression_field: (FieldName, TypeConstraint) = + ("_id".into(), group_key_expression_type.clone()); + + let accumulator_fields = accumulators.iter().map(|(key, accumulator)| { + let accumulator_type = match accumulator { + Accumulator::Count => TypeConstraint::Scalar(BsonScalarType::Int), + Accumulator::Min(expr) => infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_min"), + None, + expr.clone(), + )?, + Accumulator::Max(expr) => infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_min"), + None, + expr.clone(), + )?, + Accumulator::AddToSet(expr) | Accumulator::Push(expr) => { + let t = infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_push"), + None, + expr.clone(), + )?; + TypeConstraint::ArrayOf(Box::new(t)) + } + Accumulator::Avg(expr) => { + let t = infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_avg"), + Some(&TypeConstraint::numeric()), + expr.clone(), + )?; + type_for_trig_operator(t).make_nullable() + } + Accumulator::Sum(expr) => infer_type_from_aggregation_expression( + context, + &format!("{desired_object_type_name}_push"), + Some(&TypeConstraint::numeric()), + expr.clone(), + )?, + }; + Ok::<_, Error>((key.clone().into(), accumulator_type)) + }); + + let fields = once(Ok(group_expression_field)) + .chain(accumulator_fields) + .collect::>()?; + let object_type = ObjectTypeConstraint { fields }; + let object_type_name = context.unique_type_name(desired_object_type_name); + context.insert_object_type(object_type_name.clone(), object_type); + Ok(object_type_name) +} + +fn infer_type_from_unwind_stage( 
+ context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + path: &str, + include_array_index: Option<&str>, + _preserve_null_and_empty_arrays: Option, +) -> Result { + let field_to_unwind = parse_reference_shorthand(path)?; + let Reference::InputDocumentField { name, nested_path } = field_to_unwind else { + return Err(Error::ExpectedStringPath(path.into())); + }; + let field_type = infer_type_from_reference_shorthand(context, None, path)?; + + let mut unwind_stage_object_type = ObjectTypeConstraint { + fields: Default::default(), + }; + if let Some(index_field_name) = include_array_index { + unwind_stage_object_type.fields.insert( + index_field_name.into(), + TypeConstraint::Scalar(BsonScalarType::Long), + ); + } + + // If `path` includes a nested_path then the type for the unwound field will be nested + // objects + fn build_nested_types( + context: &mut PipelineTypeContext<'_>, + ultimate_field_type: TypeConstraint, + parent_object_type: &mut ObjectTypeConstraint, + desired_object_type_name: &str, + field_name: FieldName, + mut rest: impl Iterator, + ) { + match rest.next() { + Some(next_field_name) => { + let object_type_name = context.unique_type_name(desired_object_type_name); + let mut object_type = ObjectTypeConstraint { + fields: Default::default(), + }; + build_nested_types( + context, + ultimate_field_type, + &mut object_type, + &format!("{desired_object_type_name}_{next_field_name}"), + next_field_name, + rest, + ); + context.insert_object_type(object_type_name.clone(), object_type); + parent_object_type + .fields + .insert(field_name, TypeConstraint::Object(object_type_name)); + } + None => { + parent_object_type + .fields + .insert(field_name, ultimate_field_type); + } + } + } + build_nested_types( + context, + TypeConstraint::ElementOf(Box::new(field_type)), + &mut unwind_stage_object_type, + desired_object_type_name, + name, + nested_path.into_iter(), + ); + + // let object_type_name = context.unique_type_name(desired_object_type_name); + // context.insert_object_type(object_type_name.clone(), unwind_stage_object_type); + + // We just inferred an object type for the fields that are **added** by the unwind stage. To + // get the full output type the added fields must be merged with fields from the output of the + // previous stage. + Ok(TypeConstraint::WithFieldOverrides { + augmented_object_type_name: format!("{desired_object_type_name}_unwind").into(), + target_type: Box::new(context.get_input_document_type()?.clone()), + fields: unwind_stage_object_type + .fields + .into_iter() + .map(|(k, t)| (k, Some(t))) + .collect(), + }) +} + +#[cfg(test)] +mod tests { + use configuration::schema::{ObjectField, ObjectType, Type}; + use mongodb::bson::doc; + use mongodb_support::{ + aggregate::{Pipeline, Selection, Stage}, + BsonScalarType, + }; + use nonempty::NonEmpty; + use pretty_assertions::assert_eq; + use test_helpers::configuration::mflix_config; + + use crate::native_query::{ + pipeline_type_context::PipelineTypeContext, + type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable, Variance}, + }; + + use super::{infer_pipeline_types, infer_type_from_unwind_stage}; + + type Result = anyhow::Result; + + #[test] + fn infers_type_from_documents_stage() -> Result<()> { + let pipeline = Pipeline::new(vec![Stage::Documents(vec![ + doc! { "foo": 1 }, + doc! 
{ "bar": 2 }, + ])]); + let config = mflix_config(); + let pipeline_types = infer_pipeline_types(&config, "documents", None, &pipeline).unwrap(); + let expected = [( + "documents_documents".into(), + ObjectType { + fields: [ + ( + "foo".into(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), + description: None, + }, + ), + ( + "bar".into(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), + description: None, + }, + ), + ] + .into(), + description: None, + }, + )] + .into(); + let actual = pipeline_types.object_types; + assert_eq!(actual, expected); + Ok(()) + } + + #[test] + fn infers_type_from_replace_with_stage() -> Result<()> { + let pipeline = Pipeline::new(vec![Stage::ReplaceWith(Selection::new(doc! { + "selected_title": "$title" + }))]); + let config = mflix_config(); + let pipeline_types = + infer_pipeline_types(&config, "movies", Some(&("movies".into())), &pipeline)?; + let expected = [( + "movies_replaceWith".into(), + ObjectType { + fields: [( + "selected_title".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None, + }, + )] + .into(), + description: None, + }, + )] + .into(); + let actual = pipeline_types.object_types; + assert_eq!(actual, expected); + Ok(()) + } + + #[test] + fn infers_type_from_unwind_stage() -> Result<()> { + let config = mflix_config(); + let mut context = PipelineTypeContext::new(&config, None); + context.insert_object_type( + "words_doc".into(), + ObjectTypeConstraint { + fields: [( + "words".into(), + TypeConstraint::ArrayOf(Box::new(TypeConstraint::Scalar( + BsonScalarType::String, + ))), + )] + .into(), + }, + ); + context.set_stage_doc_type(TypeConstraint::Object("words_doc".into())); + + let inferred_type = infer_type_from_unwind_stage( + &mut context, + "unwind_stage", + "$words", + Some("idx"), + Some(false), + )?; + + let input_doc_variable = TypeVariable::new(0, Variance::Covariant); + + assert_eq!( + inferred_type, + TypeConstraint::WithFieldOverrides { + augmented_object_type_name: "unwind_stage_unwind".into(), + target_type: Box::new(TypeConstraint::Variable(input_doc_variable)), + fields: [ + ( + "idx".into(), + Some(TypeConstraint::Scalar(BsonScalarType::Long)) + ), + ( + "words".into(), + Some(TypeConstraint::ElementOf(Box::new( + TypeConstraint::FieldOf { + target_type: Box::new(TypeConstraint::Variable(input_doc_variable)), + path: NonEmpty::singleton("words".into()), + } + ))) + ) + ] + .into(), + } + ); + Ok(()) + } +} diff --git a/crates/cli/src/native_query/pipeline/project_stage.rs b/crates/cli/src/native_query/pipeline/project_stage.rs new file mode 100644 index 00000000..427d9c55 --- /dev/null +++ b/crates/cli/src/native_query/pipeline/project_stage.rs @@ -0,0 +1,444 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + str::FromStr as _, +}; + +use itertools::Itertools as _; +use mongodb::bson::{Bson, Decimal128, Document}; +use mongodb_support::BsonScalarType; +use ndc_models::{FieldName, ObjectTypeName}; +use nonempty::NonEmpty; + +use crate::native_query::{ + aggregation_expression::infer_type_from_aggregation_expression, + error::{Error, Result}, + pipeline_type_context::PipelineTypeContext, + type_constraint::{ObjectTypeConstraint, TypeConstraint}, +}; + +enum Mode { + Exclusion, + Inclusion, +} + +// $project has two distinct behaviors: +// +// Exclusion mode: if every value in the projection document is `false` or `0` then the output +// preserves fields from the input except for fields that are 
specifically excluded. The special +// value `$$REMOVE` **cannot** be used in this mode. +// +// Inclusion (replace) mode: if any value in the projection document specifies a field for +// inclusion, replaces the value of an input field with a new value, adds a new field with a new +// value, or removes a field with the special value `$$REMOVE` then output excludes input fields +// that are not specified. The output is composed solely of fields specified in the projection +// document, plus `_id` unless `_id` is specifically excluded. Values of `false` or `0` are not +// allowed in this mode except to suppress `_id`. +// +// TODO: This implementation does not fully account for uses of $$REMOVE. It does correctly select +// inclusion mode if $$REMOVE is used. A complete implementation would infer a nullable type for +// a projection that conditionally resolves to $$REMOVE. +pub fn infer_type_from_project_stage( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + projection: &Document, +) -> Result { + let mode = if projection.values().all(is_false_or_zero) { + Mode::Exclusion + } else { + Mode::Inclusion + }; + match mode { + Mode::Exclusion => exclusion_projection_type(context, desired_object_type_name, projection), + Mode::Inclusion => inclusion_projection_type(context, desired_object_type_name, projection), + } +} + +fn exclusion_projection_type( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + projection: &Document, +) -> Result { + // Projection keys can be dot-separated paths to nested fields. In this case a single + // object-type output field might be specified by multiple project keys. We collect sets of + // each top-level key (the first component of a dot-separated path), and then merge + // constraints. + let mut specifications: HashMap> = Default::default(); + + for (field_name, _) in projection { + let path = field_name.split(".").map(|s| s.into()).collect_vec(); + ProjectionTree::insert_specification(&mut specifications, &path, ())?; + } + + let input_type = context.get_input_document_type()?; + Ok(projection_tree_into_field_overrides( + input_type, + desired_object_type_name, + specifications, + )) +} + +fn projection_tree_into_field_overrides( + input_type: TypeConstraint, + desired_object_type_name: &str, + specifications: HashMap>, +) -> TypeConstraint { + let overrides = specifications + .into_iter() + .map(|(name, spec)| { + let field_override = match spec { + ProjectionTree::Object(sub_specs) => { + let original_field_type = TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::singleton(name.clone()), + }; + Some(projection_tree_into_field_overrides( + original_field_type, + &format!("{desired_object_type_name}_{name}"), + sub_specs, + )) + } + ProjectionTree::Field(_) => None, + }; + (name, field_override) + }) + .collect(); + + TypeConstraint::WithFieldOverrides { + augmented_object_type_name: desired_object_type_name.into(), + target_type: Box::new(input_type), + fields: overrides, + } +} + +fn inclusion_projection_type( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + projection: &Document, +) -> Result { + let input_type = context.get_input_document_type()?; + + // Projection keys can be dot-separated paths to nested fields. In this case a single + // object-type output field might be specified by multiple project keys. We collect sets of + // each top-level key (the first component of a dot-separated path), and then merge + // constraints. 
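+    // Hypothetical illustration: the keys "tomatoes.critic.rating" and "tomatoes.critic.meter"
+    // both land under a single "tomatoes" entry, with a nested subtree for "critic".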
+ let mut specifications: HashMap> = Default::default(); + + let added_fields = projection + .iter() + .filter(|(_, spec)| !is_false_or_zero(spec)); + + for (field_name, spec) in added_fields { + let path = field_name.split(".").map(|s| s.into()).collect_vec(); + let projected_type = if is_true_or_one(spec) { + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::from_slice(&path).ok_or_else(|| { + Error::Other("key in $project stage is an empty string".to_string()) + })?, + } + } else { + let desired_object_type_name = format!("{desired_object_type_name}_{field_name}"); + infer_type_from_aggregation_expression( + context, + &desired_object_type_name, + None, + spec.clone(), + )? + }; + ProjectionTree::insert_specification(&mut specifications, &path, projected_type)?; + } + + let specifies_id = projection.keys().any(|k| k == "_id"); + if !specifies_id { + ProjectionTree::insert_specification( + &mut specifications, + &["_id".into()], + TypeConstraint::Scalar(BsonScalarType::ObjectId), + )?; + } + + let object_type_name = + projection_tree_into_object_type(context, desired_object_type_name, specifications); + + Ok(TypeConstraint::Object(object_type_name)) +} + +fn projection_tree_into_object_type( + context: &mut PipelineTypeContext<'_>, + desired_object_type_name: &str, + specifications: HashMap>, +) -> ObjectTypeName { + let fields = specifications + .into_iter() + .map(|(field_name, spec)| { + let field_type = match spec { + ProjectionTree::Field(field_type) => field_type, + ProjectionTree::Object(sub_specs) => { + let desired_object_type_name = + format!("{desired_object_type_name}_{field_name}"); + let nested_object_name = projection_tree_into_object_type( + context, + &desired_object_type_name, + sub_specs, + ); + TypeConstraint::Object(nested_object_name) + } + }; + (field_name, field_type) + }) + .collect(); + let object_type = ObjectTypeConstraint { fields }; + let object_type_name = context.unique_type_name(desired_object_type_name); + context.insert_object_type(object_type_name.clone(), object_type); + object_type_name +} + +enum ProjectionTree { + Object(HashMap>), + Field(T), +} + +impl ProjectionTree { + fn insert_specification( + specifications: &mut HashMap>, + path: &[FieldName], + field_type: T, + ) -> Result<()> { + match path { + [] => Err(Error::Other( + "invalid $project: a projection key is an empty string".into(), + ))?, + [field_name] => { + let maybe_old_value = + specifications.insert(field_name.clone(), ProjectionTree::Field(field_type)); + if maybe_old_value.is_some() { + Err(path_collision_error(path))?; + }; + } + [first_field_name, rest @ ..] => { + let entry = specifications.entry(first_field_name.clone()); + match entry { + Entry::Occupied(mut e) => match e.get_mut() { + ProjectionTree::Object(sub_specs) => { + Self::insert_specification(sub_specs, rest, field_type)?; + } + ProjectionTree::Field(_) => Err(path_collision_error(path))?, + }, + Entry::Vacant(entry) => { + let mut sub_specs = Default::default(); + Self::insert_specification(&mut sub_specs, rest, field_type)?; + entry.insert(ProjectionTree::Object(sub_specs)); + } + }; + } + } + Ok(()) + } +} + +// Experimentation confirms that a zero value of any numeric type is interpreted as suppression of +// a field. 
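+//
+// For example, `false`, `0` (int32 or int64), `0.0`, and a decimal `0` are all treated as
+// exclusions by the checks below.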
+fn is_false_or_zero(x: &Bson) -> bool { + let decimal_zero = Decimal128::from_str("0").expect("parse 0 as decimal"); + matches!( + x, + Bson::Boolean(false) | Bson::Int32(0) | Bson::Int64(0) | Bson::Double(0.0) + ) || x == &Bson::Decimal128(decimal_zero) +} + +fn is_true_or_one(x: &Bson) -> bool { + let decimal_one = Decimal128::from_str("1").expect("parse 1 as decimal"); + matches!( + x, + Bson::Boolean(true) | Bson::Int32(1) | Bson::Int64(1) | Bson::Double(1.0) + ) || x == &Bson::Decimal128(decimal_one) +} + +fn path_collision_error(path: impl IntoIterator) -> Error { + Error::Other(format!( + "invalid $project: path collision at {}", + path.into_iter().join(".") + )) +} + +#[cfg(test)] +mod tests { + use mongodb::bson::doc; + use mongodb_support::BsonScalarType; + use nonempty::{nonempty, NonEmpty}; + use pretty_assertions::assert_eq; + use test_helpers::configuration::mflix_config; + + use crate::native_query::{ + pipeline_type_context::PipelineTypeContext, + type_constraint::{ObjectTypeConstraint, TypeConstraint}, + }; + + #[test] + fn infers_type_of_projection_in_inclusion_mode() -> anyhow::Result<()> { + let config = mflix_config(); + let mut context = PipelineTypeContext::new(&config, None); + let input_type = context.set_stage_doc_type(TypeConstraint::Object("movies".into())); + + let input = doc! { + "title": 1, + "tomatoes.critic.rating": true, + "tomatoes.critic.meter": true, + "tomatoes.lastUpdated": true, + "releaseDate": "$released", + }; + + let inferred_type = + super::infer_type_from_project_stage(&mut context, "Movie_project", &input)?; + + assert_eq!( + inferred_type, + TypeConstraint::Object("Movie_project".into()) + ); + + let object_types = context.object_types(); + let expected_object_types = [ + ( + "Movie_project".into(), + ObjectTypeConstraint { + fields: [ + ( + "_id".into(), + TypeConstraint::Scalar(BsonScalarType::ObjectId), + ), + ( + "title".into(), + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::singleton("title".into()), + }, + ), + ( + "tomatoes".into(), + TypeConstraint::Object("Movie_project_tomatoes".into()), + ), + ( + "releaseDate".into(), + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::singleton("released".into()), + }, + ), + ] + .into(), + }, + ), + ( + "Movie_project_tomatoes".into(), + ObjectTypeConstraint { + fields: [ + ( + "critic".into(), + TypeConstraint::Object("Movie_project_tomatoes_critic".into()), + ), + ( + "lastUpdated".into(), + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: nonempty!["tomatoes".into(), "lastUpdated".into()], + }, + ), + ] + .into(), + }, + ), + ( + "Movie_project_tomatoes_critic".into(), + ObjectTypeConstraint { + fields: [ + ( + "rating".into(), + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: nonempty![ + "tomatoes".into(), + "critic".into(), + "rating".into() + ], + }, + ), + ( + "meter".into(), + TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: nonempty!["tomatoes".into(), "critic".into(), "meter".into()], + }, + ), + ] + .into(), + }, + ), + ] + .into(); + + assert_eq!(object_types, &expected_object_types); + + Ok(()) + } + + #[test] + fn infers_type_of_projection_in_exclusion_mode() -> anyhow::Result<()> { + let config = mflix_config(); + let mut context = PipelineTypeContext::new(&config, None); + let input_type = context.set_stage_doc_type(TypeConstraint::Object("movies".into())); + + let input = doc! 
{ + "title": 0, + "tomatoes.critic.rating": false, + "tomatoes.critic.meter": false, + "tomatoes.lastUpdated": false, + }; + + let inferred_type = + super::infer_type_from_project_stage(&mut context, "Movie_project", &input)?; + + assert_eq!( + inferred_type, + TypeConstraint::WithFieldOverrides { + augmented_object_type_name: "Movie_project".into(), + target_type: Box::new(input_type.clone()), + fields: [ + ("title".into(), None), + ( + "tomatoes".into(), + Some(TypeConstraint::WithFieldOverrides { + augmented_object_type_name: "Movie_project_tomatoes".into(), + target_type: Box::new(TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::singleton("tomatoes".into()), + }), + fields: [ + ("lastUpdated".into(), None), + ( + "critic".into(), + Some(TypeConstraint::WithFieldOverrides { + augmented_object_type_name: "Movie_project_tomatoes_critic" + .into(), + target_type: Box::new(TypeConstraint::FieldOf { + target_type: Box::new(TypeConstraint::FieldOf { + target_type: Box::new(input_type.clone()), + path: NonEmpty::singleton("tomatoes".into()), + }), + path: NonEmpty::singleton("critic".into()), + }), + fields: [("rating".into(), None), ("meter".into(), None),] + .into(), + }) + ) + ] + .into(), + }) + ), + ] + .into(), + } + ); + + Ok(()) + } +} diff --git a/crates/cli/src/native_query/pipeline_type_context.rs b/crates/cli/src/native_query/pipeline_type_context.rs new file mode 100644 index 00000000..f5460117 --- /dev/null +++ b/crates/cli/src/native_query/pipeline_type_context.rs @@ -0,0 +1,315 @@ +#![allow(dead_code)] + +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet, HashMap}, +}; + +use configuration::{ + schema::{ObjectType, Type}, + Configuration, +}; +use itertools::Itertools as _; +use ndc_models::{ArgumentName, ObjectTypeName}; + +use super::{ + error::{Error, Result}, + helpers::unique_type_name, + prune_object_types::prune_object_types, + type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable, Variance}, + type_solver::unify, +}; + +/// Information exported from [PipelineTypeContext] after type inference is complete. +#[derive(Clone, Debug)] +pub struct PipelineTypes { + pub result_document_type: ObjectTypeName, + pub parameter_types: BTreeMap, + pub object_types: BTreeMap, + pub warnings: Vec, +} + +#[derive(Clone, Debug)] +pub struct PipelineTypeContext<'a> { + configuration: &'a Configuration, + + /// Document type for inputs to the pipeline stage being evaluated. At the start of the + /// pipeline this is the document type for the input collection, if there is one. + input_doc_type: Option, + + parameter_types: BTreeMap, + + /// Object types defined in the process of type inference. 
[self.input_doc_type] may refer to + /// to a type here, or in [self.configuration.object_types] + object_types: BTreeMap, + + type_variables: HashMap>, + next_type_variable: u32, + + warnings: Vec, +} + +impl PipelineTypeContext<'_> { + pub fn new( + configuration: &Configuration, + input_collection_document_type: Option, + ) -> PipelineTypeContext<'_> { + let mut context = PipelineTypeContext { + configuration, + input_doc_type: None, + parameter_types: Default::default(), + object_types: Default::default(), + type_variables: Default::default(), + next_type_variable: 0, + warnings: Default::default(), + }; + + if let Some(type_name) = input_collection_document_type { + context.set_stage_doc_type(TypeConstraint::Object(type_name)); + } + + context + } + + #[cfg(test)] + pub fn object_types(&self) -> &BTreeMap { + &self.object_types + } + + #[cfg(test)] + pub fn type_variables(&self) -> &HashMap> { + &self.type_variables + } + + pub fn into_types(self) -> Result { + let result_document_type_variable = self.input_doc_type.ok_or(Error::IncompletePipeline)?; + let required_type_variables = self + .parameter_types + .values() + .copied() + .chain([result_document_type_variable]) + .collect_vec(); + + #[cfg(test)] + { + println!("variable mappings:"); + for (parameter, variable) in self.parameter_types.iter() { + println!(" {variable}: {parameter}"); + } + println!(" {result_document_type_variable}: result type\n"); + } + + let mut object_type_constraints = self.object_types; + let (variable_types, added_object_types) = unify( + self.configuration, + &required_type_variables, + &mut object_type_constraints, + self.type_variables.clone(), + ) + .map_err(|err| match err { + Error::FailedToUnify { unsolved_variables } => Error::UnableToInferTypes { + could_not_infer_return_type: unsolved_variables + .contains(&result_document_type_variable), + problem_parameter_types: self + .parameter_types + .iter() + .filter_map(|(name, variable)| { + if unsolved_variables.contains(variable) { + Some(name.clone()) + } else { + None + } + }) + .collect(), + type_variables: self.type_variables, + object_type_constraints, + }, + e => e, + })?; + + let mut result_document_type = variable_types + .get(&result_document_type_variable) + .expect("missing result type variable is missing") + .clone(); + + let mut parameter_types: BTreeMap = self + .parameter_types + .into_iter() + .map(|(parameter_name, type_variable)| { + let param_type = variable_types + .get(&type_variable) + .expect("parameter type variable is missing"); + (parameter_name, param_type.clone()) + }) + .collect(); + + // Prune added object types to remove types that are not referenced by the return type or + // by parameter types, and therefore don't need to be included in the native query + // configuration. + let object_types = { + let mut reference_types = std::iter::once(&mut result_document_type) + .chain(parameter_types.values_mut()) + .collect_vec(); + prune_object_types( + &mut reference_types, + &self.configuration.object_types, + added_object_types, + )? 
+ }; + + let result_document_type_name = match result_document_type { + Type::Object(type_name) => type_name.clone().into(), + t => Err(Error::ExpectedObject { + actual_type: t.clone(), + })?, + }; + + Ok(PipelineTypes { + result_document_type: result_document_type_name, + parameter_types, + object_types, + warnings: self.warnings, + }) + } + + pub fn new_type_variable( + &mut self, + variance: Variance, + constraints: impl IntoIterator, + ) -> TypeVariable { + let variable = TypeVariable::new(self.next_type_variable, variance); + self.next_type_variable += 1; + self.type_variables + .insert(variable, constraints.into_iter().collect()); + variable + } + + pub fn set_type_variable_constraint( + &mut self, + variable: TypeVariable, + constraint: TypeConstraint, + ) { + let entry = self + .type_variables + .get_mut(&variable) + .expect("unknown type variable"); + entry.insert(constraint); + } + + pub fn constraint_references_variable( + &self, + constraint: &TypeConstraint, + variable: TypeVariable, + ) -> bool { + let object_constraint_references_variable = |name: &ObjectTypeName| -> bool { + if let Some(object_type) = self.object_types.get(name) { + object_type.fields.iter().any(|(_, field_type)| { + self.constraint_references_variable(field_type, variable) + }) + } else { + false + } + }; + + match constraint { + TypeConstraint::ExtendedJSON => false, + TypeConstraint::Scalar(_) => false, + TypeConstraint::Object(name) => object_constraint_references_variable(name), + TypeConstraint::ArrayOf(t) => self.constraint_references_variable(t, variable), + TypeConstraint::Predicate { object_type_name } => { + object_constraint_references_variable(object_type_name) + } + TypeConstraint::Union(ts) => ts + .iter() + .any(|t| self.constraint_references_variable(t, variable)), + TypeConstraint::OneOf(ts) => ts + .iter() + .any(|t| self.constraint_references_variable(t, variable)), + TypeConstraint::Variable(v2) if *v2 == variable => true, + TypeConstraint::Variable(v2) => { + let constraints = self.type_variables.get(v2); + constraints + .iter() + .flat_map(|m| *m) + .any(|t| self.constraint_references_variable(t, variable)) + } + TypeConstraint::ElementOf(t) => self.constraint_references_variable(t, variable), + TypeConstraint::FieldOf { target_type, .. } => { + self.constraint_references_variable(target_type, variable) + } + TypeConstraint::WithFieldOverrides { + target_type, + fields, + .. + } => { + self.constraint_references_variable(target_type, variable) + || fields + .iter() + .flat_map(|(_, t)| t) + .any(|t| self.constraint_references_variable(t, variable)) + } + } + } + + pub fn insert_object_type(&mut self, name: ObjectTypeName, object_type: ObjectTypeConstraint) { + self.object_types.insert(name, object_type); + } + + /// Add a parameter to be written to the native query configuration. Implicitly registers + /// a corresponding type variable. If the parameter name has already been registered then + /// returns a reference to the already-registered type variable. 
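+    ///
+    /// Hypothetical usage sketch: registering `title` a second time reuses the same
+    /// contravariant type variable and simply adds the new constraints to it.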
+ pub fn register_parameter( + &mut self, + name: ArgumentName, + constraints: impl IntoIterator, + ) -> TypeConstraint { + let variable = if let Some(variable) = self.parameter_types.get(&name) { + *variable + } else { + let variable = self.new_type_variable(Variance::Contravariant, []); + self.parameter_types.insert(name, variable); + variable + }; + for constraint in constraints { + self.set_type_variable_constraint(variable, constraint) + } + TypeConstraint::Variable(variable) + } + + pub fn unique_type_name(&self, desired_type_name: &str) -> ObjectTypeName { + unique_type_name( + &self.configuration.object_types, + &self.object_types, + desired_type_name, + ) + } + + pub fn set_stage_doc_type(&mut self, doc_type: TypeConstraint) -> TypeConstraint { + let variable = self.new_type_variable(Variance::Covariant, [doc_type]); + self.input_doc_type = Some(variable); + TypeConstraint::Variable(variable) + } + + pub fn add_warning(&mut self, warning: Error) { + self.warnings.push(warning); + } + + pub fn get_object_type(&self, name: &ObjectTypeName) -> Option> { + if let Some(object_type) = self.configuration.object_types.get(name) { + let schema_object_type = object_type.clone().into(); + return Some(Cow::Owned(schema_object_type)); + } + if let Some(object_type) = self.object_types.get(name) { + return Some(Cow::Borrowed(object_type)); + } + None + } + + pub fn get_input_document_type(&self) -> Result { + let variable = self + .input_doc_type + .as_ref() + .ok_or(Error::IncompletePipeline)?; + Ok(TypeConstraint::Variable(*variable)) + } +} diff --git a/crates/cli/src/native_query/pretty_printing.rs b/crates/cli/src/native_query/pretty_printing.rs new file mode 100644 index 00000000..7543393d --- /dev/null +++ b/crates/cli/src/native_query/pretty_printing.rs @@ -0,0 +1,239 @@ +use std::path::Path; + +use configuration::{schema::ObjectType, serialized::NativeQuery}; +use itertools::Itertools; +use pretty::{ + termcolor::{Color, ColorSpec, StandardStream}, + BoxAllocator, DocAllocator, DocBuilder, Pretty, +}; +use tokio::task; + +/// Prints metadata for a native query, excluding its pipeline +pub async fn pretty_print_native_query_info( + output: &mut StandardStream, + native_query: &NativeQuery, +) -> std::io::Result<()> { + task::block_in_place(move || { + let allocator = BoxAllocator; + native_query_info_printer(native_query, &allocator) + .1 + .render_colored(80, output)?; + Ok(()) + }) +} + +/// Prints metadata for a native query including its pipeline +pub async fn pretty_print_native_query( + output: &mut StandardStream, + native_query: &NativeQuery, + path: &Path, +) -> std::io::Result<()> { + task::block_in_place(move || { + let allocator = BoxAllocator; + native_query_printer(native_query, path, &allocator) + .1 + .render_colored(80, output)?; + Ok(()) + }) +} + +fn native_query_printer<'a, D>( + nq: &'a NativeQuery, + path: &'a Path, + allocator: &'a D, +) -> DocBuilder<'a, D, ColorSpec> +where + D: DocAllocator<'a, ColorSpec>, + D::Doc: Clone, +{ + let source = definition_list_entry( + "configuration source", + allocator.text(path.to_string_lossy()), + allocator, + ); + let info = native_query_info_printer(nq, allocator); + let pipeline = section( + "pipeline", + allocator.text(serde_json::to_string_pretty(&nq.pipeline).unwrap()), + allocator, + ); + allocator.intersperse([source, info, pipeline], allocator.hardline()) +} + +fn native_query_info_printer<'a, D>( + nq: &'a NativeQuery, + allocator: &'a D, +) -> DocBuilder<'a, D, ColorSpec> +where + D: DocAllocator<'a, ColorSpec>, + 
D::Doc: Clone, +{ + let input_collection = nq.input_collection.as_ref().map(|collection| { + definition_list_entry( + "input collection", + allocator.text(collection.to_string()), + allocator, + ) + }); + + let representation = Some(definition_list_entry( + "representation", + allocator.text(nq.representation.to_str()), + allocator, + )); + + let parameters = if !nq.arguments.is_empty() { + let params = nq.arguments.iter().map(|(name, definition)| { + allocator + .text(name.to_string()) + .annotate(field_name()) + .append(allocator.text(": ")) + .append( + allocator + .text(definition.r#type.to_string()) + .annotate(type_expression()), + ) + }); + Some(section( + "parameters", + allocator.intersperse(params, allocator.line()), + allocator, + )) + } else { + None + }; + + let result_type = { + let body = if let Some(object_type) = nq.object_types.get(&nq.result_document_type) { + object_type_printer(object_type, allocator) + } else { + allocator.text(nq.result_document_type.to_string()) + }; + Some(section("result type", body, allocator)) + }; + + let other_object_types = nq + .object_types + .iter() + .filter(|(name, _)| **name != nq.result_document_type) + .collect_vec(); + let object_types_doc = if !other_object_types.is_empty() { + let docs = other_object_types.into_iter().map(|(name, definition)| { + allocator + .text(format!("{name} ")) + .annotate(object_type_name()) + .append(object_type_printer(definition, allocator)) + }); + let separator = allocator.line().append(allocator.line()); + Some(section( + "object type definitions", + allocator.intersperse(docs, separator), + allocator, + )) + } else { + None + }; + + allocator.intersperse( + [ + input_collection, + representation, + parameters, + result_type, + object_types_doc, + ] + .into_iter() + .filter(Option::is_some), + allocator.hardline(), + ) +} + +fn object_type_printer<'a, D>(ot: &'a ObjectType, allocator: &'a D) -> DocBuilder<'a, D, ColorSpec> +where + D: DocAllocator<'a, ColorSpec>, + D::Doc: Clone, +{ + let fields = ot.fields.iter().map(|(name, definition)| { + allocator + .text(name.to_string()) + .annotate(field_name()) + .append(allocator.text(": ")) + .append( + allocator + .text(definition.r#type.to_string()) + .annotate(type_expression()), + ) + }); + let separator = allocator.text(",").append(allocator.line()); + let body = allocator.intersperse(fields, separator); + body.indent(2).enclose( + allocator.text("{").append(allocator.line()), + allocator.line().append(allocator.text("}")), + ) +} + +fn definition_list_entry<'a, D>( + label: &'a str, + body: impl Pretty<'a, D, ColorSpec>, + allocator: &'a D, +) -> DocBuilder<'a, D, ColorSpec> +where + D: DocAllocator<'a, ColorSpec>, + D::Doc: Clone, +{ + allocator + .text(label) + .annotate(definition_list_label()) + .append(allocator.text(": ")) + .append(body) +} + +fn section<'a, D>( + heading: &'a str, + body: impl Pretty<'a, D, ColorSpec>, + allocator: &'a D, +) -> DocBuilder<'a, D, ColorSpec> +where + D: DocAllocator<'a, ColorSpec>, + D::Doc: Clone, +{ + let heading_doc = allocator + .text("## ") + .append(heading) + .annotate(section_heading()); + allocator + .line() + .append(heading_doc) + .append(allocator.line()) + .append(allocator.line()) + .append(body) +} + +fn section_heading() -> ColorSpec { + let mut color = ColorSpec::new(); + color.set_fg(Some(Color::Red)); + color.set_bold(true); + color +} + +fn definition_list_label() -> ColorSpec { + let mut color = ColorSpec::new(); + color.set_fg(Some(Color::Blue)); + color +} + +fn field_name() -> ColorSpec { 
+ let mut color = ColorSpec::new(); + color.set_fg(Some(Color::Yellow)); + color +} + +fn object_type_name() -> ColorSpec { + // placeholder in case we want styling here in the future + ColorSpec::new() +} + +fn type_expression() -> ColorSpec { + // placeholder in case we want styling here in the future + ColorSpec::new() +} diff --git a/crates/cli/src/native_query/prune_object_types.rs b/crates/cli/src/native_query/prune_object_types.rs new file mode 100644 index 00000000..fa819e7a --- /dev/null +++ b/crates/cli/src/native_query/prune_object_types.rs @@ -0,0 +1,290 @@ +use std::collections::{BTreeMap, HashSet}; + +use configuration::schema::{ObjectField, ObjectType, Type}; +use itertools::Itertools as _; +use ndc_models::ObjectTypeName; + +use crate::native_query::helpers::{parse_counter_suffix, unique_type_name}; + +use super::error::{Error, Result}; + +/// Filters map of object types to get only types that are referenced directly or indirectly from +/// the set of reference types. +pub fn prune_object_types( + reference_types: &mut [&mut Type], + existing_object_types: &BTreeMap, + added_object_types: BTreeMap, +) -> Result> { + let mut required_type_names = HashSet::new(); + for t in &*reference_types { + collect_names_from_type( + existing_object_types, + &added_object_types, + &mut required_type_names, + t, + )?; + } + let mut pruned_object_types = added_object_types + .into_iter() + .filter(|(name, _)| required_type_names.contains(name)) + .collect(); + + simplify_type_names( + reference_types, + existing_object_types, + &mut pruned_object_types, + ); + + Ok(pruned_object_types) +} + +fn collect_names_from_type( + existing_object_types: &BTreeMap, + added_object_types: &BTreeMap, + found_type_names: &mut HashSet, + input_type: &Type, +) -> Result<()> { + match input_type { + Type::Object(type_name) => { + let object_type_name = mk_object_type_name(type_name); + collect_names_from_object_type( + existing_object_types, + added_object_types, + found_type_names, + &object_type_name, + )?; + found_type_names.insert(object_type_name); + } + Type::Predicate { object_type_name } => { + let object_type_name = object_type_name.clone(); + collect_names_from_object_type( + existing_object_types, + added_object_types, + found_type_names, + &object_type_name, + )?; + found_type_names.insert(object_type_name); + } + Type::ArrayOf(t) => collect_names_from_type( + existing_object_types, + added_object_types, + found_type_names, + t, + )?, + Type::Nullable(t) => collect_names_from_type( + existing_object_types, + added_object_types, + found_type_names, + t, + )?, + Type::ExtendedJSON => (), + Type::Scalar(_) => (), + }; + Ok(()) +} + +fn collect_names_from_object_type( + existing_object_types: &BTreeMap, + object_types: &BTreeMap, + found_type_names: &mut HashSet, + input_type_name: &ObjectTypeName, +) -> Result<()> { + if existing_object_types.contains_key(input_type_name) { + return Ok(()); + } + let object_type = object_types + .get(input_type_name) + .ok_or_else(|| Error::UnknownObjectType(input_type_name.to_string()))?; + for (_, field) in object_type.fields.iter() { + collect_names_from_type( + existing_object_types, + object_types, + found_type_names, + &field.r#type, + )?; + } + Ok(()) +} + +/// The system for generating unique object type names uses numeric suffixes. After pruning we may +/// be able to remove these suffixes. 
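+///
+/// For example (based on the `prunes_and_simplifies_object_types` test below): if only
+/// `Documents_2` survives pruning and the bare name `Documents` is otherwise unused, the type
+/// is renamed back to `Documents`.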
+fn simplify_type_names( + reference_types: &mut [&mut Type], + existing_object_types: &BTreeMap, + added_object_types: &mut BTreeMap, +) { + let names = added_object_types.keys().cloned().collect_vec(); + for name in names { + let (name_root, count) = parse_counter_suffix(name.as_str()); + let maybe_simplified_name = + unique_type_name(existing_object_types, added_object_types, &name_root); + let (_, new_count) = parse_counter_suffix(maybe_simplified_name.as_str()); + if new_count < count { + rename_object_type( + reference_types, + added_object_types, + &name, + &maybe_simplified_name, + ); + } + } +} + +fn rename_object_type( + reference_types: &mut [&mut Type], + object_types: &mut BTreeMap, + old_name: &ObjectTypeName, + new_name: &ObjectTypeName, +) { + for t in reference_types.iter_mut() { + **t = rename_type_helper(old_name, new_name, (*t).clone()); + } + + let renamed_object_types = object_types + .clone() + .into_iter() + .map(|(name, object_type)| { + let new_type_name = if &name == old_name { + new_name.clone() + } else { + name + }; + let new_object_type = rename_object_type_helper(old_name, new_name, object_type); + (new_type_name, new_object_type) + }) + .collect(); + *object_types = renamed_object_types; +} + +fn rename_type_helper( + old_name: &ObjectTypeName, + new_name: &ObjectTypeName, + input_type: Type, +) -> Type { + let old_name_string = old_name.to_string(); + + match input_type { + Type::Object(name) => { + if name == old_name_string { + Type::Object(new_name.to_string()) + } else { + Type::Object(name) + } + } + Type::Predicate { object_type_name } => { + if &object_type_name == old_name { + Type::Predicate { + object_type_name: new_name.clone(), + } + } else { + Type::Predicate { object_type_name } + } + } + Type::ArrayOf(t) => Type::ArrayOf(Box::new(rename_type_helper(old_name, new_name, *t))), + Type::Nullable(t) => Type::Nullable(Box::new(rename_type_helper(old_name, new_name, *t))), + t @ Type::Scalar(_) => t, + t @ Type::ExtendedJSON => t, + } +} + +fn rename_object_type_helper( + old_name: &ObjectTypeName, + new_name: &ObjectTypeName, + object_type: ObjectType, +) -> ObjectType { + let new_fields = object_type + .fields + .into_iter() + .map(|(name, field)| { + let new_field = ObjectField { + r#type: rename_type_helper(old_name, new_name, field.r#type), + description: field.description, + }; + (name, new_field) + }) + .collect(); + ObjectType { + fields: new_fields, + description: object_type.description, + } +} + +fn mk_object_type_name(name: &str) -> ObjectTypeName { + name.into() +} + +#[cfg(test)] +mod tests { + use configuration::schema::{ObjectField, ObjectType, Type}; + use googletest::prelude::*; + + use super::prune_object_types; + + #[googletest::test] + fn prunes_and_simplifies_object_types() -> Result<()> { + let mut result_type = Type::Object("Documents_2".into()); + let mut reference_types = [&mut result_type]; + let existing_object_types = Default::default(); + + let added_object_types = [ + ( + "Documents_1".into(), + ObjectType { + fields: [( + "bar".into(), + ObjectField { + r#type: Type::Scalar(mongodb_support::BsonScalarType::String), + description: None, + }, + )] + .into(), + description: None, + }, + ), + ( + "Documents_2".into(), + ObjectType { + fields: [( + "foo".into(), + ObjectField { + r#type: Type::Scalar(mongodb_support::BsonScalarType::String), + description: None, + }, + )] + .into(), + description: None, + }, + ), + ] + .into(); + + let pruned = prune_object_types( + &mut reference_types, + &existing_object_types, + 
added_object_types, + )?; + + expect_eq!( + pruned, + [( + "Documents".into(), + ObjectType { + fields: [( + "foo".into(), + ObjectField { + r#type: Type::Scalar(mongodb_support::BsonScalarType::String), + description: None, + }, + )] + .into(), + description: None, + }, + )] + .into() + ); + + expect_eq!(result_type, Type::Object("Documents".into())); + + Ok(()) + } +} diff --git a/crates/cli/src/native_query/reference_shorthand.rs b/crates/cli/src/native_query/reference_shorthand.rs new file mode 100644 index 00000000..100d05e1 --- /dev/null +++ b/crates/cli/src/native_query/reference_shorthand.rs @@ -0,0 +1,153 @@ +use configuration::schema::Type; +use ndc_models::FieldName; +use nom::{ + branch::alt, + bytes::complete::{tag, take_while1}, + character::complete::{alpha1, alphanumeric1, multispace0}, + combinator::{all_consuming, cut, map, opt, recognize}, + error::ParseError, + multi::{many0, many0_count}, + sequence::{delimited, pair, preceded}, + IResult, Parser, +}; + +use super::{ + error::{Error, Result}, + type_annotation::type_expression, +}; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Reference { + /// Reference to a variable that is substituted by the connector from GraphQL inputs before + /// sending to MongoDB. For example, `"{{ artist_id }}`. + NativeQueryVariable { + name: String, + type_annotation: Option, + }, + + /// Reference to a variable that is defined as part of the pipeline syntax. May be followed by + /// a dot-separated path to a nested field. For example, `"$$CURRENT.foo.bar"` + PipelineVariable { + name: String, + nested_path: Vec, + }, + + /// Reference to a field of the input document. May be followed by a dot-separated path to + /// a nested field. For example, `"$tomatoes.viewer.rating"` + InputDocumentField { + name: FieldName, + nested_path: Vec, + }, + + /// The expression evaluates to a string. The string may contain native query variable + /// references which implicitly have type String. + String { native_query_variables: Vec }, +} + +pub fn parse_reference_shorthand(input: &str) -> Result { + match reference_shorthand(input) { + Ok((_, r)) => Ok(r), + Err(err) => Err(Error::UnableToParseReferenceShorthand(format!("{err}"))), + } +} + +/// Reference shorthand is a string in an aggregation expression that may evaluate to the value of +/// a field of the input document if the string begins with $, or to a variable if it begins with +/// $$, or may be a plain string. +fn reference_shorthand(input: &str) -> IResult<&str, Reference> { + all_consuming(alt(( + native_query_variable, + pipeline_variable, + input_document_field, + plain_string, + )))(input) +} + +// A native query variable placeholder might be embedded in a larger string. But in that case the +// expression evaluates to a string so we ignore it. +fn native_query_variable(input: &str) -> IResult<&str, Reference> { + let placeholder_content = |input| { + map(take_while1(|c| c != '}' && c != '|'), |content: &str| { + content.trim() + })(input) + }; + let type_annotation = preceded(ws(tag("|")), type_expression); + + let (remaining, (name, variable_type)) = delimited( + tag("{{"), + cut(ws(pair(ws(placeholder_content), ws(opt(type_annotation))))), + tag("}}"), + )(input)?; + // Since the native_query_variable parser runs inside an `alt`, the use of `cut` commits to + // this branch of the `alt` after successfully parsing the opening "{{" characters. 
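+    // Hypothetical illustration: "{{ min_rating | int! }}" yields the name "min_rating" with an
+    // `int!` annotation, while a malformed placeholder like "{{ min_rating" produces a hard
+    // failure here rather than falling through to the plain-string parser.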
+ + let variable = Reference::NativeQueryVariable { + name: name.to_string(), + type_annotation: variable_type, + }; + Ok((remaining, variable)) +} + +fn pipeline_variable(input: &str) -> IResult<&str, Reference> { + let variable_parser = preceded(tag("$$"), cut(mongodb_variable_name)); + let (remaining, (name, path)) = pair(variable_parser, nested_path)(input)?; + let variable = Reference::PipelineVariable { + name: name.to_string(), + nested_path: path, + }; + Ok((remaining, variable)) +} + +fn input_document_field(input: &str) -> IResult<&str, Reference> { + let field_parser = preceded(tag("$"), cut(mongodb_variable_name)); + let (remaining, (name, path)) = pair(field_parser, nested_path)(input)?; + let field = Reference::InputDocumentField { + name: name.into(), + nested_path: path, + }; + Ok((remaining, field)) +} + +fn mongodb_variable_name(input: &str) -> IResult<&str, &str> { + let first_char = alt((alpha1, tag("_"))); + let succeeding_char = alt((alphanumeric1, tag("_"), non_ascii1)); + recognize(pair(first_char, many0_count(succeeding_char)))(input) +} + +fn nested_path(input: &str) -> IResult<&str, Vec> { + let component_parser = preceded(tag("."), take_while1(|c| c != '.')); + let (remaining, components) = many0(component_parser)(input)?; + Ok(( + remaining, + components.into_iter().map(|c| c.into()).collect(), + )) +} + +fn non_ascii1(input: &str) -> IResult<&str, &str> { + take_while1(is_non_ascii)(input) +} + +fn is_non_ascii(char: char) -> bool { + char as u8 > 127 +} + +fn plain_string(_input: &str) -> IResult<&str, Reference> { + // TODO: parse variable references embedded in strings ENG-1250 + Ok(( + "", + Reference::String { + native_query_variables: Default::default(), + }, + )) +} + +/// A combinator that takes a parser `inner` and produces a parser that also consumes both leading and +/// trailing whitespace, returning the output of `inner`. +/// +/// From https://github.com/rust-bakery/nom/blob/main/doc/nom_recipes.md#wrapper-combinators-that-eat-whitespace-before-and-after-a-parser +fn ws<'a, O, E: ParseError<&'a str>, F>(inner: F) -> impl Parser<&'a str, O, E> +where + F: Parser<&'a str, O, E>, +{ + delimited(multispace0, inner, multispace0) +} diff --git a/crates/cli/src/native_query/tests.rs b/crates/cli/src/native_query/tests.rs new file mode 100644 index 00000000..1a543724 --- /dev/null +++ b/crates/cli/src/native_query/tests.rs @@ -0,0 +1,508 @@ +use std::collections::BTreeMap; + +use anyhow::Result; +use configuration::{ + native_query::NativeQueryRepresentation::Collection, + schema::{ObjectField, ObjectType, Type}, + serialized::NativeQuery, +}; +use googletest::prelude::*; +use itertools::Itertools as _; +use mongodb::bson::doc; +use mongodb_support::{ + aggregate::{Accumulator, Pipeline, Selection, Stage}, + BsonScalarType, +}; +use ndc_models::{ArgumentName, FieldName, ObjectTypeName}; +use pretty_assertions::assert_eq; +use test_helpers::configuration::mflix_config; + +use super::native_query_from_pipeline; + +#[tokio::test] +async fn infers_native_query_from_pipeline() -> Result<()> { + let config = mflix_config(); + let pipeline = Pipeline::new(vec![Stage::Documents(vec![ + doc! { "foo": 1 }, + doc! 
{ "bar": 2 }, + ])]); + let native_query = native_query_from_pipeline( + &config, + "selected_title", + Some("movies".into()), + pipeline.clone(), + )?; + + let expected_document_type_name: ObjectTypeName = "selected_title_documents".into(); + + let expected_object_types = [( + expected_document_type_name.clone(), + ObjectType { + fields: [ + ( + "foo".into(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), + description: None, + }, + ), + ( + "bar".into(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), + description: None, + }, + ), + ] + .into(), + description: None, + }, + )] + .into(); + + let expected = NativeQuery { + representation: Collection, + input_collection: Some("movies".into()), + arguments: Default::default(), + result_document_type: expected_document_type_name, + object_types: expected_object_types, + pipeline: pipeline.into(), + description: None, + }; + + assert_eq!(native_query, expected); + Ok(()) +} + +#[tokio::test] +async fn infers_native_query_from_non_trivial_pipeline() -> Result<()> { + let config = mflix_config(); + let pipeline = Pipeline::new(vec![ + Stage::ReplaceWith(Selection::new(doc! { + "title": "$title", + "title_words": { "$split": ["$title", " "] } + })), + Stage::Unwind { + path: "$title_words".to_string(), + include_array_index: None, + preserve_null_and_empty_arrays: None, + }, + Stage::Group { + key_expression: "$title_words".into(), + accumulators: [("title_count".into(), Accumulator::Count)].into(), + }, + ]); + let native_query = native_query_from_pipeline( + &config, + "title_word_frequency", + Some("movies".into()), + pipeline.clone(), + )?; + + assert_eq!(native_query.input_collection, Some("movies".into())); + assert!(native_query + .result_document_type + .to_string() + .starts_with("title_word_frequency")); + assert_eq!( + native_query + .object_types + .get(&native_query.result_document_type), + Some(&ObjectType { + fields: [ + ( + "_id".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None, + }, + ), + ( + "title_count".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::Int), + description: None, + }, + ), + ] + .into(), + description: None, + }) + ); + Ok(()) +} + +#[googletest::test] +fn infers_native_query_from_pipeline_with_unannotated_parameter() -> googletest::Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Match(doc! { + "title": { "$eq": "{{ title }}" }, + })]); + + let native_query = + native_query_from_pipeline(&config, "movies_by_title", Some("movies".into()), pipeline)?; + + expect_that!( + native_query.arguments, + unordered_elements_are![( + displays_as(eq("title")), + field!( + ObjectField.r#type, + eq(&Type::Scalar(BsonScalarType::String)) + ) + )] + ); + Ok(()) +} + +#[googletest::test] +fn reads_parameter_type_annotation() -> googletest::Result<()> { + let config = mflix_config(); + + // Parameter type would be inferred as double without this annotation + let pipeline = Pipeline::new(vec![Stage::Match(doc! { + "imdb.rating": { "$gt": "{{ min_rating | int! 
}}" }, + })]); + + let native_query = native_query_from_pipeline( + &config, + "movies_by_min_rating", + Some("movies".into()), + pipeline, + )?; + + expect_that!( + native_query.arguments, + unordered_elements_are![( + eq(&ArgumentName::from("min_rating")), + field!(ObjectField.r#type, eq(&Type::Scalar(BsonScalarType::Int))) + )] + ); + Ok(()) +} + +#[googletest::test] +fn emits_error_on_incorrect_parameter_type_annotation() -> googletest::Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Match(doc! { + "title": { "$eq": "{{ title | decimal }}" }, + })]); + + let native_query = + native_query_from_pipeline(&config, "movies_by_title", Some("movies".into()), pipeline); + + expect_that!( + native_query, + err(displays_as(contains_substring( + "string! is not compatible with decimal" + ))) + ); + Ok(()) +} + +#[googletest::test] +fn infers_parameter_type_from_binary_comparison() -> googletest::Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Match(doc! { + "$expr": { "$eq": ["{{ title }}", "$title"] } + })]); + + let native_query = + native_query_from_pipeline(&config, "movies_by_title", Some("movies".into()), pipeline)?; + + expect_that!( + native_query.arguments, + unordered_elements_are![( + displays_as(eq("title")), + field!( + ObjectField.r#type, + eq(&Type::Scalar(BsonScalarType::String)) + ) + )] + ); + Ok(()) +} + +#[googletest::test] +fn supports_various_query_predicate_operators() -> googletest::Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Match(doc! { + "title": { "$eq": "{{ title }}" }, + "rated": { "$ne": "{{ rating }}" }, + "year": "{{ year_1 }}", + "imdb.votes": { "$gt": "{{ votes }}" }, + "num_mflix_comments": { "$in": "{{ num_comments_options }}" }, + "$not": { "runtime": { "$lt": "{{ runtime }}" } }, + "tomatoes.critic": { "$exists": "{{ critic_exists }}" }, + "released": { "$type": ["date", "{{ other_type }}"] }, + "$or": [ + { "$and": [ + { "writers": { "$eq": "{{ writers }}" } }, + { "year": "{{ year_2 }}", } + ] }, + { + "year": { "$mod": ["{{ divisor }}", "{{ expected_remainder }}"] }, + "title": { "$regex": "{{ title_regex }}" }, + }, + ], + "$and": [ + { "genres": { "$all": "{{ genres }}" } }, + { "genres": { "$all": ["{{ genre_1 }}"] } }, + { "genres": { "$elemMatch": { + "$gt": "{{ genre_start }}", + "$lt": "{{ genre_end }}", + }} }, + { "genres": { "$size": "{{ genre_size }}" } }, + ], + })]); + + let native_query = + native_query_from_pipeline(&config, "operators_test", Some("movies".into()), pipeline)?; + + expect_eq!( + native_query.arguments, + object_fields([ + ("title", Type::Scalar(BsonScalarType::String)), + ("rating", Type::Scalar(BsonScalarType::String)), + ("year_1", Type::Scalar(BsonScalarType::Int)), + ("year_2", Type::Scalar(BsonScalarType::Int)), + ("votes", Type::Scalar(BsonScalarType::Int)), + ( + "num_comments_options", + Type::ArrayOf(Box::new(Type::Scalar(BsonScalarType::Int))) + ), + ("runtime", Type::Scalar(BsonScalarType::Int)), + ("critic_exists", Type::Scalar(BsonScalarType::Bool)), + ("other_type", Type::Scalar(BsonScalarType::String)), + ( + "writers", + Type::ArrayOf(Box::new(Type::Scalar(BsonScalarType::String))) + ), + ("divisor", Type::Scalar(BsonScalarType::Int)), + ("expected_remainder", Type::Scalar(BsonScalarType::Int)), + ("title_regex", Type::Scalar(BsonScalarType::Regex)), + ( + "genres", + Type::ArrayOf(Box::new(Type::Scalar(BsonScalarType::String))) + ), + ("genre_1", 
Type::Scalar(BsonScalarType::String)), + ("genre_start", Type::Scalar(BsonScalarType::String)), + ("genre_end", Type::Scalar(BsonScalarType::String)), + ("genre_size", Type::Scalar(BsonScalarType::Int)), + ]) + ); + + Ok(()) +} + +#[googletest::test] +fn supports_various_aggregation_operators() -> googletest::Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![ + Stage::Match(doc! { + "$expr": { + "$and": [ + { "$eq": ["{{ title }}", "$title"] }, + { "$or": [null, 1] }, + { "$not": "{{ bool_param }}" }, + { "$gt": ["$imdb.votes", "{{ votes }}"] }, + ] + } + }), + Stage::ReplaceWith(Selection::new(doc! { + "abs": { "$abs": "$year" }, + "add": { "$add": ["$tomatoes.viewer.rating", "{{ rating_inc }}"] }, + "divide": { "$divide": ["$tomatoes.viewer.rating", "{{ rating_div }}"] }, + "multiply": { "$multiply": ["$tomatoes.viewer.rating", "{{ rating_mult }}"] }, + "subtract": { "$subtract": ["$tomatoes.viewer.rating", "{{ rating_sub }}"] }, + "arrayElemAt": { "$arrayElemAt": ["$genres", "{{ idx }}"] }, + "title_words": { "$split": ["$title", " "] } + })), + ]); + + let native_query = + native_query_from_pipeline(&config, "operators_test", Some("movies".into()), pipeline)?; + + expect_eq!( + native_query.arguments, + object_fields([ + ("title", Type::Scalar(BsonScalarType::String)), + ("bool_param", Type::Scalar(BsonScalarType::Bool)), + ("votes", Type::Scalar(BsonScalarType::Int)), + ("rating_inc", Type::Scalar(BsonScalarType::Double)), + ("rating_div", Type::Scalar(BsonScalarType::Double)), + ("rating_mult", Type::Scalar(BsonScalarType::Double)), + ("rating_sub", Type::Scalar(BsonScalarType::Double)), + ("idx", Type::Scalar(BsonScalarType::Int)), + ]) + ); + + let result_type = native_query.result_document_type; + expect_eq!( + native_query.object_types[&result_type], + ObjectType { + fields: object_fields([ + ("abs", Type::Scalar(BsonScalarType::Int)), + ("add", Type::Scalar(BsonScalarType::Double)), + ("divide", Type::Scalar(BsonScalarType::Double)), + ("multiply", Type::Scalar(BsonScalarType::Double)), + ("subtract", Type::Scalar(BsonScalarType::Double)), + ( + "arrayElemAt", + Type::Nullable(Box::new(Type::Scalar(BsonScalarType::String))) + ), + ( + "title_words", + Type::ArrayOf(Box::new(Type::Scalar(BsonScalarType::String))) + ), + ]), + description: None, + } + ); + + Ok(()) +} + +#[googletest::test] +fn supports_project_stage_in_exclusion_mode() -> Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Project(doc! { + "title": 0, + "tomatoes.critic.rating": false, + "tomatoes.critic.meter": false, + "tomatoes.lastUpdated": false, + })]); + + let native_query = + native_query_from_pipeline(&config, "project_test", Some("movies".into()), pipeline)?; + + let result_type_name = native_query.result_document_type; + let result_type = &native_query.object_types[&result_type_name]; + + expect_false!(result_type.fields.contains_key("title")); + + let tomatoes_type_name = match result_type.fields.get("tomatoes") { + Some(ObjectField { + r#type: Type::Object(name), + .. 
+ }) => ObjectTypeName::from(name.clone()), + _ => panic!("tomatoes field does not have an object type"), + }; + let tomatoes_type = &native_query.object_types[&tomatoes_type_name]; + expect_that!( + tomatoes_type.fields.keys().collect_vec(), + unordered_elements_are![&&FieldName::from("viewer"), &&FieldName::from("critic")] + ); + expect_eq!( + tomatoes_type.fields["viewer"].r#type, + Type::Object("TomatoesCriticViewer".into()), + ); + + let critic_type_name = match tomatoes_type.fields.get("critic") { + Some(ObjectField { + r#type: Type::Object(name), + .. + }) => ObjectTypeName::from(name.clone()), + _ => panic!("tomatoes.critic field does not have an object type"), + }; + let critic_type = &native_query.object_types[&critic_type_name]; + expect_eq!( + critic_type.fields, + object_fields([("numReviews", Type::Scalar(BsonScalarType::Int))]), + ); + + Ok(()) +} + +#[googletest::test] +fn supports_project_stage_in_inclusion_mode() -> Result<()> { + let config = mflix_config(); + + let pipeline = Pipeline::new(vec![Stage::Project(doc! { + "title": 1, + "tomatoes.critic.rating": true, + "tomatoes.critic.meter": true, + "tomatoes.lastUpdated": true, + "releaseDate": "$released", + })]); + + let native_query = + native_query_from_pipeline(&config, "inclusion", Some("movies".into()), pipeline)?; + + expect_eq!( + native_query.result_document_type, + "inclusion_project".into() + ); + + expect_eq!( + native_query.object_types, + [ + ( + "inclusion_project".into(), + ObjectType { + fields: object_fields([ + ("_id", Type::Scalar(BsonScalarType::ObjectId)), + ("title", Type::Scalar(BsonScalarType::String)), + ( + "tomatoes", + Type::Object("inclusion_project_tomatoes".into()) + ), + ("releaseDate", Type::Scalar(BsonScalarType::Date)), + ]), + description: None + } + ), + ( + "inclusion_project_tomatoes".into(), + ObjectType { + fields: object_fields([ + ( + "critic", + Type::Object("inclusion_project_tomatoes_critic".into()) + ), + ("lastUpdated", Type::Scalar(BsonScalarType::Date)), + ]), + description: None + } + ), + ( + "inclusion_project_tomatoes_critic".into(), + ObjectType { + fields: object_fields([ + ("rating", Type::Scalar(BsonScalarType::Double)), + ("meter", Type::Scalar(BsonScalarType::Int)), + ]), + description: None + } + ) + ] + .into(), + ); + + Ok(()) +} + +fn object_fields(types: impl IntoIterator) -> BTreeMap +where + S: Into, + K: Ord, +{ + types + .into_iter() + .map(|(name, r#type)| { + ( + name.into(), + ObjectField { + r#type, + description: None, + }, + ) + }) + .collect() +} diff --git a/crates/cli/src/native_query/type_annotation.rs b/crates/cli/src/native_query/type_annotation.rs new file mode 100644 index 00000000..91f0f9a7 --- /dev/null +++ b/crates/cli/src/native_query/type_annotation.rs @@ -0,0 +1,198 @@ +use configuration::schema::Type; +use enum_iterator::all; +use itertools::Itertools; +use mongodb_support::BsonScalarType; +use nom::{ + branch::alt, + bytes::complete::tag, + character::complete::{alpha1, alphanumeric1, multispace0}, + combinator::{cut, opt, recognize}, + error::ParseError, + multi::many0_count, + sequence::{delimited, pair, preceded, terminated}, + IResult, Parser, +}; + +/// Nom parser for type expressions Parse a type expression according to GraphQL syntax, using +/// MongoDB scalar type names. +/// +/// This implies that types are nullable by default unless they use the non-nullable suffix (!). 
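+///
+/// As an illustration (mirroring the unit tests at the bottom of this module, with `Box` and
+/// `Nullable` wrappers written informally):
+///
+/// * `double` parses to `Nullable(Scalar(Double))`
+/// * `double!` parses to `Scalar(Double)`
+/// * `[double!]!` parses to `ArrayOf(Scalar(Double))`
+/// * `predicate<obj>` parses to `Nullable(Predicate { object_type_name: "obj" })`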
+pub fn type_expression(input: &str) -> IResult<&str, Type> { + nullability_suffix(alt(( + extended_json_annotation, + scalar_annotation, + predicate_annotation, + object_annotation, // object_annotation must follow parsers that look for fixed sets of keywords + array_of_annotation, + )))(input) +} + +fn extended_json_annotation(input: &str) -> IResult<&str, Type> { + let (remaining, _) = tag("extendedJSON")(input)?; + Ok((remaining, Type::ExtendedJSON)) +} + +fn scalar_annotation(input: &str) -> IResult<&str, Type> { + // This parser takes the first type name that matches so in cases where one type name is + // a prefix of another we must try the longer name first. Otherwise `javascriptWithScope` can + // be mistaken for the type `javascript`. So we sort type names by length in descending order. + let scalar_type_parsers = all::() + .sorted_by_key(|t| 1000 - t.bson_name().len()) + .map(|t| tag(t.bson_name()).map(move |_| Type::Nullable(Box::new(Type::Scalar(t))))); + alt_many(scalar_type_parsers)(input) +} + +fn object_annotation(input: &str) -> IResult<&str, Type> { + let (remaining, name) = object_type_name(input)?; + Ok(( + remaining, + Type::Nullable(Box::new(Type::Object(name.into()))), + )) +} + +fn predicate_annotation(input: &str) -> IResult<&str, Type> { + let (remaining, name) = preceded( + terminated(tag("predicate"), multispace0), + delimited(tag("<"), cut(ws(object_type_name)), tag(">")), + )(input)?; + Ok(( + remaining, + Type::Nullable(Box::new(Type::Predicate { + object_type_name: name.into(), + })), + )) +} + +fn object_type_name(input: &str) -> IResult<&str, &str> { + let first_char = alt((alpha1, tag("_"))); + let succeeding_char = alt((alphanumeric1, tag("_"))); + recognize(pair(first_char, many0_count(succeeding_char)))(input) +} + +fn array_of_annotation(input: &str) -> IResult<&str, Type> { + let (remaining, element_type) = delimited(tag("["), cut(ws(type_expression)), tag("]"))(input)?; + Ok(( + remaining, + Type::Nullable(Box::new(Type::ArrayOf(Box::new(element_type)))), + )) +} + +/// The other parsers produce nullable types by default. This wraps a parser that produces a type, +/// and flips the type from nullable to non-nullable if it sees the non-nullable suffix (!). +fn nullability_suffix<'a, P, E>(mut parser: P) -> impl FnMut(&'a str) -> IResult<&'a str, Type, E> +where + P: Parser<&'a str, Type, E> + 'a, + E: ParseError<&'a str>, +{ + move |input| { + let (remaining, t) = parser.parse(input)?; + let t = t.normalize_type(); // strip redundant nullable layers + let (remaining, non_nullable_suffix) = opt(preceded(multispace0, tag("!")))(remaining)?; + let t = match non_nullable_suffix { + None => t, + Some(_) => match t { + Type::Nullable(t) => *t, + t => t, + }, + }; + Ok((remaining, t)) + } +} + +/// Like [nom::branch::alt], but accepts a dynamically-constructed iterable of parsers instead of +/// a tuple. +/// +/// From https://stackoverflow.com/a/76759023/103017 +pub fn alt_many(mut parsers: Ps) -> impl FnMut(I) -> IResult +where + P: Parser, + I: Clone, + for<'a> &'a mut Ps: IntoIterator, + E: ParseError, +{ + move |input: I| { + for mut parser in &mut parsers { + if let r @ Ok(_) = parser.parse(input.clone()) { + return r; + } + } + nom::combinator::fail::(input) + } +} + +/// A combinator that takes a parser `inner` and produces a parser that also consumes both leading and +/// trailing whitespace, returning the output of `inner`. 
+/// +/// From https://github.com/rust-bakery/nom/blob/main/doc/nom_recipes.md#wrapper-combinators-that-eat-whitespace-before-and-after-a-parser +fn ws<'a, O, E: ParseError<&'a str>, F>(inner: F) -> impl Parser<&'a str, O, E> +where + F: Parser<&'a str, O, E>, +{ + delimited(multispace0, inner, multispace0) +} + +#[cfg(test)] +mod tests { + use configuration::schema::Type; + use googletest::prelude::*; + use mongodb_support::BsonScalarType; + use proptest::{prop_assert_eq, proptest}; + use test_helpers::arb_type; + + #[googletest::test] + fn parses_scalar_type_expression() -> Result<()> { + expect_that!( + super::type_expression("double"), + ok(( + anything(), + eq(&Type::Nullable(Box::new(Type::Scalar( + BsonScalarType::Double + )))) + )) + ); + Ok(()) + } + + #[googletest::test] + fn parses_non_nullable_suffix() -> Result<()> { + expect_that!( + super::type_expression("double!"), + ok((anything(), eq(&Type::Scalar(BsonScalarType::Double)))) + ); + Ok(()) + } + + #[googletest::test] + fn ignores_whitespace_in_type_expressions() -> Result<()> { + expect_that!( + super::type_expression("[ double ! ] !"), + ok(( + anything(), + eq(&Type::ArrayOf(Box::new(Type::Scalar( + BsonScalarType::Double + )))) + )) + ); + expect_that!( + super::type_expression("predicate < obj >"), + ok(( + anything(), + eq(&Type::Nullable(Box::new(Type::Predicate { + object_type_name: "obj".into() + }))) + )) + ); + Ok(()) + } + + proptest! { + #[test] + fn type_expression_roundtrips_display_and_parsing(t in arb_type()) { + let t = t.normalize_type(); + let annotation = t.to_string(); + println!("annotation: {}", annotation); + let (_, parsed) = super::type_expression(&annotation)?; + prop_assert_eq!(parsed, t) + } + } +} diff --git a/crates/cli/src/native_query/type_constraint.rs b/crates/cli/src/native_query/type_constraint.rs new file mode 100644 index 00000000..e6681d43 --- /dev/null +++ b/crates/cli/src/native_query/type_constraint.rs @@ -0,0 +1,389 @@ +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, +}; + +use configuration::MongoScalarType; +use itertools::Itertools as _; +use mongodb_support::BsonScalarType; +use ndc_models::{FieldName, ObjectTypeName}; +use nonempty::NonEmpty; +use ref_cast::RefCast as _; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct TypeVariable { + id: u32, + pub variance: Variance, +} + +impl TypeVariable { + pub fn new(id: u32, variance: Variance) -> Self { + TypeVariable { id, variance } + } + + pub fn is_covariant(self) -> bool { + matches!(self.variance, Variance::Covariant) + } + + pub fn is_contravariant(self) -> bool { + matches!(self.variance, Variance::Contravariant) + } +} + +impl std::fmt::Display for TypeVariable { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "${}", self.id) + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum Variance { + Covariant, + Contravariant, + Invariant, +} + +/// A TypeConstraint is almost identical to a [configuration::schema::Type], except that +/// a TypeConstraint may reference type variables. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum TypeConstraint { + // Normal type stuff - except that composite types might include variables in their structure. 
+ ExtendedJSON, + Scalar(BsonScalarType), + Object(ObjectTypeName), + ArrayOf(Box), + Predicate { + object_type_name: ObjectTypeName, + }, + + // Complex types + Union(BTreeSet), + + /// Unlike Union we expect the solved concrete type for a variable with a OneOf constraint may + /// be one of the types in the set, but we don't know yet which one. This is useful for MongoDB + /// operators that expect an input of any numeric type. We use OneOf because we don't know + /// which numeric type to infer until we see more usage evidence of the same type variable. + /// + /// In other words with Union we have specific evidence that a variable occurs in contexts of + /// multiple concrete types, while with OneOf we **don't** have specific evidence that the + /// variable takes multiple types, but there are multiple possibilities of the type or types + /// that it does take. + OneOf(BTreeSet), + + /// Indicates a type that is the same as the type of the given variable. + Variable(TypeVariable), + + /// A type that is the same as the type of elements in the array type referenced by the + /// variable. + ElementOf(Box), + + /// A type that is the same as the type of a field of an object type referenced by the + /// variable, or that is the same as a type in a field of a field, etc. + FieldOf { + target_type: Box, + path: NonEmpty, + }, + + /// A type that modifies another type by adding, replacing, or subtracting object fields. + WithFieldOverrides { + augmented_object_type_name: ObjectTypeName, + target_type: Box, + fields: BTreeMap>, + }, +} + +impl std::fmt::Display for TypeConstraint { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn helper(t: &TypeConstraint, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match t { + TypeConstraint::ExtendedJSON => write!(f, "extendedJSON"), + TypeConstraint::Scalar(s) => s.fmt(f), + TypeConstraint::Object(name) => write!(f, "{name}"), + TypeConstraint::ArrayOf(t) => write!(f, "[{t}]"), + TypeConstraint::Predicate { object_type_name } => { + write!(f, "predicate<{object_type_name}>") + } + TypeConstraint::Union(ts) => write!(f, "({})", ts.iter().join(" | ")), + TypeConstraint::OneOf(ts) => write!(f, "({})", ts.iter().join(" / ")), + TypeConstraint::Variable(v) => v.fmt(f), + TypeConstraint::ElementOf(t) => write!(f, "{t}[@]"), + TypeConstraint::FieldOf { target_type, path } => { + write!(f, "{target_type}.{}", path.iter().join(".")) + } + TypeConstraint::WithFieldOverrides { + augmented_object_type_name, + target_type, + fields, + } => { + writeln!(f, "{target_type} // {augmented_object_type_name} {{")?; + for (name, spec) in fields { + write!(f, " {name}: ")?; + match spec { + Some(t) => write!(f, "{t}"), + None => write!(f, ""), + }?; + writeln!(f)?; + } + write!(f, "}}") + } + } + } + if *self == TypeConstraint::Scalar(BsonScalarType::Null) { + write!(f, "null") + } else { + match self.without_null() { + Some(t) => helper(&t, f), + None => { + helper(self, f)?; + write!(f, "!") + } + } + } + } +} + +impl TypeConstraint { + /// Order constraints by complexity to help with type unification + pub fn complexity(&self) -> usize { + match self { + TypeConstraint::Variable(_) => 2, + TypeConstraint::ExtendedJSON => 0, + TypeConstraint::Scalar(_) => 0, + TypeConstraint::Object(_) => 1, + TypeConstraint::Predicate { .. 
} => 1, + TypeConstraint::ArrayOf(constraint) => 1 + constraint.complexity(), + TypeConstraint::Union(constraints) => { + 1 + constraints + .iter() + .map(TypeConstraint::complexity) + .sum::() + } + TypeConstraint::OneOf(constraints) => { + 1 + constraints + .iter() + .map(TypeConstraint::complexity) + .sum::() + } + TypeConstraint::ElementOf(constraint) => 2 + constraint.complexity(), + TypeConstraint::FieldOf { target_type, path } => { + 2 + target_type.complexity() + path.len() + } + TypeConstraint::WithFieldOverrides { + target_type, + fields, + .. + } => { + let overridden_field_complexity: usize = fields + .values() + .flatten() + .map(|constraint| constraint.complexity()) + .sum(); + 2 + target_type.complexity() + overridden_field_complexity + } + } + } + + pub fn make_nullable(self) -> Self { + match self { + TypeConstraint::ExtendedJSON => TypeConstraint::ExtendedJSON, + t @ TypeConstraint::Scalar(BsonScalarType::Null) => t, + t => TypeConstraint::union(t, TypeConstraint::Scalar(BsonScalarType::Null)), + } + } + + pub fn null() -> Self { + TypeConstraint::Scalar(BsonScalarType::Null) + } + + pub fn is_nullable(&self) -> bool { + match self { + TypeConstraint::Union(types) => types + .iter() + .any(|t| matches!(t, TypeConstraint::Scalar(BsonScalarType::Null))), + _ => false, + } + } + + /// If the type constraint is a union including null then return a constraint with the null + /// removed + pub fn without_null(&self) -> Option> { + match self { + TypeConstraint::Union(constraints) => { + let non_null = constraints + .iter() + .filter(|c| **c != TypeConstraint::Scalar(BsonScalarType::Null)) + .collect_vec(); + if non_null.len() == constraints.len() { + Some(Cow::Borrowed(self)) + } else if non_null.len() == 1 { + Some(Cow::Borrowed(non_null.first().unwrap())) + } else { + Some(Cow::Owned(TypeConstraint::Union( + non_null.into_iter().cloned().collect(), + ))) + } + } + _ => None, + } + } + + pub fn map_nullable(self, callback: F) -> TypeConstraint + where + F: FnOnce(TypeConstraint) -> TypeConstraint, + { + match self { + Self::Union(types) => { + let non_null_types: BTreeSet<_> = + types.into_iter().filter(|t| t != &Self::null()).collect(); + let single_non_null_type = if non_null_types.len() == 1 { + non_null_types.into_iter().next().unwrap() + } else { + Self::Union(non_null_types) + }; + let mapped = callback(single_non_null_type); + Self::union(mapped, Self::null()) + } + t => callback(t), + } + } + + fn scalar_one_of_by_predicate(f: impl Fn(BsonScalarType) -> bool) -> TypeConstraint { + let matching_types = enum_iterator::all::() + .filter(|t| f(*t)) + .map(TypeConstraint::Scalar) + .collect(); + TypeConstraint::OneOf(matching_types) + } + + pub fn comparable() -> TypeConstraint { + Self::scalar_one_of_by_predicate(BsonScalarType::is_comparable) + } + + pub fn numeric() -> TypeConstraint { + Self::scalar_one_of_by_predicate(BsonScalarType::is_numeric) + } + + pub fn is_numeric(&self) -> bool { + match self { + TypeConstraint::Scalar(scalar_type) => BsonScalarType::is_numeric(*scalar_type), + TypeConstraint::OneOf(types) => types.iter().all(|t| t.is_numeric()), + TypeConstraint::Union(types) => types.iter().all(|t| t.is_numeric()), + _ => false, + } + } + + pub fn union(a: TypeConstraint, b: TypeConstraint) -> Self { + match (a, b) { + (TypeConstraint::Union(mut types_a), TypeConstraint::Union(mut types_b)) => { + types_a.append(&mut types_b); + TypeConstraint::Union(types_a) + } + (TypeConstraint::Union(mut types), b) => { + types.insert(b); + TypeConstraint::Union(types) + 
} + (a, TypeConstraint::Union(mut types)) => { + types.insert(a); + TypeConstraint::Union(types) + } + (a, b) => TypeConstraint::Union([a, b].into()), + } + } +} + +impl From for TypeConstraint { + fn from(t: ndc_models::Type) -> Self { + match t { + ndc_models::Type::Named { name } => { + let scalar_type_name = ndc_models::ScalarTypeName::ref_cast(&name); + match MongoScalarType::try_from(scalar_type_name) { + Ok(MongoScalarType::Bson(scalar_type)) => TypeConstraint::Scalar(scalar_type), + Ok(MongoScalarType::ExtendedJSON) => TypeConstraint::ExtendedJSON, + Err(_) => TypeConstraint::Object(name.into()), + } + } + ndc_models::Type::Nullable { underlying_type } => { + Self::from(*underlying_type).make_nullable() + } + ndc_models::Type::Array { element_type } => { + TypeConstraint::ArrayOf(Box::new(Self::from(*element_type))) + } + ndc_models::Type::Predicate { object_type_name } => { + TypeConstraint::Predicate { object_type_name } + } + } + } +} + +impl From for TypeConstraint { + fn from(t: configuration::schema::Type) -> Self { + match t { + configuration::schema::Type::ExtendedJSON => TypeConstraint::ExtendedJSON, + configuration::schema::Type::Scalar(s) => TypeConstraint::Scalar(s), + configuration::schema::Type::Object(name) => TypeConstraint::Object(name.into()), + configuration::schema::Type::ArrayOf(t) => { + TypeConstraint::ArrayOf(Box::new(TypeConstraint::from(*t))) + } + configuration::schema::Type::Nullable(t) => TypeConstraint::from(*t).make_nullable(), + configuration::schema::Type::Predicate { object_type_name } => { + TypeConstraint::Predicate { object_type_name } + } + } + } +} + +impl From<&configuration::schema::Type> for TypeConstraint { + fn from(t: &configuration::schema::Type) -> Self { + t.clone().into() + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ObjectTypeConstraint { + pub fields: BTreeMap, +} + +impl From for ObjectTypeConstraint { + fn from(value: ndc_models::ObjectType) -> Self { + ObjectTypeConstraint { + fields: value + .fields + .into_iter() + .map(|(name, field)| (name, field.r#type.into())) + .collect(), + } + } +} + +#[cfg(test)] +mod tests { + use googletest::prelude::*; + use mongodb_support::BsonScalarType; + + use super::TypeConstraint; + + #[googletest::test] + fn displays_non_nullable_type_with_suffix() { + expect_eq!( + format!("{}", TypeConstraint::Scalar(BsonScalarType::Int)), + "int!".to_string() + ); + } + + #[googletest::test] + fn displays_nullable_type_without_suffix() { + expect_eq!( + format!( + "{}", + TypeConstraint::Union( + [ + TypeConstraint::Scalar(BsonScalarType::Int), + TypeConstraint::Scalar(BsonScalarType::Null), + ] + .into() + ) + ), + "int".to_string() + ); + } +} diff --git a/crates/cli/src/native_query/type_solver/constraint_to_type.rs b/crates/cli/src/native_query/type_solver/constraint_to_type.rs new file mode 100644 index 00000000..76d3b4dd --- /dev/null +++ b/crates/cli/src/native_query/type_solver/constraint_to_type.rs @@ -0,0 +1,419 @@ +use std::collections::{BTreeMap, HashMap, VecDeque}; + +use configuration::{ + schema::{ObjectField, ObjectType, Type}, + Configuration, +}; +use itertools::Itertools as _; +use ndc_models::{FieldName, ObjectTypeName}; + +use crate::native_query::{ + error::{Error, Result}, + type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable}, +}; + +use TypeConstraint as C; + +/// In cases where there is enough information present in one constraint itself to infer a concrete +/// type, do that. Returns None if there is not enough information present. 
+/// +/// TODO: Most of this logic should be moved to `simplify_one` +pub fn constraint_to_type( + configuration: &Configuration, + solutions: &HashMap, + added_object_types: &mut BTreeMap, + object_type_constraints: &mut BTreeMap, + constraint: &TypeConstraint, +) -> Result> { + let solution = match constraint { + C::ExtendedJSON => Some(Type::ExtendedJSON), + C::Scalar(s) => Some(Type::Scalar(*s)), + C::ArrayOf(c) => constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + c, + )? + .map(|t| Type::ArrayOf(Box::new(t))), + C::Object(name) => object_constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + name, + )? + .map(|_| Type::Object(name.to_string())), + C::Predicate { object_type_name } => object_constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + object_type_name, + )? + .map(|_| Type::Predicate { + object_type_name: object_type_name.clone(), + }), + C::Variable(variable) => solutions.get(variable).cloned(), + C::ElementOf(c) => constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + c, + )? + .map(element_of) + .transpose()?, + C::FieldOf { target_type, path } => constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + target_type, + )? + .and_then(|t| { + field_of( + configuration, + solutions, + added_object_types, + object_type_constraints, + t, + path, + ) + .transpose() + }) + .transpose()?, + + t @ C::Union(constraints) if t.is_nullable() => { + let non_null_constraints = constraints + .iter() + .filter(|t| *t != &C::null()) + .collect_vec(); + let underlying_constraint = if non_null_constraints.len() == 1 { + non_null_constraints.into_iter().next().unwrap() + } else { + &C::Union(non_null_constraints.into_iter().cloned().collect()) + }; + constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + underlying_constraint, + )? + .map(|t| Type::Nullable(Box::new(t))) + } + + C::Union(_) => Some(Type::ExtendedJSON), + + t @ C::OneOf(_) if t.is_numeric() => { + // We know it's a number, but we don't know exactly which numeric type. Double should + // be good enough for anybody, right? + Some(Type::Scalar(mongodb_support::BsonScalarType::Double)) + } + + C::OneOf(_) => Some(Type::ExtendedJSON), + + C::WithFieldOverrides { + augmented_object_type_name, + target_type, + fields, + } => { + let resolved_object_type = constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + target_type, + )?; + let added_or_replaced_fields: Option> = fields + .iter() + .flat_map(|(field_name, option_t)| option_t.as_ref().map(|t| (field_name, t))) + .map(|(field_name, t)| { + Ok(constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + t, + )? 
+ .map(|t| (field_name.clone(), t))) + }) + .collect::>()?; + let subtracted_fields = fields + .iter() + .filter_map(|(n, option_t)| match option_t { + Some(_) => None, + None => Some(n), + }) + .collect_vec(); + match (resolved_object_type, added_or_replaced_fields) { + (Some(object_type), Some(added_fields)) => with_field_overrides( + configuration, + solutions, + added_object_types, + object_type_constraints, + object_type, + augmented_object_type_name.clone(), + added_fields, + subtracted_fields, + )?, + _ => None, + } + } + }; + Ok(solution) +} + +fn object_constraint_to_type( + configuration: &Configuration, + solutions: &HashMap, + added_object_types: &mut BTreeMap, + object_type_constraints: &mut BTreeMap, + name: &ObjectTypeName, +) -> Result> { + // If the referenced type is defined externally to the native query or already has a recorded + // solution then we don't need to do anything. + if let Some(object_type) = configuration.object_types.get(name) { + return Ok(Some(object_type.clone().into())); + } + if let Some(object_type) = added_object_types.get(name) { + return Ok(Some(object_type.clone())); + } + + let Some(object_type_constraint) = object_type_constraints.get(name).cloned() else { + return Err(Error::UnknownObjectType(name.to_string())); + }; + + let mut fields = BTreeMap::new(); + // let mut solved_object_types = BTreeMap::new(); + + for (field_name, field_constraint) in object_type_constraint.fields.iter() { + match constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + field_constraint, + )? { + Some(solved_field_type) => { + fields.insert( + field_name.clone(), + ObjectField { + r#type: solved_field_type, + description: None, + }, + ); + } + // If any fields do not have solved types we need to abort + None => return Ok(None), + }; + } + + let new_object_type = ObjectType { + fields, + description: None, + }; + added_object_types.insert(name.clone(), new_object_type.clone()); + + Ok(Some(new_object_type)) +} + +fn element_of(array_type: Type) -> Result { + let element_type = match array_type { + Type::ArrayOf(elem_type) => Ok(*elem_type), + Type::Nullable(t) => element_of(*t).map(|t| Type::Nullable(Box::new(t))), + Type::ExtendedJSON => Ok(Type::ExtendedJSON), + _ => Err(Error::ExpectedArray { + actual_type: array_type, + }), + }?; + Ok(element_type.normalize_type()) +} + +fn field_of<'a>( + configuration: &Configuration, + solutions: &HashMap, + added_object_types: &mut BTreeMap, + object_type_constraints: &mut BTreeMap, + object_type: Type, + path: impl IntoIterator, +) -> Result> { + let field_type = match object_type { + Type::ExtendedJSON => Ok(Some(Type::ExtendedJSON)), + Type::Object(type_name) => { + let Some(object_type) = object_constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + &type_name.clone().into(), + )? 
+ else { + return Ok(None); + }; + + let mut path: VecDeque<_> = path.into_iter().collect(); + let Some(field_name) = path.pop_front() else { + return Ok(Some(Type::Object(type_name))); + }; + + let field_type = + object_type + .fields + .get(field_name) + .ok_or(Error::ObjectMissingField { + object_type: type_name.into(), + field_name: field_name.clone(), + })?; + + if path.is_empty() { + Ok(Some(field_type.r#type.clone())) + } else { + field_of( + configuration, + solutions, + added_object_types, + object_type_constraints, + field_type.r#type.clone(), + path, + ) + } + } + Type::Nullable(t) => { + let underlying_type = field_of( + configuration, + solutions, + added_object_types, + object_type_constraints, + *t, + path, + )?; + Ok(underlying_type.map(|t| Type::Nullable(Box::new(t)))) + } + t => Err(Error::ExpectedObject { actual_type: t }), + }?; + Ok(field_type.map(Type::normalize_type)) +} + +#[allow(clippy::too_many_arguments)] +fn with_field_overrides<'a>( + configuration: &Configuration, + solutions: &HashMap, + added_object_types: &mut BTreeMap, + object_type_constraints: &mut BTreeMap, + object_type: Type, + augmented_object_type_name: ObjectTypeName, + added_or_replaced_fields: impl IntoIterator, + subtracted_fields: impl IntoIterator, +) -> Result> { + let augmented_object_type = match object_type { + Type::ExtendedJSON => Some(Type::ExtendedJSON), + Type::Object(type_name) => { + let Some(object_type) = object_constraint_to_type( + configuration, + solutions, + added_object_types, + object_type_constraints, + &type_name.clone().into(), + )? + else { + return Ok(None); + }; + let mut new_object_type = object_type.clone(); + for (field_name, field_type) in added_or_replaced_fields.into_iter() { + new_object_type.fields.insert( + field_name, + ObjectField { + r#type: field_type, + description: None, + }, + ); + } + for field_name in subtracted_fields { + new_object_type.fields.remove(field_name); + } + // We might end up back-tracking in which case this will register an object type that + // isn't referenced. BUT once solving is complete we should get here again with the + // same augmented_object_type_name, overwrite the old definition with an identical one, + // and then it will be referenced. 
+ added_object_types.insert(augmented_object_type_name.clone(), new_object_type); + Some(Type::Object(augmented_object_type_name.to_string())) + } + Type::Nullable(t) => { + let underlying_type = with_field_overrides( + configuration, + solutions, + added_object_types, + object_type_constraints, + *t, + augmented_object_type_name, + added_or_replaced_fields, + subtracted_fields, + )?; + underlying_type.map(|t| Type::Nullable(Box::new(t))) + } + t => Err(Error::ExpectedObject { actual_type: t })?, + }; + Ok(augmented_object_type.map(Type::normalize_type)) +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use configuration::schema::{ObjectField, ObjectType, Type}; + use mongodb_support::BsonScalarType; + use pretty_assertions::assert_eq; + use test_helpers::configuration::mflix_config; + + use crate::native_query::type_constraint::{ObjectTypeConstraint, TypeConstraint}; + + use super::constraint_to_type; + + #[test] + fn converts_object_type_constraint_to_object_type() -> Result<()> { + let configuration = mflix_config(); + let solutions = Default::default(); + let mut added_object_types = Default::default(); + + let input = TypeConstraint::Object("new_object_type".into()); + + let mut object_type_constraints = [( + "new_object_type".into(), + ObjectTypeConstraint { + fields: [("foo".into(), TypeConstraint::Scalar(BsonScalarType::Int))].into(), + }, + )] + .into(); + + let solved_type = constraint_to_type( + &configuration, + &solutions, + &mut added_object_types, + &mut object_type_constraints, + &input, + )?; + + assert_eq!(solved_type, Some(Type::Object("new_object_type".into()))); + assert_eq!( + added_object_types, + [( + "new_object_type".into(), + ObjectType { + fields: [( + "foo".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::Int), + description: None, + } + )] + .into(), + description: None, + } + ),] + .into() + ); + + Ok(()) + } +} diff --git a/crates/cli/src/native_query/type_solver/mod.rs b/crates/cli/src/native_query/type_solver/mod.rs new file mode 100644 index 00000000..5c40a9cc --- /dev/null +++ b/crates/cli/src/native_query/type_solver/mod.rs @@ -0,0 +1,300 @@ +mod constraint_to_type; +mod simplify; + +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use configuration::{ + schema::{ObjectType, Type}, + Configuration, +}; +use itertools::Itertools; +use ndc_models::ObjectTypeName; +use simplify::simplify_constraints; + +use super::{ + error::{Error, Result}, + type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable}, +}; + +use self::constraint_to_type::constraint_to_type; + +pub fn unify( + configuration: &Configuration, + required_type_variables: &[TypeVariable], + object_type_constraints: &mut BTreeMap, + type_variables: HashMap>, +) -> Result<( + HashMap, + BTreeMap, +)> { + let mut added_object_types = BTreeMap::new(); + let mut solutions = HashMap::new(); + let mut substitutions = HashMap::new(); + fn is_solved(solutions: &HashMap, variable: TypeVariable) -> bool { + solutions.contains_key(&variable) + } + + #[cfg(test)] + { + println!("begin unify:"); + println!(" type_variables:"); + for (var, constraints) in type_variables.iter() { + println!( + " - {var}: {}", + constraints.iter().map(|c| format!("{c}")).join("; ") + ); + } + println!(" object_type_constraints:"); + for (name, ot) in object_type_constraints.iter() { + println!(" {name} ::",); + for (field_name, field_type) in ot.fields.iter() { + println!(" - {field_name}: {field_type}") + } + } + println!(); + } + + loop { + let prev_type_variables = 
type_variables.clone(); + let prev_solutions = solutions.clone(); + let prev_substitutions = substitutions.clone(); + + // TODO: check for mismatches, e.g. constraint list contains scalar & array ENG-1252 + + for (variable, constraints) in type_variables.iter() { + if is_solved(&solutions, *variable) { + continue; + } + + let simplified = simplify_constraints( + configuration, + &substitutions, + object_type_constraints, + Some(*variable), + constraints.iter().cloned(), + ) + .map_err(Error::Multiple)?; + #[cfg(test)] + if simplified != *constraints { + println!("simplified {variable}: {constraints:?} -> {simplified:?}"); + } + if simplified.len() == 1 { + let constraint = simplified.iter().next().unwrap(); + if let Some(solved_type) = constraint_to_type( + configuration, + &solutions, + &mut added_object_types, + object_type_constraints, + constraint, + )? { + #[cfg(test)] + println!("solved {variable}: {solved_type:?}"); + solutions.insert(*variable, solved_type.clone()); + substitutions.insert(*variable, [solved_type.into()].into()); + } + } + } + + #[cfg(test)] + println!("added_object_types: {added_object_types:?}\n"); + + let variables = type_variables_by_complexity(&type_variables); + if let Some(v) = variables.iter().find(|v| !substitutions.contains_key(*v)) { + // TODO: We should do some recursion to substitute variable references within + // substituted constraints to existing substitutions. + substitutions.insert(*v, type_variables[v].clone()); + } + + if required_type_variables + .iter() + .copied() + .all(|v| is_solved(&solutions, v)) + { + return Ok((solutions, added_object_types)); + } + + if type_variables == prev_type_variables + && solutions == prev_solutions + && substitutions == prev_substitutions + { + return Err(Error::FailedToUnify { + unsolved_variables: variables + .into_iter() + .filter(|v| !is_solved(&solutions, *v)) + .collect(), + }); + } + } +} + +/// List type variables ordered according to increasing complexity of their constraints. 
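+///
+/// As a rough illustration of the ordering: a variable constrained only by scalar constraints
+/// (complexity 0 each) sorts before one constrained by `FieldOf { .. }` (complexity 2 plus the
+/// complexity of its target type and the length of its path), per `TypeConstraint::complexity`.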
+fn type_variables_by_complexity( + type_variables: &HashMap>, +) -> Vec { + type_variables + .iter() + .sorted_unstable_by_key(|(_, constraints)| { + let complexity: usize = constraints.iter().map(TypeConstraint::complexity).sum(); + complexity + }) + .map(|(variable, _)| variable) + .copied() + .collect_vec() +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use configuration::schema::{ObjectField, ObjectType, Type}; + use mongodb_support::BsonScalarType; + use nonempty::NonEmpty; + use pretty_assertions::assert_eq; + use test_helpers::configuration::mflix_config; + + use crate::native_query::type_constraint::{ + ObjectTypeConstraint, TypeConstraint, TypeVariable, Variance, + }; + + use super::unify; + + #[test] + fn solves_object_type() -> Result<()> { + let configuration = mflix_config(); + let type_variable = TypeVariable::new(0, Variance::Covariant); + let required_type_variables = [type_variable]; + let mut object_type_constraints = Default::default(); + + let type_variables = [( + type_variable, + [TypeConstraint::Object("movies".into())].into(), + )] + .into(); + + let (solved_variables, _) = unify( + &configuration, + &required_type_variables, + &mut object_type_constraints, + type_variables, + )?; + + assert_eq!( + solved_variables, + [(type_variable, Type::Object("movies".into()))].into() + ); + + Ok(()) + } + + #[test] + fn solves_added_object_type_based_on_object_type_constraint() -> Result<()> { + let configuration = mflix_config(); + let type_variable = TypeVariable::new(0, Variance::Covariant); + let required_type_variables = [type_variable]; + + let mut object_type_constraints = [( + "new_object_type".into(), + ObjectTypeConstraint { + fields: [("foo".into(), TypeConstraint::Scalar(BsonScalarType::Int))].into(), + }, + )] + .into(); + + let type_variables = [( + type_variable, + [TypeConstraint::Object("new_object_type".into())].into(), + )] + .into(); + + let (solved_variables, added_object_types) = unify( + &configuration, + &required_type_variables, + &mut object_type_constraints, + type_variables, + )?; + + assert_eq!( + solved_variables, + [(type_variable, Type::Object("new_object_type".into()))].into() + ); + assert_eq!( + added_object_types, + [( + "new_object_type".into(), + ObjectType { + fields: [( + "foo".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::Int), + description: None + } + )] + .into(), + description: None + } + )] + .into(), + ); + + Ok(()) + } + + #[test] + fn produces_object_type_based_on_field_type_of_another_object_type() -> Result<()> { + let configuration = mflix_config(); + let var0 = TypeVariable::new(0, Variance::Covariant); + let var1 = TypeVariable::new(1, Variance::Covariant); + let required_type_variables = [var0, var1]; + + let mut object_type_constraints = [( + "movies_selection_stage0".into(), + ObjectTypeConstraint { + fields: [( + "selected_title".into(), + TypeConstraint::FieldOf { + target_type: Box::new(TypeConstraint::Variable(var0)), + path: NonEmpty::singleton("title".into()), + }, + )] + .into(), + }, + )] + .into(); + + let type_variables = [ + (var0, [TypeConstraint::Object("movies".into())].into()), + ( + var1, + [TypeConstraint::Object("movies_selection_stage0".into())].into(), + ), + ] + .into(); + + let (solved_variables, added_object_types) = unify( + &configuration, + &required_type_variables, + &mut object_type_constraints, + type_variables, + )?; + + assert_eq!( + solved_variables.get(&var1), + Some(&Type::Object("movies_selection_stage0".into())) + ); + assert_eq!( + 
added_object_types.get("movies_selection_stage0"), + Some(&ObjectType { + fields: [( + "selected_title".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None + } + )] + .into(), + description: None + }) + ); + + Ok(()) + } +} diff --git a/crates/cli/src/native_query/type_solver/simplify.rs b/crates/cli/src/native_query/type_solver/simplify.rs new file mode 100644 index 00000000..dad0e829 --- /dev/null +++ b/crates/cli/src/native_query/type_solver/simplify.rs @@ -0,0 +1,731 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use configuration::Configuration; +use itertools::Itertools as _; +use mongodb_support::align::try_align; +use mongodb_support::BsonScalarType; +use ndc_models::{FieldName, ObjectTypeName}; +use nonempty::NonEmpty; + +use crate::native_query::helpers::get_object_field_type; +use crate::native_query::type_constraint::Variance; +use crate::native_query::{ + error::Error, + type_constraint::{ObjectTypeConstraint, TypeConstraint, TypeVariable}, +}; + +use TypeConstraint as C; + +struct SimplifyContext<'a> { + configuration: &'a Configuration, + substitutions: &'a HashMap>, + object_type_constraints: &'a mut BTreeMap, +} + +// Attempts to reduce the number of type constraints from the input by combining redundant +// constraints, merging constraints into more specific ones where possible, and applying +// accumulated variable substitutions. +pub fn simplify_constraints( + configuration: &Configuration, + substitutions: &HashMap>, + object_type_constraints: &mut BTreeMap, + variable: Option, + constraints: impl IntoIterator, +) -> Result, Vec> { + let mut context = SimplifyContext { + configuration, + substitutions, + object_type_constraints, + }; + let (constraints, errors) = simplify_constraints_internal(&mut context, variable, constraints); + if errors.is_empty() { + Ok(constraints) + } else { + Err(errors) + } +} + +fn simplify_constraints_internal( + state: &mut SimplifyContext, + variable: Option, + constraints: impl IntoIterator, +) -> (BTreeSet, Vec) { + let (constraint_sets, error_sets): (Vec>, Vec>) = constraints + .into_iter() + .map(|constraint| simplify_single_constraint(state, variable, constraint)) + .partition_result(); + let constraints = constraint_sets.into_iter().flatten(); + let mut errors: Vec = error_sets.into_iter().flatten().collect(); + + let constraints = constraints + .coalesce(|constraint_a, constraint_b| { + match simplify_constraint_pair( + state, + variable, + constraint_a.clone(), + constraint_b.clone(), + ) { + Ok(Some(t)) => Ok(t), + Ok(None) => Err((constraint_a, constraint_b)), + Err(errs) => { + errors.extend(errs); + Err((constraint_a, constraint_b)) + } + } + }) + .collect(); + + (constraints, errors) +} + +fn simplify_single_constraint( + context: &mut SimplifyContext, + variable: Option, + constraint: TypeConstraint, +) -> Result, Vec> { + let simplified = match constraint { + C::Variable(v) if Some(v) == variable => vec![], + + C::Variable(v) => match context.substitutions.get(&v) { + Some(constraints) => constraints.iter().cloned().collect(), + None => vec![C::Variable(v)], + }, + + C::FieldOf { target_type, path } => { + let object_type = simplify_single_constraint(context, variable, *target_type.clone())?; + if object_type.len() == 1 { + let object_type = object_type.into_iter().next().unwrap(); + match expand_field_of(context, object_type, path.clone()) { + Ok(Some(t)) => return Ok(t), + Ok(None) => (), + Err(e) => return Err(e), + } + } + vec![C::FieldOf { target_type, path }] + } + 
+ C::Union(constraints) => { + let (simplified_constraints, _) = + simplify_constraints_internal(context, variable, constraints); + vec![C::Union(simplified_constraints)] + } + + C::OneOf(constraints) => { + let (simplified_constraints, _) = + simplify_constraints_internal(context, variable, constraints); + vec![C::OneOf(simplified_constraints)] + } + + _ => vec![constraint], + }; + Ok(simplified) +} + +// Attempt to unify two type constraints. There are three possible result shapes: +// +// - Ok(Some(t)) : successfully unified the two constraints into one +// - Ok(None) : could not unify, but that could be because there is insufficient information available +// - Err(errs) : it is not possible to unify the two constraints +// +fn simplify_constraint_pair( + context: &mut SimplifyContext, + variable: Option, + a: TypeConstraint, + b: TypeConstraint, +) -> Result, Vec> { + let variance = variable.map(|v| v.variance).unwrap_or(Variance::Invariant); + match (a, b) { + (a, b) if a == b => Ok(Some(a)), + + (C::Variable(a), C::Variable(b)) if a == b => Ok(Some(C::Variable(a))), + + (C::ExtendedJSON, _) | (_, C::ExtendedJSON) if variance == Variance::Covariant => { + Ok(Some(C::ExtendedJSON)) + } + (C::ExtendedJSON, b) if variance == Variance::Contravariant => Ok(Some(b)), + (a, C::ExtendedJSON) if variance == Variance::Contravariant => Ok(Some(a)), + + (C::Scalar(a), C::Scalar(b)) => match solve_scalar(variance, a, b) { + Ok(t) => Ok(Some(t)), + Err(e) => Err(vec![e]), + }, + + (C::Union(mut a), C::Union(mut b)) if variance == Variance::Covariant => { + a.append(&mut b); + // Ignore errors when simplifying because union branches are allowed to be strictly incompatible + let (constraints, _) = simplify_constraints_internal(context, variable, a); + Ok(Some(C::Union(constraints))) + } + + // TODO: Instead of a naive intersection we want to get a common subtype of both unions in + // the contravariant case, or get the intersection after solving all types in the invariant + // case. 
+        (C::Union(a), C::Union(b)) => {
+            let intersection: BTreeSet<_> = a.intersection(&b).cloned().collect();
+            if intersection.is_empty() {
+                Ok(None)
+            } else if intersection.len() == 1 {
+                Ok(Some(intersection.into_iter().next().unwrap()))
+            } else {
+                Ok(Some(C::Union(intersection)))
+            }
+        }
+
+        (C::Union(mut a), b) if variance == Variance::Covariant => {
+            a.insert(b);
+            // Ignore errors when simplifying because union branches are allowed to be strictly incompatible
+            let (constraints, _) = simplify_constraints_internal(context, variable, a);
+            Ok(Some(C::Union(constraints)))
+        }
+
+        (C::Union(a), b) if variance == Variance::Contravariant => {
+            let mut simplified = BTreeSet::new();
+            let mut errors = vec![];
+
+            for union_branch in a {
+                match simplify_constraint_pair(context, variable, b.clone(), union_branch.clone()) {
+                    Ok(Some(t)) => {
+                        simplified.insert(t);
+                    }
+                    Ok(None) => return Ok(None),
+                    Err(errs) => {
+                        // ignore incompatible branches, but note errors
+                        errors.extend(errs);
+                    }
+                }
+            }
+
+            if simplified.is_empty() {
+                return Err(errors);
+            }
+
+            let (simplified, errors) = simplify_constraints_internal(context, variable, simplified);
+
+            if simplified.is_empty() {
+                Err(errors)
+            } else if simplified.len() == 1 {
+                Ok(Some(simplified.into_iter().next().unwrap()))
+            } else {
+                Ok(Some(C::Union(simplified)))
+            }
+        }
+
+        (a, b @ C::Union(_)) => simplify_constraint_pair(context, variable, b, a),
+
+        (C::OneOf(mut a), C::OneOf(mut b)) => {
+            a.append(&mut b);
+            Ok(Some(C::OneOf(a)))
+        }
+
+        (C::OneOf(constraints), b) => {
+            let matches: BTreeSet<_> = constraints
+                .clone()
+                .into_iter()
+                .filter_map(
+                    |c| match simplify_constraint_pair(context, variable, c, b.clone()) {
+                        Ok(c) => Some(c),
+                        Err(_) => None,
+                    },
+                )
+                .flatten()
+                .collect();
+
+            if matches.len() == 1 {
+                Ok(Some(matches.into_iter().next().unwrap()))
+            } else if matches.is_empty() {
+                Ok(None)
+            } else {
+                Ok(Some(C::OneOf(matches)))
+            }
+        }
+        (a, b @ C::OneOf(_)) => simplify_constraint_pair(context, variable, b, a),
+
+        (C::Object(a), C::Object(b)) if a == b => Ok(Some(C::Object(a))),
+        (C::Object(a), C::Object(b)) => {
+            match merge_object_type_constraints(context, variable, &a, &b) {
+                Some(merged_name) => Ok(Some(C::Object(merged_name))),
+                None => Ok(None),
+            }
+        }
+
+        (
+            C::Predicate {
+                object_type_name: a,
+            },
+            C::Predicate {
+                object_type_name: b,
+            },
+        ) if a == b => Ok(Some(C::Predicate {
+            object_type_name: a,
+        })),
+        // Differing predicate object types are merged, mirroring the object type case above.
+        (
+            C::Predicate {
+                object_type_name: a,
+            },
+            C::Predicate {
+                object_type_name: b,
+            },
+        ) => match merge_object_type_constraints(context, variable, &a, &b) {
+            Some(merged_name) => Ok(Some(C::Predicate {
+                object_type_name: merged_name,
+            })),
+            None => Ok(None),
+        },
+
+        (C::ArrayOf(a), C::ArrayOf(b)) => simplify_constraint_pair(context, variable, *a, *b)
+            .map(|r| r.map(|ab| C::ArrayOf(Box::new(ab)))),
+
+        (_, _) => Ok(None),
+    }
+}
+
+/// Reconciles two scalar type constraints depending on the variance of the context. In a covariant
+/// context the type of a type variable is determined to be the supertype of the two (if the types
+/// overlap). In a contravariant context the variable type is the subtype of the two instead.
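+///
+/// For example (mirroring the unit tests below): in a covariant context `int` and `double`
+/// reconcile to `double`, their common supertype; in a contravariant context they reconcile to
+/// `int`, the subtype; and in an invariant context two distinct scalar types are a type mismatch.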
+fn solve_scalar( + variance: Variance, + a: BsonScalarType, + b: BsonScalarType, +) -> Result { + let solution = match variance { + Variance::Covariant => BsonScalarType::common_supertype(a, b) + .map(C::Scalar) + .or_else(|| Some(C::Union([C::Scalar(a), C::Scalar(b)].into()))), + Variance::Contravariant => { + if a == b || BsonScalarType::is_supertype(a, b) { + Some(C::Scalar(b)) + } else if BsonScalarType::is_supertype(b, a) { + Some(C::Scalar(a)) + } else { + None + } + } + Variance::Invariant => { + if a == b { + Some(C::Scalar(a)) + } else { + None + } + } + }; + match solution { + Some(t) => Ok(t), + None => Err(Error::TypeMismatch { + context: None, + a: C::Scalar(a), + b: C::Scalar(b), + }), + } +} + +fn merge_object_type_constraints( + context: &mut SimplifyContext, + variable: Option, + name_a: &ObjectTypeName, + name_b: &ObjectTypeName, +) -> Option { + // Pick from the two input names according to sort order to get a deterministic outcome. + let preferred_name = if name_a <= name_b { name_a } else { name_b }; + let merged_name = unique_type_name( + context.configuration, + context.object_type_constraints, + preferred_name, + ); + + let a = look_up_object_type_constraint(context, name_a); + let b = look_up_object_type_constraint(context, name_b); + + let merged_fields_result = try_align( + a.fields.clone().into_iter().collect(), + b.fields.clone().into_iter().collect(), + always_ok(TypeConstraint::make_nullable), + always_ok(TypeConstraint::make_nullable), + |field_a, field_b| unify_object_field(context, variable, field_a, field_b), + ); + + let fields = match merged_fields_result { + Ok(merged_fields) => merged_fields.into_iter().collect(), + Err(_) => { + return None; + } + }; + + let merged_object_type = ObjectTypeConstraint { fields }; + context + .object_type_constraints + .insert(merged_name.clone(), merged_object_type); + + Some(merged_name) +} + +fn unify_object_field( + context: &mut SimplifyContext, + variable: Option, + field_type_a: TypeConstraint, + field_type_b: TypeConstraint, +) -> Result> { + match simplify_constraint_pair(context, variable, field_type_a, field_type_b) { + Ok(Some(t)) => Ok(t), + Ok(None) => Err(vec![]), + Err(errs) => Err(errs), + } +} + +fn always_ok(mut f: F) -> impl FnMut(A) -> Result +where + F: FnMut(A) -> B, +{ + move |x| Ok(f(x)) +} + +fn look_up_object_type_constraint( + context: &SimplifyContext, + name: &ObjectTypeName, +) -> ObjectTypeConstraint { + if let Some(object_type) = context.configuration.object_types.get(name) { + object_type.clone().into() + } else if let Some(object_type) = context.object_type_constraints.get(name) { + object_type.clone() + } else { + unreachable!("look_up_object_type_constraint") + } +} + +fn unique_type_name( + configuration: &Configuration, + object_type_constraints: &mut BTreeMap, + desired_name: &ObjectTypeName, +) -> ObjectTypeName { + let mut counter = 0; + let mut type_name = desired_name.clone(); + while configuration.object_types.contains_key(&type_name) + || object_type_constraints.contains_key(&type_name) + { + counter += 1; + type_name = format!("{desired_name}_{counter}").into(); + } + type_name +} + +fn expand_field_of( + context: &mut SimplifyContext, + object_type: TypeConstraint, + path: NonEmpty, +) -> Result>, Vec> { + let field_type = match object_type { + C::ExtendedJSON => Some(vec![C::ExtendedJSON]), + C::Object(type_name) => get_object_constraint_field_type(context, &type_name, path)?, + C::Union(constraints) => { + let variants: BTreeSet = constraints + .into_iter() + .map(|t| { 
+ let maybe_expanded = expand_field_of(context, t.clone(), path.clone())?; + + // TODO: if variant has more than one element that should be interpreted as an + // intersection, which we haven't implemented yet + Ok(match maybe_expanded { + Some(variant) if variant.len() <= 1 => variant, + _ => vec![t], + }) + }) + .flatten_ok() + .collect::>>()?; + Some(vec![(C::Union(variants))]) + } + C::OneOf(constraints) => { + // The difference between the Union and OneOf cases is that in OneOf we want to prune + // variants that don't expand, while in Union we want to preserve unexpanded variants. + let expanded_variants: BTreeSet = constraints + .into_iter() + .map(|t| { + let maybe_expanded = expand_field_of(context, t, path.clone())?; + + // TODO: if variant has more than one element that should be interpreted as an + // intersection, which we haven't implemented yet + Ok(match maybe_expanded { + Some(variant) if variant.len() <= 1 => variant, + _ => vec![], + }) + }) + .flatten_ok() + .collect::>>()?; + if expanded_variants.len() == 1 { + Some(vec![expanded_variants.into_iter().next().unwrap()]) + } else if !expanded_variants.is_empty() { + Some(vec![C::Union(expanded_variants)]) + } else { + Err(vec![Error::Other(format!( + "no variant matched object field path {path:?}" + ))])? + } + } + _ => None, + }; + Ok(field_type) +} + +fn get_object_constraint_field_type( + context: &mut SimplifyContext, + object_type_name: &ObjectTypeName, + path: NonEmpty, +) -> Result>, Vec> { + if let Some(object_type) = context.configuration.object_types.get(object_type_name) { + let t = get_object_field_type( + &context.configuration.object_types, + object_type_name, + object_type, + path, + ) + .map_err(|e| vec![e])?; + return Ok(Some(vec![t.clone().into()])); + } + + let Some(object_type_constraint) = context.object_type_constraints.get(object_type_name) else { + return Err(vec![Error::UnknownObjectType(object_type_name.to_string())]); + }; + + let field_name = path.head; + let rest = NonEmpty::from_vec(path.tail); + + let field_type = object_type_constraint + .fields + .get(&field_name) + .ok_or_else(|| { + vec![Error::ObjectMissingField { + object_type: object_type_name.clone(), + field_name: field_name.clone(), + }] + })? 
+ .clone(); + + let field_type = simplify_single_constraint(context, None, field_type)?; + + match rest { + None => Ok(Some(field_type)), + Some(rest) if field_type.len() == 1 => match field_type.into_iter().next().unwrap() { + C::Object(type_name) => get_object_constraint_field_type(context, &type_name, rest), + _ => Err(vec![Error::ObjectMissingField { + object_type: object_type_name.clone(), + field_name: field_name.clone(), + }]), + }, + _ if field_type.is_empty() => Err(vec![Error::Other( + "could not resolve object field to a type".to_string(), + )]), + _ => Ok(None), // field_type len > 1 + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use googletest::prelude::*; + use mongodb_support::BsonScalarType; + use nonempty::NonEmpty; + use test_helpers::configuration::mflix_config; + + use crate::native_query::{ + error::Error, + type_constraint::{TypeConstraint, TypeVariable, Variance}, + }; + + #[googletest::test] + fn multiple_identical_scalar_constraints_resolve_one_constraint() { + expect_eq!( + super::solve_scalar( + Variance::Covariant, + BsonScalarType::String, + BsonScalarType::String, + ), + Ok(TypeConstraint::Scalar(BsonScalarType::String)) + ); + expect_eq!( + super::solve_scalar( + Variance::Contravariant, + BsonScalarType::String, + BsonScalarType::String, + ), + Ok(TypeConstraint::Scalar(BsonScalarType::String)) + ); + } + + #[googletest::test] + fn multiple_scalar_constraints_resolve_to_supertype_in_covariant_context() { + expect_eq!( + super::solve_scalar( + Variance::Covariant, + BsonScalarType::Int, + BsonScalarType::Double, + ), + Ok(TypeConstraint::Scalar(BsonScalarType::Double)) + ); + } + + #[googletest::test] + fn multiple_scalar_constraints_resolve_to_subtype_in_contravariant_context() { + expect_eq!( + super::solve_scalar( + Variance::Contravariant, + BsonScalarType::Int, + BsonScalarType::Double, + ), + Ok(TypeConstraint::Scalar(BsonScalarType::Int)) + ); + } + + #[googletest::test] + fn simplifies_field_of() -> Result<()> { + let config = mflix_config(); + let result = super::simplify_constraints( + &config, + &Default::default(), + &mut Default::default(), + Some(TypeVariable::new(1, Variance::Covariant)), + [TypeConstraint::FieldOf { + target_type: Box::new(TypeConstraint::Object("movies".into())), + path: NonEmpty::singleton("title".into()), + }], + ); + expect_that!( + result, + matches_pattern!(Ok(&BTreeSet::from_iter([TypeConstraint::Scalar( + BsonScalarType::String + )]))) + ); + Ok(()) + } + + #[googletest::test] + fn nullable_union_does_not_error_and_does_not_simplify() -> Result<()> { + let configuration = mflix_config(); + let result = super::simplify_constraints( + &configuration, + &Default::default(), + &mut Default::default(), + Some(TypeVariable::new(1, Variance::Contravariant)), + [TypeConstraint::Union( + [ + TypeConstraint::Scalar(BsonScalarType::Int), + TypeConstraint::Scalar(BsonScalarType::Null), + ] + .into(), + )], + ); + expect_that!( + result, + ok(eq(&BTreeSet::from([TypeConstraint::Union( + [ + TypeConstraint::Scalar(BsonScalarType::Int), + TypeConstraint::Scalar(BsonScalarType::Null), + ] + .into(), + )]))) + ); + Ok(()) + } + + #[googletest::test] + fn simplifies_from_nullable_to_non_nullable_in_contravariant_context() -> Result<()> { + let configuration = mflix_config(); + let result = super::simplify_constraints( + &configuration, + &Default::default(), + &mut Default::default(), + Some(TypeVariable::new(1, Variance::Contravariant)), + [ + TypeConstraint::Scalar(BsonScalarType::String), + 
TypeConstraint::Union( + [ + TypeConstraint::Scalar(BsonScalarType::String), + TypeConstraint::Scalar(BsonScalarType::Null), + ] + .into(), + ), + ], + ); + expect_that!( + result, + ok(eq(&BTreeSet::from([TypeConstraint::Scalar( + BsonScalarType::String + )]))) + ); + Ok(()) + } + + #[googletest::test] + fn emits_error_if_scalar_is_not_compatible_with_any_union_branch() -> Result<()> { + let configuration = mflix_config(); + let result = super::simplify_constraints( + &configuration, + &Default::default(), + &mut Default::default(), + Some(TypeVariable::new(1, Variance::Contravariant)), + [ + TypeConstraint::Scalar(BsonScalarType::Decimal), + TypeConstraint::Union( + [ + TypeConstraint::Scalar(BsonScalarType::String), + TypeConstraint::Scalar(BsonScalarType::Null), + ] + .into(), + ), + ], + ); + expect_that!( + result, + err(unordered_elements_are![ + eq(&Error::TypeMismatch { + context: None, + a: TypeConstraint::Scalar(BsonScalarType::Decimal), + b: TypeConstraint::Scalar(BsonScalarType::String), + }), + eq(&Error::TypeMismatch { + context: None, + a: TypeConstraint::Scalar(BsonScalarType::Decimal), + b: TypeConstraint::Scalar(BsonScalarType::Null), + }), + ]) + ); + Ok(()) + } + + // TODO: + // #[googletest::test] + // fn simplifies_two_compatible_unions_in_contravariant_context() -> Result<()> { + // let configuration = mflix_config(); + // let result = super::simplify_constraints( + // &configuration, + // &Default::default(), + // &mut Default::default(), + // Some(TypeVariable::new(1, Variance::Contravariant)), + // [ + // TypeConstraint::Union( + // [ + // TypeConstraint::Scalar(BsonScalarType::Double), + // TypeConstraint::Scalar(BsonScalarType::Null), + // ] + // .into(), + // ), + // TypeConstraint::Union( + // [ + // TypeConstraint::Scalar(BsonScalarType::Int), + // TypeConstraint::Scalar(BsonScalarType::Null), + // ] + // .into(), + // ), + // ], + // ); + // expect_that!( + // result, + // ok(eq(&BTreeSet::from([TypeConstraint::Union( + // [ + // TypeConstraint::Scalar(BsonScalarType::Int), + // TypeConstraint::Scalar(BsonScalarType::Null), + // ] + // .into(), + // )]))) + // ); + // Ok(()) + // } +} diff --git a/crates/cli/src/tests.rs b/crates/cli/src/tests.rs new file mode 100644 index 00000000..a18e80ab --- /dev/null +++ b/crates/cli/src/tests.rs @@ -0,0 +1,403 @@ +use std::path::Path; + +use async_tempfile::TempDir; +use configuration::{read_directory, Configuration}; +use googletest::prelude::*; +use itertools::Itertools as _; +use mongodb::{ + bson::{self, doc, from_document, Bson}, + options::AggregateOptions, +}; +use mongodb_agent_common::mongodb::{ + test_helpers::mock_stream, MockCollectionTrait, MockDatabaseTrait, +}; +use ndc_models::{CollectionName, FieldName, ObjectField, ObjectType, Type}; +use ndc_test_helpers::{array_of, named_type, nullable, object_type}; +use pretty_assertions::assert_eq; + +use crate::{update, Context, UpdateArgs}; + +#[tokio::test] +async fn required_field_from_validator_is_non_nullable() -> anyhow::Result<()> { + let collection_object_type = collection_schema_from_validator(doc! 
{ + "bsonType": "object", + "required": ["title"], + "properties": { + "title": { "bsonType": "string", "maxLength": 100 }, + "author": { "bsonType": "string", "maxLength": 100 }, + } + }) + .await?; + + assert_eq!( + collection_object_type + .fields + .get(&FieldName::new("title".into())), + Some(&ObjectField { + r#type: Type::Named { + name: "String".into() + }, + arguments: Default::default(), + description: Default::default(), + }) + ); + + assert_eq!( + collection_object_type + .fields + .get(&FieldName::new("author".into())), + Some(&ObjectField { + r#type: Type::Nullable { + underlying_type: Box::new(Type::Named { + name: "String".into() + }) + }, + arguments: Default::default(), + description: Default::default(), + }) + ); + + Ok(()) +} + +#[tokio::test] +async fn validator_object_with_no_properties_becomes_extended_json_object() -> anyhow::Result<()> { + let collection_object_type = collection_schema_from_validator(doc! { + "bsonType": "object", + "title": "posts validator", + "additionalProperties": false, + "properties": { + "reactions": { "bsonType": "object" }, + } + }) + .await?; + + assert_eq!( + collection_object_type + .fields + .get(&FieldName::new("reactions".into())), + Some(&ObjectField { + r#type: Type::Nullable { + underlying_type: Box::new(Type::Named { + name: "ExtendedJSON".into() + }) + }, + arguments: Default::default(), + description: Default::default(), + }) + ); + + Ok(()) +} + +#[gtest] +#[tokio::test] +async fn adds_new_fields_on_re_introspection() -> anyhow::Result<()> { + let config_dir = TempDir::new().await?; + schema_from_sampling( + &config_dir, + vec![doc! { "title": "First post!", "author": "Alice" }], + ) + .await?; + + // re-introspect after database changes + let configuration = schema_from_sampling( + &config_dir, + vec![doc! { "title": "First post!", "author": "Alice", "body": "Hello, world!" }], + ) + .await?; + + let updated_type = configuration + .object_types + .get("posts") + .expect("got posts collection type"); + + expect_that!( + updated_type.fields, + unordered_elements_are![ + ( + displays_as(eq("title")), + field!(ObjectField.r#type, eq(&named_type("String"))) + ), + ( + displays_as(eq("author")), + field!(ObjectField.r#type, eq(&named_type("String"))) + ), + ( + displays_as(eq("body")), + field!(ObjectField.r#type, eq(&named_type("String"))) + ), + ] + ); + Ok(()) +} + +#[gtest] +#[tokio::test] +async fn changes_from_re_introspection_are_additive_only() -> anyhow::Result<()> { + let config_dir = TempDir::new().await?; + schema_from_sampling( + &config_dir, + vec![ + doc! { + "created_at": "2025-07-03T02:31Z", + "removed_field": true, + "author": "Alice", + "nested": { + "scalar_type_changed": 1, + "removed": 1, + "made_nullable": 1, + + }, + "nested_array": [{ + "scalar_type_changed": 1, + "removed": 1, + "made_nullable": 1, + + }], + "nested_nullable": { + "scalar_type_changed": 1, + "removed": 1, + "made_nullable": 1, + + } + }, + doc! { + "created_at": "2025-07-03T02:31Z", + "removed_field": true, + "author": "Alice", + "nested": { + "scalar_type_changed": 1, + "removed": 1, + "made_nullable": 1, + + }, + "nested_array": [{ + "scalar_type_changed": 1, + "removed": 1, + "made_nullable": 1, + + }], + "nested_nullable": null, + }, + ], + ) + .await?; + + // re-introspect after database changes + let configuration = schema_from_sampling( + &config_dir, + vec![ + doc! 
{ + "created_at": Bson::DateTime(bson::DateTime::from_millis(1741372252881)), + "author": "Alice", + "nested": { + "scalar_type_changed": true, + "made_nullable": 1, + }, + "nested_array": [{ + "scalar_type_changed": true, + "made_nullable": 1, + + }], + "nested_nullable": { + "scalar_type_changed": true, + "made_nullable": 1, + + } + }, + doc! { + "created_at": Bson::DateTime(bson::DateTime::from_millis(1741372252881)), + "author": null, + "nested": { + "scalar_type_changed": true, + "made_nullable": null, + }, + "nested_array": [{ + "scalar_type_changed": true, + "made_nullable": null, + }], + "nested_nullable": null, + }, + ], + ) + .await?; + + let updated_type = configuration + .object_types + .get("posts") + .expect("got posts collection type"); + + expect_that!( + updated_type.fields, + unordered_elements_are![ + ( + displays_as(eq("created_at")), + field!(ObjectField.r#type, eq(&named_type("String"))) + ), + ( + displays_as(eq("removed_field")), + field!(ObjectField.r#type, eq(&named_type("Bool"))) + ), + ( + displays_as(eq("author")), + field!(ObjectField.r#type, eq(&named_type("String"))) + ), + ( + displays_as(eq("nested")), + field!(ObjectField.r#type, eq(&named_type("posts_nested"))) + ), + ( + displays_as(eq("nested_array")), + field!( + ObjectField.r#type, + eq(&array_of(named_type("posts_nested_array"))) + ) + ), + ( + displays_as(eq("nested_nullable")), + field!( + ObjectField.r#type, + eq(&nullable(named_type("posts_nested_nullable"))) + ) + ), + ] + ); + expect_that!( + configuration.object_types, + contains_each![ + ( + displays_as(eq("posts_nested")), + eq(&object_type([ + ("scalar_type_changed", named_type("Int")), + ("removed", named_type("Int")), + ("made_nullable", named_type("Int")), + ])) + ), + ( + displays_as(eq("posts_nested_array")), + eq(&object_type([ + ("scalar_type_changed", named_type("Int")), + ("removed", named_type("Int")), + ("made_nullable", named_type("Int")), + ])) + ), + ( + displays_as(eq("posts_nested_nullable")), + eq(&object_type([ + ("scalar_type_changed", named_type("Int")), + ("removed", named_type("Int")), + ("made_nullable", named_type("Int")), + ])) + ), + ] + ); + Ok(()) +} + +async fn collection_schema_from_validator(validator: bson::Document) -> anyhow::Result { + let mut db = MockDatabaseTrait::new(); + let config_dir = TempDir::new().await?; + + let context = Context { + path: config_dir.to_path_buf(), + connection_uri: None, + display_color: false, + }; + + let args = UpdateArgs { + sample_size: Some(100), + no_validator_schema: None, + all_schema_nullable: Some(false), + }; + + db.expect_list_collections().returning(move || { + let collection_spec = doc! 
{ + "name": "posts", + "type": "collection", + "options": { + "validator": { + "$jsonSchema": &validator + } + }, + "info": { "readOnly": false }, + }; + Ok(mock_stream(vec![Ok( + from_document(collection_spec).unwrap() + )])) + }); + + db.expect_collection().returning(|_collection_name| { + let mut collection = MockCollectionTrait::new(); + collection + .expect_aggregate() + .returning(|_pipeline, _options: Option| Ok(mock_stream(vec![]))); + collection + }); + + update(&context, &args, &db).await?; + + let configuration = read_directory(config_dir).await?; + + let collection = configuration + .collections + .get(&CollectionName::new("posts".into())) + .expect("posts collection"); + let collection_object_type = configuration + .object_types + .get(&collection.collection_type) + .expect("posts object type"); + + Ok(collection_object_type.clone()) +} + +async fn schema_from_sampling( + config_dir: &Path, + sampled_documents: Vec, +) -> anyhow::Result { + let mut db = MockDatabaseTrait::new(); + + let context = Context { + path: config_dir.to_path_buf(), + connection_uri: None, + display_color: false, + }; + + let args = UpdateArgs { + sample_size: Some(100), + no_validator_schema: None, + all_schema_nullable: Some(false), + }; + + db.expect_list_collections().returning(move || { + let collection_spec = doc! { + "name": "posts", + "type": "collection", + "options": {}, + "info": { "readOnly": false }, + }; + Ok(mock_stream(vec![Ok( + from_document(collection_spec).unwrap() + )])) + }); + + db.expect_collection().returning(move |_collection_name| { + let mut collection = MockCollectionTrait::new(); + let sample_results = sampled_documents + .iter() + .cloned() + .map(Ok::<_, mongodb::error::Error>) + .collect_vec(); + collection.expect_aggregate().returning( + move |_pipeline, _options: Option| { + Ok(mock_stream(sample_results.clone())) + }, + ); + collection + }); + + update(&context, &args, &db).await?; + + let configuration = read_directory(config_dir).await?; + Ok(configuration) +} diff --git a/crates/configuration/Cargo.toml b/crates/configuration/Cargo.toml index 2e04c416..8c3aa88e 100644 --- a/crates/configuration/Cargo.toml +++ b/crates/configuration/Cargo.toml @@ -12,9 +12,15 @@ futures = "^0.3" itertools = { workspace = true } mongodb = { workspace = true } ndc-models = { workspace = true } +ref-cast = { workspace = true } schemars = { workspace = true } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1" } +serde = { workspace = true } +serde_json = { workspace = true } serde_yaml = "^0.9" tokio = "1" tokio-stream = { version = "^0.1", features = ["fs"] } +tracing = "0.1" + +[dev-dependencies] +async-tempfile = "^0.6.0" +googletest = "^0.12.0" diff --git a/crates/configuration/src/configuration.rs b/crates/configuration/src/configuration.rs index 5ac8131e..57291713 100644 --- a/crates/configuration/src/configuration.rs +++ b/crates/configuration/src/configuration.rs @@ -57,6 +57,14 @@ impl Configuration { native_queries: BTreeMap, options: ConfigurationOptions, ) -> anyhow::Result { + tracing::debug!( + schema = %serde_json::to_string(&schema).unwrap(), + ?native_mutations, + ?native_queries, + options = %serde_json::to_string(&options).unwrap(), + "parsing connector configuration" + ); + let object_types_iter = || merge_object_types(&schema, &native_mutations, &native_queries); let object_type_errors = { let duplicate_type_names: Vec<&ndc::TypeName> = object_types_iter() @@ -195,7 +203,7 @@ impl Configuration { } #[derive(Clone, Debug, Default, 
Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ConfigurationOptions { /// Options for introspection pub introspection_options: ConfigurationIntrospectionOptions, @@ -207,7 +215,7 @@ pub struct ConfigurationOptions { } #[derive(Copy, Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ConfigurationIntrospectionOptions { // For introspection how many documents should be sampled per collection. pub sample_size: u32, @@ -230,12 +238,32 @@ impl Default for ConfigurationIntrospectionOptions { } #[derive(Clone, Debug, Default, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct ConfigurationSerializationOptions { /// Extended JSON has two modes: canonical and relaxed. This option determines which mode is /// used for output. This setting has no effect on inputs (query arguments, etc.). #[serde(default)] pub extended_json_mode: ExtendedJsonMode, + + /// When sending response data the connector may encounter data in a field that does not match + /// the type declared for that field in the connector schema. This option specifies what the + /// connector should do in this situation. + #[serde(default)] + pub on_response_type_mismatch: OnResponseTypeMismatch, +} + +/// Options for connector behavior on encountering a type mismatch between query response data, and +/// declared types in schema. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum OnResponseTypeMismatch { + /// On a type mismatch, send an error instead of response data. Fails the entire query. + #[default] + Fail, + + /// If any field in a response row contains data of an incorrect type, exclude that row from + /// the response. 
+ SkipRow, } fn merge_object_types<'a>( @@ -268,8 +296,8 @@ fn collection_to_collection_info( collection_type: collection.r#type, description: collection.description, arguments: Default::default(), - foreign_keys: Default::default(), uniqueness_constraints: BTreeMap::from_iter(pk_constraint), + relational_mutations: None, } } @@ -290,8 +318,8 @@ fn native_query_to_collection_info( collection_type: native_query.result_document_type.clone(), description: native_query.description.clone(), arguments: arguments_to_ndc_arguments(native_query.arguments.clone()), - foreign_keys: Default::default(), uniqueness_constraints: BTreeMap::from_iter(pk_constraint), + relational_mutations: None, } } diff --git a/crates/configuration/src/directory.rs b/crates/configuration/src/directory.rs index d94dacd6..0bff4130 100644 --- a/crates/configuration/src/directory.rs +++ b/crates/configuration/src/directory.rs @@ -1,24 +1,27 @@ use anyhow::{anyhow, Context as _}; use futures::stream::TryStreamExt as _; use itertools::Itertools as _; +use ndc_models::{CollectionName, FunctionName}; use serde::{Deserialize, Serialize}; use std::{ - collections::{BTreeMap, HashSet}, - fs::Metadata, + collections::BTreeMap, path::{Path, PathBuf}, }; -use tokio::{fs, io::AsyncWriteExt}; +use tokio::fs; use tokio_stream::wrappers::ReadDirStream; use crate::{ - configuration::ConfigurationOptions, serialized::Schema, with_name::WithName, Configuration, + configuration::ConfigurationOptions, + schema::CollectionSchema, + serialized::{NativeQuery, Schema}, + with_name::WithName, + Configuration, }; pub const SCHEMA_DIRNAME: &str = "schema"; pub const NATIVE_MUTATIONS_DIRNAME: &str = "native_mutations"; pub const NATIVE_QUERIES_DIRNAME: &str = "native_queries"; pub const CONFIGURATION_OPTIONS_BASENAME: &str = "configuration"; -pub const CONFIGURATION_OPTIONS_METADATA: &str = ".configuration_metadata"; // Deprecated: Discussion came out that we standardize names and the decision // was to use `native_mutations`. We should leave this in for a few releases @@ -41,41 +44,86 @@ const YAML: FileFormat = FileFormat::Yaml; /// Read configuration from a directory pub async fn read_directory( configuration_dir: impl AsRef + Send, +) -> anyhow::Result { + read_directory_with_ignored_configs(configuration_dir, &[]).await +} + +/// Read configuration from a directory +pub async fn read_directory_with_ignored_configs( + configuration_dir: impl AsRef + Send, + ignored_configs: &[PathBuf], ) -> anyhow::Result { let dir = configuration_dir.as_ref(); - let schemas = read_subdir_configs::(&dir.join(SCHEMA_DIRNAME)) + let schemas = read_subdir_configs::(&dir.join(SCHEMA_DIRNAME), ignored_configs) .await? .unwrap_or_default(); let schema = schemas.into_values().fold(Schema::default(), Schema::merge); // Deprecated see message above at NATIVE_PROCEDURES_DIRNAME - let native_procedures = read_subdir_configs(&dir.join(NATIVE_PROCEDURES_DIRNAME)) - .await? - .unwrap_or_default(); + let native_procedures = + read_subdir_configs(&dir.join(NATIVE_PROCEDURES_DIRNAME), ignored_configs) + .await? + .unwrap_or_default(); // TODO: Once we fully remove `native_procedures` after a deprecation period we can remove `mut` - let mut native_mutations = read_subdir_configs(&dir.join(NATIVE_MUTATIONS_DIRNAME)) - .await? - .unwrap_or_default(); + let mut native_mutations = + read_subdir_configs(&dir.join(NATIVE_MUTATIONS_DIRNAME), ignored_configs) + .await? 
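// Illustrative configuration.json sketch for the options declared above. Key and variant
// spellings follow the camelCase serde renames; the extendedJsonMode values are assumed to
// be "canonical" / "relaxed" per that field's doc comment.
//     {
//       "introspectionOptions": {
//         "sampleSize": 100,
//         "noValidatorSchema": false,
//         "allSchemaNullable": true
//       },
//       "serializationOptions": {
//         "extendedJsonMode": "relaxed",
//         "onResponseTypeMismatch": "skipRow"
//       }
//     }
// With `deny_unknown_fields` now set on these option structs, a misspelled key fails parsing
// instead of being silently ignored, and a misspelled variant value is likewise rejected (see
// the `errors_on_typo_in_extended_json_mode_string` test below).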
+ .unwrap_or_default(); - let native_queries = read_subdir_configs(&dir.join(NATIVE_QUERIES_DIRNAME)) + let native_queries = read_native_query_directory(dir, ignored_configs) .await? - .unwrap_or_default(); + .into_iter() + .map(|(name, (config, _))| (name, config)) + .collect(); - let options = parse_configuration_options_file(dir).await; + let options = parse_configuration_options_file(dir).await?; native_mutations.extend(native_procedures.into_iter()); Configuration::validate(schema, native_mutations, native_queries, options) } +/// Read native queries only, and skip configuration processing +pub async fn read_native_query_directory( + configuration_dir: impl AsRef + Send, + ignored_configs: &[PathBuf], +) -> anyhow::Result> { + let dir = configuration_dir.as_ref(); + let native_queries = + read_subdir_configs_with_paths(&dir.join(NATIVE_QUERIES_DIRNAME), ignored_configs) + .await? + .unwrap_or_default(); + Ok(native_queries) +} + /// Parse all files in a directory with one of the allowed configuration extensions according to /// the given type argument. For example if `T` is `NativeMutation` this function assumes that all /// json and yaml files in the given directory should be parsed as native mutation configurations. /// /// Assumes that every configuration file has a `name` field. -async fn read_subdir_configs(subdir: &Path) -> anyhow::Result>> +async fn read_subdir_configs( + subdir: &Path, + ignored_configs: &[PathBuf], +) -> anyhow::Result>> +where + for<'a> T: Deserialize<'a>, + for<'a> N: Ord + ToString + Deserialize<'a>, +{ + let configs_with_paths = read_subdir_configs_with_paths(subdir, ignored_configs).await?; + let configs_without_paths = configs_with_paths.map(|cs| { + cs.into_iter() + .map(|(name, (config, _))| (name, config)) + .collect() + }); + Ok(configs_without_paths) +} + +async fn read_subdir_configs_with_paths( + subdir: &Path, + ignored_configs: &[PathBuf], +) -> anyhow::Result>> where for<'a> T: Deserialize<'a>, for<'a> N: Ord + ToString + Deserialize<'a>, @@ -85,8 +133,8 @@ where } let dir_stream = ReadDirStream::new(fs::read_dir(subdir).await?); - let configs: Vec> = dir_stream - .map_err(|err| err.into()) + let configs: Vec> = dir_stream + .map_err(anyhow::Error::from) .try_filter_map(|dir_entry| async move { // Permits regular files and symlinks, does not filter out symlinks to directories. 
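// Note on the `ignored_configs` filter applied a few lines below: entries are matched with
// `Path::ends_with`, which compares whole trailing path components. For example (mirroring
// the `ignores_specified_config_files` test), calling
//     read_directory_with_ignored_configs(dir, &[PathBuf::from("native_queries/hello.json")])
// skips `<dir>/native_queries/hello.json`; a bare "hello.json" would match a file of that
// name in any scanned subdirectory, while a partial component such as ".json" matches
// nothing.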
let is_file = !(dir_entry.file_type().await?.is_dir()); @@ -97,6 +145,13 @@ where let path = dir_entry.path(); let extension = path.extension().and_then(|ext| ext.to_str()); + if ignored_configs + .iter() + .any(|ignored| path.ends_with(ignored)) + { + return Ok(None); + } + let format_option = extension .and_then(|ext| { CONFIGURATION_EXTENSIONS @@ -108,7 +163,11 @@ where Ok(format_option.map(|format| (path, format))) }) .and_then(|(path, format)| async move { - parse_config_file::>(path, format).await + let config = parse_config_file::>(&path, format).await?; + Ok(WithName { + name: config.name, + value: (config.value, path), + }) }) .try_collect() .await?; @@ -129,24 +188,34 @@ where } } -pub async fn parse_configuration_options_file(dir: &Path) -> ConfigurationOptions { - let json_filename = CONFIGURATION_OPTIONS_BASENAME.to_owned() + ".json"; - let json_config_file = parse_config_file(&dir.join(json_filename), JSON).await; - if let Ok(config_options) = json_config_file { - return config_options; +pub async fn parse_configuration_options_file(dir: &Path) -> anyhow::Result { + let json_filename = configuration_file_path(dir, JSON); + if fs::try_exists(&json_filename).await? { + return parse_config_file(json_filename, JSON).await; } - let yaml_filename = CONFIGURATION_OPTIONS_BASENAME.to_owned() + ".yaml"; - let yaml_config_file = parse_config_file(&dir.join(yaml_filename), YAML).await; - if let Ok(config_options) = yaml_config_file { - return config_options; + let yaml_filename = configuration_file_path(dir, YAML); + if fs::try_exists(&yaml_filename).await? { + return parse_config_file(yaml_filename, YAML).await; } + tracing::warn!( + "{CONFIGURATION_OPTIONS_BASENAME}.json not found, using default connector settings" + ); + // If a configuration file does not exist use defaults and write the file let defaults: ConfigurationOptions = Default::default(); let _ = write_file(dir, CONFIGURATION_OPTIONS_BASENAME, &defaults).await; - let _ = write_config_metadata_file(dir).await; - defaults + Ok(defaults) +} + +fn configuration_file_path(dir: &Path, format: FileFormat) -> PathBuf { + let mut file_path = dir.join(CONFIGURATION_OPTIONS_BASENAME); + match format { + FileFormat::Json => file_path.set_extension("json"), + FileFormat::Yaml => file_path.set_extension("yaml"), + }; + file_path } async fn parse_config_file(path: impl AsRef, format: FileFormat) -> anyhow::Result @@ -154,6 +223,12 @@ where for<'a> T: Deserialize<'a>, { let bytes = fs::read(path.as_ref()).await?; + tracing::debug!( + path = %path.as_ref().display(), + ?format, + content = %std::str::from_utf8(&bytes).unwrap_or(""), + "parse_config_file" + ); let value = match format { FileFormat::Json => serde_json::from_slice(&bytes) .with_context(|| format!("error parsing {:?}", path.as_ref()))?, @@ -217,56 +292,140 @@ where .with_context(|| format!("error writing {:?}", path)) } -pub async fn list_existing_schemas( +// Read schemas with a separate map entry for each configuration file. +pub async fn read_existing_schemas( configuration_dir: impl AsRef, -) -> anyhow::Result> { +) -> anyhow::Result> { let dir = configuration_dir.as_ref(); - // TODO: we don't really need to read and parse all the schema files here, just get their names. - let schemas = read_subdir_configs::<_, Schema>(&dir.join(SCHEMA_DIRNAME)) + let schemas = read_subdir_configs::(&dir.join(SCHEMA_DIRNAME), &[]) .await? 
.unwrap_or_default(); - Ok(schemas.into_keys().collect()) -} + // Get a single collection schema out of each file + let schemas = schemas + .into_iter() + .flat_map(|(name, schema)| { + let mut collections = schema.collections.into_iter().collect_vec(); + let (collection_name, collection) = collections.pop()?; + if !collections.is_empty() { + return Some(Err(anyhow!("found schemas for multiple collections in {SCHEMA_DIRNAME}/{name}.json - please limit schema configurations to one collection per file"))); + } + Some(Ok((collection_name, CollectionSchema { + collection, + object_types: schema.object_types, + }))) + }) + .collect::>>()?; -// Metadata file is just a dot filed used for the purposes of know if the user has updated their config to force refresh -// of the schema introspection. -async fn write_config_metadata_file(configuration_dir: impl AsRef) { - let dir = configuration_dir.as_ref(); - let file_result = fs::OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(dir.join(CONFIGURATION_OPTIONS_METADATA)) - .await; - - if let Ok(mut file) = file_result { - let _ = file.write_all(b"").await; - }; + Ok(schemas) } -pub async fn get_config_file_changed(dir: impl AsRef) -> anyhow::Result { - let path = dir.as_ref(); - let dot_metadata: Result = - fs::metadata(&path.join(CONFIGURATION_OPTIONS_METADATA)).await; - let json_metadata = - fs::metadata(&path.join(CONFIGURATION_OPTIONS_BASENAME.to_owned() + ".json")).await; - let yaml_metadata = - fs::metadata(&path.join(CONFIGURATION_OPTIONS_BASENAME.to_owned() + ".yaml")).await; - - let compare = |dot_date, config_date| async move { - if dot_date < config_date { - let _ = write_config_metadata_file(path).await; - Ok(true) - } else { - Ok(false) - } +#[cfg(test)] +mod tests { + use std::path::PathBuf; + + use async_tempfile::TempDir; + use googletest::prelude::*; + use mongodb_support::BsonScalarType; + use ndc_models::FunctionName; + use serde_json::json; + use tokio::fs; + + use crate::{ + native_query::NativeQuery, + read_directory_with_ignored_configs, + schema::{ObjectField, ObjectType, Type}, + serialized, WithName, NATIVE_QUERIES_DIRNAME, }; - match (dot_metadata, json_metadata, yaml_metadata) { - (Ok(dot), Ok(json), _) => compare(dot.modified()?, json.modified()?).await, - (Ok(dot), _, Ok(yaml)) => compare(dot.modified()?, yaml.modified()?).await, - _ => Ok(true), + use super::{read_directory, CONFIGURATION_OPTIONS_BASENAME}; + + #[googletest::test] + #[tokio::test] + async fn errors_on_typo_in_extended_json_mode_string() -> Result<()> { + let input = json!({ + "introspectionOptions": { + "sampleSize": 1_000, + "noValidatorSchema": true, + "allSchemaNullable": false, + }, + "serializationOptions": { + "extendedJsonMode": "no-such-mode", + }, + }); + + let config_dir = TempDir::new().await?; + let mut config_file = config_dir.join(CONFIGURATION_OPTIONS_BASENAME); + config_file.set_extension("json"); + fs::write(config_file, serde_json::to_vec(&input)?).await?; + + let actual = read_directory(config_dir).await; + + expect_that!( + actual, + err(predicate(|e: &anyhow::Error| e + .root_cause() + .to_string() + .contains("unknown variant `no-such-mode`"))) + ); + + Ok(()) + } + + #[googletest::test] + #[tokio::test] + async fn ignores_specified_config_files() -> anyhow::Result<()> { + let native_query = WithName { + name: "hello".to_string(), + value: serialized::NativeQuery { + representation: crate::native_query::NativeQueryRepresentation::Function, + input_collection: None, + arguments: Default::default(), + 
result_document_type: "Hello".into(), + object_types: [( + "Hello".into(), + ObjectType { + fields: [( + "__value".into(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None, + }, + )] + .into(), + description: None, + }, + )] + .into(), + pipeline: [].into(), + description: None, + }, + }; + + let config_dir = TempDir::new().await?; + tokio::fs::create_dir(config_dir.join(NATIVE_QUERIES_DIRNAME)).await?; + let native_query_path = PathBuf::from(NATIVE_QUERIES_DIRNAME).join("hello.json"); + fs::write( + config_dir.join(&native_query_path), + serde_json::to_vec(&native_query)?, + ) + .await?; + + let parsed_config = read_directory(&config_dir).await?; + let parsed_config_ignoring_native_query = + read_directory_with_ignored_configs(config_dir, &[native_query_path]).await?; + + expect_that!( + parsed_config.native_queries, + unordered_elements_are!(eq(( + &FunctionName::from("hello"), + &NativeQuery::from_serialized(&Default::default(), native_query.value)? + ))), + ); + + expect_that!(parsed_config_ignoring_native_query.native_queries, empty()); + + Ok(()) } } diff --git a/crates/configuration/src/lib.rs b/crates/configuration/src/lib.rs index c9c2f971..2e229594 100644 --- a/crates/configuration/src/lib.rs +++ b/crates/configuration/src/lib.rs @@ -7,12 +7,20 @@ pub mod schema; pub mod serialized; mod with_name; -pub use crate::configuration::Configuration; -pub use crate::directory::get_config_file_changed; -pub use crate::directory::list_existing_schemas; +pub use crate::configuration::{ + Configuration, ConfigurationIntrospectionOptions, ConfigurationOptions, + ConfigurationSerializationOptions, OnResponseTypeMismatch, +}; pub use crate::directory::parse_configuration_options_file; -pub use crate::directory::read_directory; +pub use crate::directory::read_existing_schemas; pub use crate::directory::write_schema_directory; +pub use crate::directory::{ + read_directory, read_directory_with_ignored_configs, read_native_query_directory, +}; +pub use crate::directory::{ + CONFIGURATION_OPTIONS_BASENAME, NATIVE_MUTATIONS_DIRNAME, NATIVE_QUERIES_DIRNAME, + SCHEMA_DIRNAME, +}; pub use crate::mongo_scalar_type::MongoScalarType; pub use crate::serialized::Schema; pub use crate::with_name::{WithName, WithNameRef}; diff --git a/crates/configuration/src/mongo_scalar_type.rs b/crates/configuration/src/mongo_scalar_type.rs index 9641ce9f..38c3532f 100644 --- a/crates/configuration/src/mongo_scalar_type.rs +++ b/crates/configuration/src/mongo_scalar_type.rs @@ -1,7 +1,9 @@ +use std::fmt::Display; + use mongodb_support::{BsonScalarType, EXTENDED_JSON_TYPE_NAME}; use ndc_query_plan::QueryPlanError; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq)] pub enum MongoScalarType { /// One of the predefined BSON scalar types Bson(BsonScalarType), @@ -20,6 +22,12 @@ impl MongoScalarType { } } +impl From for MongoScalarType { + fn from(value: BsonScalarType) -> Self { + Self::Bson(value) + } +} + impl TryFrom<&ndc_models::ScalarTypeName> for MongoScalarType { type Error = QueryPlanError; @@ -34,3 +42,14 @@ impl TryFrom<&ndc_models::ScalarTypeName> for MongoScalarType { } } } + +impl Display for MongoScalarType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MongoScalarType::ExtendedJSON => write!(f, "extendedJSON"), + MongoScalarType::Bson(bson_scalar_type) => { + write!(f, "{}", bson_scalar_type.bson_name()) + } + } + } +} diff --git a/crates/configuration/src/native_query.rs 
b/crates/configuration/src/native_query.rs index e8986bb6..9588e3f1 100644 --- a/crates/configuration/src/native_query.rs +++ b/crates/configuration/src/native_query.rs @@ -5,7 +5,7 @@ use ndc_models as ndc; use ndc_query_plan as plan; use plan::QueryPlanError; use schemars::JsonSchema; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use crate::serialized; @@ -15,7 +15,7 @@ use crate::serialized; /// Note: this type excludes `name` and `object_types` from the serialized type. Object types are /// intended to be merged into one big map so should not be accessed through values of this type. /// Native query values are stored in maps so names should be taken from map keys. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct NativeQuery { pub representation: NativeQueryRepresentation, pub input_collection: Option, @@ -39,9 +39,18 @@ impl NativeQuery { } } -#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Hash, JsonSchema)] +#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, Eq, Hash, JsonSchema)] #[serde(rename_all = "camelCase")] pub enum NativeQueryRepresentation { Collection, Function, } + +impl NativeQueryRepresentation { + pub fn to_str(&self) -> &'static str { + match self { + NativeQueryRepresentation::Collection => "collection", + NativeQueryRepresentation::Function => "function", + } + } +} diff --git a/crates/configuration/src/schema/mod.rs b/crates/configuration/src/schema/mod.rs index 3476e75f..e3a4f821 100644 --- a/crates/configuration/src/schema/mod.rs +++ b/crates/configuration/src/schema/mod.rs @@ -1,11 +1,12 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, fmt::Display}; +use ref_cast::RefCast as _; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use mongodb_support::BsonScalarType; -use crate::{WithName, WithNameRef}; +use crate::{MongoScalarType, WithName, WithNameRef}; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] @@ -17,8 +18,15 @@ pub struct Collection { pub description: Option, } +/// Schema for a single collection, as opposed to [Schema] which can describe multiple collections. +#[derive(Clone, Debug)] +pub struct CollectionSchema { + pub collection: Collection, + pub object_types: BTreeMap, +} + /// The type of values that a column, field, or argument may take. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub enum Type { /// Any BSON value, represented as Extended JSON. 
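// Examples for the GraphQL-style Display impl added in the hunk below, assuming
// BsonScalarType::String reports "string" as its BSON name. A trailing `!` marks a
// non-nullable type; nullability is expressed by omitting it:
//     Type::Scalar(BsonScalarType::String)                                  => "string!"
//     Type::Nullable(Box::new(Type::Scalar(BsonScalarType::String)))        => "string"
//     Type::ArrayOf(Box::new(Type::Nullable(Box::new(
//         Type::Scalar(BsonScalarType::String)))))                          => "[string]!"
//     Type::Predicate { object_type_name: "movies".into() }                 => "predicate<movies>!"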
@@ -100,6 +108,55 @@ impl From for ndc_models::Type { } } +impl From for Type { + fn from(t: ndc_models::Type) -> Self { + match t { + ndc_models::Type::Named { name } => { + let scalar_type_name = ndc_models::ScalarTypeName::ref_cast(&name); + match MongoScalarType::try_from(scalar_type_name) { + Ok(MongoScalarType::Bson(scalar_type)) => Type::Scalar(scalar_type), + Ok(MongoScalarType::ExtendedJSON) => Type::ExtendedJSON, + Err(_) => Type::Object(name.to_string()), + } + } + ndc_models::Type::Nullable { underlying_type } => { + Type::Nullable(Box::new(Self::from(*underlying_type))) + } + ndc_models::Type::Array { element_type } => { + Type::ArrayOf(Box::new(Self::from(*element_type))) + } + ndc_models::Type::Predicate { object_type_name } => { + Type::Predicate { object_type_name } + } + } + } +} + +impl Display for Type { + /// Display types using GraphQL-style syntax + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn helper(t: &Type, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match t { + Type::ExtendedJSON => write!(f, "extendedJSON"), + Type::Scalar(s) => write!(f, "{}", s.bson_name()), + Type::Object(name) => write!(f, "{name}"), + Type::ArrayOf(t) => write!(f, "[{t}]"), + Type::Nullable(t) => write!(f, "{t}"), + Type::Predicate { object_type_name } => { + write!(f, "predicate<{object_type_name}>") + } + } + } + match self { + Type::Nullable(t) => helper(t, f), + t => { + helper(t, f)?; + write!(f, "!") + } + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct ObjectType { @@ -135,10 +192,26 @@ impl From for ndc_models::ObjectType { .into_iter() .map(|(name, field)| (name, field.into())) .collect(), + foreign_keys: Default::default(), + } + } +} + +impl From for ObjectType { + fn from(object_type: ndc_models::ObjectType) -> Self { + ObjectType { + description: object_type.description, + fields: object_type + .fields + .into_iter() + .map(|(name, field)| (name, field.into())) + .collect(), } } } +pub type ObjectTypes = BTreeMap; + /// Information about an object type field. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] @@ -169,3 +242,12 @@ impl From for ndc_models::ObjectField { } } } + +impl From for ObjectField { + fn from(field: ndc_models::ObjectField) -> Self { + ObjectField { + description: field.description, + r#type: field.r#type.into(), + } + } +} diff --git a/crates/configuration/src/serialized/native_query.rs b/crates/configuration/src/serialized/native_query.rs index 11ff4b87..93352ad8 100644 --- a/crates/configuration/src/serialized/native_query.rs +++ b/crates/configuration/src/serialized/native_query.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use mongodb::bson; use schemars::JsonSchema; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use crate::{ native_query::NativeQueryRepresentation, @@ -11,7 +11,7 @@ use crate::{ /// Define an arbitrary MongoDB aggregation pipeline that can be referenced in your data graph. For /// details on aggregation pipelines see https://www.mongodb.com/docs/manual/core/aggregation-pipeline/ -#[derive(Clone, Debug, Deserialize, JsonSchema)] +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] #[serde(rename_all = "camelCase")] pub struct NativeQuery { /// Representation may be either "collection" or "function". 
If you choose "collection" then @@ -35,6 +35,7 @@ pub struct NativeQuery { /// Use `input_collection` when you want to start an aggregation pipeline off of the specified /// `input_collection` db..aggregate. + #[serde(default, skip_serializing_if = "Option::is_none")] pub input_collection: Option, /// Arguments to be supplied for each query invocation. These will be available to the given diff --git a/crates/configuration/src/with_name.rs b/crates/configuration/src/with_name.rs index 85afbfdd..2dd44ba1 100644 --- a/crates/configuration/src/with_name.rs +++ b/crates/configuration/src/with_name.rs @@ -56,7 +56,7 @@ pub struct WithNameRef<'a, N, T> { pub value: &'a T, } -impl<'a, N, T> WithNameRef<'a, N, T> { +impl WithNameRef<'_, N, T> { pub fn named<'b>(name: &'b N, value: &'b T) -> WithNameRef<'b, N, T> { WithNameRef { name, value } } diff --git a/crates/integration-tests/Cargo.toml b/crates/integration-tests/Cargo.toml index 598c39a3..8986e0a0 100644 --- a/crates/integration-tests/Cargo.toml +++ b/crates/integration-tests/Cargo.toml @@ -14,7 +14,7 @@ anyhow = "1" assert_json = "^0.1" insta = { version = "^1.38", features = ["yaml"] } reqwest = { version = "^0.12.4", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = "1" +serde = { workspace = true } +serde_json = { workspace = true } tokio = { version = "^1.37.0", features = ["full"] } url = "^2.5.0" diff --git a/crates/integration-tests/src/connector.rs b/crates/integration-tests/src/connector.rs index 858b668c..3d90a8d0 100644 --- a/crates/integration-tests/src/connector.rs +++ b/crates/integration-tests/src/connector.rs @@ -3,7 +3,7 @@ use reqwest::Client; use serde::{Deserialize, Serialize}; use url::Url; -use crate::{get_connector_chinook_url, get_connector_url}; +use crate::{get_connector_chinook_url, get_connector_test_cases_url, get_connector_url}; #[derive(Clone, Debug, Serialize)] #[serde(transparent)] @@ -17,6 +17,7 @@ pub struct ConnectorQueryRequest { pub enum Connector { Chinook, SampleMflix, + TestCases, } impl Connector { @@ -24,6 +25,7 @@ impl Connector { match self { Connector::Chinook => get_connector_chinook_url(), Connector::SampleMflix => get_connector_url(), + Connector::TestCases => get_connector_test_cases_url(), } } } diff --git a/crates/integration-tests/src/lib.rs b/crates/integration-tests/src/lib.rs index ac51abe6..b11b74dc 100644 --- a/crates/integration-tests/src/lib.rs +++ b/crates/integration-tests/src/lib.rs @@ -21,6 +21,7 @@ pub use self::validators::*; const CONNECTOR_URL: &str = "CONNECTOR_URL"; const CONNECTOR_CHINOOK_URL: &str = "CONNECTOR_CHINOOK_URL"; +const CONNECTOR_TEST_CASES_URL: &str = "CONNECTOR_TEST_CASES_URL"; const ENGINE_GRAPHQL_URL: &str = "ENGINE_GRAPHQL_URL"; fn get_connector_url() -> anyhow::Result { @@ -35,6 +36,12 @@ fn get_connector_chinook_url() -> anyhow::Result { Ok(url) } +fn get_connector_test_cases_url() -> anyhow::Result { + let input = env::var(CONNECTOR_TEST_CASES_URL).map_err(|_| anyhow!("please set {CONNECTOR_TEST_CASES_URL} to the base URL of a running MongoDB connector instance"))?; + let url = Url::parse(&input)?; + Ok(url) +} + fn get_graphql_url() -> anyhow::Result { env::var(ENGINE_GRAPHQL_URL).map_err(|_| anyhow!("please set {ENGINE_GRAPHQL_URL} to the GraphQL endpoint of a running GraphQL Engine server")) } diff --git a/crates/integration-tests/src/tests/aggregation.rs b/crates/integration-tests/src/tests/aggregation.rs index 299f68cf..86d6a180 100644 --- a/crates/integration-tests/src/tests/aggregation.rs +++ 
b/crates/integration-tests/src/tests/aggregation.rs @@ -18,10 +18,10 @@ async fn runs_aggregation_over_top_level_fields() -> anyhow::Result<()> { ) { _count milliseconds { - _avg - _max - _min - _sum + avg + max + min + sum } unitPrice { _count @@ -37,3 +37,166 @@ async fn runs_aggregation_over_top_level_fields() -> anyhow::Result<()> { ); Ok(()) } + +#[tokio::test] +async fn aggregates_extended_json_representing_mixture_of_numeric_types() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query ($types: String!) { + extendedJsonTestDataAggregate( + filter_input: { where: { type: { _regex: $types } } } + ) { + value { + avg + _count + max + min + sum + _count_distinct + } + } + extendedJsonTestData(where: { type: { _regex: $types } }) { + type + value + } + } + "# + ) + .variables(json!({ "types": "decimal|double|int|long" })) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn aggregates_mixture_of_numeric_and_null_values() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query ($types: String!) { + extendedJsonTestDataAggregate( + filter_input: { where: { type: { _regex: $types } } } + ) { + value { + avg + _count + max + min + sum + _count_distinct + } + } + extendedJsonTestData(where: { type: { _regex: $types } }) { + type + value + } + } + "# + ) + .variables(json!({ "types": "double|null" })) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn returns_null_when_aggregating_empty_result_set() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + moviesAggregate(filter_input: {where: {title: {_eq: "no such movie"}}}) { + runtime { + avg + } + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn returns_zero_when_counting_empty_result_set() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + moviesAggregate(filter_input: {where: {title: {_eq: "no such movie"}}}) { + _count + title { + _count + } + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn returns_zero_when_counting_nested_fields_in_empty_result_set() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + moviesAggregate(filter_input: {where: {title: {_eq: "no such movie"}}}) { + awards { + nominations { + _count + } + } + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn aggregates_nested_field_values() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + moviesAggregate( + filter_input: {where: {title: {_in: ["Within Our Gates", "The Ace of Hearts"]}}} + ) { + tomatoes { + viewer { + rating { + avg + } + } + critic { + rating { + avg + } + } + } + imdb { + rating { + avg + } + } + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/basic.rs b/crates/integration-tests/src/tests/basic.rs index eea422a0..41cb23ca 100644 --- a/crates/integration-tests/src/tests/basic.rs +++ b/crates/integration-tests/src/tests/basic.rs @@ -70,3 +70,47 @@ async fn selects_array_within_array() -> anyhow::Result<()> { ); Ok(()) } + +#[tokio::test] +async fn selects_field_names_that_require_escaping() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + weirdFieldNames(limit: 1, order_by: { invalidName: Asc }) { + invalidName + invalidObjectName { + validName + } + validObjectName { + invalidNestedName + } + } + } + "# + ) + .run() + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn selects_nested_field_with_dollar_sign_in_name() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + nestedFieldWithDollar(order_by: { configuration: Asc }) { + configuration { + schema + } + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/expressions.rs b/crates/integration-tests/src/tests/expressions.rs new file mode 100644 index 00000000..584cbd69 --- /dev/null +++ b/crates/integration-tests/src/tests/expressions.rs @@ -0,0 +1,169 @@ +use insta::assert_yaml_snapshot; +use ndc_models::{ExistsInCollection, Expression}; +use ndc_test_helpers::{ + array, asc, binop, exists, field, object, query, query_request, relation_field, relationship, + target, value, +}; + +use crate::{connector::Connector, graphql_query, run_connector_query}; + +#[tokio::test] +async fn evaluates_field_name_that_requires_escaping() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + weirdFieldNames(where: { invalidName: { _eq: 3 } }) { + invalidName + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn evaluates_field_name_that_requires_escaping_in_complex_expression() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + weirdFieldNames( + where: { + _and: [ + { invalidName: { _gt: 2 } }, + { invalidName: { _lt: 4 } } + ] + } + ) { + invalidName + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn evaluates_exists_with_predicate() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Artist") + .query( + query() + .predicate(exists( + ExistsInCollection::Related { + field_path: Default::default(), + relationship: "albums".into(), + arguments: Default::default(), + }, + binop("_iregex", target!("Title"), value!("Wild")) + )) + .fields([ + field!("_id"), + field!("Name"), + relation_field!("albums" => "albums", query().fields([ + field!("Title") + ]).order_by([asc!("Title")])) + ]), + ) + .relationships([( + "albums", + relationship("Album", [("ArtistId", &["ArtistId"])]) + )]) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn exists_with_predicate_with_escaped_field_name() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request().collection("weird_field_names").query( + query() + .predicate(exists( + ExistsInCollection::NestedCollection { + column_name: "$invalid.array".into(), + arguments: Default::default(), + field_path: Default::default(), + }, + binop("_lt", target!("$invalid.element"), value!(3)), + )) + .fields([ + field!("_id"), + field!("invalid_array" => "$invalid.array", array!(object!([ + field!("invalid_element" => "$invalid.element") + ]))) + ]) + .order_by([asc!("$invalid.name")]), + ) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn exists_in_nested_collection_without_predicate() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request().collection("nested_collection").query( + query() + .predicate(Expression::Exists { + in_collection: ExistsInCollection::NestedCollection { + column_name: "staff".into(), + arguments: Default::default(), + field_path: Default::default(), + }, + predicate: None, + }) + .fields([field!("_id"), field!("institution")]) + .order_by([asc!("institution")]), + ) + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn exists_in_nested_collection_without_predicate_with_escaped_field_name( +) -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request().collection("weird_field_names").query( + query() + .predicate(Expression::Exists { + in_collection: ExistsInCollection::NestedCollection { + column_name: "$invalid.array".into(), + arguments: Default::default(), + field_path: Default::default(), + }, + predicate: None, + }) + .fields([ + field!("_id"), + field!("invalid_array" => "$invalid.array", array!(object!([ + field!("invalid_element" => "$invalid.element") + ]))) + ]) + .order_by([asc!("$invalid.name")]), + ) + ) + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/filtering.rs b/crates/integration-tests/src/tests/filtering.rs new file mode 100644 index 00000000..fb435af3 --- /dev/null +++ b/crates/integration-tests/src/tests/filtering.rs @@ -0,0 +1,141 @@ +use insta::assert_yaml_snapshot; +use ndc_test_helpers::{ + array_contains, binop, field, is_empty, query, query_request, target, value, variable, +}; + +use crate::{connector::Connector, graphql_query, run_connector_query}; + +#[tokio::test] +async fn filters_using_in_operator() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + movies( + where: { rated: { _in: ["G", "TV-G"] } } + order_by: { id: Asc } + limit: 5 + ) { + title + rated + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn filters_on_extended_json_using_string_comparison() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query Filtering { + extendedJsonTestData(where: { value: { _regex: "hello" } }) { + type + value + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn filters_by_comparisons_on_elements_of_array_field() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + nestedCollection( + where: { staff: { name: { _eq: "Freeman" } } } + order_by: { institution: Asc } + ) { + institution + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn filters_by_comparison_with_a_variable() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request() + .variables([[("title", "The Blue Bird")]]) + .collection("movies") + .query( + query() + .predicate(binop("_eq", target!("title"), variable!(title))) + .fields([field!("title")]), + ) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn filters_by_array_comparison_contains() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + .predicate(array_contains(target!("cast"), value!("Albert Austin"))) + .fields([field!("title"), field!("cast")]), + ) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn filters_by_array_comparison_is_empty() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + .predicate(is_empty(target!("writers"))) + .fields([field!("writers")]) + .limit(1), + ) + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn filters_by_uuid() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request().collection("uuids").query( + query() + .predicate(binop( + "_eq", + target!("uuid"), + value!("40a693d0-c00a-425d-af5c-535e37fdfe9c") + )) + .fields([field!("name"), field!("uuid"), field!("uuid_as_string")]), + ) + ) + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/grouping.rs b/crates/integration-tests/src/tests/grouping.rs new file mode 100644 index 00000000..135faa19 --- /dev/null +++ b/crates/integration-tests/src/tests/grouping.rs @@ -0,0 +1,162 @@ +use insta::assert_yaml_snapshot; +use ndc_test_helpers::{ + and, asc, binop, column_aggregate, column_count_aggregate, dimension_column, field, grouping, or, ordered_dimensions, query, query_request, star_count_aggregate, target, value +}; + +use crate::{connector::Connector, run_connector_query}; + +#[tokio::test] +async fn runs_single_column_aggregate_on_groups() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + // The predicate avoids an error when encountering documents where `year` is + // a string instead of a number. + .predicate(or([ + binop("_gt", target!("year"), value!(0)), + binop("_lte", target!("year"), value!(0)), + ])) + .order_by([asc!("_id")]) + .limit(10) + .groups( + grouping() + .dimensions([dimension_column("year")]) + .aggregates([ + ( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg"), + ), + ("max_runtime", column_aggregate("runtime", "max")), + ]) + .order_by(ordered_dimensions()), + ), + ), + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn counts_column_values_in_groups() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + .predicate(and([ + binop("_gt", target!("year"), value!(1920)), + binop("_lte", target!("year"), value!(1923)), + ])) + .groups( + grouping() + .dimensions([dimension_column("rated")]) + .aggregates([ + // The distinct count should be 3 or less because we filtered to only 3 years + column_count_aggregate!("year_distinct_count" => "year", distinct: true), + column_count_aggregate!("year_count" => "year", distinct: false), + star_count_aggregate!("count"), + ]) + .order_by(ordered_dimensions()), + ), + ), + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn groups_by_multiple_dimensions() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + .predicate(binop("_lt", target!("year"), value!(1950))) + .order_by([asc!("_id")]) + .limit(10) + .groups( + grouping() + .dimensions([ + dimension_column("year"), + dimension_column("languages"), + dimension_column("rated"), + ]) + .aggregates([( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg"), + )]) + .order_by(ordered_dimensions()), + ), + ), + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn combines_aggregates_and_groups_in_one_query() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + .predicate(binop("_gte", target!("year"), value!(2000))) + .order_by([asc!("_id")]) + .limit(10) + .aggregates([( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg") + )]) + .groups( + grouping() + .dimensions([dimension_column("year"),]) + .aggregates([( + "average_viewer_rating_by_year", + column_aggregate("tomatoes.viewer.rating", "avg"), + )]) + .order_by(ordered_dimensions()), + ), + ), + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn combines_fields_and_groups_in_one_query() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request().collection("movies").query( + query() + // The predicate avoids an error when encountering documents where `year` is + // a string instead of a number. + .predicate(or([ + binop("_gt", target!("year"), value!(0)), + binop("_lte", target!("year"), value!(0)), + ])) + .order_by([asc!("_id")]) + .limit(3) + .fields([field!("title"), field!("year")]) + .order_by([asc!("_id")]) + .groups( + grouping() + .dimensions([dimension_column("year")]) + .aggregates([( + "average_viewer_rating_by_year", + column_aggregate("tomatoes.viewer.rating", "avg"), + )]) + .order_by(ordered_dimensions()), + ) + ), + ) + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/local_relationship.rs b/crates/integration-tests/src/tests/local_relationship.rs index d254c0a2..2031028b 100644 --- a/crates/integration-tests/src/tests/local_relationship.rs +++ b/crates/integration-tests/src/tests/local_relationship.rs @@ -1,5 +1,11 @@ -use crate::graphql_query; +use crate::{connector::Connector, graphql_query, run_connector_query}; use insta::assert_yaml_snapshot; +use ndc_test_helpers::{ + asc, binop, column, column_aggregate, column_count_aggregate, dimension_column, exists, field, + grouping, is_in, ordered_dimensions, query, query_request, related, relation_field, + relationship, star_count_aggregate, target, value, +}; +use serde_json::json; #[tokio::test] async fn joins_local_relationships() -> anyhow::Result<()> { @@ -182,3 +188,231 @@ async fn queries_through_relationship_with_null_value() -> anyhow::Result<()> { ); Ok(()) } + +#[tokio::test] +async fn joins_on_field_names_that_require_escaping() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request() + .collection("weird_field_names") + .query( + query() + .fields([ + field!("invalid_name" => "$invalid.name"), + relation_field!("join" => "join", query().fields([ + field!("invalid_name" => "$invalid.name") + ])) + ]) + .order_by([asc!("_id")]) + ) + .relationships([( + "join", + relationship("weird_field_names", [("$invalid.name", &["$invalid.name"])]) + )]) + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn joins_relationships_on_nested_key() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request() + .collection("departments") + .query( + query() + .predicate(exists( + related!("schools_departments"), + binop("_eq", target!("name"), value!("West Valley")) + )) + .fields([ + relation_field!("departments" => "schools_departments", query().fields([ + field!("name") + ])) + ]) + .order_by([asc!("_id")]) + ) + .relationships([( + "schools_departments", + relationship("schools", [("_id", &["departments", "math_department_id"])]) + )]) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn aggregates_over_related_collection() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Album") + .query( + query() + // avoid albums that are modified in mutation tests + .predicate(is_in( + target!("AlbumId"), + [json!(15), json!(91), json!(227)] + )) + .fields([relation_field!("tracks" => "tracks", query().aggregates([ + star_count_aggregate!("count"), + ("average_price", column_aggregate("UnitPrice", "avg").into()), + ]))]) + .order_by([asc!("_id")]) + ) + .relationships([("tracks", relationship("Track", [("AlbumId", &["AlbumId"])]))]) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn aggregates_over_empty_subset_of_related_collection() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Album") + .query( + query() + // avoid albums that are modified in mutation tests + .predicate(is_in( + target!("AlbumId"), + [json!(15), json!(91), json!(227)] + )) + .fields([relation_field!("tracks" => "tracks", query() + .predicate(binop("_eq", target!("Name"), value!("non-existent name"))) + .aggregates([ + star_count_aggregate!("count"), + column_count_aggregate!("composer_count" => "Composer", distinct: true), + ("average_price", column_aggregate("UnitPrice", "avg").into()), + ]))]) + .order_by([asc!("_id")]) + ) + .relationships([("tracks", relationship("Track", [("AlbumId", &["AlbumId"])]))]) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn groups_by_related_field() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Track") + .query( + query() + // avoid albums that are modified in mutation tests + .predicate(is_in( + target!("AlbumId"), + [json!(15), json!(91), json!(227)] + )) + .groups( + grouping() + .dimensions([dimension_column( + column("Name").from_relationship("track_genre") + )]) + .aggregates([( + "average_price", + column_aggregate("UnitPrice", "avg") + )]) + .order_by(ordered_dimensions()) + ) + ) + .relationships([( + "track_genre", + relationship("Genre", [("GenreId", &["GenreId"])]).object_type() + )]) + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn gets_groups_through_relationship() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Album") + .query( + query() + // avoid albums that are modified in mutation tests + .predicate(is_in(target!("AlbumId"), [json!(15), json!(91), json!(227)])) + .order_by([asc!("_id")]) + .fields([field!("AlbumId"), relation_field!("tracks" => "album_tracks", query() + .groups(grouping() + .dimensions([dimension_column(column("Name").from_relationship("track_genre"))]) + .aggregates([ + ("AlbumId", column_aggregate("AlbumId", "avg")), + ("average_price", column_aggregate("UnitPrice", "avg")), + ]) + .order_by(ordered_dimensions()), + ) + )]) + ) + .relationships([ + ( + "album_tracks", + relationship("Track", [("AlbumId", &["AlbumId"])]) + ), + ( + "track_genre", + relationship("Genre", [("GenreId", &["GenreId"])]).object_type() + ) + ]) + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn gets_fields_and_groups_through_relationship() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::Chinook, + query_request() + .collection("Album") + .query( + query() + .predicate(is_in(target!("AlbumId"), [json!(15), json!(91), json!(227)])) + .order_by([asc!("_id")]) + .fields([field!("AlbumId"), relation_field!("tracks" => "album_tracks", query() + .order_by([asc!("_id")]) + .fields([field!("AlbumId"), field!("Name"), field!("UnitPrice")]) + .groups(grouping() + .dimensions([dimension_column(column("Name").from_relationship("track_genre"))]) + .aggregates([( + "average_price", column_aggregate("UnitPrice", "avg") + )]) + .order_by(ordered_dimensions()), + ) + )]) + ) + .relationships([ + ( + "album_tracks", + relationship("Track", [("AlbumId", &["AlbumId"])]) + ), + ( + "track_genre", + relationship("Genre", [("GenreId", &["GenreId"])]).object_type() + ) + ]) + ) + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/mod.rs b/crates/integration-tests/src/tests/mod.rs index 0b687af9..6533de72 100644 --- a/crates/integration-tests/src/tests/mod.rs +++ b/crates/integration-tests/src/tests/mod.rs @@ -9,8 +9,13 @@ mod aggregation; mod basic; +mod expressions; +mod filtering; +mod grouping; mod local_relationship; mod native_mutation; mod native_query; +mod nested_collection; mod permissions; mod remote_relationship; +mod sorting; diff --git a/crates/integration-tests/src/tests/native_mutation.rs b/crates/integration-tests/src/tests/native_mutation.rs index 2dea14ac..b5a0c58e 100644 --- a/crates/integration-tests/src/tests/native_mutation.rs +++ b/crates/integration-tests/src/tests/native_mutation.rs @@ -66,7 +66,7 @@ async fn accepts_predicate_argument() -> anyhow::Result<()> { let mutation_resp = graphql_query( r#" mutation($albumId: Int!) 
{ - chinook_updateTrackPrices(newPrice: "11.99", where: {albumId: {_eq: $albumId}}) { + updateTrackPrices(newPrice: "11.99", where: {albumId: {_eq: $albumId}}) { n ok } @@ -79,7 +79,7 @@ async fn accepts_predicate_argument() -> anyhow::Result<()> { assert_eq!(mutation_resp.errors, None); assert_json!(mutation_resp.data, { - "chinook_updateTrackPrices": { + "updateTrackPrices": { "ok": 1.0, "n": validators::i64(|n| if n > &0 { Ok(()) diff --git a/crates/integration-tests/src/tests/native_query.rs b/crates/integration-tests/src/tests/native_query.rs index aa9ec513..6865b5fe 100644 --- a/crates/integration-tests/src/tests/native_query.rs +++ b/crates/integration-tests/src/tests/native_query.rs @@ -4,17 +4,6 @@ use ndc_test_helpers::{asc, binop, field, query, query_request, target, variable #[tokio::test] async fn runs_native_query_with_function_representation() -> anyhow::Result<()> { - // Skip this test in MongoDB 5 because the example fails there. We're getting an error: - // - // > Kind: Command failed: Error code 5491300 (Location5491300): $documents' is not allowed in user requests, labels: {} - // - // This doesn't affect native queries that don't use the $documents stage. - if let Ok(image) = std::env::var("MONGODB_IMAGE") { - if image == "mongo:5" { - return Ok(()); - } - } - assert_yaml_snapshot!( graphql_query( r#" @@ -35,13 +24,13 @@ async fn runs_native_query_with_collection_representation() -> anyhow::Result<() graphql_query( r#" query { - title_word_frequencies( + titleWordFrequency( where: {count: {_eq: 2}} - order_by: {word: Asc} + order_by: {id: Asc} offset: 100 limit: 25 ) { - word + id count } } @@ -55,17 +44,6 @@ async fn runs_native_query_with_collection_representation() -> anyhow::Result<() #[tokio::test] async fn runs_native_query_with_variable_sets() -> anyhow::Result<()> { - // Skip this test in MongoDB 5 because the example fails there. We're getting an error: - // - // > Kind: Command failed: Error code 5491300 (Location5491300): $documents' is not allowed in user requests, labels: {} - // - // This means that remote joins are not working in MongoDB 5 - if let Ok(image) = std::env::var("MONGODB_IMAGE") { - if image == "mongo:5" { - return Ok(()); - } - } - assert_yaml_snapshot!( run_connector_query( Connector::SampleMflix, diff --git a/crates/integration-tests/src/tests/nested_collection.rs b/crates/integration-tests/src/tests/nested_collection.rs new file mode 100644 index 00000000..eee65140 --- /dev/null +++ b/crates/integration-tests/src/tests/nested_collection.rs @@ -0,0 +1,28 @@ +use crate::{connector::Connector, run_connector_query}; +use insta::assert_yaml_snapshot; +use ndc_test_helpers::{ + array, asc, binop, exists, exists_in_nested, field, object, query, query_request, target, value, +}; + +#[tokio::test] +async fn exists_in_nested_collection() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::TestCases, + query_request().collection("nested_collection").query( + query() + .predicate(exists( + exists_in_nested("staff"), + binop("_eq", target!("name"), value!("Alyx")) + )) + .fields([ + field!("institution"), + field!("staff" => "staff", array!(object!([field!("name")]))), + ]) + .order_by([asc!("_id")]) + ) + ) + .await? 
+ ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/remote_relationship.rs b/crates/integration-tests/src/tests/remote_relationship.rs index fa1202c9..20837657 100644 --- a/crates/integration-tests/src/tests/remote_relationship.rs +++ b/crates/integration-tests/src/tests/remote_relationship.rs @@ -1,21 +1,13 @@ use crate::{connector::Connector, graphql_query, run_connector_query}; use insta::assert_yaml_snapshot; -use ndc_test_helpers::{and, asc, binop, field, query, query_request, target, variable}; +use ndc_test_helpers::{ + and, asc, binop, column_aggregate, column_count_aggregate, dimension_column, field, grouping, + ordered_dimensions, query, query_request, star_count_aggregate, target, value, variable, +}; use serde_json::json; #[tokio::test] async fn provides_source_and_target_for_remote_relationship() -> anyhow::Result<()> { - // Skip this test in MongoDB 5 because the example fails there. We're getting an error: - // - // > Kind: Command failed: Error code 5491300 (Location5491300): $documents' is not allowed in user requests, labels: {} - // - // This means that remote joins are not working in MongoDB 5 - if let Ok(image) = std::env::var("MONGODB_IMAGE") { - if image == "mongo:5" { - return Ok(()); - } - } - assert_yaml_snapshot!( graphql_query( r#" @@ -40,17 +32,6 @@ async fn provides_source_and_target_for_remote_relationship() -> anyhow::Result< #[tokio::test] async fn handles_request_with_single_variable_set() -> anyhow::Result<()> { - // Skip this test in MongoDB 5 because the example fails there. We're getting an error: - // - // > Kind: Command failed: Error code 5491300 (Location5491300): $documents' is not allowed in user requests, labels: {} - // - // This means that remote joins are not working in MongoDB 5 - if let Ok(image) = std::env::var("MONGODB_IMAGE") { - if image == "mongo:5" { - return Ok(()); - } - } - assert_yaml_snapshot!( run_connector_query( Connector::SampleMflix, @@ -70,17 +51,6 @@ async fn handles_request_with_single_variable_set() -> anyhow::Result<()> { #[tokio::test] async fn variable_used_in_multiple_type_contexts() -> anyhow::Result<()> { - // Skip this test in MongoDB 5 because the example fails there. We're getting an error: - // - // > Kind: Command failed: Error code 5491300 (Location5491300): $documents' is not allowed in user requests, labels: {} - // - // This means that remote joins are not working in MongoDB 5 - if let Ok(image) = std::env::var("MONGODB_IMAGE") { - if image == "mongo:5" { - return Ok(()); - } - } - assert_yaml_snapshot!( run_connector_query( Connector::SampleMflix, @@ -107,3 +77,116 @@ async fn variable_used_in_multiple_type_contexts() -> anyhow::Result<()> { ); Ok(()) } + +#[tokio::test] +async fn aggregates_request_with_variable_sets() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request() + .collection("movies") + .variables([[("year", json!(2014))]]) + .query( + query() + .predicate(binop("_eq", target!("year"), variable!(year))) + .aggregates([ + ( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg").into(), + ), + column_count_aggregate!("rated_count" => "rated", distinct: true), + star_count_aggregate!("count"), + ]) + ), + ) + .await? 
+ ); + Ok(()) +} + +#[tokio::test] +async fn aggregates_request_with_variable_sets_over_empty_collection_subset() -> anyhow::Result<()> +{ + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request() + .collection("movies") + .variables([[("year", json!(2014))]]) + .query( + query() + .predicate(and([ + binop("_eq", target!("year"), variable!(year)), + binop("_eq", target!("title"), value!("non-existent title")), + ])) + .aggregates([ + ( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg").into(), + ), + column_count_aggregate!("rated_count" => "rated", distinct: true), + star_count_aggregate!("count"), + ]) + ), + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn provides_groups_for_variable_set() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request() + .collection("movies") + .variables([[("year", json!(2014))]]) + .query( + query() + .predicate(binop("_eq", target!("year"), variable!(year))) + .groups( + grouping() + .dimensions([dimension_column("rated")]) + .aggregates([( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg"), + ),]) + .order_by(ordered_dimensions()), + ), + ), + ) + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn provides_fields_combined_with_groups_for_variable_set() -> anyhow::Result<()> { + assert_yaml_snapshot!( + run_connector_query( + Connector::SampleMflix, + query_request() + .collection("movies") + .variables([[("year", json!(2014))]]) + .query( + query() + .predicate(binop("_eq", target!("year"), variable!(year))) + .fields([field!("title"), field!("rated")]) + .order_by([asc!("_id")]) + .groups( + grouping() + .dimensions([dimension_column("rated")]) + .aggregates([( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg"), + ),]) + .order_by(ordered_dimensions()), + ) + .limit(3), + ), + ) + .await? + ); + Ok(()) +} diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_extended_json_representing_mixture_of_numeric_types.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_extended_json_representing_mixture_of_numeric_types.snap new file mode 100644 index 00000000..bcaa082a --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_extended_json_representing_mixture_of_numeric_types.snap @@ -0,0 +1,43 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query ($types: String!) {\n extendedJsonTestDataAggregate(\n filter_input: { where: { type: { _regex: $types } } }\n ) {\n value {\n avg\n _count\n max\n min\n sum\n _count_distinct\n }\n }\n extendedJsonTestData(where: { type: { _regex: $types } }) {\n type\n value\n }\n }\n \"#).variables(json!({\n \"types\": \"decimal|double|int|long\"\n})).run().await?" 
+--- +data: + extendedJsonTestDataAggregate: + value: + avg: + $numberDouble: "4.5" + _count: 8 + max: + $numberLong: "8" + min: + $numberDecimal: "1" + sum: + $numberDouble: "36.0" + _count_distinct: 8 + extendedJsonTestData: + - type: decimal + value: + $numberDecimal: "1" + - type: decimal + value: + $numberDecimal: "2" + - type: double + value: + $numberDouble: "3.0" + - type: double + value: + $numberDouble: "4.0" + - type: int + value: + $numberInt: "5" + - type: int + value: + $numberInt: "6" + - type: long + value: + $numberLong: "7" + - type: long + value: + $numberLong: "8" +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_mixture_of_numeric_and_null_values.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_mixture_of_numeric_and_null_values.snap new file mode 100644 index 00000000..e54279e9 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_mixture_of_numeric_and_null_values.snap @@ -0,0 +1,27 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query ($types: String!) {\n extendedJsonTestDataAggregate(\n filter_input: { where: { type: { _regex: $types } } }\n ) {\n value {\n avg\n _count\n max\n min\n sum\n _count_distinct\n }\n }\n extendedJsonTestData(where: { type: { _regex: $types } }) {\n type\n value\n }\n }\n \"#).variables(json!({\n \"types\": \"double|null\"\n})).run().await?" +--- +data: + extendedJsonTestDataAggregate: + value: + avg: + $numberDouble: "3.5" + _count: 2 + max: + $numberDouble: "4.0" + min: + $numberDouble: "3.0" + sum: + $numberDouble: "7.0" + _count_distinct: 2 + extendedJsonTestData: + - type: double + value: + $numberDouble: "3.0" + - type: double + value: + $numberDouble: "4.0" + - type: "null" + value: ~ +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_nested_field_values.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_nested_field_values.snap new file mode 100644 index 00000000..51304f6d --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__aggregates_nested_field_values.snap @@ -0,0 +1,17 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query {\n moviesAggregate(\n filter_input: {where: {title: {_in: [\"Within Our Gates\", \"The Ace of Hearts\"]}}}\n ) {\n tomatoes {\n viewer {\n rating {\n avg\n }\n }\n critic {\n rating {\n avg\n }\n }\n }\n imdb {\n rating {\n avg\n }\n }\n }\n }\n \"#).run().await?" 
+--- +data: + moviesAggregate: + tomatoes: + viewer: + rating: + avg: 3.45 + critic: + rating: + avg: ~ + imdb: + rating: + avg: 6.65 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_null_when_aggregating_empty_result_set.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_null_when_aggregating_empty_result_set.snap new file mode 100644 index 00000000..00ed6601 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_null_when_aggregating_empty_result_set.snap @@ -0,0 +1,9 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query {\n moviesAggregate(filter_input: {where: {title: {_eq: \"no such movie\"}}}) {\n runtime {\n avg\n }\n }\n }\n \"#).run().await?" +--- +data: + moviesAggregate: + runtime: + avg: ~ +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_empty_result_set.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_empty_result_set.snap new file mode 100644 index 00000000..f436ce34 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_empty_result_set.snap @@ -0,0 +1,10 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query {\n moviesAggregate(filter_input: {where: {title: {_eq: \"no such movie\"}}}) {\n _count\n title {\n _count\n }\n }\n }\n \"#).run().await?" +--- +data: + moviesAggregate: + _count: 0 + title: + _count: 0 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_nested_fields_in_empty_result_set.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_nested_fields_in_empty_result_set.snap new file mode 100644 index 00000000..f7d33a3c --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__returns_zero_when_counting_nested_fields_in_empty_result_set.snap @@ -0,0 +1,10 @@ +--- +source: crates/integration-tests/src/tests/aggregation.rs +expression: "graphql_query(r#\"\n query {\n moviesAggregate(filter_input: {where: {title: {_eq: \"no such movie\"}}}) {\n awards {\n nominations {\n _count\n }\n }\n }\n }\n \"#).run().await?" +--- +data: + moviesAggregate: + awards: + nominations: + _count: 0 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__runs_aggregation_over_top_level_fields.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__runs_aggregation_over_top_level_fields.snap index 609c9931..3fb73855 100644 --- a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__runs_aggregation_over_top_level_fields.snap +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__aggregation__runs_aggregation_over_top_level_fields.snap @@ -1,6 +1,6 @@ --- source: crates/integration-tests/src/tests/aggregation.rs -expression: "graphql_query(r#\"\n query($albumId: Int!) 
{\n track(order_by: { id: Asc }, where: { albumId: { _eq: $albumId } }) {\n milliseconds\n unitPrice\n }\n trackAggregate(\n filter_input: { order_by: { id: Asc }, where: { albumId: { _eq: $albumId } } }\n ) {\n _count\n milliseconds {\n _avg\n _max\n _min\n _sum\n }\n unitPrice {\n _count\n _count_distinct\n }\n }\n }\n \"#).variables(json!({\n \"albumId\": 9\n })).run().await?" +expression: "graphql_query(r#\"\n query($albumId: Int!) {\n track(order_by: { id: Asc }, where: { albumId: { _eq: $albumId } }) {\n milliseconds\n unitPrice\n }\n trackAggregate(\n filter_input: { order_by: { id: Asc }, where: { albumId: { _eq: $albumId } } }\n ) {\n _count\n milliseconds {\n avg\n max\n min\n sum\n }\n unitPrice {\n _count\n _count_distinct\n }\n }\n }\n \"#).variables(json!({\n \"albumId\": 9\n})).run().await?" --- data: track: @@ -23,10 +23,10 @@ data: trackAggregate: _count: 8 milliseconds: - _avg: 333925.875 - _max: 436453 - _min: 221701 - _sum: 2671407 + avg: 333925.875 + max: 436453 + min: 221701 + sum: "2671407" unitPrice: _count: 8 _count_distinct: 1 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_field_names_that_require_escaping.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_field_names_that_require_escaping.snap new file mode 100644 index 00000000..cb341577 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_field_names_that_require_escaping.snap @@ -0,0 +1,12 @@ +--- +source: crates/integration-tests/src/tests/basic.rs +expression: "graphql_query(r#\"\n query {\n weirdFieldNames(limit: 1, order_by: { invalidName: Asc }) {\n invalidName\n invalidObjectName {\n validName\n }\n validObjectName {\n invalidNestedName\n }\n }\n }\n \"#).run().await?" +--- +data: + weirdFieldNames: + - invalidName: 1 + invalidObjectName: + validName: 1 + validObjectName: + invalidNestedName: 1 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_nested_field_with_dollar_sign_in_name.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_nested_field_with_dollar_sign_in_name.snap new file mode 100644 index 00000000..656a6dc3 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__basic__selects_nested_field_with_dollar_sign_in_name.snap @@ -0,0 +1,13 @@ +--- +source: crates/integration-tests/src/tests/basic.rs +expression: "graphql_query(r#\"\n query {\n nestedFieldWithDollar(order_by: { configuration: Asc }) {\n configuration {\n schema\n }\n }\n }\n \"#).run().await?" 
+--- +data: + nestedFieldWithDollar: + - configuration: + schema: ~ + - configuration: + schema: schema1 + - configuration: + schema: schema3 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_exists_with_predicate.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_exists_with_predicate.snap new file mode 100644 index 00000000..4d928827 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_exists_with_predicate.snap @@ -0,0 +1,11 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "run_connector_query(Connector::Chinook,\n query_request().collection(\"Artist\").query(query().predicate(exists(ExistsInCollection::Related {\n relationship: \"albums\".into(),\n arguments: Default::default(),\n },\n binop(\"_iregex\", target!(\"Title\"),\n value!(\"Wild\")))).fields([field!(\"_id\"), field!(\"Name\"),\n relation_field!(\"albums\" => \"albums\",\n query().fields([field!(\"Title\")]))])).relationships([(\"albums\",\n relationship(\"Album\", [(\"ArtistId\", \"ArtistId\")]))])).await?" +--- +- rows: + - Name: Accept + _id: 66134cc163c113a2dc1364ad + albums: + rows: + - Title: Balls to the Wall + - Title: Restless and Wild diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping.snap new file mode 100644 index 00000000..fc9f6e18 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "graphql_query(r#\"\n query {\n weirdFieldNames(where: { invalidName: { _eq: 3 } }) {\n invalidName\n }\n }\n \"#).run().await?" +--- +data: + weirdFieldNames: + - invalidName: 3 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping_in_complex_expression.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping_in_complex_expression.snap new file mode 100644 index 00000000..db551750 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__evaluates_field_name_that_requires_escaping_in_complex_expression.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "graphql_query(r#\"\n query {\n weirdFieldNames(\n where: { \n _and: [\n { invalidName: { _gt: 2 } },\n { invalidName: { _lt: 4 } } \n ] \n }\n ) {\n invalidName\n }\n }\n \"#).run().await?" 
+--- +data: + weirdFieldNames: + - invalidName: 3 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate.snap new file mode 100644 index 00000000..bb6e8460 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate.snap @@ -0,0 +1,11 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "run_connector_query(Connector::TestCases,\n query_request().collection(\"nested_collection\").query(query().predicate(Expression::Exists {\n in_collection: ExistsInCollection::NestedCollection {\n column_name: \"staff\".into(),\n arguments: Default::default(),\n field_path: Default::default(),\n },\n predicate: None,\n }).fields([field!(\"_id\"),\n field!(\"institution\")]).order_by([asc!(\"institution\")]))).await?" +--- +- rows: + - _id: 6705a1cec2df58ace3e67807 + institution: Aperture Science + - _id: 6705a1c2c2df58ace3e67806 + institution: Black Mesa + - _id: 6705a1d7c2df58ace3e67808 + institution: City 17 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate_with_escaped_field_name.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate_with_escaped_field_name.snap new file mode 100644 index 00000000..02a0ab0e --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_in_nested_collection_without_predicate_with_escaped_field_name.snap @@ -0,0 +1,17 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "run_connector_query(Connector::TestCases,\n query_request().collection(\"weird_field_names\").query(query().predicate(Expression::Exists {\n in_collection: ExistsInCollection::NestedCollection {\n column_name: \"$invalid.array\".into(),\n arguments: Default::default(),\n field_path: Default::default(),\n },\n predicate: None,\n }).fields([field!(\"_id\"),\n field!(\"invalid_array\" => \"$invalid.array\",\n array!(object!([field!(\"invalid_element\" =>\n \"$invalid.element\")])))]).order_by([asc!(\"$invalid.name\")]))).await?" 
+--- +- rows: + - _id: 66cf91a0ec1dfb55954378bd + invalid_array: + - invalid_element: 1 + - _id: 66cf9230ec1dfb55954378be + invalid_array: + - invalid_element: 2 + - _id: 66cf9274ec1dfb55954378bf + invalid_array: + - invalid_element: 3 + - _id: 66cf9295ec1dfb55954378c0 + invalid_array: + - invalid_element: 4 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_with_predicate_with_escaped_field_name.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_with_predicate_with_escaped_field_name.snap new file mode 100644 index 00000000..60507475 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__expressions__exists_with_predicate_with_escaped_field_name.snap @@ -0,0 +1,11 @@ +--- +source: crates/integration-tests/src/tests/expressions.rs +expression: "run_connector_query(Connector::TestCases,\n query_request().collection(\"weird_field_names\").query(query().predicate(exists(ExistsInCollection::NestedCollection {\n column_name: \"$invalid.array\".into(),\n arguments: Default::default(),\n field_path: Default::default(),\n },\n binop(\"_lt\", target!(\"$invalid.element\"),\n value!(3)))).fields([field!(\"_id\"),\n field!(\"invalid_array\" => \"$invalid.array\",\n array!(object!([field!(\"invalid_element\" =>\n \"$invalid.element\")])))]).order_by([asc!(\"$invalid.name\")]))).await?" +--- +- rows: + - _id: 66cf91a0ec1dfb55954378bd + invalid_array: + - invalid_element: 1 + - _id: 66cf9230ec1dfb55954378be + invalid_array: + - invalid_element: 2 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_contains.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_contains.snap new file mode 100644 index 00000000..43711a77 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_contains.snap @@ -0,0 +1,11 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(array_contains(target!(\"cast\"),\nvalue!(\"Albert Austin\"))).fields([field!(\"title\"), field!(\"cast\")]),)).await?" +--- +- rows: + - cast: + - Charles Chaplin + - Edna Purviance + - Eric Campbell + - Albert Austin + title: The Immigrant diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_is_empty.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_is_empty.snap new file mode 100644 index 00000000..5285af75 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_array_comparison_is_empty.snap @@ -0,0 +1,6 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(is_empty(target!(\"writers\"))).fields([field!(\"writers\")]).limit(1),)).await?" 
+--- +- rows: + - writers: [] diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparison_with_a_variable.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparison_with_a_variable.snap new file mode 100644 index 00000000..d2b39ddc --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparison_with_a_variable.snap @@ -0,0 +1,6 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().variables([[(\"title\",\n\"The Blue Bird\")]]).collection(\"movies\").query(query().predicate(binop(\"_eq\",\ntarget!(\"title\"), variable!(title))).fields([field!(\"title\")]),)).await?" +--- +- rows: + - title: The Blue Bird diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_field.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_field.snap new file mode 100644 index 00000000..32120675 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_field.snap @@ -0,0 +1,9 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "graphql_query(r#\"\n query {\n nestedCollection(\n where: { staff: { name: { _eq: \"Freeman\" } } }\n order_by: { institution: Asc }\n ) {\n institution\n }\n }\n \"#).run().await?" +--- +data: + nestedCollection: + - institution: Black Mesa + - institution: City 17 +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_of_scalars.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_of_scalars.snap new file mode 100644 index 00000000..faf3986e --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_comparisons_on_elements_of_array_of_scalars.snap @@ -0,0 +1,13 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "graphql_query(r#\"\n query MyQuery {\n movies(where: { cast: { _eq: \"Albert Austin\" } }) {\n title\n cast\n }\n }\n \"#).run().await?" +--- +data: + movies: + - title: The Immigrant + cast: + - Charles Chaplin + - Edna Purviance + - Eric Campbell + - Albert Austin +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_uuid.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_uuid.snap new file mode 100644 index 00000000..80fd4607 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_by_uuid.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "run_connector_query(Connector::TestCases,\nquery_request().collection(\"uuids\").query(query().predicate(binop(\"_eq\",\ntarget!(\"uuid\"),\nvalue!(\"40a693d0-c00a-425d-af5c-535e37fdfe9c\"))).fields([field!(\"name\"),\nfield!(\"uuid\"), field!(\"uuid_as_string\")]),)).await?" 
+--- +- rows: + - name: peristeria elata + uuid: 40a693d0-c00a-425d-af5c-535e37fdfe9c + uuid_as_string: 40a693d0-c00a-425d-af5c-535e37fdfe9c diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_on_extended_json_using_string_comparison.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_on_extended_json_using_string_comparison.snap new file mode 100644 index 00000000..88d6fa6a --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_on_extended_json_using_string_comparison.snap @@ -0,0 +1,9 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "graphql_query(r#\"\n query Filtering {\n extendedJsonTestData(where: { value: { _regex: \"hello\" } }) {\n type\n value\n }\n }\n \"#).variables(json!({\n \"types\": \"double|null\"\n })).run().await?" +--- +data: + extendedJsonTestData: + - type: string + value: "hello, world!" +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_using_in_operator.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_using_in_operator.snap new file mode 100644 index 00000000..6517e724 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__filtering__filters_using_in_operator.snap @@ -0,0 +1,17 @@ +--- +source: crates/integration-tests/src/tests/filtering.rs +expression: "graphql_query(r#\"\n query {\n movies(\n where: { rated: { _in: [\"G\", \"TV-G\"] } }\n order_by: { id: Asc }\n limit: 5\n ) {\n title\n rated\n }\n }\n \"#).run().await?" +--- +data: + movies: + - title: The Great Train Robbery + rated: TV-G + - title: A Corner in Wheat + rated: G + - title: From Hand to Mouth + rated: TV-G + - title: One Week + rated: TV-G + - title: The Devil to Pay! + rated: TV-G +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_aggregates_and_groups_in_one_query.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_aggregates_and_groups_in_one_query.snap new file mode 100644 index 00000000..efff0c4f --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_aggregates_and_groups_in_one_query.snap @@ -0,0 +1,27 @@ +--- +source: crates/integration-tests/src/tests/grouping.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(binop(\"_gte\",\ntarget!(\"year\"),\nvalue!(2000))).limit(10).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"))]).groups(grouping().dimensions([dimension_column(\"year\"),]).aggregates([(\"average_viewer_rating_by_year\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"),)]).order_by(ordered_dimensions()),),),).await?" 
+--- +- aggregates: + average_viewer_rating: 3.05 + groups: + - dimensions: + - 2000 + aggregates: + average_viewer_rating_by_year: 3.825 + - dimensions: + - 2001 + aggregates: + average_viewer_rating_by_year: 2.55 + - dimensions: + - 2002 + aggregates: + average_viewer_rating_by_year: 1.8 + - dimensions: + - 2003 + aggregates: + average_viewer_rating_by_year: 3 + - dimensions: + - 2005 + aggregates: + average_viewer_rating_by_year: 3.5 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_fields_and_groups_in_one_query.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_fields_and_groups_in_one_query.snap new file mode 100644 index 00000000..236aadae --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__combines_fields_and_groups_in_one_query.snap @@ -0,0 +1,24 @@ +--- +source: crates/integration-tests/src/tests/grouping.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(or([binop(\"_gt\",\ntarget!(\"year\"), value!(0)),\nbinop(\"_lte\", target!(\"year\"),\nvalue!(0)),])).fields([field!(\"title\"),\nfield!(\"year\")]).order_by([asc!(\"_id\")]).groups(grouping().dimensions([dimension_column(\"year\")]).aggregates([(\"average_viewer_rating_by_year\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"),)]).order_by(ordered_dimensions()),).limit(3),),).await?" +--- +- rows: + - title: Blacksmith Scene + year: 1893 + - title: The Great Train Robbery + year: 1903 + - title: The Land Beyond the Sunset + year: 1912 + groups: + - dimensions: + - 1893 + aggregates: + average_viewer_rating_by_year: 3 + - dimensions: + - 1903 + aggregates: + average_viewer_rating_by_year: 3.7 + - dimensions: + - 1912 + aggregates: + average_viewer_rating_by_year: 3.7 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__counts_column_values_in_groups.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__counts_column_values_in_groups.snap new file mode 100644 index 00000000..d8542d2b --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__counts_column_values_in_groups.snap @@ -0,0 +1,35 @@ +--- +source: crates/integration-tests/src/tests/grouping.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(and([binop(\"_gt\",\ntarget!(\"year\"), value!(1920)),\nbinop(\"_lte\", target!(\"year\"),\nvalue!(1923)),])).groups(grouping().dimensions([dimension_column(\"rated\")]).aggregates([column_count_aggregate!(\"year_distinct_count\"\n=> \"year\", distinct: true),\ncolumn_count_aggregate!(\"year_count\" => \"year\", distinct: false),\nstar_count_aggregate!(\"count\"),]).order_by(ordered_dimensions()),),),).await?" 
+--- +- groups: + - dimensions: + - ~ + aggregates: + year_distinct_count: 3 + year_count: 6 + count: 6 + - dimensions: + - NOT RATED + aggregates: + year_distinct_count: 3 + year_count: 4 + count: 4 + - dimensions: + - PASSED + aggregates: + year_distinct_count: 1 + year_count: 3 + count: 3 + - dimensions: + - TV-PG + aggregates: + year_distinct_count: 1 + year_count: 1 + count: 1 + - dimensions: + - UNRATED + aggregates: + year_distinct_count: 2 + year_count: 5 + count: 5 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__groups_by_multiple_dimensions.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__groups_by_multiple_dimensions.snap new file mode 100644 index 00000000..f2f0d486 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__groups_by_multiple_dimensions.snap @@ -0,0 +1,53 @@ +--- +source: crates/integration-tests/src/tests/grouping.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(binop(\"_lt\",\ntarget!(\"year\"),\nvalue!(1950))).order_by([asc!(\"_id\")]).limit(10).groups(grouping().dimensions([dimension_column(\"year\"),\ndimension_column(\"languages\"),\ndimension_column(\"rated\"),]).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"),)]).order_by(ordered_dimensions()),),),).await?" +--- +- groups: + - dimensions: + - 1893 + - ~ + - UNRATED + aggregates: + average_viewer_rating: 3 + - dimensions: + - 1903 + - - English + - TV-G + aggregates: + average_viewer_rating: 3.7 + - dimensions: + - 1909 + - - English + - G + aggregates: + average_viewer_rating: 3.6 + - dimensions: + - 1911 + - - English + - ~ + aggregates: + average_viewer_rating: 3.4 + - dimensions: + - 1912 + - - English + - UNRATED + aggregates: + average_viewer_rating: 3.7 + - dimensions: + - 1913 + - - English + - TV-PG + aggregates: + average_viewer_rating: 3 + - dimensions: + - 1914 + - - English + - ~ + aggregates: + average_viewer_rating: 3.0666666666666664 + - dimensions: + - 1915 + - ~ + - NOT RATED + aggregates: + average_viewer_rating: 3.2 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__runs_single_column_aggregate_on_groups.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__runs_single_column_aggregate_on_groups.snap new file mode 100644 index 00000000..4b3177a1 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__grouping__runs_single_column_aggregate_on_groups.snap @@ -0,0 +1,45 @@ +--- +source: crates/integration-tests/src/tests/grouping.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").query(query().predicate(or([binop(\"_gt\",\ntarget!(\"year\"), value!(0)),\nbinop(\"_lte\", target!(\"year\"),\nvalue!(0)),])).order_by([asc!(\"_id\")]).limit(10).groups(grouping().dimensions([dimension_column(\"year\")]).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\", \"avg\"),),\n(\"max_runtime\",\ncolumn_aggregate(\"runtime\",\n\"max\")),]).order_by(ordered_dimensions()),),),).await?" 
+--- +- groups: + - dimensions: + - 1893 + aggregates: + average_viewer_rating: 3 + max_runtime: 1 + - dimensions: + - 1903 + aggregates: + average_viewer_rating: 3.7 + max_runtime: 11 + - dimensions: + - 1909 + aggregates: + average_viewer_rating: 3.6 + max_runtime: 14 + - dimensions: + - 1911 + aggregates: + average_viewer_rating: 3.4 + max_runtime: 7 + - dimensions: + - 1912 + aggregates: + average_viewer_rating: 3.7 + max_runtime: 14 + - dimensions: + - 1913 + aggregates: + average_viewer_rating: 3 + max_runtime: 88 + - dimensions: + - 1914 + aggregates: + average_viewer_rating: 3.0666666666666664 + max_runtime: 199 + - dimensions: + - 1915 + aggregates: + average_viewer_rating: 3.2 + max_runtime: 165 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_empty_subset_of_related_collection.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_empty_subset_of_related_collection.snap new file mode 100644 index 00000000..398d5674 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_empty_subset_of_related_collection.snap @@ -0,0 +1,20 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::Chinook,\nquery_request().collection(\"Album\").query(query().predicate(is_in(target!(\"AlbumId\"),\n[json!(15), json!(91),\njson!(227)])).fields([relation_field!(\"tracks\" => \"tracks\",\nquery().predicate(binop(\"_eq\", target!(\"Name\"),\nvalue!(\"non-existent name\"))).aggregates([star_count_aggregate!(\"count\"),\ncolumn_count_aggregate!(\"composer_count\" => \"Composer\", distinct: true),\n(\"average_price\",\ncolumn_aggregate(\"UnitPrice\",\n\"avg\").into()),]))]).order_by([asc!(\"_id\")])).relationships([(\"tracks\",\nrelationship(\"Track\", [(\"AlbumId\", &[\"AlbumId\"])]))])).await?" +--- +- rows: + - tracks: + aggregates: + average_price: ~ + composer_count: 0 + count: 0 + - tracks: + aggregates: + average_price: ~ + composer_count: 0 + count: 0 + - tracks: + aggregates: + average_price: ~ + composer_count: 0 + count: 0 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_related_collection.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_related_collection.snap new file mode 100644 index 00000000..03f0e861 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__aggregates_over_related_collection.snap @@ -0,0 +1,17 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::Chinook,\nquery_request().collection(\"Album\").query(query().predicate(is_in(target!(\"AlbumId\"),\n[json!(15), json!(91),\njson!(227)])).fields([relation_field!(\"tracks\" => \"tracks\",\nquery().aggregates([star_count_aggregate!(\"count\"),\n(\"average_price\",\ncolumn_aggregate(\"UnitPrice\",\n\"avg\").into()),]))]).order_by([asc!(\"_id\")])).relationships([(\"tracks\",\nrelationship(\"Track\", [(\"AlbumId\", &[\"AlbumId\"])]))])).await?" 
+--- +- rows: + - tracks: + aggregates: + average_price: 0.99 + count: 5 + - tracks: + aggregates: + average_price: 0.99 + count: 16 + - tracks: + aggregates: + average_price: 1.99 + count: 19 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_fields_and_groups_through_relationship.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_fields_and_groups_through_relationship.snap new file mode 100644 index 00000000..f3aaa8ea --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_fields_and_groups_through_relationship.snap @@ -0,0 +1,152 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::Chinook,\nquery_request().collection(\"Album\").query(query().predicate(is_in(target!(\"AlbumId\"),\n[json!(15), json!(91),\njson!(227)])).order_by([asc!(\"_id\")]).fields([field!(\"AlbumId\"),\nrelation_field!(\"tracks\" => \"album_tracks\",\nquery().order_by([asc!(\"_id\")]).fields([field!(\"AlbumId\"), field!(\"Name\"),\nfield!(\"UnitPrice\")]).groups(grouping().dimensions([dimension_column(column(\"Name\").from_relationship(\"track_genre\"))]).aggregates([(\"average_price\",\ncolumn_aggregate(\"UnitPrice\",\n\"avg\"))]).order_by(ordered_dimensions()),))])).relationships([(\"album_tracks\",\nrelationship(\"Track\", [(\"AlbumId\", &[\"AlbumId\"])])),\n(\"track_genre\",\nrelationship(\"Genre\", [(\"GenreId\", &[\"GenreId\"])]).object_type())])).await?" +--- +- rows: + - AlbumId: 15 + tracks: + groups: + - average_price: 0.99 + dimensions: + - - Metal + rows: + - AlbumId: 15 + Name: Heart Of Gold + UnitPrice: "0.99" + - AlbumId: 15 + Name: Snowblind + UnitPrice: "0.99" + - AlbumId: 15 + Name: Like A Bird + UnitPrice: "0.99" + - AlbumId: 15 + Name: Blood In The Wall + UnitPrice: "0.99" + - AlbumId: 15 + Name: The Beginning...At Last + UnitPrice: "0.99" + - AlbumId: 91 + tracks: + groups: + - average_price: 0.99 + dimensions: + - - Rock + rows: + - AlbumId: 91 + Name: Right Next Door to Hell + UnitPrice: "0.99" + - AlbumId: 91 + Name: "Dust N' Bones" + UnitPrice: "0.99" + - AlbumId: 91 + Name: Live and Let Die + UnitPrice: "0.99" + - AlbumId: 91 + Name: "Don't Cry (Original)" + UnitPrice: "0.99" + - AlbumId: 91 + Name: Perfect Crime + UnitPrice: "0.99" + - AlbumId: 91 + Name: "You Ain't the First" + UnitPrice: "0.99" + - AlbumId: 91 + Name: Bad Obsession + UnitPrice: "0.99" + - AlbumId: 91 + Name: Back off Bitch + UnitPrice: "0.99" + - AlbumId: 91 + Name: "Double Talkin' Jive" + UnitPrice: "0.99" + - AlbumId: 91 + Name: November Rain + UnitPrice: "0.99" + - AlbumId: 91 + Name: The Garden + UnitPrice: "0.99" + - AlbumId: 91 + Name: Garden of Eden + UnitPrice: "0.99" + - AlbumId: 91 + Name: "Don't Damn Me" + UnitPrice: "0.99" + - AlbumId: 91 + Name: Bad Apples + UnitPrice: "0.99" + - AlbumId: 91 + Name: Dead Horse + UnitPrice: "0.99" + - AlbumId: 91 + Name: Coma + UnitPrice: "0.99" + - AlbumId: 227 + tracks: + groups: + - average_price: 1.99 + dimensions: + - - Sci Fi & Fantasy + - average_price: 1.99 + dimensions: + - - Science Fiction + - average_price: 1.99 + dimensions: + - - TV Shows + rows: + - AlbumId: 227 + Name: Occupation / Precipice + UnitPrice: "1.99" + - AlbumId: 227 + Name: "Exodus, Pt. 1" + UnitPrice: "1.99" + - AlbumId: 227 + Name: "Exodus, Pt. 
2" + UnitPrice: "1.99" + - AlbumId: 227 + Name: Collaborators + UnitPrice: "1.99" + - AlbumId: 227 + Name: Torn + UnitPrice: "1.99" + - AlbumId: 227 + Name: A Measure of Salvation + UnitPrice: "1.99" + - AlbumId: 227 + Name: Hero + UnitPrice: "1.99" + - AlbumId: 227 + Name: Unfinished Business + UnitPrice: "1.99" + - AlbumId: 227 + Name: The Passage + UnitPrice: "1.99" + - AlbumId: 227 + Name: The Eye of Jupiter + UnitPrice: "1.99" + - AlbumId: 227 + Name: Rapture + UnitPrice: "1.99" + - AlbumId: 227 + Name: Taking a Break from All Your Worries + UnitPrice: "1.99" + - AlbumId: 227 + Name: The Woman King + UnitPrice: "1.99" + - AlbumId: 227 + Name: A Day In the Life + UnitPrice: "1.99" + - AlbumId: 227 + Name: Dirty Hands + UnitPrice: "1.99" + - AlbumId: 227 + Name: Maelstrom + UnitPrice: "1.99" + - AlbumId: 227 + Name: The Son Also Rises + UnitPrice: "1.99" + - AlbumId: 227 + Name: "Crossroads, Pt. 1" + UnitPrice: "1.99" + - AlbumId: 227 + Name: "Crossroads, Pt. 2" + UnitPrice: "1.99" diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_groups_through_relationship.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_groups_through_relationship.snap new file mode 100644 index 00000000..9d6719e1 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__gets_groups_through_relationship.snap @@ -0,0 +1,34 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::Chinook,\nquery_request().collection(\"Album\").query(query().predicate(is_in(target!(\"AlbumId\"),\n[json!(15), json!(91),\njson!(227)])).order_by([asc!(\"_id\")]).fields([field!(\"AlbumId\"),\nrelation_field!(\"tracks\" => \"album_tracks\",\nquery().groups(grouping().dimensions([dimension_column(column(\"Name\").from_relationship(\"track_genre\"))]).aggregates([(\"AlbumId\",\ncolumn_aggregate(\"AlbumId\", \"avg\")),\n(\"average_price\",\ncolumn_aggregate(\"UnitPrice\",\n\"avg\")),]).order_by(ordered_dimensions()),))])).relationships([(\"album_tracks\",\nrelationship(\"Track\", [(\"AlbumId\", &[\"AlbumId\"])])),\n(\"track_genre\",\nrelationship(\"Genre\", [(\"GenreId\", &[\"GenreId\"])]).object_type())])).await?" 
+--- +- rows: + - AlbumId: 15 + tracks: + groups: + - AlbumId: 15 + average_price: 0.99 + dimensions: + - - Metal + - AlbumId: 91 + tracks: + groups: + - AlbumId: 91 + average_price: 0.99 + dimensions: + - - Rock + - AlbumId: 227 + tracks: + groups: + - AlbumId: 227 + average_price: 1.99 + dimensions: + - - Sci Fi & Fantasy + - AlbumId: 227 + average_price: 1.99 + dimensions: + - - Science Fiction + - AlbumId: 227 + average_price: 1.99 + dimensions: + - - TV Shows diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__groups_by_related_field.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__groups_by_related_field.snap new file mode 100644 index 00000000..5e960c98 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__groups_by_related_field.snap @@ -0,0 +1,25 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::Chinook,\nquery_request().collection(\"Track\").query(query().predicate(is_in(target!(\"AlbumId\"),\n[json!(15), json!(91),\njson!(227)])).groups(grouping().dimensions([dimension_column(column(\"Name\").from_relationship(\"track_genre\"))]).aggregates([(\"average_price\",\ncolumn_aggregate(\"UnitPrice\",\n\"avg\"))]).order_by(ordered_dimensions()))).relationships([(\"track_genre\",\nrelationship(\"Genre\", [(\"GenreId\", &[\"GenreId\"])]).object_type())])).await?" +--- +- groups: + - dimensions: + - - Metal + aggregates: + average_price: 0.99 + - dimensions: + - - Rock + aggregates: + average_price: 0.99 + - dimensions: + - - Sci Fi & Fantasy + aggregates: + average_price: 1.99 + - dimensions: + - - Science Fiction + aggregates: + average_price: 1.99 + - dimensions: + - - TV Shows + aggregates: + average_price: 1.99 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_on_field_names_that_require_escaping.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_on_field_names_that_require_escaping.snap new file mode 100644 index 00000000..7dc18178 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_on_field_names_that_require_escaping.snap @@ -0,0 +1,21 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::TestCases,\n query_request().collection(\"weird_field_names\").query(query().fields([field!(\"invalid_name\"\n => \"$invalid.name\"),\n relation_field!(\"join\" => \"join\",\n query().fields([field!(\"invalid_name\" =>\n \"$invalid.name\")]))]).order_by([asc!(\"_id\")])).relationships([(\"join\",\n relationship(\"weird_field_names\",\n [(\"$invalid.name\", \"$invalid.name\")]))])).await?" 
+--- +- rows: + - invalid_name: 1 + join: + rows: + - invalid_name: 1 + - invalid_name: 2 + join: + rows: + - invalid_name: 2 + - invalid_name: 3 + join: + rows: + - invalid_name: 3 + - invalid_name: 4 + join: + rows: + - invalid_name: 4 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_relationships_on_nested_key.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_relationships_on_nested_key.snap new file mode 100644 index 00000000..2200e9e1 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__local_relationship__joins_relationships_on_nested_key.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/local_relationship.rs +expression: "run_connector_query(Connector::TestCases,\nquery_request().collection(\"departments\").query(query().predicate(exists(related!(\"schools_departments\"),\nbinop(\"_eq\", target!(\"name\"),\nvalue!(\"West Valley\")))).fields([relation_field!(\"departments\" =>\n\"schools_departments\",\nquery().fields([field!(\"name\")]))]).order_by([asc!(\"_id\")])).relationships([(\"schools_departments\",\nrelationship(\"schools\",\n[(\"_id\", &[\"departments\", \"math_department_id\"])]))])).await?" +--- +- rows: + - departments: + rows: + - name: West Valley diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__native_query__runs_native_query_with_collection_representation.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__native_query__runs_native_query_with_collection_representation.snap index c2d65132..f4e11e24 100644 --- a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__native_query__runs_native_query_with_collection_representation.snap +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__native_query__runs_native_query_with_collection_representation.snap @@ -1,57 +1,57 @@ --- source: crates/integration-tests/src/tests/native_query.rs -expression: "graphql_query(r#\"\n query {\n title_word_frequencies(\n where: {count: {_eq: 2}}\n order_by: {word: Asc}\n offset: 100\n limit: 25\n ) {\n word\n count\n }\n }\n \"#).run().await?" +expression: "graphql_query(r#\"\n query {\n titleWordFrequency(\n where: {count: {_eq: 2}}\n order_by: {id: Asc}\n offset: 100\n limit: 25\n ) {\n id\n count\n }\n }\n \"#).run().await?" --- data: - title_word_frequencies: - - word: Amish + titleWordFrequency: + - id: Amish count: 2 - - word: Amor? + - id: Amor? 
count: 2 - - word: Anara + - id: Anara count: 2 - - word: Anarchy + - id: Anarchy count: 2 - - word: Anastasia + - id: Anastasia count: 2 - - word: Anchorman + - id: Anchorman count: 2 - - word: Andre + - id: Andre count: 2 - - word: Andrei + - id: Andrei count: 2 - - word: Andromeda + - id: Andromeda count: 2 - - word: Andrè + - id: Andrè count: 2 - - word: Angela + - id: Angela count: 2 - - word: Angelica + - id: Angelica count: 2 - - word: "Angels'" + - id: "Angels'" count: 2 - - word: "Angels:" + - id: "Angels:" count: 2 - - word: Angst + - id: Angst count: 2 - - word: Animation + - id: Animation count: 2 - - word: Annabelle + - id: Annabelle count: 2 - - word: Anonyma + - id: Anonyma count: 2 - - word: Anonymous + - id: Anonymous count: 2 - - word: Answer + - id: Answer count: 2 - - word: Ant + - id: Ant count: 2 - - word: Antarctic + - id: Antarctic count: 2 - - word: Antoinette + - id: Antoinette count: 2 - - word: Anybody + - id: Anybody count: 2 - - word: Anywhere + - id: Anywhere count: 2 errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__nested_collection__exists_in_nested_collection.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__nested_collection__exists_in_nested_collection.snap new file mode 100644 index 00000000..5283509a --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__nested_collection__exists_in_nested_collection.snap @@ -0,0 +1,10 @@ +--- +source: crates/integration-tests/src/tests/nested_collection.rs +expression: "run_connector_query(Connector::TestCases,\nquery_request().collection(\"nested_collection\").query(query().predicate(exists(nested(\"staff\"),\nbinop(\"_eq\", target!(\"name\"),\nvalue!(\"Alyx\")))).fields([field!(\"institution\"),\nfield!(\"staff\" => \"staff\",\narray!(object!([field!(\"name\")]))),]).order_by([asc!(\"_id\")]))).await?" +--- +- rows: + - institution: City 17 + staff: + - name: Alyx + - name: Freeman + - name: Breen diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets.snap new file mode 100644 index 00000000..8e61071d --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/remote_relationship.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").variables([[(\"year\",\njson!(2014))]]).query(query().predicate(binop(\"_eq\", target!(\"year\"),\nvariable!(year))).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\", \"avg\").into(),),\ncolumn_count_aggregate!(\"rated_count\" => \"rated\", distinct: true),\nstar_count_aggregate!(\"count\"),])),).await?" 
+--- +- aggregates: + average_viewer_rating: 3.2435114503816793 + rated_count: 10 + count: 1147 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets_over_empty_collection_subset.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets_over_empty_collection_subset.snap new file mode 100644 index 00000000..d86d4497 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__aggregates_request_with_variable_sets_over_empty_collection_subset.snap @@ -0,0 +1,8 @@ +--- +source: crates/integration-tests/src/tests/remote_relationship.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").variables([[(\"year\",\njson!(2014))]]).query(query().predicate(and([binop(\"_eq\", target!(\"year\"),\nvariable!(year)),\nbinop(\"_eq\", target!(\"title\"),\nvalue!(\"non-existent title\")),])).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\", \"avg\").into(),),\ncolumn_count_aggregate!(\"rated_count\" => \"rated\", distinct: true),\nstar_count_aggregate!(\"count\"),])),).await?" +--- +- aggregates: + average_viewer_rating: ~ + rated_count: 0 + count: 0 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_fields_combined_with_groups_for_variable_set.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_fields_combined_with_groups_for_variable_set.snap new file mode 100644 index 00000000..37d2867c --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_fields_combined_with_groups_for_variable_set.snap @@ -0,0 +1,24 @@ +--- +source: crates/integration-tests/src/tests/remote_relationship.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").variables([[(\"year\",\njson!(2014))]]).query(query().predicate(binop(\"_eq\", target!(\"year\"),\nvariable!(year))).fields([field!(\"title\"),\nfield!(\"rated\")]).order_by([asc!(\"_id\")]).groups(grouping().dimensions([dimension_column(\"rated\")]).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"),),]).order_by(ordered_dimensions()),).limit(3),),).await?" 
+--- +- rows: + - rated: ~ + title: Action Jackson + - rated: PG-13 + title: The Giver + - rated: R + title: The Equalizer + groups: + - dimensions: + - ~ + aggregates: + average_viewer_rating: 2.3 + - dimensions: + - PG-13 + aggregates: + average_viewer_rating: 3.4 + - dimensions: + - R + aggregates: + average_viewer_rating: 3.9 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_groups_for_variable_set.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_groups_for_variable_set.snap new file mode 100644 index 00000000..fad8a471 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__remote_relationship__provides_groups_for_variable_set.snap @@ -0,0 +1,49 @@ +--- +source: crates/integration-tests/src/tests/remote_relationship.rs +expression: "run_connector_query(Connector::SampleMflix,\nquery_request().collection(\"movies\").variables([[(\"year\",\njson!(2014))]]).query(query().predicate(binop(\"_eq\", target!(\"year\"),\nvariable!(year))).groups(grouping().dimensions([dimension_column(\"rated\")]).aggregates([(\"average_viewer_rating\",\ncolumn_aggregate(\"tomatoes.viewer.rating\",\n\"avg\"),),]).order_by(ordered_dimensions()),),),).await?" +--- +- groups: + - dimensions: + - ~ + aggregates: + average_viewer_rating: 3.1320754716981134 + - dimensions: + - G + aggregates: + average_viewer_rating: 3.8 + - dimensions: + - NOT RATED + aggregates: + average_viewer_rating: 2.824242424242424 + - dimensions: + - PG + aggregates: + average_viewer_rating: 3.7096774193548385 + - dimensions: + - PG-13 + aggregates: + average_viewer_rating: 3.470707070707071 + - dimensions: + - R + aggregates: + average_viewer_rating: 3.3283783783783787 + - dimensions: + - TV-14 + aggregates: + average_viewer_rating: 3.233333333333333 + - dimensions: + - TV-G + aggregates: + average_viewer_rating: ~ + - dimensions: + - TV-MA + aggregates: + average_viewer_rating: 4.2 + - dimensions: + - TV-PG + aggregates: + average_viewer_rating: ~ + - dimensions: + - UNRATED + aggregates: + average_viewer_rating: 3.06875 diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_extended_json.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_extended_json.snap new file mode 100644 index 00000000..fb3c1e49 --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_extended_json.snap @@ -0,0 +1,45 @@ +--- +source: crates/integration-tests/src/tests/sorting.rs +expression: "graphql_query(r#\"\n query Sorting {\n extendedJsonTestData(order_by: { value: Desc }) {\n type\n value\n }\n }\n \"#).run().await?" +--- +data: + extendedJsonTestData: + - type: date + value: + $date: + $numberLong: "1724164680000" + - type: date + value: + $date: + $numberLong: "1637571600000" + - type: string + value: "hello, world!" 
+ - type: string + value: foo + - type: long + value: + $numberLong: "8" + - type: long + value: + $numberLong: "7" + - type: int + value: + $numberInt: "6" + - type: int + value: + $numberInt: "5" + - type: double + value: + $numberDouble: "4.0" + - type: double + value: + $numberDouble: "3.0" + - type: decimal + value: + $numberDecimal: "2" + - type: decimal + value: + $numberDecimal: "1" + - type: "null" + value: ~ +errors: ~ diff --git a/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_nested_field_names_that_require_escaping.snap b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_nested_field_names_that_require_escaping.snap new file mode 100644 index 00000000..701ccfdb --- /dev/null +++ b/crates/integration-tests/src/tests/snapshots/integration_tests__tests__sorting__sorts_on_nested_field_names_that_require_escaping.snap @@ -0,0 +1,12 @@ +--- +source: crates/integration-tests/src/tests/sorting.rs +expression: "graphql_query(r#\"\n query {\n weirdFieldNames(limit: 1, order_by: { invalidName: Asc }) {\n invalidName\n invalidObjectName {\n validName\n }\n validObjectName {\n invalidNestedName\n }\n }\n }\n \"#).run().await?" +--- +data: + weirdFieldNames: + - invalidName: 1 + invalidObjectName: + validName: 1 + validObjectName: + invalidNestedName: 1 +errors: ~ diff --git a/crates/integration-tests/src/tests/sorting.rs b/crates/integration-tests/src/tests/sorting.rs new file mode 100644 index 00000000..35d65283 --- /dev/null +++ b/crates/integration-tests/src/tests/sorting.rs @@ -0,0 +1,46 @@ +use insta::assert_yaml_snapshot; + +use crate::graphql_query; + +#[tokio::test] +async fn sorts_on_extended_json() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query Sorting { + extendedJsonTestData(order_by: { value: Desc }) { + type + value + } + } + "# + ) + .run() + .await? + ); + Ok(()) +} + +#[tokio::test] +async fn sorts_on_nested_field_names_that_require_escaping() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query { + weirdFieldNames(limit: 1, order_by: { invalidName: Asc }) { + invalidName + invalidObjectName { + validName + } + validObjectName { + invalidNestedName + } + } + } + "# + ) + .run() + .await? 
+ ); + Ok(()) +} diff --git a/crates/mongodb-agent-common/Cargo.toml b/crates/mongodb-agent-common/Cargo.toml index d123e86f..900e3979 100644 --- a/crates/mongodb-agent-common/Cargo.toml +++ b/crates/mongodb-agent-common/Cargo.toml @@ -4,6 +4,10 @@ description = "logic that is common to v2 and v3 agent versions" edition = "2021" version.workspace = true +[features] +default = [] +test-helpers = ["dep:mockall", "dep:pretty_assertions"] # exports mock database impl + [dependencies] configuration = { path = "../configuration" } mongodb-support = { path = "../mongodb-support" } @@ -21,13 +25,16 @@ indexmap = { workspace = true } indent = "^0.1" itertools = { workspace = true } lazy_static = "^1.4.0" +mockall = { version = "^0.13.1", optional = true } mongodb = { workspace = true } ndc-models = { workspace = true } +nonempty = { workspace = true } once_cell = "1" +pretty_assertions = { version = "1.4", optional = true } regex = "1" schemars = { version = "^0.8.12", features = ["smol_str"] } -serde = { version = "1.0", features = ["derive"] } -serde_json = { version = "1.0", features = ["preserve_order"] } +serde = { workspace = true } +serde_json = { workspace = true } serde_with = { version = "^3.7", features = ["base64", "hex"] } thiserror = "1" time = { version = "0.3.29", features = ["formatting", "parsing", "serde"] } @@ -38,7 +45,7 @@ mongodb-cli-plugin = { path = "../cli" } ndc-test-helpers = { path = "../ndc-test-helpers" } test-helpers = { path = "../test-helpers" } -mockall = "^0.12.1" -pretty_assertions = "1" +mockall = "^0.13.1" +pretty_assertions = "1.4" proptest = "1" tokio = { version = "1", features = ["full"] } diff --git a/crates/mongodb-agent-common/proptest-regressions/query/serialization/tests.txt b/crates/mongodb-agent-common/proptest-regressions/query/serialization/tests.txt index db207898..cbce5bb6 100644 --- a/crates/mongodb-agent-common/proptest-regressions/query/serialization/tests.txt +++ b/crates/mongodb-agent-common/proptest-regressions/query/serialization/tests.txt @@ -10,3 +10,5 @@ cc 7d760e540b56fedac7dd58e5bdb5bb9613b9b0bc6a88acfab3fc9c2de8bf026d # shrinks to cc 21360610045c5a616b371fb8d5492eb0c22065d62e54d9c8a8761872e2e192f3 # shrinks to bson = Array([Document({}), Document({" ": Null})]) cc 8842e7f78af24e19847be5d8ee3d47c547ef6c1bb54801d360a131f41a87f4fa cc 2a192b415e5669716701331fe4141383a12ceda9acc9f32e4284cbc2ed6f2d8a # shrinks to bson = Document({"A": Document({"¡": JavaScriptCodeWithScope { code: "", scope: Document({"\0": Int32(-1)}) }})}), mode = Relaxed +cc 4c37daee6ab1e1bcc75b4089786253f29271d116a1785180560ca431d2b4a651 # shrinks to bson = Document({"0": Document({"A": Array([Int32(0), Decimal128(...)])})}) +cc ad219d6630a8e9a386e734b6ba440577162cca8435c7685e32b574e9b1aa390e diff --git a/crates/mongodb-agent-common/src/aggregation_function.rs b/crates/mongodb-agent-common/src/aggregation_function.rs index 54cb0c0f..9c637dd6 100644 --- a/crates/mongodb-agent-common/src/aggregation_function.rs +++ b/crates/mongodb-agent-common/src/aggregation_function.rs @@ -1,23 +1,24 @@ +use configuration::MongoScalarType; use enum_iterator::{all, Sequence}; -// TODO: How can we unify this with the Accumulator type in the mongodb module? 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Sequence)] +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, Sequence)] pub enum AggregationFunction { Avg, - Count, Min, Max, Sum, } +use mongodb_support::BsonScalarType; use ndc_query_plan::QueryPlanError; use AggregationFunction as A; +use crate::mongo_query_plan::Type; + impl AggregationFunction { pub fn graphql_name(self) -> &'static str { match self { A::Avg => "avg", - A::Count => "count", A::Min => "min", A::Max => "max", A::Sum => "sum", @@ -32,13 +33,28 @@ impl AggregationFunction { }) } - pub fn is_count(self) -> bool { + /// Returns the result type that is declared for this function in the schema. + pub fn expected_result_type(self, argument_type: &Type) -> Option { match self { - A::Avg => false, - A::Count => true, - A::Min => false, - A::Max => false, - A::Sum => false, + A::Avg => Some(BsonScalarType::Double), + A::Min => None, + A::Max => None, + A::Sum => Some(if is_fractional(argument_type) { + BsonScalarType::Double + } else { + BsonScalarType::Long + }), } } } + +fn is_fractional(t: &Type) -> bool { + match t { + Type::Scalar(MongoScalarType::Bson(s)) => s.is_fractional(), + Type::Scalar(MongoScalarType::ExtendedJSON) => true, + Type::Object(_) => false, + Type::ArrayOf(_) => false, + Type::Tuple(ts) => ts.iter().all(is_fractional), + Type::Nullable(t) => is_fractional(t), + } +} diff --git a/crates/mongodb-agent-common/src/comparison_function.rs b/crates/mongodb-agent-common/src/comparison_function.rs index 09d288ed..f6357687 100644 --- a/crates/mongodb-agent-common/src/comparison_function.rs +++ b/crates/mongodb-agent-common/src/comparison_function.rs @@ -1,14 +1,12 @@ use enum_iterator::{all, Sequence}; use mongodb::bson::{doc, Bson, Document}; +use ndc_models as ndc; /// Supported binary comparison operators. This type provides GraphQL names, MongoDB operator /// names, and aggregation pipeline code for each operator. Argument types are defined in /// mongodb-agent-common/src/scalar_types_capabilities.rs. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Sequence)] +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, Sequence)] pub enum ComparisonFunction { - // Equality and inequality operators (except for `NotEqual`) are built into the v2 spec, but - // the only built-in operator in v3 is `Equal`. So we need at minimum definitions for - // inequality operators here. 
LessThan, LessThanOrEqual, GreaterThan, @@ -16,6 +14,9 @@ pub enum ComparisonFunction { Equal, NotEqual, + In, + NotIn, + Regex, /// case-insensitive regex IRegex, @@ -33,6 +34,8 @@ impl ComparisonFunction { C::GreaterThanOrEqual => "_gte", C::Equal => "_eq", C::NotEqual => "_neq", + C::In => "_in", + C::NotIn => "_nin", C::Regex => "_regex", C::IRegex => "_iregex", } @@ -45,12 +48,41 @@ impl ComparisonFunction { C::GreaterThan => "$gt", C::GreaterThanOrEqual => "$gte", C::Equal => "$eq", + C::In => "$in", + C::NotIn => "$nin", C::NotEqual => "$ne", C::Regex => "$regex", C::IRegex => "$regex", } } + pub fn ndc_definition( + self, + argument_type: impl FnOnce(Self) -> ndc::Type, + ) -> ndc::ComparisonOperatorDefinition { + use ndc::ComparisonOperatorDefinition as NDC; + match self { + C::Equal => NDC::Equal, + C::In => NDC::In, + C::LessThan => NDC::LessThan, + C::LessThanOrEqual => NDC::LessThanOrEqual, + C::GreaterThan => NDC::GreaterThan, + C::GreaterThanOrEqual => NDC::GreaterThanOrEqual, + C::NotEqual => NDC::Custom { + argument_type: argument_type(self), + }, + C::NotIn => NDC::Custom { + argument_type: argument_type(self), + }, + C::Regex => NDC::Custom { + argument_type: argument_type(self), + }, + C::IRegex => NDC::Custom { + argument_type: argument_type(self), + }, + } + } + pub fn from_graphql_name(s: &str) -> Result { all::() .find(|variant| variant.graphql_name() == s) diff --git a/crates/mongodb-agent-common/src/constants.rs b/crates/mongodb-agent-common/src/constants.rs new file mode 100644 index 00000000..91745adb --- /dev/null +++ b/crates/mongodb-agent-common/src/constants.rs @@ -0,0 +1,24 @@ +use mongodb::bson; +use serde::Deserialize; + +/// Value must match the field name in [BsonRowSet] +pub const ROW_SET_AGGREGATES_KEY: &str = "aggregates"; + +/// Value must match the field name in [BsonRowSet] +pub const ROW_SET_GROUPS_KEY: &str = "groups"; + +/// Value must match the field name in [BsonRowSet] +pub const ROW_SET_ROWS_KEY: &str = "rows"; + +#[derive(Debug, Deserialize)] +pub struct BsonRowSet { + #[serde(default)] + pub aggregates: Option, // name matches ROW_SET_AGGREGATES_KEY + #[serde(default)] + pub groups: Vec, // name matches ROW_SET_GROUPS_KEY + #[serde(default)] + pub rows: Vec, // name matches ROW_SET_ROWS_KEY +} + +/// Value must match the field name in [ndc_models::Group] +pub const GROUP_DIMENSIONS_KEY: &str = "dimensions"; diff --git a/crates/mongodb-agent-common/src/explain.rs b/crates/mongodb-agent-common/src/explain.rs index 4e556521..0b504da4 100644 --- a/crates/mongodb-agent-common/src/explain.rs +++ b/crates/mongodb-agent-common/src/explain.rs @@ -41,7 +41,7 @@ pub async fn explain_query( tracing::debug!(explain_command = %serde_json::to_string(&explain_command).unwrap()); - let explain_result = db.run_command(explain_command, None).await?; + let explain_result = db.run_command(explain_command).await?; let plan = serde_json::to_string_pretty(&explain_result).map_err(MongoAgentError::Serialization)?; diff --git a/crates/mongodb-agent-common/src/health.rs b/crates/mongodb-agent-common/src/health.rs deleted file mode 100644 index fd1d064b..00000000 --- a/crates/mongodb-agent-common/src/health.rs +++ /dev/null @@ -1,15 +0,0 @@ -use http::StatusCode; -use mongodb::bson::{doc, Document}; - -use crate::{interface_types::MongoAgentError, state::ConnectorState}; - -pub async fn check_health(state: &ConnectorState) -> Result { - let db = state.database(); - - let status: Result = db.run_command(doc! 
{ "ping": 1 }, None).await; - - match status { - Ok(_) => Ok(StatusCode::NO_CONTENT), - Err(_) => Ok(StatusCode::SERVICE_UNAVAILABLE), - } -} diff --git a/crates/mongodb-agent-common/src/interface_types/mongo_agent_error.rs b/crates/mongodb-agent-common/src/interface_types/mongo_agent_error.rs index a549ec58..ede7be2c 100644 --- a/crates/mongodb-agent-common/src/interface_types/mongo_agent_error.rs +++ b/crates/mongodb-agent-common/src/interface_types/mongo_agent_error.rs @@ -1,25 +1,29 @@ -use std::fmt::{self, Display}; +use std::{ + borrow::Cow, + fmt::{self, Display}, +}; use http::StatusCode; use mongodb::bson; use ndc_query_plan::QueryPlanError; use thiserror::Error; -use crate::{procedure::ProcedureError, query::QueryResponseError}; +use crate::{mongo_query_plan::Dimension, procedure::ProcedureError, query::QueryResponseError}; /// A superset of the DC-API `AgentError` type. This enum adds error cases specific to the MongoDB /// agent. #[derive(Debug, Error)] pub enum MongoAgentError { - BadCollectionSchema(String, bson::Bson, bson::de::Error), + BadCollectionSchema(Box<(String, bson::Bson, bson::de::Error)>), // boxed to avoid an excessively-large stack value BadQuery(anyhow::Error), + InvalidGroupDimension(Dimension), InvalidVariableName(String), InvalidScalarTypeName(String), MongoDB(#[from] mongodb::error::Error), MongoDBDeserialization(#[from] mongodb::bson::de::Error), MongoDBSerialization(#[from] mongodb::bson::ser::Error), MongoDBSupport(#[from] mongodb_support::error::Error), - NotImplemented(&'static str), + NotImplemented(Cow<'static, str>), Procedure(#[from] ProcedureError), QueryPlan(#[from] QueryPlanError), ResponseSerialization(#[from] QueryResponseError), @@ -34,32 +38,38 @@ use MongoAgentError::*; impl MongoAgentError { pub fn status_and_error_response(&self) -> (StatusCode, ErrorResponse) { match self { - BadCollectionSchema(collection_name, schema, err) => ( - StatusCode::INTERNAL_SERVER_ERROR, - ErrorResponse { - message: format!("Could not parse a collection validator: {err}"), - details: Some( - [ - ( - "collection_name".to_owned(), - serde_json::Value::String(collection_name.clone()), - ), - ( - "collection_validator".to_owned(), - bson::from_bson::(schema.clone()) - .unwrap_or_else(|err| { - serde_json::Value::String(format!( - "Failed to convert bson validator to json: {err}" - )) - }), - ), - ] - .into(), - ), - r#type: None, - }, - ), + BadCollectionSchema(boxed_details) => { + let (collection_name, schema, err) = &**boxed_details; + ( + StatusCode::INTERNAL_SERVER_ERROR, + ErrorResponse { + message: format!("Could not parse a collection validator: {err}"), + details: Some( + [ + ( + "collection_name".to_owned(), + serde_json::Value::String(collection_name.clone()), + ), + ( + "collection_validator".to_owned(), + bson::from_bson::(schema.clone()) + .unwrap_or_else(|err| { + serde_json::Value::String(format!( + "Failed to convert bson validator to json: {err}" + )) + }), + ), + ] + .into(), + ), + r#type: None, + }, + ) + }, BadQuery(err) => (StatusCode::BAD_REQUEST, ErrorResponse::new(&err)), + InvalidGroupDimension(dimension) => ( + StatusCode::BAD_REQUEST, ErrorResponse::new(&format!("Cannot express grouping dimension as a MongoDB query document expression: {dimension:?}")) + ), InvalidVariableName(name) => ( StatusCode::BAD_REQUEST, ErrorResponse::new(&format!("Column identifier includes characters that are not permitted in a MongoDB variable name: {name}")) diff --git a/crates/mongodb-agent-common/src/lib.rs b/crates/mongodb-agent-common/src/lib.rs 
index 4fcd6596..02819e93 100644 --- a/crates/mongodb-agent-common/src/lib.rs +++ b/crates/mongodb-agent-common/src/lib.rs @@ -1,7 +1,7 @@ pub mod aggregation_function; pub mod comparison_function; +mod constants; pub mod explain; -pub mod health; pub mod interface_types; pub mod mongo_query_plan; pub mod mongodb; diff --git a/crates/mongodb-agent-common/src/mongo_query_plan/mod.rs b/crates/mongodb-agent-common/src/mongo_query_plan/mod.rs index 4f378667..58d49073 100644 --- a/crates/mongodb-agent-common/src/mongo_query_plan/mod.rs +++ b/crates/mongodb-agent-common/src/mongo_query_plan/mod.rs @@ -1,9 +1,10 @@ use std::collections::BTreeMap; +use configuration::ConfigurationSerializationOptions; use configuration::{ native_mutation::NativeMutation, native_query::NativeQuery, Configuration, MongoScalarType, }; -use mongodb_support::{ExtendedJsonMode, EXTENDED_JSON_TYPE_NAME}; +use mongodb_support::{BsonScalarType, EXTENDED_JSON_TYPE_NAME}; use ndc_models as ndc; use ndc_query_plan::{ConnectorTypes, QueryContext, QueryPlanError}; @@ -11,14 +12,12 @@ use crate::aggregation_function::AggregationFunction; use crate::comparison_function::ComparisonFunction; use crate::scalar_types_capabilities::SCALAR_TYPES; -pub use ndc_query_plan::OrderByTarget; - #[derive(Clone, Debug)] pub struct MongoConfiguration(pub Configuration); impl MongoConfiguration { - pub fn extended_json_mode(&self) -> ExtendedJsonMode { - self.0.options.serialization_options.extended_json_mode + pub fn serialization_options(&self) -> &ConfigurationSerializationOptions { + &self.0.options.serialization_options } pub fn native_queries(&self) -> &BTreeMap { @@ -34,6 +33,14 @@ impl ConnectorTypes for MongoConfiguration { type AggregateFunction = AggregationFunction; type ComparisonOperator = ComparisonFunction; type ScalarType = MongoScalarType; + + fn count_aggregate_type() -> ndc_query_plan::Type { + ndc_query_plan::Type::scalar(BsonScalarType::Int) + } + + fn string_type() -> ndc_query_plan::Type { + ndc_query_plan::Type::scalar(BsonScalarType::String) + } } impl QueryContext for MongoConfiguration { @@ -93,6 +100,9 @@ fn scalar_type_name(t: &Type) -> Option<&'static str> { match t { Type::Scalar(MongoScalarType::Bson(s)) => Some(s.graphql_name()), Type::Scalar(MongoScalarType::ExtendedJSON) => Some(EXTENDED_JSON_TYPE_NAME), + Type::ArrayOf(t) if matches!(**t, Type::Scalar(_) | Type::Nullable(_)) => { + scalar_type_name(t) + } Type::Nullable(t) => scalar_type_name(t), _ => None, } @@ -101,19 +111,26 @@ fn scalar_type_name(t: &Type) -> Option<&'static str> { pub type Aggregate = ndc_query_plan::Aggregate; pub type Argument = ndc_query_plan::Argument; pub type Arguments = ndc_query_plan::Arguments; +pub type ArrayComparison = ndc_query_plan::ArrayComparison; pub type ComparisonTarget = ndc_query_plan::ComparisonTarget; pub type ComparisonValue = ndc_query_plan::ComparisonValue; -pub type ExistsInCollection = ndc_query_plan::ExistsInCollection; +pub type ExistsInCollection = ndc_query_plan::ExistsInCollection; pub type Expression = ndc_query_plan::Expression; pub type Field = ndc_query_plan::Field; +pub type Dimension = ndc_query_plan::Dimension; +pub type Grouping = ndc_query_plan::Grouping; +pub type GroupOrderBy = ndc_query_plan::GroupOrderBy; +pub type GroupOrderByTarget = ndc_query_plan::GroupOrderByTarget; pub type MutationOperation = ndc_query_plan::MutationOperation; pub type MutationPlan = ndc_query_plan::MutationPlan; pub type MutationProcedureArgument = ndc_query_plan::MutationProcedureArgument; pub type NestedField = 
ndc_query_plan::NestedField; pub type NestedArray = ndc_query_plan::NestedArray; pub type NestedObject = ndc_query_plan::NestedObject; +pub type ObjectField = ndc_query_plan::ObjectField; pub type ObjectType = ndc_query_plan::ObjectType; pub type OrderBy = ndc_query_plan::OrderBy; +pub type OrderByTarget = ndc_query_plan::OrderByTarget; pub type Query = ndc_query_plan::Query; pub type QueryPlan = ndc_query_plan::QueryPlan; pub type Relationship = ndc_query_plan::Relationship; diff --git a/crates/mongodb-agent-common/src/mongodb/collection.rs b/crates/mongodb-agent-common/src/mongodb/collection.rs index 090dc66a..4e2fca01 100644 --- a/crates/mongodb-agent-common/src/mongodb/collection.rs +++ b/crates/mongodb-agent-common/src/mongodb/collection.rs @@ -6,21 +6,20 @@ use mongodb::{ options::{AggregateOptions, FindOptions}, Collection, }; +use mongodb_support::aggregate::Pipeline; use serde::de::DeserializeOwned; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] use mockall::automock; -use super::Pipeline; - -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] use super::test_helpers::MockCursor; /// Abstract MongoDB collection methods. This lets us mock a database connection in tests. The /// automock attribute generates a struct called MockCollectionTrait that implements this trait. /// The mock provides a variety of methods for mocking and spying on database behavior in tests. /// See https://docs.rs/mockall/latest/mockall/ -#[cfg_attr(test, automock( +#[cfg_attr(any(test, feature = "test-helpers"), automock( type DocumentCursor=MockCursor; type RowCursor=MockCursor; ))] @@ -29,8 +28,8 @@ pub trait CollectionTrait where T: DeserializeOwned + Unpin + Send + Sync + 'static, { - type DocumentCursor: Stream> + 'static; - type RowCursor: Stream> + 'static; + type DocumentCursor: Stream> + 'static + Unpin; + type RowCursor: Stream> + 'static + Unpin; async fn aggregate( &self, @@ -40,13 +39,12 @@ where where Options: Into> + Send + 'static; - async fn find( + async fn find( &self, - filter: Filter, + filter: Document, options: Options, ) -> Result where - Filter: Into> + Send + 'static, Options: Into> + Send + 'static; } @@ -66,18 +64,19 @@ where where Options: Into> + Send + 'static, { - Collection::aggregate(self, pipeline, options).await + Collection::aggregate(self, pipeline) + .with_options(options) + .await } - async fn find( + async fn find( &self, - filter: Filter, + filter: Document, options: Options, ) -> Result where - Filter: Into> + Send + 'static, Options: Into> + Send + 'static, { - Collection::find(self, filter, options).await + Collection::find(self, filter).with_options(options).await } } diff --git a/crates/mongodb-agent-common/src/mongodb/database.rs b/crates/mongodb-agent-common/src/mongodb/database.rs index ce56a06f..b17a7293 100644 --- a/crates/mongodb-agent-common/src/mongodb/database.rs +++ b/crates/mongodb-agent-common/src/mongodb/database.rs @@ -1,16 +1,18 @@ use async_trait::async_trait; use futures_util::Stream; +use mongodb::results::CollectionSpecification; use mongodb::{bson::Document, error::Error, options::AggregateOptions, Database}; +use mongodb_support::aggregate::Pipeline; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] use mockall::automock; -use super::{CollectionTrait, Pipeline}; +use super::CollectionTrait; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] use super::MockCollectionTrait; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] use super::test_helpers::MockCursor; /// Abstract MongoDB database methods. 
This lets us mock a database connection in tests. The @@ -21,14 +23,16 @@ use super::test_helpers::MockCursor; /// I haven't figured out how to make generic associated types work with automock, so the type /// argument for `Collection` values produced via `DatabaseTrait::collection` is fixed to to /// `Document`. That's the way we're using collections in this app anyway. -#[cfg_attr(test, automock( +#[cfg_attr(any(test, feature = "test-helpers"), automock( type Collection = MockCollectionTrait; + type CollectionCursor = MockCursor; type DocumentCursor = MockCursor; ))] #[async_trait] pub trait DatabaseTrait { type Collection: CollectionTrait; - type DocumentCursor: Stream>; + type CollectionCursor: Stream> + Unpin; + type DocumentCursor: Stream> + Unpin; async fn aggregate( &self, @@ -39,11 +43,14 @@ pub trait DatabaseTrait { Options: Into> + Send + 'static; fn collection(&self, name: &str) -> Self::Collection; + + async fn list_collections(&self) -> Result; } #[async_trait] impl DatabaseTrait for Database { type Collection = mongodb::Collection; + type CollectionCursor = mongodb::Cursor; type DocumentCursor = mongodb::Cursor; async fn aggregate( @@ -54,10 +61,16 @@ impl DatabaseTrait for Database { where Options: Into> + Send + 'static, { - Database::aggregate(self, pipeline, options).await + Database::aggregate(self, pipeline) + .with_options(options) + .await } fn collection(&self, name: &str) -> Self::Collection { Database::collection::(self, name) } + + async fn list_collections(&self) -> Result { + Database::list_collections(self).await + } } diff --git a/crates/mongodb-agent-common/src/mongodb/mod.rs b/crates/mongodb-agent-common/src/mongodb/mod.rs index 8931d5db..2e489234 100644 --- a/crates/mongodb-agent-common/src/mongodb/mod.rs +++ b/crates/mongodb-agent-common/src/mongodb/mod.rs @@ -1,23 +1,16 @@ -mod accumulator; mod collection; mod database; -mod pipeline; pub mod sanitize; -mod selection; -mod stage; -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] pub mod test_helpers; -pub use self::{ - accumulator::Accumulator, collection::CollectionTrait, database::DatabaseTrait, - pipeline::Pipeline, selection::Selection, stage::Stage, -}; +pub use self::{collection::CollectionTrait, database::DatabaseTrait}; // MockCollectionTrait is generated by automock when the test flag is active. -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] pub use self::collection::MockCollectionTrait; // MockDatabase is generated by automock when the test flag is active. -#[cfg(test)] +#[cfg(any(test, feature = "test-helpers"))] pub use self::database::MockDatabaseTrait; diff --git a/crates/mongodb-agent-common/src/mongodb/sanitize.rs b/crates/mongodb-agent-common/src/mongodb/sanitize.rs index b5f3f84b..fc1cea2a 100644 --- a/crates/mongodb-agent-common/src/mongodb/sanitize.rs +++ b/crates/mongodb-agent-common/src/mongodb/sanitize.rs @@ -1,18 +1,5 @@ use std::borrow::Cow; -use anyhow::anyhow; -use mongodb::bson::{doc, Document}; - -use crate::interface_types::MongoAgentError; - -/// Produces a MongoDB expression that references a field by name in a way that is safe from code -/// injection. -/// -/// TODO: equivalent to ColumnRef::Expression -pub fn get_field(name: &str) -> Document { - doc! { "$getField": { "$literal": name } } -} - /// Given a name returns a valid variable name for use in MongoDB aggregation expressions. Outputs /// are guaranteed to be distinct for distinct inputs. Consistently returns the same output for the /// same input string. 
@@ -28,20 +15,8 @@ pub fn variable(name: &str) -> String { /// Returns false if the name contains characters that MongoDB will interpret specially, such as an /// initial dollar sign, or dots. This indicates whether a name is safe for field references /// - variable names are more strict. -pub fn is_name_safe(name: &str) -> bool { - !(name.starts_with('$') || name.contains('.')) -} - -/// Given a collection or field name, returns Ok if the name is safe, or Err if it contains -/// characters that MongoDB will interpret specially. -/// -/// TODO: MDB-159, MBD-160 remove this function in favor of ColumnRef which is infallible -pub fn safe_name(name: &str) -> Result, MongoAgentError> { - if name.starts_with('$') || name.contains('.') { - Err(MongoAgentError::BadQuery(anyhow!("cannot execute query that includes the name, \"{name}\", because it includes characters that MongoDB interperets specially"))) - } else { - Ok(Cow::Borrowed(name)) - } +pub fn is_name_safe(name: impl AsRef) -> bool { + !(name.as_ref().starts_with('$') || name.as_ref().contains('.')) } // The escape character must be a valid character in MongoDB variable names, but must not appear in @@ -56,7 +31,7 @@ const ESCAPE_CHAR_ESCAPE_SEQUENCE: u32 = 0xff; /// MongoDB variable names allow a limited set of ASCII characters, or any non-ASCII character. /// See https://www.mongodb.com/docs/manual/reference/aggregation-variables/ -fn escape_invalid_variable_chars(input: &str) -> String { +pub fn escape_invalid_variable_chars(input: &str) -> String { let mut encoded = String::new(); for char in input.chars() { match char { diff --git a/crates/mongodb-agent-common/src/mongodb/test_helpers.rs b/crates/mongodb-agent-common/src/mongodb/test_helpers.rs index 473db605..c89b3b70 100644 --- a/crates/mongodb-agent-common/src/mongodb/test_helpers.rs +++ b/crates/mongodb-agent-common/src/mongodb/test_helpers.rs @@ -14,7 +14,6 @@ use super::{MockCollectionTrait, MockDatabaseTrait}; // is produced when calling `into_iter` on a `Vec`. - Jesse H. // // To produce a mock stream use the `mock_stream` function in this module. 
-#[cfg(test)] pub type MockCursor = futures::stream::Iter<> as IntoIterator>::IntoIter>; /// Create a stream that can be returned from mock implementations for diff --git a/crates/mongodb-agent-common/src/mongodb_connection.rs b/crates/mongodb-agent-common/src/mongodb_connection.rs index b704a81b..ce4e6a3d 100644 --- a/crates/mongodb-agent-common/src/mongodb_connection.rs +++ b/crates/mongodb-agent-common/src/mongodb_connection.rs @@ -1,5 +1,5 @@ use mongodb::{ - options::{ClientOptions, DriverInfo, ResolverConfig}, + options::{ClientOptions, DriverInfo}, Client, }; @@ -9,9 +9,7 @@ const DRIVER_NAME: &str = "Hasura"; pub async fn get_mongodb_client(database_uri: &str) -> Result { // An extra line of code to work around a DNS issue on Windows: - let mut options = - ClientOptions::parse_with_resolver_config(database_uri, ResolverConfig::cloudflare()) - .await?; + let mut options = ClientOptions::parse(database_uri).await?; // Helps MongoDB to collect statistics on Hasura use options.driver_info = Some(DriverInfo::builder().name(DRIVER_NAME).build()); diff --git a/crates/mongodb-agent-common/src/procedure/interpolated_command.rs b/crates/mongodb-agent-common/src/procedure/interpolated_command.rs index 0761156a..131cee38 100644 --- a/crates/mongodb-agent-common/src/procedure/interpolated_command.rs +++ b/crates/mongodb-agent-common/src/procedure/interpolated_command.rs @@ -123,14 +123,18 @@ enum NativeMutationPart { } /// Parse a string or key in a native procedure into parts where variables have the syntax -/// `{{}}`. +/// `{{}}` or `{{ | type expression }}`. fn parse_native_mutation(string: &str) -> Vec { let vec: Vec> = string .split("{{") .filter(|part| !part.is_empty()) .map(|part| match part.split_once("}}") { None => vec![NativeMutationPart::Text(part.to_string())], - Some((var, text)) => { + Some((placeholder_content, text)) => { + let var = match placeholder_content.split_once("|") { + Some((var_name, _type_annotation)) => var_name, + None => placeholder_content, + }; if text.is_empty() { vec![NativeMutationPart::Parameter(var.trim().into())] } else { @@ -155,7 +159,7 @@ mod tests { use serde_json::json; use crate::{ - mongo_query_plan::{ObjectType, Type}, + mongo_query_plan::{ObjectField, ObjectType, Type}, procedure::arguments_to_mongodb_expressions::arguments_to_mongodb_expressions, }; @@ -166,7 +170,11 @@ mod tests { let native_mutation = NativeMutation { result_type: Type::Object(ObjectType { name: Some("InsertArtist".into()), - fields: [("ok".into(), Type::Scalar(MongoScalarType::Bson(S::Bool)))].into(), + fields: [( + "ok".into(), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::Bool))), + )] + .into(), }), command: doc! { "insert": "Artist", @@ -220,11 +228,11 @@ mod tests { fields: [ ( "ArtistId".into(), - Type::Scalar(MongoScalarType::Bson(S::Int)), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::Int))), ), ( "Name".into(), - Type::Scalar(MongoScalarType::Bson(S::String)), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::String))), ), ] .into(), @@ -233,7 +241,11 @@ mod tests { let native_mutation = NativeMutation { result_type: Type::Object(ObjectType { name: Some("InsertArtist".into()), - fields: [("ok".into(), Type::Scalar(MongoScalarType::Bson(S::Bool)))].into(), + fields: [( + "ok".into(), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::Bool))), + )] + .into(), }), command: doc! 
{ "insert": "Artist", @@ -283,7 +295,11 @@ mod tests { let native_mutation = NativeMutation { result_type: Type::Object(ObjectType { name: Some("Insert".into()), - fields: [("ok".into(), Type::Scalar(MongoScalarType::Bson(S::Bool)))].into(), + fields: [( + "ok".into(), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::Bool))), + )] + .into(), }), command: doc! { "insert": "{{prefix}}-{{basename}}", @@ -324,4 +340,49 @@ mod tests { ); Ok(()) } + + #[test] + fn strips_type_annotation_from_placeholder_text() -> anyhow::Result<()> { + let native_mutation = NativeMutation { + result_type: Type::Object(ObjectType { + name: Some("InsertArtist".into()), + fields: [( + "ok".into(), + ObjectField::new(Type::Scalar(MongoScalarType::Bson(S::Bool))), + )] + .into(), + }), + command: doc! { + "insert": "Artist", + "documents": [{ + "Name": "{{name | string! }}", + }], + }, + selection_criteria: Default::default(), + description: Default::default(), + }; + + let input_arguments = [( + "name".into(), + MutationProcedureArgument::Literal { + value: json!("Regina Spektor"), + argument_type: Type::Scalar(MongoScalarType::Bson(S::String)), + }, + )] + .into(); + + let arguments = arguments_to_mongodb_expressions(input_arguments)?; + let command = interpolated_command(&native_mutation.command, &arguments)?; + + assert_eq!( + command, + bson::doc! { + "insert": "Artist", + "documents": [{ + "Name": "Regina Spektor", + }], + } + ); + Ok(()) + } } diff --git a/crates/mongodb-agent-common/src/procedure/mod.rs b/crates/mongodb-agent-common/src/procedure/mod.rs index e700efa8..aa3079fc 100644 --- a/crates/mongodb-agent-common/src/procedure/mod.rs +++ b/crates/mongodb-agent-common/src/procedure/mod.rs @@ -44,9 +44,14 @@ impl<'a> Procedure<'a> { self, database: Database, ) -> Result<(bson::Document, Type), ProcedureError> { - let selection_criteria = self.selection_criteria.map(Cow::into_owned); let command = interpolate(self.arguments, &self.command)?; - let result = database.run_command(command, selection_criteria).await?; + let run_command = database.run_command(command); + let run_command = if let Some(selection_criteria) = self.selection_criteria { + run_command.selection_criteria(selection_criteria.into_owned()) + } else { + run_command + }; + let result = run_command.await?; Ok((result, self.result_type)) } diff --git a/crates/mongodb-agent-common/src/query/aggregates.rs b/crates/mongodb-agent-common/src/query/aggregates.rs new file mode 100644 index 00000000..86abf948 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/aggregates.rs @@ -0,0 +1,406 @@ +use std::collections::BTreeMap; + +use indexmap::IndexMap; +use mongodb::bson::{bson, Bson}; +use mongodb_support::aggregate::{Accumulator, Pipeline, Selection, Stage}; +use ndc_models::FieldName; + +use crate::{aggregation_function::AggregationFunction, mongo_query_plan::Aggregate}; + +use super::column_ref::ColumnRef; + +pub fn pipeline_for_aggregates(aggregates: &IndexMap) -> Pipeline { + let group_stage = Stage::Group { + key_expression: Bson::Null, + accumulators: accumulators_for_aggregates(aggregates), + }; + let replace_with_stage = Stage::ReplaceWith(selection_for_aggregates(aggregates)); + Pipeline::new(vec![group_stage, replace_with_stage]) +} + +pub fn accumulators_for_aggregates( + aggregates: &IndexMap, +) -> BTreeMap { + aggregates + .into_iter() + .map(|(name, aggregate)| (name.to_string(), aggregate_to_accumulator(aggregate))) + .collect() +} + +fn aggregate_to_accumulator(aggregate: &Aggregate) -> Accumulator { + use Aggregate as A; 
+ match aggregate { + A::ColumnCount { + column, + field_path, + distinct, + .. + } => { + let field_ref = ColumnRef::from_column_and_field_path(column, field_path.as_ref()) + .into_aggregate_expression() + .into_bson(); + if *distinct { + Accumulator::AddToSet(field_ref) + } else { + Accumulator::Sum(bson!({ + "$cond": { + "if": { "$eq": [field_ref, null] }, // count non-null, non-missing values + "then": 0, + "else": 1, + } + })) + } + } + A::SingleColumn { + column, + field_path, + function, + .. + } => { + use AggregationFunction as A; + + let field_ref = ColumnRef::from_column_and_field_path(column, field_path.as_ref()) + .into_aggregate_expression() + .into_bson(); + + match function { + A::Avg => Accumulator::Avg(field_ref), + A::Min => Accumulator::Min(field_ref), + A::Max => Accumulator::Max(field_ref), + A::Sum => Accumulator::Sum(field_ref), + } + } + A::StarCount => Accumulator::Sum(bson!(1)), + } +} + +fn selection_for_aggregates(aggregates: &IndexMap) -> Selection { + let selected_aggregates = aggregates + .iter() + .map(|(key, aggregate)| selection_for_aggregate(key, aggregate)) + .collect(); + Selection::new(selected_aggregates) +} + +pub fn selection_for_aggregate(key: &FieldName, aggregate: &Aggregate) -> (String, Bson) { + let column_ref = ColumnRef::from_field(key.as_ref()).into_aggregate_expression(); + + // Selecting distinct counts requires some post-processing since the $group stage produces + // an array of unique values. We need to count the non-null values in that array. + let value_expression = match aggregate { + Aggregate::ColumnCount { distinct, .. } if *distinct => bson!({ + "$reduce": { + "input": column_ref, + "initialValue": 0, + "in": { + "$cond": { + "if": { "$eq": ["$$this", null] }, + "then": "$$value", + "else": { "$sum": ["$$value", 1] }, + } + }, + } + }), + _ => column_ref.into(), + }; + + // Fill in null or zero values for missing fields. If we skip this we get errors on missing + // data down the line. + let value_expression = replace_missing_aggregate_value(value_expression, aggregate.is_count()); + + // Convert types to match what the engine expects for each aggregation result + let value_expression = convert_aggregate_result_type(value_expression, aggregate); + + (key.to_string(), value_expression) +} + +pub fn replace_missing_aggregate_value(expression: Bson, is_count: bool) -> Bson { + bson!({ + "$ifNull": [ + expression, + if is_count { bson!(0) } else { bson!(null) } + ] + }) +} + +/// The system expects specific return types for specific aggregates. That means we may need +/// to do a numeric type conversion here. The conversion applies to the aggregated result, +/// not to input values. +fn convert_aggregate_result_type(column_ref: impl Into, aggregate: &Aggregate) -> Bson { + let convert_to = match aggregate { + Aggregate::ColumnCount { .. } => None, + Aggregate::SingleColumn { + column_type, + function, + .. 
+ } => function.expected_result_type(column_type), + Aggregate::StarCount => None, + }; + match convert_to { + // $convert implicitly fills `null` if input value is missing + Some(scalar_type) => bson!({ + "$convert": { + "input": column_ref, + "to": scalar_type.bson_name(), + } + }), + None => column_ref.into(), + } +} + +#[cfg(test)] +mod tests { + use configuration::Configuration; + use mongodb::bson::bson; + use ndc_test_helpers::{ + binop, collection, column_aggregate, column_count_aggregate, dimension_column, field, + group, grouping, named_type, object_type, query, query_request, row_set, target, value, + }; + use pretty_assertions::assert_eq; + use serde_json::json; + + use crate::{ + mongo_query_plan::MongoConfiguration, + mongodb::test_helpers::mock_collection_aggregate_response_for_pipeline, + query::execute_query_request::execute_query_request, test_helpers::mflix_config, + }; + + #[tokio::test] + async fn executes_aggregation() -> Result<(), anyhow::Error> { + let query_request = query_request() + .collection("students") + .query(query().aggregates([ + column_count_aggregate!("count" => "gpa", distinct: true), + ("avg", column_aggregate("gpa", "avg").into()), + ])) + .into(); + + let expected_response = row_set() + .aggregates([("count", json!(11)), ("avg", json!(3))]) + .into_response(); + + let expected_pipeline = bson!([ + { + "$group": { + "_id": null, + "avg": { "$avg": "$gpa" }, + "count": { "$addToSet": "$gpa" }, + }, + }, + { + "$replaceWith": { + "avg": { + "$convert": { + "to": "double", + "input": { "$ifNull": ["$avg", null] }, + } + }, + "count": { + "$ifNull": [ + { + "$reduce": { + "input": "$count", + "initialValue": 0, + "in": { + "$cond": { + "if": { "$eq": ["$$this", null] }, + "then": "$$value", + "else": { "$sum": ["$$value", 1] } + } + } + } + }, + 0 + ] + }, + }, + }, + ]); + + let db = mock_collection_aggregate_response_for_pipeline( + "students", + expected_pipeline, + bson!([{ + "count": 11, + "avg": 3, + }]), + ); + + let result = execute_query_request(db, &students_config(), query_request).await?; + assert_eq!(result, expected_response); + Ok(()) + } + + #[tokio::test] + async fn executes_aggregation_with_fields() -> Result<(), anyhow::Error> { + let query_request = query_request() + .collection("students") + .query( + query() + .aggregates([("avg", column_aggregate("gpa", "avg"))]) + .fields([field!("student_gpa" => "gpa")]) + .predicate(binop("_lt", target!("gpa"), value!(4.0))), + ) + .into(); + + let expected_response = row_set() + .aggregates([("avg", json!(3.1))]) + .row([("student_gpa", 3.1)]) + .into_response(); + + let expected_pipeline = bson!([ + { "$match": { "gpa": { "$lt": 4.0 } } }, + { + "$facet": { + "__AGGREGATES__": [ + { "$group": { "_id": null, "avg": { "$avg": "$gpa" } } }, + { + "$replaceWith": { + "avg": { + "$convert": { + "to": "double", + "input": { "$ifNull": ["$avg", null] }, + } + }, + }, + }, + ], + "__ROWS__": [{ + "$replaceWith": { + "student_gpa": { "$ifNull": ["$gpa", null] }, + }, + }], + }, + }, + { + "$replaceWith": { + "aggregates": { "$first": "$__AGGREGATES__" }, + "rows": "$__ROWS__", + }, + }, + ]); + + let db = mock_collection_aggregate_response_for_pipeline( + "students", + expected_pipeline, + bson!([{ + "aggregates": { + "avg": 3.1, + }, + "rows": [{ + "student_gpa": 3.1, + }], + }]), + ); + + let result = execute_query_request(db, &students_config(), query_request).await?; + assert_eq!(result, expected_response); + Ok(()) + } + + #[tokio::test] + async fn 
executes_query_with_groups_with_single_column_aggregates() -> Result<(), anyhow::Error> + { + let query_request = query_request() + .collection("movies") + .query( + query().groups( + grouping() + .dimensions([dimension_column("year")]) + .aggregates([ + ( + "average_viewer_rating", + column_aggregate("tomatoes.viewer.rating", "avg"), + ), + ("max.runtime", column_aggregate("runtime", "max")), + ]), + ), + ) + .into(); + + let expected_response = row_set() + .groups([ + group( + [2007], + [ + ("average_viewer_rating", json!(7.5)), + ("max.runtime", json!(207)), + ], + ), + group( + [2015], + [ + ("average_viewer_rating", json!(6.9)), + ("max.runtime", json!(412)), + ], + ), + ]) + .into_response(); + + let expected_pipeline = bson!([ + { + "$group": { + "_id": ["$year"], + "average_viewer_rating": { "$avg": "$tomatoes.viewer.rating" }, + "max.runtime": { "$max": "$runtime" }, + } + }, + { + "$replaceWith": { + "dimensions": "$_id", + "average_viewer_rating": { + "$convert": { + "to": "double", + "input": { "$ifNull": ["$average_viewer_rating", null] }, + } + }, + "max.runtime": { "$ifNull": [{ "$getField": { "$literal": "max.runtime" } }, null] }, + } + }, + ]); + + let db = mock_collection_aggregate_response_for_pipeline( + "movies", + expected_pipeline, + bson!([ + { + "dimensions": [2007], + "average_viewer_rating": 7.5, + "max.runtime": 207, + }, + { + "dimensions": [2015], + "average_viewer_rating": 6.9, + "max.runtime": 412, + }, + ]), + ); + + let result = execute_query_request(db, &mflix_config(), query_request).await?; + assert_eq!(result, expected_response); + Ok(()) + } + + // TODO: Test: + // - fields & group by + // - group by & aggregates + // - various counts on groups + // - groups and variables + // - groups and relationships + + fn students_config() -> MongoConfiguration { + MongoConfiguration(Configuration { + collections: [collection("students")].into(), + object_types: [( + "students".into(), + object_type([("gpa", named_type("Double"))]), + )] + .into(), + functions: Default::default(), + procedures: Default::default(), + native_mutations: Default::default(), + native_queries: Default::default(), + options: Default::default(), + }) + } +} diff --git a/crates/mongodb-agent-common/src/query/column_ref.rs b/crates/mongodb-agent-common/src/query/column_ref.rs index cd0bef69..1522e95f 100644 --- a/crates/mongodb-agent-common/src/query/column_ref.rs +++ b/crates/mongodb-agent-common/src/query/column_ref.rs @@ -1,9 +1,21 @@ +// Some of the methods here have been added to support future work - suppressing the dead code +// check prevents warnings in the meantime. +#![allow(dead_code)] + use std::{borrow::Cow, iter::once}; use mongodb::bson::{doc, Bson}; +use ndc_models::FieldName; use ndc_query_plan::Scope; +use nonempty::NonEmpty; + +use crate::{ + interface_types::MongoAgentError, + mongo_query_plan::{ComparisonTarget, OrderByTarget}, + mongodb::sanitize::is_name_safe, +}; -use crate::{mongo_query_plan::ComparisonTarget, mongodb::sanitize::is_name_safe}; +use super::make_selector::AggregationExpression; /// Reference to a document field, or a nested property of a document field. There are two contexts /// where we reference columns: @@ -23,52 +35,149 @@ use crate::{mongo_query_plan::ComparisonTarget, mongodb::sanitize::is_name_safe} /// caller to switch contexts in the second case. #[derive(Clone, Debug, PartialEq)] pub enum ColumnRef<'a> { + /// Reference that can be used as a key in a match document. For example, "$imdb.rating". 
MatchKey(Cow<'a, str>), + + /// Just like MatchKey, except that this form can reference variables. For example, + /// "$$this.title". Can only be used in aggregation expressions, is not used as a key. + ExpressionStringShorthand(Cow<'a, str>), + Expression(Bson), } impl<'a> ColumnRef<'a> { /// Given a column target returns a string that can be used in a MongoDB match query that /// references the corresponding field, either in the target collection of a query request, or - /// in the related collection. Resolves nested fields and root collection references, but does - /// not traverse relationships. + /// in the related collection. /// /// If the given target cannot be represented as a match query key, falls back to providing an /// aggregation expression referencing the column. pub fn from_comparison_target(column: &ComparisonTarget) -> ColumnRef<'_> { - from_target(column) + from_comparison_target(column) + } + + pub fn from_column_and_field_path<'b>( + name: &'b FieldName, + field_path: Option<&'b Vec>, + ) -> ColumnRef<'b> { + from_column_and_field_path(&[], name, field_path) + } + + pub fn from_relationship_path_column_and_field_path<'b>( + relationship_path: &'b [ndc_models::RelationshipName], + name: &'b FieldName, + field_path: Option<&'b Vec>, + ) -> ColumnRef<'b> { + from_column_and_field_path(relationship_path, name, field_path) + } + + /// TODO: This will hopefully become infallible once ENG-1011 & ENG-1010 are implemented. + pub fn from_order_by_target(target: &OrderByTarget) -> Result, MongoAgentError> { + from_order_by_target(target) + } + + pub fn from_field_path(field_path: NonEmpty<&ndc_models::FieldName>) -> ColumnRef<'_> { + from_path( + None, + field_path + .into_iter() + .map(|field_name| field_name.as_ref() as &str), + ) + .expect("field_path is not empty") // safety: NonEmpty cannot be empty + } + + pub fn from_field(field_name: &str) -> ColumnRef<'_> { + fold_path_element(None, field_name) + } + + pub fn from_relationship(relationship_name: &ndc_models::RelationshipName) -> ColumnRef<'_> { + fold_path_element(None, relationship_name.as_ref()) + } + + pub fn from_unrelated_collection(collection_name: &str) -> ColumnRef<'_> { + fold_path_element(Some(ColumnRef::variable("ROOT")), collection_name) + } + + /// Get a reference to a pipeline variable + pub fn variable(variable_name: impl std::fmt::Display) -> Self { + Self::ExpressionStringShorthand(format!("$${variable_name}").into()) + } + + pub fn into_nested_field<'b: 'a>(self, field_name: &'b str) -> ColumnRef<'b> { + fold_path_element(Some(self), field_name) + } + + pub fn into_aggregate_expression(self) -> AggregationExpression { + let bson = match self { + ColumnRef::MatchKey(key) => format!("${key}").into(), + ColumnRef::ExpressionStringShorthand(key) => key.to_string().into(), + ColumnRef::Expression(expr) => expr, + }; + AggregationExpression(bson) + } + + pub fn into_match_key(self) -> Option> { + match self { + ColumnRef::MatchKey(key) => Some(key), + _ => None, + } } } -fn from_target(column: &ComparisonTarget) -> ColumnRef<'_> { +fn from_comparison_target(column: &ComparisonTarget) -> ColumnRef<'_> { match column { ComparisonTarget::Column { name, field_path, .. 
- } => { - let name_and_path = once(name).chain(field_path.iter().flatten()); - // The None case won't come up if the input to [from_target_helper] has at least - // one element, and we know it does because we start the iterable with `name` - from_path(None, name_and_path).unwrap() - } - ComparisonTarget::ColumnInScope { + } => from_column_and_field_path(&[], name, field_path.as_ref()), + } +} + +fn from_column_and_field_path<'a>( + relationship_path: &'a [ndc_models::RelationshipName], + name: &'a FieldName, + field_path: Option<&'a Vec>, +) -> ColumnRef<'a> { + let name_and_path = relationship_path + .iter() + .map(|r| r.as_ref() as &str) + .chain(once(name.as_ref() as &str)) + .chain( + field_path + .iter() + .copied() + .flatten() + .map(|field_name| field_name.as_ref() as &str), + ); + // The None case won't come up if the input to [from_target_helper] has at least + // one element, and we know it does because we start the iterable with `name` + from_path(None, name_and_path).unwrap() +} + +fn from_order_by_target(target: &OrderByTarget) -> Result, MongoAgentError> { + match target { + OrderByTarget::Column { + path, name, field_path, - scope, .. } => { - // "$$ROOT" is not actually a valid match key, but cheating here makes the - // implementation much simpler. This match branch produces a ColumnRef::Expression - // in all cases. - let init = ColumnRef::MatchKey(format!("${}", name_from_scope(scope)).into()); + let name_and_path = path + .iter() + .map(|n| n.as_str()) + .chain([name.as_str()]) + .chain( + field_path + .iter() + .flatten() + .map(|field_name| field_name.as_str()), + ); // The None case won't come up if the input to [from_target_helper] has at least // one element, and we know it does because we start the iterable with `name` - let col_ref = - from_path(Some(init), once(name).chain(field_path.iter().flatten())).unwrap(); - match col_ref { - // move from MatchKey to Expression because "$$ROOT" is not valid in a match key - ColumnRef::MatchKey(key) => ColumnRef::Expression(format!("${key}").into()), - e @ ColumnRef::Expression(_) => e, - } + Ok(from_path(None, name_and_path).unwrap()) + } + OrderByTarget::Aggregate { .. } => { + // TODO: ENG-1011 + Err(MongoAgentError::NotImplemented("order by aggregate".into())) } } } @@ -82,10 +191,10 @@ pub fn name_from_scope(scope: &Scope) -> Cow<'_, str> { fn from_path<'a>( init: Option>, - path: impl IntoIterator, + path: impl IntoIterator, ) -> Option> { path.into_iter().fold(init, |accum, element| { - Some(fold_path_element(accum, element.as_ref())) + Some(fold_path_element(accum, element)) }) } @@ -97,28 +206,13 @@ fn fold_path_element<'a>( (Some(ColumnRef::MatchKey(parent)), true) => { ColumnRef::MatchKey(format!("{parent}.{path_element}").into()) } - (Some(ColumnRef::MatchKey(parent)), false) => ColumnRef::Expression( - doc! { - "$getField": { - "input": format!("${parent}"), - "field": { "$literal": path_element }, - } - } - .into(), - ), - (Some(ColumnRef::Expression(parent)), true) => ColumnRef::Expression( - doc! { - "$getField": { - "input": parent, - "field": path_element, - } - } - .into(), - ), - (Some(ColumnRef::Expression(parent)), false) => ColumnRef::Expression( + (Some(ColumnRef::ExpressionStringShorthand(parent)), true) => { + ColumnRef::ExpressionStringShorthand(format!("{parent}.{path_element}").into()) + } + (Some(parent), _) => ColumnRef::Expression( doc! 
{ "$getField": { - "input": parent, + "input": parent.into_aggregate_expression(), "field": { "$literal": path_element }, } } @@ -140,10 +234,9 @@ fn fold_path_element<'a>( /// Unlike `column_ref` this expression cannot be used as a match query key - it can only be used /// as an expression. pub fn column_expression(column: &ComparisonTarget) -> Bson { - match ColumnRef::from_comparison_target(column) { - ColumnRef::MatchKey(key) => format!("${key}").into(), - ColumnRef::Expression(expr) => expr, - } + ColumnRef::from_comparison_target(column) + .into_aggregate_expression() + .into_bson() } #[cfg(test)] @@ -151,7 +244,6 @@ mod tests { use configuration::MongoScalarType; use mongodb::bson::doc; use mongodb_support::BsonScalarType; - use ndc_query_plan::Scope; use pretty_assertions::assert_eq; use crate::mongo_query_plan::{ComparisonTarget, Type}; @@ -162,9 +254,9 @@ mod tests { fn produces_match_query_key() -> anyhow::Result<()> { let target = ComparisonTarget::Column { name: "imdb".into(), + arguments: Default::default(), field_path: Some(vec!["rating".into()]), field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::Double)), - path: Default::default(), }; let actual = ColumnRef::from_comparison_target(&target); let expected = ColumnRef::MatchKey("imdb.rating".into()); @@ -176,9 +268,9 @@ mod tests { fn escapes_nested_field_name_with_dots() -> anyhow::Result<()> { let target = ComparisonTarget::Column { name: "subtitles".into(), + arguments: Default::default(), field_path: Some(vec!["english.us".into()]), field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), }; let actual = ColumnRef::from_comparison_target(&target); let expected = ColumnRef::Expression( @@ -198,16 +290,16 @@ mod tests { fn escapes_top_level_field_name_with_dots() -> anyhow::Result<()> { let target = ComparisonTarget::Column { name: "meta.subtitles".into(), + arguments: Default::default(), field_path: Some(vec!["english_us".into()]), field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), }; let actual = ColumnRef::from_comparison_target(&target); let expected = ColumnRef::Expression( doc! 
{ "$getField": { "input": { "$getField": { "$literal": "meta.subtitles" } }, - "field": "english_us", + "field": { "$literal": "english_us" }, } } .into(), @@ -220,9 +312,9 @@ mod tests { fn escapes_multiple_unsafe_nested_field_names() -> anyhow::Result<()> { let target = ComparisonTarget::Column { name: "meta".into(), + arguments: Default::default(), field_path: Some(vec!["$unsafe".into(), "$also_unsafe".into()]), field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), }; let actual = ColumnRef::from_comparison_target(&target); let expected = ColumnRef::Expression( @@ -247,9 +339,9 @@ mod tests { fn traverses_multiple_field_names_before_escaping() -> anyhow::Result<()> { let target = ComparisonTarget::Column { name: "valid_key".into(), + arguments: Default::default(), field_path: Some(vec!["also_valid".into(), "$not_valid".into()]), field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), }; let actual = ColumnRef::from_comparison_target(&target); let expected = ColumnRef::Expression( @@ -265,116 +357,121 @@ mod tests { Ok(()) } - #[test] - fn produces_dot_separated_root_column_reference() -> anyhow::Result<()> { - let target = ComparisonTarget::ColumnInScope { - name: "field".into(), - field_path: Some(vec!["prop1".into(), "prop2".into()]), - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Root, - }; - let actual = ColumnRef::from_comparison_target(&target); - let expected = ColumnRef::Expression("$$scope_root.field.prop1.prop2".into()); - assert_eq!(actual, expected); - Ok(()) - } + // TODO: ENG-1487 `ComparisonTarget::ColumnInScope` is gone, but there is new, similar + // functionality in the form of named scopes. It will be useful to modify these tests when + // named scopes are supported in this connector. - #[test] - fn escapes_unsafe_field_name_in_root_column_reference() -> anyhow::Result<()> { - let target = ComparisonTarget::ColumnInScope { - name: "$field".into(), - field_path: Default::default(), - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Named("scope_0".into()), - }; - let actual = ColumnRef::from_comparison_target(&target); - let expected = ColumnRef::Expression( - doc! { - "$getField": { - "input": "$$scope_0", - "field": { "$literal": "$field" }, - } - } - .into(), - ); - assert_eq!(actual, expected); - Ok(()) - } + // #[test] + // fn produces_dot_separated_root_column_reference() -> anyhow::Result<()> { + // let target = ComparisonTarget::ColumnInScope { + // name: "field".into(), + // field_path: Some(vec!["prop1".into(), "prop2".into()]), + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Root, + // }; + // let actual = ColumnRef::from_comparison_target(&target); + // let expected = + // ColumnRef::ExpressionStringShorthand("$$scope_root.field.prop1.prop2".into()); + // assert_eq!(actual, expected); + // Ok(()) + // } - #[test] - fn escapes_unsafe_nested_property_name_in_root_column_reference() -> anyhow::Result<()> { - let target = ComparisonTarget::ColumnInScope { - name: "field".into(), - field_path: Some(vec!["$unsafe_name".into()]), - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Root, - }; - let actual = ColumnRef::from_comparison_target(&target); - let expected = ColumnRef::Expression( - doc! 
{ - "$getField": { - "input": "$$scope_root.field", - "field": { "$literal": "$unsafe_name" }, - } - } - .into(), - ); - assert_eq!(actual, expected); - Ok(()) - } + // #[test] + // fn escapes_unsafe_field_name_in_root_column_reference() -> anyhow::Result<()> { + // let target = ComparisonTarget::ColumnInScope { + // name: "$field".into(), + // field_path: Default::default(), + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Named("scope_0".into()), + // }; + // let actual = ColumnRef::from_comparison_target(&target); + // let expected = ColumnRef::Expression( + // doc! { + // "$getField": { + // "input": "$$scope_0", + // "field": { "$literal": "$field" }, + // } + // } + // .into(), + // ); + // assert_eq!(actual, expected); + // Ok(()) + // } - #[test] - fn escapes_multiple_layers_of_nested_property_names_in_root_column_reference( - ) -> anyhow::Result<()> { - let target = ComparisonTarget::ColumnInScope { - name: "$field".into(), - field_path: Some(vec!["$unsafe_name1".into(), "$unsafe_name2".into()]), - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Root, - }; - let actual = ColumnRef::from_comparison_target(&target); - let expected = ColumnRef::Expression( - doc! { - "$getField": { - "input": { - "$getField": { - "input": { - "$getField": { - "input": "$$scope_root", - "field": { "$literal": "$field" }, - } - }, - "field": { "$literal": "$unsafe_name1" }, - } - }, - "field": { "$literal": "$unsafe_name2" }, - } - } - .into(), - ); - assert_eq!(actual, expected); - Ok(()) - } + // #[test] + // fn escapes_unsafe_nested_property_name_in_root_column_reference() -> anyhow::Result<()> { + // let target = ComparisonTarget::ColumnInScope { + // name: "field".into(), + // field_path: Some(vec!["$unsafe_name".into()]), + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Root, + // }; + // let actual = ColumnRef::from_comparison_target(&target); + // let expected = ColumnRef::Expression( + // doc! { + // "$getField": { + // "input": "$$scope_root.field", + // "field": { "$literal": "$unsafe_name" }, + // } + // } + // .into(), + // ); + // assert_eq!(actual, expected); + // Ok(()) + // } - #[test] - fn escapes_unsafe_deeply_nested_property_name_in_root_column_reference() -> anyhow::Result<()> { - let target = ComparisonTarget::ColumnInScope { - name: "field".into(), - field_path: Some(vec!["prop1".into(), "$unsafe_name".into()]), - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Root, - }; - let actual = ColumnRef::from_comparison_target(&target); - let expected = ColumnRef::Expression( - doc! { - "$getField": { - "input": "$$scope_root.field.prop1", - "field": { "$literal": "$unsafe_name" }, - } - } - .into(), - ); - assert_eq!(actual, expected); - Ok(()) - } + // #[test] + // fn escapes_multiple_layers_of_nested_property_names_in_root_column_reference( + // ) -> anyhow::Result<()> { + // let target = ComparisonTarget::ColumnInScope { + // name: "$field".into(), + // field_path: Some(vec!["$unsafe_name1".into(), "$unsafe_name2".into()]), + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Root, + // }; + // let actual = ColumnRef::from_comparison_target(&target); + // let expected = ColumnRef::Expression( + // doc! 
{ + // "$getField": { + // "input": { + // "$getField": { + // "input": { + // "$getField": { + // "input": "$$scope_root", + // "field": { "$literal": "$field" }, + // } + // }, + // "field": { "$literal": "$unsafe_name1" }, + // } + // }, + // "field": { "$literal": "$unsafe_name2" }, + // } + // } + // .into(), + // ); + // assert_eq!(actual, expected); + // Ok(()) + // } + + // #[test] + // fn escapes_unsafe_deeply_nested_property_name_in_root_column_reference() -> anyhow::Result<()> { + // let target = ComparisonTarget::ColumnInScope { + // name: "field".into(), + // field_path: Some(vec!["prop1".into(), "$unsafe_name".into()]), + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Root, + // }; + // let actual = ColumnRef::from_comparison_target(&target); + // let expected = ColumnRef::Expression( + // doc! { + // "$getField": { + // "input": "$$scope_root.field.prop1", + // "field": { "$literal": "$unsafe_name" }, + // } + // } + // .into(), + // ); + // assert_eq!(actual, expected); + // Ok(()) + // } } diff --git a/crates/mongodb-agent-common/src/query/constants.rs b/crates/mongodb-agent-common/src/query/constants.rs deleted file mode 100644 index a8569fc0..00000000 --- a/crates/mongodb-agent-common/src/query/constants.rs +++ /dev/null @@ -1,3 +0,0 @@ -// TODO: check for collision with aggregation field names -pub const ROWS_FIELD: &str = "__ROWS__"; -pub const RESULT_FIELD: &str = "result"; diff --git a/crates/mongodb-agent-common/src/query/execute_query_request.rs b/crates/mongodb-agent-common/src/query/execute_query_request.rs index bf107318..1a3a961f 100644 --- a/crates/mongodb-agent-common/src/query/execute_query_request.rs +++ b/crates/mongodb-agent-common/src/query/execute_query_request.rs @@ -1,6 +1,7 @@ use futures::Stream; use futures_util::TryStreamExt as _; use mongodb::bson; +use mongodb_support::aggregate::Pipeline; use ndc_models::{QueryRequest, QueryResponse}; use ndc_query_plan::plan_for_query_request; use tracing::{instrument, Instrument}; @@ -9,7 +10,7 @@ use super::{pipeline::pipeline_for_query_request, response::serialize_query_resp use crate::{ interface_types::MongoAgentError, mongo_query_plan::{MongoConfiguration, QueryPlan}, - mongodb::{CollectionTrait as _, DatabaseTrait, Pipeline}, + mongodb::{CollectionTrait as _, DatabaseTrait}, query::QueryTarget, }; @@ -24,10 +25,16 @@ pub async fn execute_query_request( config: &MongoConfiguration, query_request: QueryRequest, ) -> Result { + tracing::debug!( + query_request = %serde_json::to_string(&query_request).unwrap(), + "query request" + ); let query_plan = preprocess_query_request(config, query_request)?; + tracing::debug!(?query_plan, "abstract query plan"); let pipeline = pipeline_for_query_request(config, &query_plan)?; let documents = execute_query_pipeline(database, config, &query_plan, pipeline).await?; - let response = serialize_query_response(config.extended_json_mode(), &query_plan, documents)?; + let response = + serialize_query_response(config.serialization_options(), &query_plan, documents)?; Ok(response) } diff --git a/crates/mongodb-agent-common/src/query/foreach.rs b/crates/mongodb-agent-common/src/query/foreach.rs index ce783864..e62fc5bb 100644 --- a/crates/mongodb-agent-common/src/query/foreach.rs +++ b/crates/mongodb-agent-common/src/query/foreach.rs @@ -1,19 +1,18 @@ use anyhow::anyhow; use itertools::Itertools as _; -use mongodb::bson::{self, doc, Bson}; +use mongodb::bson::{self, bson, doc, Bson}; +use mongodb_support::aggregate::{Pipeline, 
Selection, Stage}; use ndc_query_plan::VariableSet; +use super::is_response_faceted::ResponseFacets; use super::pipeline::pipeline_for_non_foreach; use super::query_level::QueryLevel; use super::query_variable_name::query_variable_name; use super::serialization::json_to_bson; use super::QueryTarget; +use crate::constants::{ROW_SET_AGGREGATES_KEY, ROW_SET_GROUPS_KEY, ROW_SET_ROWS_KEY}; +use crate::interface_types::MongoAgentError; use crate::mongo_query_plan::{MongoConfiguration, QueryPlan, Type, VariableTypes}; -use crate::mongodb::Selection; -use crate::{ - interface_types::MongoAgentError, - mongodb::{Pipeline, Stage}, -}; type Result = std::result::Result; @@ -48,21 +47,42 @@ pub fn pipeline_for_foreach( r#as: "query".to_string(), }; - let selection = if query_request.query.has_aggregates() && query_request.query.has_fields() { - doc! { - "aggregates": { "$getField": { "input": { "$first": "$query" }, "field": "aggregates" } }, - "rows": { "$getField": { "input": { "$first": "$query" }, "field": "rows" } }, + let selection = match ResponseFacets::from_query(&query_request.query) { + ResponseFacets::Combination { + aggregates, + fields, + groups, + } => { + let mut keys = vec![]; + if aggregates.is_some() { + keys.push(ROW_SET_AGGREGATES_KEY); + } + if fields.is_some() { + keys.push(ROW_SET_ROWS_KEY); + } + if groups.is_some() { + keys.push(ROW_SET_GROUPS_KEY) + } + keys.into_iter() + .map(|key| { + ( + key.to_string(), + bson!({ "$getField": { "input": { "$first": "$query" }, "field": key } }), + ) + }) + .collect() + } + ResponseFacets::AggregatesOnly(_) => { + doc! { ROW_SET_AGGREGATES_KEY: { "$first": "$query" } } } - } else if query_request.query.has_aggregates() { - doc! { - "aggregates": { "$getField": { "input": { "$first": "$query" }, "field": "aggregates" } }, + ResponseFacets::FieldsOnly(_) => { + doc! { ROW_SET_ROWS_KEY: "$query" } } - } else { - doc! { - "rows": "$query" + ResponseFacets::GroupsOnly(_) => { + doc! 
{ ROW_SET_GROUPS_KEY: "$query" } } }; - let selection_stage = Stage::ReplaceWith(Selection(selection)); + let selection_stage = Stage::ReplaceWith(Selection::new(selection)); Ok(Pipeline { stages: vec![variable_sets_stage, lookup_stage, selection_stage], @@ -227,28 +247,30 @@ mod tests { "pipeline": [ { "$match": { "$expr": { "$eq": ["$artistId", "$$artistId_int"] } }}, { "$facet": { + "__AGGREGATES__": [ + { + "$group": { + "_id": null, + "count": { "$sum": 1 }, + } + }, + { + "$replaceWith": { + "count": { "$ifNull": ["$count", 0] }, + } + }, + ], "__ROWS__": [{ "$replaceWith": { "albumId": { "$ifNull": ["$albumId", null] }, "title": { "$ifNull": ["$title", null] } }}], - "count": [{ "$count": "result" }], - } }, - { "$replaceWith": { - "aggregates": { - "count": { - "$ifNull": [ - { - "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "count" } } } - } - }, - 0, - ] - }, - }, - "rows": "$__ROWS__", } }, + { + "$replaceWith": { + "aggregates": { "$first": "$__AGGREGATES__" }, + "rows": "$__ROWS__", + } + }, ] } }, @@ -333,30 +355,23 @@ mod tests { "as": "query", "pipeline": [ { "$match": { "$expr": { "$eq": ["$artistId", "$$artistId_int"] } }}, - { "$facet": { - "count": [{ "$count": "result" }], - } }, - { "$replaceWith": { - "aggregates": { - "count": { - "$ifNull": [ - { - "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "count" } } } - } - }, - 0, - ] - }, - }, - } }, + { + "$group": { + "_id": null, + "count": { "$sum": 1 } + } + }, + { + "$replaceWith": { + "count": { "$ifNull": ["$count", 0] }, + } + }, ] } }, { "$replaceWith": { - "aggregates": { "$getField": { "input": { "$first": "$query" }, "field": "aggregates" } }, + "aggregates": { "$first": "$query" }, } }, ]); diff --git a/crates/mongodb-agent-common/src/query/groups.rs b/crates/mongodb-agent-common/src/query/groups.rs new file mode 100644 index 00000000..85017dd7 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/groups.rs @@ -0,0 +1,113 @@ +use std::borrow::Cow; + +use mongodb::bson::{self, bson}; +use mongodb_support::aggregate::{Pipeline, Selection, SortDocument, Stage}; +use ndc_models::OrderDirection; + +use crate::{ + constants::GROUP_DIMENSIONS_KEY, + interface_types::MongoAgentError, + mongo_query_plan::{Dimension, GroupOrderBy, GroupOrderByTarget, Grouping}, +}; + +use super::{ + aggregates::{accumulators_for_aggregates, selection_for_aggregate}, + column_ref::ColumnRef, +}; + +type Result = std::result::Result; + +// TODO: This function can be infallible once ENG-1562 is implemented. +pub fn pipeline_for_groups(grouping: &Grouping) -> Result { + let group_stage = Stage::Group { + key_expression: dimensions_to_expression(&grouping.dimensions).into(), + accumulators: accumulators_for_aggregates(&grouping.aggregates), + }; + + // TODO: ENG-1562 This implementation does not fully implement the + // 'query.aggregates.group_by.order' capability! This only orders by dimensions. Before + // enabling the capability we also need to be able to order by aggregates. We need partial + // support for order by to get consistent integration test snapshots. 
+ let sort_groups_stage = grouping + .order_by + .as_ref() + .map(sort_stage_for_grouping) + .transpose()?; + + // TODO: ENG-1563 to implement 'query.aggregates.group_by.paginate' apply grouping.limit and + // grouping.offset **after** group stage because those options count groups, not documents + + let replace_with_stage = Stage::ReplaceWith(selection_for_grouping(grouping, "_id")); + + Ok(Pipeline::new( + [ + Some(group_stage), + sort_groups_stage, + Some(replace_with_stage), + ] + .into_iter() + .flatten() + .collect(), + )) +} + +/// Converts each dimension to a MongoDB aggregate expression that evaluates to the appropriate +/// value when applied to each input document. The array of expressions can be used directly as the +/// group stage key expression. +fn dimensions_to_expression(dimensions: &[Dimension]) -> bson::Array { + dimensions + .iter() + .map(|dimension| { + let column_ref = match dimension { + Dimension::Column { + path, + column_name, + field_path, + .. + } => ColumnRef::from_relationship_path_column_and_field_path( + path, + column_name, + field_path.as_ref(), + ), + }; + column_ref.into_aggregate_expression().into_bson() + }) + .collect() +} + +fn selection_for_grouping(grouping: &Grouping, dimensions_field_name: &str) -> Selection { + let dimensions = ( + GROUP_DIMENSIONS_KEY.to_string(), + bson!(format!("${dimensions_field_name}")), + ); + let selected_aggregates = grouping + .aggregates + .iter() + .map(|(key, aggregate)| selection_for_aggregate(key, aggregate)); + let selection_doc = std::iter::once(dimensions) + .chain(selected_aggregates) + .collect(); + Selection::new(selection_doc) +} + +// TODO: ENG-1562 This is where we need to implement sorting by aggregates +fn sort_stage_for_grouping(order_by: &GroupOrderBy) -> Result { + let sort_doc = order_by + .elements + .iter() + .map(|element| match element.target { + GroupOrderByTarget::Dimension { index } => { + let key = format!("_id.{index}"); + let direction = match element.order_direction { + OrderDirection::Asc => bson!(1), + OrderDirection::Desc => bson!(-1), + }; + Ok((key, direction)) + } + GroupOrderByTarget::Aggregate { .. } => Err(MongoAgentError::NotImplemented( + Cow::Borrowed("sorting groups by aggregate"), + )), + }) + .collect::>()?; + Ok(Stage::Sort(SortDocument::from_doc(sort_doc))) +} diff --git a/crates/mongodb-agent-common/src/query/is_response_faceted.rs b/crates/mongodb-agent-common/src/query/is_response_faceted.rs new file mode 100644 index 00000000..f53b23d0 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/is_response_faceted.rs @@ -0,0 +1,97 @@ +//! Centralized logic for query response packing. + +use indexmap::IndexMap; +use lazy_static::lazy_static; +use ndc_models::FieldName; + +use crate::mongo_query_plan::{Aggregate, Field, Grouping, Query}; + +lazy_static! { + static ref DEFAULT_FIELDS: IndexMap = IndexMap::new(); +} + +/// In some queries we may need to "fork" the query to provide data that requires incompatible +/// pipelines. For example queries that combine two or more of row, group, and aggregates, or +/// queries that use multiple aggregates that use different buckets. In these cases we use the +/// `$facet` aggregation stage which runs multiple sub-pipelines, and stores the results of +/// each in fields of the output pipeline document with array values. +/// +/// In other queries we don't need to fork - instead of providing data in a nested array the stream +/// of pipeline output documents is itself the requested data. 
+/// +/// Depending on whether or not a pipeline needs to use `$facet` to fork response processing needs +/// to be done differently. +pub enum ResponseFacets<'a> { + /// When matching on the Combination variant assume that requested data has already been checked to make sure that maps are not empty. + Combination { + aggregates: Option<&'a IndexMap>, + fields: Option<&'a IndexMap>, + groups: Option<&'a Grouping>, + }, + AggregatesOnly(&'a IndexMap), + FieldsOnly(&'a IndexMap), + GroupsOnly(&'a Grouping), +} + +impl ResponseFacets<'_> { + pub fn from_parameters<'a>( + aggregates: Option<&'a IndexMap>, + fields: Option<&'a IndexMap>, + groups: Option<&'a Grouping>, + ) -> ResponseFacets<'a> { + let facet_score = [ + get_aggregates(aggregates).map(|_| ()), + get_fields(fields).map(|_| ()), + get_groups(groups).map(|_| ()), + ] + .into_iter() + .flatten() + .count(); + + if facet_score > 1 { + ResponseFacets::Combination { + aggregates: get_aggregates(aggregates), + fields: get_fields(fields), + groups: get_groups(groups), + } + } else if let Some(aggregates) = aggregates { + ResponseFacets::AggregatesOnly(aggregates) + } else if let Some(grouping) = groups { + ResponseFacets::GroupsOnly(grouping) + } else { + ResponseFacets::FieldsOnly(fields.unwrap_or(&DEFAULT_FIELDS)) + } + } + + pub fn from_query(query: &Query) -> ResponseFacets<'_> { + Self::from_parameters( + query.aggregates.as_ref(), + query.fields.as_ref(), + query.groups.as_ref(), + ) + } +} + +fn get_aggregates( + aggregates: Option<&IndexMap>, +) -> Option<&IndexMap> { + if let Some(aggregates) = aggregates { + if !aggregates.is_empty() { + return Some(aggregates); + } + } + None +} + +fn get_fields(fields: Option<&IndexMap>) -> Option<&IndexMap> { + if let Some(fields) = fields { + if !fields.is_empty() { + return Some(fields); + } + } + None +} + +fn get_groups(groups: Option<&Grouping>) -> Option<&Grouping> { + groups +} diff --git a/crates/mongodb-agent-common/src/query/make_selector.rs b/crates/mongodb-agent-common/src/query/make_selector.rs deleted file mode 100644 index f7ddb7da..00000000 --- a/crates/mongodb-agent-common/src/query/make_selector.rs +++ /dev/null @@ -1,389 +0,0 @@ -use anyhow::anyhow; -use mongodb::bson::{self, doc, Document}; -use ndc_models::UnaryComparisonOperator; - -use crate::{ - comparison_function::ComparisonFunction, - interface_types::MongoAgentError, - mongo_query_plan::{ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, Type}, - query::column_ref::{column_expression, ColumnRef}, -}; - -use super::{query_variable_name::query_variable_name, serialization::json_to_bson}; - -pub type Result = std::result::Result; - -/// Convert a JSON Value into BSON using the provided type information. -/// For example, parses values of type "Date" into BSON DateTime. -fn bson_from_scalar_value(value: &serde_json::Value, value_type: &Type) -> Result { - json_to_bson(value_type, value.clone()).map_err(|e| MongoAgentError::BadQuery(anyhow!(e))) -} - -pub fn make_selector(expr: &Expression) -> Result { - match expr { - Expression::And { expressions } => { - let sub_exps: Vec = expressions - .clone() - .iter() - .map(make_selector) - .collect::>()?; - Ok(doc! {"$and": sub_exps}) - } - Expression::Or { expressions } => { - let sub_exps: Vec = expressions - .clone() - .iter() - .map(make_selector) - .collect::>()?; - Ok(doc! {"$or": sub_exps}) - } - Expression::Not { expression } => Ok(doc! 
{ "$nor": [make_selector(expression)?]}), - Expression::Exists { - in_collection, - predicate, - } => Ok(match in_collection { - ExistsInCollection::Related { relationship } => match predicate { - Some(predicate) => doc! { - relationship.to_string(): { "$elemMatch": make_selector(predicate)? } - }, - None => doc! { format!("{relationship}.0"): { "$exists": true } }, - }, - ExistsInCollection::Unrelated { - unrelated_collection, - } => doc! { - "$expr": { - "$ne": [format!("$$ROOT.{unrelated_collection}.0"), null] - } - }, - }), - Expression::BinaryComparisonOperator { - column, - operator, - value, - } => make_binary_comparison_selector(column, operator, value), - Expression::UnaryComparisonOperator { column, operator } => match operator { - UnaryComparisonOperator::IsNull => { - let match_doc = match ColumnRef::from_comparison_target(column) { - ColumnRef::MatchKey(key) => doc! { - key: { "$eq": null } - }, - ColumnRef::Expression(expr) => doc! { - "$expr": { - "$eq": [expr, null] - } - }, - }; - Ok(traverse_relationship_path( - column.relationship_path(), - match_doc, - )) - } - }, - } -} - -fn make_binary_comparison_selector( - target_column: &ComparisonTarget, - operator: &ComparisonFunction, - value: &ComparisonValue, -) -> Result { - let selector = match value { - ComparisonValue::Column { - column: value_column, - } => { - if !target_column.relationship_path().is_empty() - || !value_column.relationship_path().is_empty() - { - return Err(MongoAgentError::NotImplemented( - "binary comparisons between two fields where either field is in a related collection", - )); - } - doc! { - "$expr": operator.mongodb_aggregation_expression( - column_expression(target_column), - column_expression(value_column) - ) - } - } - ComparisonValue::Scalar { value, value_type } => { - let comparison_value = bson_from_scalar_value(value, value_type)?; - let match_doc = match ColumnRef::from_comparison_target(target_column) { - ColumnRef::MatchKey(key) => operator.mongodb_match_query(key, comparison_value), - ColumnRef::Expression(expr) => doc! { - "$expr": operator.mongodb_aggregation_expression(expr, comparison_value) - }, - }; - traverse_relationship_path(target_column.relationship_path(), match_doc) - } - ComparisonValue::Variable { - name, - variable_type, - } => { - let comparison_value = variable_to_mongo_expression(name, variable_type); - let match_doc = doc! { - "$expr": operator.mongodb_aggregation_expression( - column_expression(target_column), - comparison_value - ) - }; - traverse_relationship_path(target_column.relationship_path(), match_doc) - } - }; - Ok(selector) -} - -/// For simple cases the target of an expression is a field reference. But if the target is -/// a column of a related collection then we're implicitly making an array comparison (because -/// related documents always come as an array, even for object relationships), so we have to wrap -/// the starting expression with an `$elemMatch` for each relationship that is traversed to reach -/// the target column. -fn traverse_relationship_path( - path: &[ndc_models::RelationshipName], - mut expression: Document, -) -> Document { - for path_element in path.iter().rev() { - expression = doc! 
{ - path_element.to_string(): { - "$elemMatch": expression - } - } - } - expression -} - -fn variable_to_mongo_expression( - variable: &ndc_models::VariableName, - value_type: &Type, -) -> bson::Bson { - let mongodb_var_name = query_variable_name(variable, value_type); - format!("$${mongodb_var_name}").into() -} - -#[cfg(test)] -mod tests { - use configuration::MongoScalarType; - use mongodb::bson::{self, bson, doc}; - use mongodb_support::BsonScalarType; - use ndc_models::UnaryComparisonOperator; - use ndc_query_plan::{plan_for_query_request, Scope}; - use ndc_test_helpers::{ - binop, column_value, path_element, query, query_request, relation_field, root, target, - value, - }; - use pretty_assertions::assert_eq; - - use crate::{ - comparison_function::ComparisonFunction, - mongo_query_plan::{ComparisonTarget, ComparisonValue, Expression, Type}, - query::pipeline_for_query_request, - test_helpers::{chinook_config, chinook_relationships}, - }; - - use super::make_selector; - - #[test] - fn compares_fields_of_related_documents_using_elem_match_in_binary_comparison( - ) -> anyhow::Result<()> { - let selector = make_selector(&Expression::BinaryComparisonOperator { - column: ComparisonTarget::Column { - name: "Name".into(), - field_path: None, - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: vec!["Albums".into(), "Tracks".into()], - }, - operator: ComparisonFunction::Equal, - value: ComparisonValue::Scalar { - value: "Helter Skelter".into(), - value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - }, - })?; - - let expected = doc! { - "Albums": { - "$elemMatch": { - "Tracks": { - "$elemMatch": { - "Name": { "$eq": "Helter Skelter" } - } - } - } - } - }; - - assert_eq!(selector, expected); - Ok(()) - } - - #[test] - fn compares_fields_of_related_documents_using_elem_match_in_unary_comparison( - ) -> anyhow::Result<()> { - let selector = make_selector(&Expression::UnaryComparisonOperator { - column: ComparisonTarget::Column { - name: "Name".into(), - field_path: None, - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: vec!["Albums".into(), "Tracks".into()], - }, - operator: UnaryComparisonOperator::IsNull, - })?; - - let expected = doc! { - "Albums": { - "$elemMatch": { - "Tracks": { - "$elemMatch": { - "Name": { "$eq": null } - } - } - } - } - }; - - assert_eq!(selector, expected); - Ok(()) - } - - #[test] - fn compares_two_columns() -> anyhow::Result<()> { - let selector = make_selector(&Expression::BinaryComparisonOperator { - column: ComparisonTarget::Column { - name: "Name".into(), - field_path: None, - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), - }, - operator: ComparisonFunction::Equal, - value: ComparisonValue::Column { - column: ComparisonTarget::Column { - name: "Title".into(), - field_path: None, - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - path: Default::default(), - }, - }, - })?; - - let expected = doc! 
{ - "$expr": { - "$eq": ["$Name", "$Title"] - } - }; - - assert_eq!(selector, expected); - Ok(()) - } - - #[test] - fn compares_root_collection_column_to_scalar() -> anyhow::Result<()> { - let selector = make_selector(&Expression::BinaryComparisonOperator { - column: ComparisonTarget::ColumnInScope { - name: "Name".into(), - field_path: None, - field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - scope: Scope::Named("scope_0".to_string()), - }, - operator: ComparisonFunction::Equal, - value: ComparisonValue::Scalar { - value: "Lady Gaga".into(), - value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), - }, - })?; - - let expected = doc! { - "$expr": { - "$eq": ["$$scope_0.Name", "Lady Gaga"] - } - }; - - assert_eq!(selector, expected); - Ok(()) - } - - #[test] - fn root_column_reference_refereces_column_of_nearest_query() -> anyhow::Result<()> { - let request = query_request() - .collection("Artist") - .query( - query().fields([relation_field!("Albums" => "Albums", query().predicate( - binop( - "_gt", - target!("Milliseconds", relations: [ - path_element("Tracks".into()).predicate( - binop("_eq", target!("Name"), column_value!(root("Title"))) - ), - ]), - value!(30_000), - ) - ))]), - ) - .relationships(chinook_relationships()) - .into(); - - let config = chinook_config(); - let plan = plan_for_query_request(&config, request)?; - let pipeline = pipeline_for_query_request(&config, &plan)?; - - let expected_pipeline = bson!([ - { - "$lookup": { - "from": "Album", - "localField": "ArtistId", - "foreignField": "ArtistId", - "as": "Albums", - "let": { - "scope_root": "$$ROOT", - }, - "pipeline": [ - { - "$lookup": { - "from": "Track", - "localField": "AlbumId", - "foreignField": "AlbumId", - "as": "Tracks", - "let": { - "scope_0": "$$ROOT", - }, - "pipeline": [ - { - "$match": { - "$expr": { "$eq": ["$Name", "$$scope_0.Title"] }, - }, - }, - { - "$replaceWith": { - "Milliseconds": { "$ifNull": ["$Milliseconds", null] } - } - }, - ] - } - }, - { - "$match": { - "Tracks": { - "$elemMatch": { - "Milliseconds": { "$gt": 30_000 } - } - } - } - }, - { - "$replaceWith": { - "Tracks": { "$getField": { "$literal": "Tracks" } } - } - }, - ], - }, - }, - { - "$replaceWith": { - "Albums": { - "rows": [] - } - } - }, - ]); - - assert_eq!(bson::to_bson(&pipeline).unwrap(), expected_pipeline); - Ok(()) - } -} diff --git a/crates/mongodb-agent-common/src/query/make_selector/make_aggregation_expression.rs b/crates/mongodb-agent-common/src/query/make_selector/make_aggregation_expression.rs new file mode 100644 index 00000000..4f17d6cd --- /dev/null +++ b/crates/mongodb-agent-common/src/query/make_selector/make_aggregation_expression.rs @@ -0,0 +1,290 @@ +use anyhow::anyhow; +use itertools::Itertools as _; +use mongodb::bson::{self, doc, Bson}; +use ndc_models::UnaryComparisonOperator; + +use crate::{ + comparison_function::ComparisonFunction, + interface_types::MongoAgentError, + mongo_query_plan::{ + ArrayComparison, ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, Type, + }, + query::{ + column_ref::{column_expression, ColumnRef}, + query_variable_name::query_variable_name, + serialization::json_to_bson, + }, +}; + +use super::Result; + +#[derive(Clone, Debug)] +pub struct AggregationExpression(pub Bson); + +impl AggregationExpression { + pub fn new(expression: impl Into) -> Self { + Self(expression.into()) + } + + pub fn into_bson(self) -> Bson { + self.0 + } +} + +impl From for Bson { + fn from(value: AggregationExpression) -> Self { + 
value.into_bson() + } +} + +pub fn make_aggregation_expression(expr: &Expression) -> Result { + match expr { + Expression::And { expressions } => { + let sub_exps: Vec<_> = expressions + .clone() + .iter() + .map(make_aggregation_expression) + .collect::>()?; + let plan = AggregationExpression( + doc! { + "$and": sub_exps.into_iter().map(AggregationExpression::into_bson).collect_vec() + } + .into(), + ); + Ok(plan) + } + Expression::Or { expressions } => { + let sub_exps: Vec<_> = expressions + .clone() + .iter() + .map(make_aggregation_expression) + .collect::>()?; + let plan = AggregationExpression( + doc! { + "$or": sub_exps.into_iter().map(AggregationExpression::into_bson).collect_vec() + } + .into(), + ); + Ok(plan) + } + Expression::Not { expression } => { + let sub_expression = make_aggregation_expression(expression)?; + let plan = AggregationExpression(doc! { "$nor": [sub_expression.into_bson()] }.into()); + Ok(plan) + } + Expression::Exists { + in_collection, + predicate, + } => make_aggregation_expression_for_exists(in_collection, predicate.as_deref()), + Expression::BinaryComparisonOperator { + column, + operator, + value, + } => make_binary_comparison_selector(column, operator, value), + Expression::ArrayComparison { column, comparison } => { + make_array_comparison_selector(column, comparison) + } + Expression::UnaryComparisonOperator { column, operator } => { + Ok(make_unary_comparison_selector(column, *operator)) + } + } +} + +// TODO: ENG-1148 Move predicate application to the join step instead of filtering the entire +// related or unrelated collection here +pub fn make_aggregation_expression_for_exists( + in_collection: &ExistsInCollection, + predicate: Option<&Expression>, +) -> Result { + let expression = match (in_collection, predicate) { + (ExistsInCollection::Related { relationship }, Some(predicate)) => { + let relationship_ref = ColumnRef::from_relationship(relationship); + exists_in_array(relationship_ref, predicate)? + } + (ExistsInCollection::Related { relationship }, None) => { + let relationship_ref = ColumnRef::from_relationship(relationship); + exists_in_array_no_predicate(relationship_ref) + } + ( + ExistsInCollection::Unrelated { + unrelated_collection, + }, + Some(predicate), + ) => { + let collection_ref = ColumnRef::from_unrelated_collection(unrelated_collection); + exists_in_array(collection_ref, predicate)? + } + ( + ExistsInCollection::Unrelated { + unrelated_collection, + }, + None, + ) => { + let collection_ref = ColumnRef::from_unrelated_collection(unrelated_collection); + exists_in_array_no_predicate(collection_ref) + } + ( + ExistsInCollection::NestedCollection { + column_name, + field_path, + .. + }, + Some(predicate), + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array(column_ref, predicate)? + } + ( + ExistsInCollection::NestedCollection { + column_name, + field_path, + .. + }, + None, + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array_no_predicate(column_ref) + } + ( + ExistsInCollection::NestedScalarCollection { + column_name, + field_path, + .. + }, + Some(predicate), + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array(column_ref, predicate)? // TODO: ENG-1488 predicate expects objects with a __value field + } + ( + ExistsInCollection::NestedScalarCollection { + column_name, + field_path, + .. 
+ }, + None, + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array_no_predicate(column_ref) + } + }; + Ok(expression) +} + +fn exists_in_array( + array_ref: ColumnRef<'_>, + predicate: &Expression, +) -> Result { + let AggregationExpression(sub_expression) = make_aggregation_expression(predicate)?; + Ok(AggregationExpression( + doc! { + "$anyElementTrue": { + "$map": { + "input": array_ref.into_aggregate_expression(), + "as": "CURRENT", // implicitly changes the document root in `sub_expression` to be the array element + "in": sub_expression, + } + } + } + .into(), + )) +} + +fn exists_in_array_no_predicate(array_ref: ColumnRef<'_>) -> AggregationExpression { + AggregationExpression::new(doc! { + "$gt": [{ "$size": array_ref.into_aggregate_expression() }, 0] + }) +} + +fn make_binary_comparison_selector( + target_column: &ComparisonTarget, + operator: &ComparisonFunction, + value: &ComparisonValue, +) -> Result { + let left_operand = ColumnRef::from_comparison_target(target_column).into_aggregate_expression(); + let right_operand = value_expression(value)?; + let expr = AggregationExpression( + operator + .mongodb_aggregation_expression(left_operand, right_operand) + .into(), + ); + Ok(expr) +} + +fn make_unary_comparison_selector( + target_column: &ndc_query_plan::ComparisonTarget, + operator: UnaryComparisonOperator, +) -> AggregationExpression { + match operator { + UnaryComparisonOperator::IsNull => AggregationExpression( + doc! { + "$eq": [column_expression(target_column), null] + } + .into(), + ), + } +} + +fn make_array_comparison_selector( + column: &ComparisonTarget, + comparison: &ArrayComparison, +) -> Result { + let doc = match comparison { + ArrayComparison::Contains { value } => doc! { + "$in": [value_expression(value)?, column_expression(column)] + }, + ArrayComparison::IsEmpty => doc! { + "$eq": [{ "$size": column_expression(column) }, 0] + }, + }; + Ok(AggregationExpression(doc.into())) +} + +fn value_expression(value: &ComparisonValue) -> Result { + match value { + ComparisonValue::Column { + path, + name, + field_path, + scope: _, // We'll need to reference scope for ENG-1153 + .. + } => { + // TODO: ENG-1153 Do we want an implicit exists in the value relationship? If both + // target and value reference relationships do we want an exists in a Cartesian product + // of the two? + if !path.is_empty() { + return Err(MongoAgentError::NotImplemented("binary comparisons where the right-side of the comparison references a relationship".into())); + } + + let value_ref = ColumnRef::from_column_and_field_path(name, field_path.as_ref()); + Ok(value_ref.into_aggregate_expression()) + } + ComparisonValue::Scalar { value, value_type } => { + let comparison_value = bson_from_scalar_value(value, value_type)?; + Ok(AggregationExpression::new(doc! { + "$literal": comparison_value + })) + } + ComparisonValue::Variable { + name, + variable_type, + } => { + let comparison_value = variable_to_mongo_expression(name, variable_type); + Ok(comparison_value.into_aggregate_expression()) + } + } +} + +/// Convert a JSON Value into BSON using the provided type information. +/// For example, parses values of type "Date" into BSON DateTime. 
+fn bson_from_scalar_value(value: &serde_json::Value, value_type: &Type) -> Result { + json_to_bson(value_type, value.clone()).map_err(|e| MongoAgentError::BadQuery(anyhow!(e))) +} + +fn variable_to_mongo_expression( + variable: &ndc_models::VariableName, + value_type: &Type, +) -> ColumnRef<'static> { + let mongodb_var_name = query_variable_name(variable, value_type); + ColumnRef::variable(mongodb_var_name) +} diff --git a/crates/mongodb-agent-common/src/query/make_selector/make_expression_plan.rs b/crates/mongodb-agent-common/src/query/make_selector/make_expression_plan.rs new file mode 100644 index 00000000..7dac0888 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/make_selector/make_expression_plan.rs @@ -0,0 +1,28 @@ +use crate::mongo_query_plan::Expression; + +use super::{ + make_aggregation_expression::{make_aggregation_expression, AggregationExpression}, + make_query_document::{make_query_document, QueryDocument}, + Result, +}; + +/// Represents the body of a `$match` stage which may use a special shorthand syntax (query +/// document) where document keys are interpreted as field references, or if the entire match +/// document is enclosed in an object with an `$expr` property then it is interpreted as an +/// aggregation expression. +#[derive(Clone, Debug)] +pub enum ExpressionPlan { + QueryDocument(QueryDocument), + AggregationExpression(AggregationExpression), +} + +pub fn make_expression_plan(expression: &Expression) -> Result { + if let Some(query_doc) = make_query_document(expression)? { + Ok(ExpressionPlan::QueryDocument(query_doc)) + } else { + let aggregation_expression = make_aggregation_expression(expression)?; + Ok(ExpressionPlan::AggregationExpression( + aggregation_expression, + )) + } +} diff --git a/crates/mongodb-agent-common/src/query/make_selector/make_query_document.rs b/crates/mongodb-agent-common/src/query/make_selector/make_query_document.rs new file mode 100644 index 00000000..df766662 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/make_selector/make_query_document.rs @@ -0,0 +1,246 @@ +use anyhow::anyhow; +use itertools::Itertools as _; +use mongodb::bson::{self, doc, Bson}; +use ndc_models::UnaryComparisonOperator; + +use crate::{ + comparison_function::ComparisonFunction, + interface_types::MongoAgentError, + mongo_query_plan::{ + ArrayComparison, ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, Type, + }, + query::{column_ref::ColumnRef, serialization::json_to_bson}, +}; + +use super::Result; + +#[derive(Clone, Debug)] +pub struct QueryDocument(pub bson::Document); + +impl QueryDocument { + pub fn into_document(self) -> bson::Document { + self.0 + } +} + +/// Translates the given expression into a query document for use in a $match aggregation stage if +/// possible. If the expression cannot be expressed as a query document returns `Ok(None)`. +pub fn make_query_document(expr: &Expression) -> Result> { + match expr { + Expression::And { expressions } => { + let sub_exps: Option> = expressions + .clone() + .iter() + .map(make_query_document) + .collect::>()?; + // If any of the sub expressions are not query documents then we have to back-track + // and map everything to aggregation expressions. + let plan = sub_exps.map(|exps| { + QueryDocument( + doc! 
{ "$and": exps.into_iter().map(QueryDocument::into_document).collect_vec() }, + ) + }); + Ok(plan) + } + Expression::Or { expressions } => { + let sub_exps: Option> = expressions + .clone() + .iter() + .map(make_query_document) + .collect::>()?; + let plan = sub_exps.map(|exps| { + QueryDocument( + doc! { "$or": exps.into_iter().map(QueryDocument::into_document).collect_vec() }, + ) + }); + Ok(plan) + } + Expression::Not { expression } => { + let sub_expression = make_query_document(expression)?; + let plan = + sub_expression.map(|expr| QueryDocument(doc! { "$nor": [expr.into_document()] })); + Ok(plan) + } + Expression::Exists { + in_collection, + predicate, + } => make_query_document_for_exists(in_collection, predicate.as_deref()), + Expression::BinaryComparisonOperator { + column, + operator, + value, + } => make_binary_comparison_selector(column, operator, value), + Expression::UnaryComparisonOperator { column, operator } => { + make_unary_comparison_selector(column, operator) + } + Expression::ArrayComparison { column, comparison } => { + make_array_comparison_selector(column, comparison) + } + } +} + +// TODO: ENG-1148 Move predicate application to the join step instead of filtering the entire +// related or unrelated collection here +fn make_query_document_for_exists( + in_collection: &ExistsInCollection, + predicate: Option<&Expression>, +) -> Result> { + let plan = match (in_collection, predicate) { + (ExistsInCollection::Related { relationship }, Some(predicate)) => { + let relationship_ref = ColumnRef::from_relationship(relationship); + exists_in_array(relationship_ref, predicate)? + } + (ExistsInCollection::Related { relationship }, None) => { + let relationship_ref = ColumnRef::from_relationship(relationship); + exists_in_array_no_predicate(relationship_ref) + } + // Unrelated collection references cannot be expressed in a query document due to + // a requirement to reference a pipeline variable. + (ExistsInCollection::Unrelated { .. }, _) => None, + ( + ExistsInCollection::NestedCollection { + column_name, + field_path, + .. + }, + Some(predicate), + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array(column_ref, predicate)? + } + ( + ExistsInCollection::NestedCollection { + column_name, + field_path, + .. + }, + None, + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array_no_predicate(column_ref) + } + ( + ExistsInCollection::NestedScalarCollection { + column_name, + field_path, + .. + }, + Some(predicate), + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array(column_ref, predicate)? // TODO: predicate expects objects with a __value field + } + ( + ExistsInCollection::NestedScalarCollection { + column_name, + field_path, + .. + }, + None, + ) => { + let column_ref = ColumnRef::from_column_and_field_path(column_name, Some(field_path)); + exists_in_array_no_predicate(column_ref) + } + }; + Ok(plan) +} + +fn exists_in_array( + array_ref: ColumnRef<'_>, + predicate: &Expression, +) -> Result> { + let sub_expression = make_query_document(predicate)?; + let plan = match (array_ref, sub_expression) { + (ColumnRef::MatchKey(key), Some(QueryDocument(query_doc))) => Some(QueryDocument(doc! 
{ + key: { "$elemMatch": query_doc } + })), + _ => None, + }; + Ok(plan) +} + +fn exists_in_array_no_predicate(array_ref: ColumnRef<'_>) -> Option { + match array_ref { + ColumnRef::MatchKey(key) => Some(QueryDocument(doc! { + key: { + "$exists": true, + "$not": { "$size": 0 }, + } + })), + _ => None, + } +} + +fn make_binary_comparison_selector( + target_column: &ComparisonTarget, + operator: &ComparisonFunction, + value: &ComparisonValue, +) -> Result> { + let selector = + value_expression(value)?.and_then(|value| { + match ColumnRef::from_comparison_target(target_column) { + ColumnRef::MatchKey(key) => { + Some(QueryDocument(operator.mongodb_match_query(key, value))) + } + _ => None, + } + }); + Ok(selector) +} + +fn make_unary_comparison_selector( + target_column: &ComparisonTarget, + operator: &UnaryComparisonOperator, +) -> Result> { + let query_doc = match operator { + UnaryComparisonOperator::IsNull => match ColumnRef::from_comparison_target(target_column) { + ColumnRef::MatchKey(key) => Some(QueryDocument(doc! { + key: { "$eq": null } + })), + _ => None, + }, + }; + Ok(query_doc) +} + +fn make_array_comparison_selector( + column: &ComparisonTarget, + comparison: &ArrayComparison, +) -> Result> { + let column_ref = ColumnRef::from_comparison_target(column); + let ColumnRef::MatchKey(key) = column_ref else { + return Ok(None); + }; + let doc = match comparison { + ArrayComparison::Contains { value } => value_expression(value)?.map(|value| { + doc! { + key: { "$elemMatch": { "$eq": value } } + } + }), + ArrayComparison::IsEmpty => Some(doc! { + key: { "$size": 0 } + }), + }; + Ok(doc.map(QueryDocument)) +} + +/// Only scalar comparison values can be represented in query documents. This function returns such +/// a representation if there is a legal way to do so. +fn value_expression(value: &ComparisonValue) -> Result> { + let expression = match value { + ComparisonValue::Scalar { value, value_type } => { + let bson_value = bson_from_scalar_value(value, value_type)?; + Some(bson_value) + } + ComparisonValue::Column { .. } => None, + // Variables cannot be referenced in match documents + ComparisonValue::Variable { .. } => None, + }; + Ok(expression) +} + +/// Convert a JSON Value into BSON using the provided type information. +/// For example, parses values of type "Date" into BSON DateTime. +fn bson_from_scalar_value(value: &serde_json::Value, value_type: &Type) -> Result { + json_to_bson(value_type, value.clone()).map_err(|e| MongoAgentError::BadQuery(anyhow!(e))) +} diff --git a/crates/mongodb-agent-common/src/query/make_selector/mod.rs b/crates/mongodb-agent-common/src/query/make_selector/mod.rs new file mode 100644 index 00000000..4dcf9d00 --- /dev/null +++ b/crates/mongodb-agent-common/src/query/make_selector/mod.rs @@ -0,0 +1,331 @@ +mod make_aggregation_expression; +mod make_expression_plan; +mod make_query_document; + +use mongodb::bson::{doc, Document}; + +use crate::{interface_types::MongoAgentError, mongo_query_plan::Expression}; + +pub use self::{ + make_aggregation_expression::AggregationExpression, + make_expression_plan::{make_expression_plan, ExpressionPlan}, + make_query_document::QueryDocument, +}; + +pub type Result = std::result::Result; + +/// Creates a "query document" that filters documents according to the given expression. Query +/// documents are used as arguments for the `$match` aggregation stage, and for the db.find() +/// command. +/// +/// Query documents are distinct from "aggregation expressions". The latter are more general. 
+pub fn make_selector(expr: &Expression) -> Result { + let selector = match make_expression_plan(expr)? { + ExpressionPlan::QueryDocument(QueryDocument(doc)) => doc, + ExpressionPlan::AggregationExpression(AggregationExpression(e)) => doc! { + "$expr": e, + }, + }; + Ok(selector) +} + +#[cfg(test)] +mod tests { + use configuration::MongoScalarType; + use mongodb::bson::doc; + use mongodb_support::BsonScalarType; + use ndc_models::UnaryComparisonOperator; + use pretty_assertions::assert_eq; + + use crate::{ + comparison_function::ComparisonFunction, + mongo_query_plan::{ + ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, Type, + }, + }; + + use super::make_selector; + + #[test] + fn compares_fields_of_related_documents_using_elem_match_in_binary_comparison( + ) -> anyhow::Result<()> { + let selector = make_selector(&Expression::Exists { + in_collection: ExistsInCollection::Related { + relationship: "Albums".into(), + }, + predicate: Some(Box::new(Expression::Exists { + in_collection: ExistsInCollection::Related { + relationship: "Tracks".into(), + }, + predicate: Some(Box::new(Expression::BinaryComparisonOperator { + column: ComparisonTarget::column( + "Name", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + operator: ComparisonFunction::Equal, + value: ComparisonValue::Scalar { + value: "Helter Skelter".into(), + value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + }, + })), + })), + })?; + + let expected = doc! { + "Albums": { + "$elemMatch": { + "Tracks": { + "$elemMatch": { + "Name": { "$eq": "Helter Skelter" } + } + } + } + } + }; + + assert_eq!(selector, expected); + Ok(()) + } + + #[test] + fn compares_fields_of_related_documents_using_elem_match_in_unary_comparison( + ) -> anyhow::Result<()> { + let selector = make_selector(&Expression::Exists { + in_collection: ExistsInCollection::Related { + relationship: "Albums".into(), + }, + predicate: Some(Box::new(Expression::Exists { + in_collection: ExistsInCollection::Related { + relationship: "Tracks".into(), + }, + predicate: Some(Box::new(Expression::UnaryComparisonOperator { + column: ComparisonTarget::column( + "Name", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + operator: UnaryComparisonOperator::IsNull, + })), + })), + })?; + + let expected = doc! { + "Albums": { + "$elemMatch": { + "Tracks": { + "$elemMatch": { + "Name": { "$eq": null } + } + } + } + } + }; + + assert_eq!(selector, expected); + Ok(()) + } + + #[test] + fn compares_two_columns() -> anyhow::Result<()> { + let selector = make_selector(&Expression::BinaryComparisonOperator { + column: ComparisonTarget::column( + "Name", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + operator: ComparisonFunction::Equal, + value: ComparisonValue::column( + "Title", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + })?; + + let expected = doc! 
{ + "$expr": { + "$eq": ["$Name", "$Title"] + } + }; + + assert_eq!(selector, expected); + Ok(()) + } + + // TODO: ENG-1487 modify this test for the new named scopes feature + // #[test] + // fn compares_root_collection_column_to_scalar() -> anyhow::Result<()> { + // let selector = make_selector(&Expression::BinaryComparisonOperator { + // column: ComparisonTarget::ColumnInScope { + // name: "Name".into(), + // field_path: None, + // field_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // scope: Scope::Named("scope_0".to_string()), + // }, + // operator: ComparisonFunction::Equal, + // value: ComparisonValue::Scalar { + // value: "Lady Gaga".into(), + // value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + // }, + // })?; + // + // let expected = doc! { + // "$expr": { + // "$eq": ["$$scope_0.Name", "Lady Gaga"] + // } + // }; + // + // assert_eq!(selector, expected); + // Ok(()) + // } + + // #[test] + // fn root_column_reference_refereces_column_of_nearest_query() -> anyhow::Result<()> { + // let request = query_request() + // .collection("Artist") + // .query( + // query().fields([relation_field!("Albums" => "Albums", query().predicate( + // binop( + // "_gt", + // target!("Milliseconds", relations: [ + // path_element("Tracks".into()).predicate( + // binop("_eq", target!("Name"), column_value!(root("Title"))) + // ), + // ]), + // value!(30_000), + // ) + // ))]), + // ) + // .relationships(chinook_relationships()) + // .into(); + // + // let config = chinook_config(); + // let plan = plan_for_query_request(&config, request)?; + // let pipeline = pipeline_for_query_request(&config, &plan)?; + // + // let expected_pipeline = bson!([ + // { + // "$lookup": { + // "from": "Album", + // "localField": "ArtistId", + // "foreignField": "ArtistId", + // "as": "Albums", + // "let": { + // "scope_root": "$$ROOT", + // }, + // "pipeline": [ + // { + // "$lookup": { + // "from": "Track", + // "localField": "AlbumId", + // "foreignField": "AlbumId", + // "as": "Tracks", + // "let": { + // "scope_0": "$$ROOT", + // }, + // "pipeline": [ + // { + // "$match": { + // "$expr": { "$eq": ["$Name", "$$scope_0.Title"] }, + // }, + // }, + // { + // "$replaceWith": { + // "Milliseconds": { "$ifNull": ["$Milliseconds", null] } + // } + // }, + // ] + // } + // }, + // { + // "$match": { + // "Tracks": { + // "$elemMatch": { + // "Milliseconds": { "$gt": 30_000 } + // } + // } + // } + // }, + // { + // "$replaceWith": { + // "Tracks": { "$getField": { "$literal": "Tracks" } } + // } + // }, + // ], + // }, + // }, + // { + // "$replaceWith": { + // "Albums": { + // "rows": [] + // } + // } + // }, + // ]); + // + // assert_eq!(bson::to_bson(&pipeline).unwrap(), expected_pipeline); + // Ok(()) + // } + + #[test] + fn compares_value_to_elements_of_array_field() -> anyhow::Result<()> { + let selector = make_selector(&Expression::Exists { + in_collection: ExistsInCollection::NestedCollection { + column_name: "staff".into(), + arguments: Default::default(), + field_path: Default::default(), + }, + predicate: Some(Box::new(Expression::BinaryComparisonOperator { + column: ComparisonTarget::column( + "last_name", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + operator: ComparisonFunction::Equal, + value: ComparisonValue::Scalar { + value: "Hughes".into(), + value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + }, + })), + })?; + + let expected = doc! 
{ + "staff": { + "$elemMatch": { + "last_name": { "$eq": "Hughes" } + } + } + }; + + assert_eq!(selector, expected); + Ok(()) + } + + #[test] + fn compares_value_to_elements_of_array_field_of_nested_object() -> anyhow::Result<()> { + let selector = make_selector(&Expression::Exists { + in_collection: ExistsInCollection::NestedCollection { + column_name: "staff".into(), + arguments: Default::default(), + field_path: vec!["site_info".into()], + }, + predicate: Some(Box::new(Expression::BinaryComparisonOperator { + column: ComparisonTarget::column( + "last_name", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + operator: ComparisonFunction::Equal, + value: ComparisonValue::Scalar { + value: "Hughes".into(), + value_type: Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + }, + })), + })?; + + let expected = doc! { + "staff.site_info": { + "$elemMatch": { + "last_name": { "$eq": "Hughes" } + } + } + }; + + assert_eq!(selector, expected); + Ok(()) + } +} diff --git a/crates/mongodb-agent-common/src/query/make_sort.rs b/crates/mongodb-agent-common/src/query/make_sort.rs index e113da4e..5046ea6b 100644 --- a/crates/mongodb-agent-common/src/query/make_sort.rs +++ b/crates/mongodb-agent-common/src/query/make_sort.rs @@ -1,65 +1,174 @@ -use itertools::Itertools as _; -use mongodb::bson::{bson, Document}; +use std::{collections::BTreeMap, iter::once}; + +use itertools::join; +use mongodb::bson::bson; +use mongodb_support::aggregate::{SortDocument, Stage}; use ndc_models::OrderDirection; use crate::{ interface_types::MongoAgentError, mongo_query_plan::{OrderBy, OrderByTarget}, - mongodb::sanitize::safe_name, + mongodb::sanitize::escape_invalid_variable_chars, }; -pub fn make_sort(order_by: &OrderBy) -> Result { +use super::column_ref::ColumnRef; + +/// In a [SortDocument] there is no way to reference field names that need to be escaped, such as +/// names that begin with dollar signs. To sort on such fields we need to insert an $addFields +/// stage _before_ the $sort stage to map safe aliases. 
+type RequiredAliases<'a> = BTreeMap>; + +type Result = std::result::Result; + +pub fn make_sort_stages(order_by: &OrderBy) -> Result> { + let (sort_document, required_aliases) = make_sort(order_by)?; + let mut stages = vec![]; + + if !required_aliases.is_empty() { + let fields = required_aliases + .into_iter() + .map(|(alias, expression)| (alias, expression.into_aggregate_expression().into_bson())) + .collect(); + let stage = Stage::AddFields(fields); + stages.push(stage); + } + + let sort_stage = Stage::Sort(sort_document); + stages.push(sort_stage); + + Ok(stages) +} + +fn make_sort(order_by: &OrderBy) -> Result<(SortDocument, RequiredAliases<'_>)> { let OrderBy { elements } = order_by; - elements - .clone() + let keys_directions_expressions: BTreeMap>)> = + elements + .iter() + .map(|obe| { + let col_ref = ColumnRef::from_order_by_target(&obe.target)?; + let (key, required_alias) = match col_ref { + ColumnRef::MatchKey(key) => (key.to_string(), None), + ref_expr => (safe_alias(&obe.target)?, Some(ref_expr)), + }; + Ok((key, (obe.order_direction, required_alias))) + }) + .collect::>>()?; + + let sort_document = keys_directions_expressions .iter() - .map(|obe| { - let direction = match obe.clone().order_direction { + .map(|(key, (direction, _))| { + let direction_bson = match direction { OrderDirection::Asc => bson!(1), OrderDirection::Desc => bson!(-1), }; - match &obe.target { - OrderByTarget::Column { - name, - field_path, - path, - } => Ok(( - column_ref_with_path(name, field_path.as_deref(), path)?, - direction, - )), - OrderByTarget::SingleColumnAggregate { - column: _, - function: _, - path: _, - result_type: _, - } => - // TODO: MDB-150 - { - Err(MongoAgentError::NotImplemented( - "ordering by single column aggregate", - )) - } - OrderByTarget::StarCountAggregate { path: _ } => Err( - // TODO: MDB-151 - MongoAgentError::NotImplemented("ordering by star count aggregate"), - ), - } + (key.clone(), direction_bson) }) - .collect() + .collect(); + + let required_aliases = keys_directions_expressions + .into_iter() + .flat_map(|(key, (_, expr))| expr.map(|e| (key, e))) + .collect(); + + Ok((SortDocument(sort_document), required_aliases)) } -// TODO: MDB-159 Replace use of [safe_name] with [ColumnRef]. -fn column_ref_with_path( - name: &ndc_models::FieldName, - field_path: Option<&[ndc_models::FieldName]>, - relation_path: &[ndc_models::RelationshipName], -) -> Result { - relation_path - .iter() - .map(|n| n.as_str()) - .chain(std::iter::once(name.as_str())) - .chain(field_path.into_iter().flatten().map(|n| n.as_str())) - .map(safe_name) - .process_results(|mut iter| iter.join(".")) +fn safe_alias(target: &OrderByTarget) -> Result { + match target { + ndc_query_plan::OrderByTarget::Column { + name, + field_path, + path, + .. + } => { + let name_and_path = once("__sort_key_") + .chain(path.iter().map(|n| n.as_str())) + .chain([name.as_str()]) + .chain( + field_path + .iter() + .flatten() + .map(|field_name| field_name.as_str()), + ); + let combine_all_elements_into_one_name = join(name_and_path, "_"); + Ok(escape_invalid_variable_chars( + &combine_all_elements_into_one_name, + )) + } + ndc_query_plan::OrderByTarget::Aggregate { .. 
} => { + // TODO: ENG-1010, ENG-1011 + Err(MongoAgentError::NotImplemented("order by aggregate".into())) + } + } +} + +#[cfg(test)] +mod tests { + use mongodb::bson::doc; + use mongodb_support::aggregate::SortDocument; + use ndc_models::{FieldName, OrderDirection}; + use ndc_query_plan::OrderByElement; + use nonempty::{nonempty, NonEmpty}; + use pretty_assertions::assert_eq; + + use crate::{mongo_query_plan::OrderBy, query::column_ref::ColumnRef}; + + use super::make_sort; + + #[test] + fn escapes_field_names() -> anyhow::Result<()> { + let order_by = OrderBy { + elements: vec![OrderByElement { + order_direction: OrderDirection::Asc, + target: ndc_query_plan::OrderByTarget::Column { + name: "$schema".into(), + field_path: Default::default(), + path: Default::default(), + arguments: Default::default(), + }, + }], + }; + let path: NonEmpty = NonEmpty::singleton("$schema".into()); + + let actual = make_sort(&order_by)?; + let expected_sort_doc = SortDocument(doc! { + "__sort_key__·24schema": 1 + }); + let expected_aliases = [( + "__sort_key__·24schema".into(), + ColumnRef::from_field_path(path.as_ref()), + )] + .into(); + assert_eq!(actual, (expected_sort_doc, expected_aliases)); + Ok(()) + } + + #[test] + fn escapes_nested_field_names() -> anyhow::Result<()> { + let order_by = OrderBy { + elements: vec![OrderByElement { + order_direction: OrderDirection::Asc, + target: ndc_query_plan::OrderByTarget::Column { + name: "configuration".into(), + field_path: Some(vec!["$schema".into()]), + path: Default::default(), + arguments: Default::default(), + }, + }], + }; + let path: NonEmpty = nonempty!["configuration".into(), "$schema".into()]; + + let actual = make_sort(&order_by)?; + let expected_sort_doc = SortDocument(doc! { + "__sort_key__configuration_·24schema": 1 + }); + let expected_aliases = [( + "__sort_key__configuration_·24schema".into(), + ColumnRef::from_field_path(path.as_ref()), + )] + .into(); + assert_eq!(actual, (expected_sort_doc, expected_aliases)); + Ok(()) + } } diff --git a/crates/mongodb-agent-common/src/query/mod.rs b/crates/mongodb-agent-common/src/query/mod.rs index c0526183..6bc505af 100644 --- a/crates/mongodb-agent-common/src/query/mod.rs +++ b/crates/mongodb-agent-common/src/query/mod.rs @@ -1,7 +1,9 @@ -mod column_ref; -mod constants; +mod aggregates; +pub mod column_ref; mod execute_query_request; mod foreach; +mod groups; +mod is_response_faceted; mod make_selector; mod make_sort; mod native_query; @@ -11,6 +13,7 @@ mod query_target; mod query_variable_name; mod relations; pub mod response; +mod selection; pub mod serialization; use ndc_models::{QueryRequest, QueryResponse}; @@ -18,8 +21,8 @@ use ndc_models::{QueryRequest, QueryResponse}; use self::execute_query_request::execute_query_request; pub use self::{ make_selector::make_selector, - make_sort::make_sort, - pipeline::{is_response_faceted, pipeline_for_non_foreach, pipeline_for_query_request}, + make_sort::make_sort_stages, + pipeline::{pipeline_for_non_foreach, pipeline_for_query_request}, query_target::QueryTarget, response::QueryResponseError, }; @@ -44,11 +47,10 @@ mod tests { use mongodb::bson::{self, bson}; use ndc_models::{QueryResponse, RowSet}; use ndc_test_helpers::{ - binop, collection, column_aggregate, column_count_aggregate, field, named_type, - object_type, query, query_request, row_set, target, value, + binop, collection, field, named_type, object_type, query, query_request, row_set, target, + value, }; use pretty_assertions::assert_eq; - use serde_json::json; use super::execute_query_request; 
use crate::{ @@ -92,136 +94,6 @@ mod tests { Ok(()) } - #[tokio::test] - async fn executes_aggregation() -> Result<(), anyhow::Error> { - let query_request = query_request() - .collection("students") - .query(query().aggregates([ - column_count_aggregate!("count" => "gpa", distinct: true), - column_aggregate!("avg" => "gpa", "avg"), - ])) - .into(); - - let expected_response = row_set() - .aggregates([("count", json!(11)), ("avg", json!(3))]) - .into_response(); - - let expected_pipeline = bson!([ - { - "$facet": { - "avg": [ - { "$match": { "gpa": { "$exists": true, "$ne": null } } }, - { "$group": { "_id": null, "result": { "$avg": "$gpa" } } }, - ], - "count": [ - { "$match": { "gpa": { "$exists": true, "$ne": null } } }, - { "$group": { "_id": "$gpa" } }, - { "$count": "result" }, - ], - }, - }, - { - "$replaceWith": { - "aggregates": { - "avg": { "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "avg" } } }, - } }, - "count": { - "$ifNull": [ - { - "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "count" } } }, - } - }, - 0, - ] - }, - }, - }, - }, - ]); - - let db = mock_collection_aggregate_response_for_pipeline( - "students", - expected_pipeline, - bson!([{ - "aggregates": { - "count": 11, - "avg": 3, - }, - }]), - ); - - let result = execute_query_request(db, &students_config(), query_request).await?; - assert_eq!(result, expected_response); - Ok(()) - } - - #[tokio::test] - async fn executes_aggregation_with_fields() -> Result<(), anyhow::Error> { - let query_request = query_request() - .collection("students") - .query( - query() - .aggregates([column_aggregate!("avg" => "gpa", "avg")]) - .fields([field!("student_gpa" => "gpa")]) - .predicate(binop("_lt", target!("gpa"), value!(4.0))), - ) - .into(); - - let expected_response = row_set() - .aggregates([("avg", json!(3.1))]) - .row([("student_gpa", 3.1)]) - .into_response(); - - let expected_pipeline = bson!([ - { "$match": { "gpa": { "$lt": 4.0 } } }, - { - "$facet": { - "avg": [ - { "$match": { "gpa": { "$exists": true, "$ne": null } } }, - { "$group": { "_id": null, "result": { "$avg": "$gpa" } } }, - ], - "__ROWS__": [{ - "$replaceWith": { - "student_gpa": { "$ifNull": ["$gpa", null] }, - }, - }], - }, - }, - { - "$replaceWith": { - "aggregates": { - "avg": { "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "avg" } } }, - } }, - }, - "rows": "$__ROWS__", - }, - }, - ]); - - let db = mock_collection_aggregate_response_for_pipeline( - "students", - expected_pipeline, - bson!([{ - "aggregates": { - "avg": 3.1, - }, - "rows": [{ - "student_gpa": 3.1, - }], - }]), - ); - - let result = execute_query_request(db, &students_config(), query_request).await?; - assert_eq!(result, expected_response); - Ok(()) - } - #[tokio::test] async fn converts_date_inputs_to_bson() -> Result<(), anyhow::Error> { let query_request = query_request() @@ -273,6 +145,7 @@ mod tests { let expected_response = QueryResponse(vec![RowSet { aggregates: None, rows: Some(vec![]), + groups: Default::default(), }]); let db = mock_collection_aggregate_response("comments", bson!([])); diff --git a/crates/mongodb-agent-common/src/query/native_query.rs b/crates/mongodb-agent-common/src/query/native_query.rs index 946b5eea..b5a7a4c2 100644 --- a/crates/mongodb-agent-common/src/query/native_query.rs +++ b/crates/mongodb-agent-common/src/query/native_query.rs @@ -3,12 +3,12 @@ use std::collections::BTreeMap; use configuration::native_query::NativeQuery; use 
itertools::Itertools as _; use mongodb::bson::Bson; +use mongodb_support::aggregate::{Pipeline, Stage}; use ndc_models::ArgumentName; use crate::{ interface_types::MongoAgentError, mongo_query_plan::{Argument, MongoConfiguration, QueryPlan}, - mongodb::{Pipeline, Stage}, procedure::{interpolated_command, ProcedureError}, }; diff --git a/crates/mongodb-agent-common/src/query/pipeline.rs b/crates/mongodb-agent-common/src/query/pipeline.rs index a7fb3868..5bfe3290 100644 --- a/crates/mongodb-agent-common/src/query/pipeline.rs +++ b/crates/mongodb-agent-common/src/query/pipeline.rs @@ -1,40 +1,31 @@ use std::collections::BTreeMap; -use mongodb::bson::{self, doc, Bson}; +use itertools::Itertools; +use mongodb::bson::{bson, Bson}; +use mongodb_support::aggregate::{Pipeline, Selection, Stage}; use tracing::instrument; use crate::{ - aggregation_function::AggregationFunction, + constants::{ROW_SET_AGGREGATES_KEY, ROW_SET_GROUPS_KEY, ROW_SET_ROWS_KEY}, interface_types::MongoAgentError, - mongo_query_plan::{Aggregate, MongoConfiguration, Query, QueryPlan}, - mongodb::{sanitize::get_field, Accumulator, Pipeline, Selection, Stage}, + mongo_query_plan::{MongoConfiguration, Query, QueryPlan}, }; use super::{ - constants::{RESULT_FIELD, ROWS_FIELD}, - foreach::pipeline_for_foreach, - make_selector, make_sort, - native_query::pipeline_for_native_query, - query_level::QueryLevel, - relations::pipeline_for_relations, + aggregates::pipeline_for_aggregates, column_ref::ColumnRef, foreach::pipeline_for_foreach, + groups::pipeline_for_groups, is_response_faceted::ResponseFacets, make_selector, + make_sort::make_sort_stages, native_query::pipeline_for_native_query, query_level::QueryLevel, + relations::pipeline_for_relations, selection::selection_for_fields, }; -/// A query that includes aggregates will be run using a $facet pipeline stage, while a query -/// without aggregates will not. The choice affects how result rows are mapped to a QueryResponse. -/// -/// If we have aggregate pipelines they should be combined with the fields pipeline (if there is -/// one) in a single facet stage. If we have fields, and no aggregates then the fields pipeline -/// can instead be appended to `pipeline`. -pub fn is_response_faceted(query: &Query) -> bool { - query.has_aggregates() -} +type Result = std::result::Result; /// Shared logic to produce a MongoDB aggregation pipeline for a query request. #[instrument(name = "Build Query Pipeline" skip_all, fields(internal.visibility = "user"))] pub fn pipeline_for_query_request( config: &MongoConfiguration, query_plan: &QueryPlan, -) -> Result { +) -> Result { if let Some(variable_sets) = &query_plan.variables { pipeline_for_foreach(variable_sets, config, query_plan) } else { @@ -49,9 +40,10 @@ pub fn pipeline_for_non_foreach( config: &MongoConfiguration, query_plan: &QueryPlan, query_level: QueryLevel, -) -> Result { +) -> Result { let query = &query_plan.query; let Query { + limit, offset, order_by, predicate, @@ -70,144 +62,109 @@ pub fn pipeline_for_non_foreach( .map(make_selector) .transpose()? 
.map(Stage::Match); - let sort_stage: Option = order_by + let sort_stages: Vec = order_by .iter() - .map(|o| Ok(Stage::Sort(make_sort(o)?)) as Result<_, MongoAgentError>) - .next() - .transpose()?; - let skip_stage = offset.map(Stage::Skip); + .map(make_sort_stages) + .flatten_ok() + .collect::>>()?; + let limit_stage = limit.map(Into::into).map(Stage::Limit); + let skip_stage = offset.map(Into::into).map(Stage::Skip); - [match_stage, sort_stage, skip_stage] + match_stage .into_iter() - .flatten() + .chain(sort_stages) + .chain(skip_stage) + .chain(limit_stage) .for_each(|stage| pipeline.push(stage)); - // `diverging_stages` includes either a $facet stage if the query includes aggregates, or the - // sort and limit stages if we are requesting rows only. In both cases the last stage is - // a $replaceWith. - let diverging_stages = if is_response_faceted(query) { - let (facet_pipelines, select_facet_results) = - facet_pipelines_for_query(query_plan, query_level)?; - let aggregation_stages = Stage::Facet(facet_pipelines); - let replace_with_stage = Stage::ReplaceWith(select_facet_results); - Pipeline::from_iter([aggregation_stages, replace_with_stage]) - } else { - pipeline_for_fields_facet(query_plan, query_level)? + let diverging_stages = match ResponseFacets::from_query(query) { + ResponseFacets::Combination { .. } => { + let (facet_pipelines, select_facet_results) = + facet_pipelines_for_query(query_plan, query_level)?; + let facet_stage = Stage::Facet(facet_pipelines); + let replace_with_stage = Stage::ReplaceWith(select_facet_results); + Pipeline::new(vec![facet_stage, replace_with_stage]) + } + ResponseFacets::AggregatesOnly(aggregates) => pipeline_for_aggregates(aggregates), + ResponseFacets::FieldsOnly(_) => pipeline_for_fields_facet(query_plan, query_level)?, + ResponseFacets::GroupsOnly(grouping) => pipeline_for_groups(grouping)?, }; pipeline.append(diverging_stages); Ok(pipeline) } -/// Generate a pipeline to select fields requested by the given query. This is intended to be used -/// within a $facet stage. We assume that the query's `where`, `order_by`, `offset` criteria (which -/// are shared with aggregates) have already been applied, and that we have already joined -/// relations. -pub fn pipeline_for_fields_facet( - query_plan: &QueryPlan, - query_level: QueryLevel, -) -> Result { - let Query { - limit, - relationships, - .. - } = &query_plan.query; - - let mut selection = Selection::from_query_request(query_plan)?; - if query_level != QueryLevel::Top { - // Queries higher up the chain might need to reference relationships from this query. So we - // forward relationship arrays if this is not the top-level query. - for relationship_key in relationships.keys() { - selection.0.insert( - relationship_key.to_owned(), - get_field(relationship_key.as_str()), - ); - } - } - - let limit_stage = limit.map(Stage::Limit); - let replace_with_stage: Stage = Stage::ReplaceWith(selection); - - Ok(Pipeline::from_iter( - [limit_stage, replace_with_stage.into()] - .into_iter() - .flatten(), - )) -} - /// Returns a map of pipelines for evaluating each aggregate independently, paired with /// a `Selection` that converts results of each pipeline to a format compatible with /// `QueryResponse`. fn facet_pipelines_for_query( query_plan: &QueryPlan, query_level: QueryLevel, -) -> Result<(BTreeMap, Selection), MongoAgentError> { +) -> Result<(BTreeMap, Selection)> { let query = &query_plan.query; let Query { aggregates, - aggregates_limit, fields, + groups, .. 
} = query; - let mut facet_pipelines = aggregates - .iter() - .flatten() - .map(|(key, aggregate)| { - Ok(( - key.to_string(), - pipeline_for_aggregate(aggregate.clone(), *aggregates_limit)?, - )) - }) - .collect::, MongoAgentError>>()?; - - if fields.is_some() { - let fields_pipeline = pipeline_for_fields_facet(query_plan, query_level)?; - facet_pipelines.insert(ROWS_FIELD.to_owned(), fields_pipeline); - } - - // This builds a map that feeds into a `$replaceWith` pipeline stage to build a map of - // aggregation results. - let aggregate_selections: bson::Document = aggregates - .iter() - .flatten() - .map(|(key, aggregate)| { - // The facet result for each aggregate is an array containing a single document which - // has a field called `result`. This code selects each facet result by name, and pulls - // out the `result` value. - let value_expr = doc! { - "$getField": { - "field": RESULT_FIELD, // evaluates to the value of this field - "input": { "$first": get_field(key.as_str()) }, // field is accessed from this document - }, - }; - - // Matching SQL semantics, if a **count** aggregation does not match any rows we want - // to return zero. Other aggregations should return null. - let value_expr = if is_count(aggregate) { - doc! { - "$ifNull": [value_expr, 0], - } - } else { - value_expr - }; - - (key.to_string(), value_expr.into()) - }) - .collect(); + let mut facet_pipelines = BTreeMap::new(); + + let (aggregates_pipeline_facet, select_aggregates) = match aggregates { + Some(aggregates) => { + let internal_key = "__AGGREGATES__"; + let aggregates_pipeline = pipeline_for_aggregates(aggregates); + let facet = (internal_key.to_string(), aggregates_pipeline); + let selection = ( + ROW_SET_AGGREGATES_KEY.to_string(), + bson!({ "$first": format!("${internal_key}") }), + ); + (Some(facet), Some(selection)) + } + None => (None, None), + }; - let select_aggregates = if !aggregate_selections.is_empty() { - Some(("aggregates".to_owned(), aggregate_selections.into())) - } else { - None + let (groups_pipeline_facet, select_groups) = match groups { + Some(grouping) => { + let internal_key = "__GROUPS__"; + let groups_pipeline = pipeline_for_groups(grouping)?; + let facet = (internal_key.to_string(), groups_pipeline); + let selection = ( + ROW_SET_GROUPS_KEY.to_string(), + Bson::String(format!("${internal_key}")), + ); + (Some(facet), Some(selection)) + } + None => (None, None), }; - let select_rows = match fields { - Some(_) => Some(("rows".to_owned(), Bson::String(format!("${ROWS_FIELD}")))), - _ => None, + let (rows_pipeline_facet, select_rows) = match fields { + Some(_) => { + let internal_key = "__ROWS__"; + let rows_pipeline = pipeline_for_fields_facet(query_plan, query_level)?; + let facet = (internal_key.to_string(), rows_pipeline); + let selection = ( + ROW_SET_ROWS_KEY.to_string().to_string(), + Bson::String(format!("${internal_key}")), + ); + (Some(facet), Some(selection)) + } + None => (None, None), }; - let selection = Selection( - [select_aggregates, select_rows] + for (key, pipeline) in [ + aggregates_pipeline_facet, + groups_pipeline_facet, + rows_pipeline_facet, + ] + .into_iter() + .flatten() + { + facet_pipelines.insert(key, pipeline); + } + + let selection = Selection::new( + [select_aggregates, select_groups, select_rows] .into_iter() .flatten() .collect(), @@ -216,87 +173,31 @@ fn facet_pipelines_for_query( Ok((facet_pipelines, selection)) } -fn is_count(aggregate: &Aggregate) -> bool { - match aggregate { - Aggregate::ColumnCount { .. } => true, - Aggregate::StarCount { .. 
} => true, - Aggregate::SingleColumn { function, .. } => function.is_count(), - } -} - -fn pipeline_for_aggregate( - aggregate: Aggregate, - limit: Option, -) -> Result { - // Group expressions use a dollar-sign prefix to indicate a reference to a document field. - // TODO: I don't think we need sanitizing, but I could use a second opinion -Jesse H. - let field_ref = |column: &str| Bson::String(format!("${column}")); - - let pipeline = match aggregate { - Aggregate::ColumnCount { column, distinct } if distinct => Pipeline::from_iter( - [ - Some(Stage::Match( - bson::doc! { column.as_str(): { "$exists": true, "$ne": null } }, - )), - limit.map(Stage::Limit), - Some(Stage::Group { - key_expression: field_ref(column.as_str()), - accumulators: [].into(), - }), - Some(Stage::Count(RESULT_FIELD.to_string())), - ] - .into_iter() - .flatten(), - ), - - Aggregate::ColumnCount { column, .. } => Pipeline::from_iter( - [ - Some(Stage::Match( - bson::doc! { column.as_str(): { "$exists": true, "$ne": null } }, - )), - limit.map(Stage::Limit), - Some(Stage::Count(RESULT_FIELD.to_string())), - ] - .into_iter() - .flatten(), - ), - - Aggregate::SingleColumn { - column, function, .. - } => { - use AggregationFunction::*; +/// Generate a pipeline to select fields requested by the given query. This is intended to be used +/// within a $facet stage. We assume that the query's `where`, `order_by`, `offset`, `limit` +/// criteria (which are shared with aggregates) have already been applied, and that we have already +/// joined relations. +pub fn pipeline_for_fields_facet( + query_plan: &QueryPlan, + query_level: QueryLevel, +) -> Result { + let Query { relationships, .. } = &query_plan.query; - let accumulator = match function { - Avg => Accumulator::Avg(field_ref(column.as_str())), - Count => Accumulator::Count, - Min => Accumulator::Min(field_ref(column.as_str())), - Max => Accumulator::Max(field_ref(column.as_str())), - Sum => Accumulator::Sum(field_ref(column.as_str())), - }; - Pipeline::from_iter( - [ - Some(Stage::Match( - bson::doc! { column: { "$exists": true, "$ne": null } }, - )), - limit.map(Stage::Limit), - Some(Stage::Group { - key_expression: Bson::Null, - accumulators: [(RESULT_FIELD.to_string(), accumulator)].into(), - }), - ] - .into_iter() - .flatten(), - ) + let mut selection = selection_for_fields(query_plan.query.fields.as_ref())?; + if query_level != QueryLevel::Top { + // Queries higher up the chain might need to reference relationships from this query. So we + // forward relationship arrays if this is not the top-level query. 
+ for relationship_key in relationships.keys() { + selection = selection.try_map_document(|mut doc| { + doc.insert( + relationship_key.to_owned(), + ColumnRef::from_field(relationship_key.as_str()).into_aggregate_expression(), + ); + doc + })?; } + } - Aggregate::StarCount {} => Pipeline::from_iter( - [ - limit.map(Stage::Limit), - Some(Stage::Count(RESULT_FIELD.to_string())), - ] - .into_iter() - .flatten(), - ), - }; - Ok(pipeline) + let replace_with_stage: Stage = Stage::ReplaceWith(selection); + Ok(Pipeline::new(vec![replace_with_stage])) } diff --git a/crates/mongodb-agent-common/src/query/query_variable_name.rs b/crates/mongodb-agent-common/src/query/query_variable_name.rs index bacaccbe..66589962 100644 --- a/crates/mongodb-agent-common/src/query/query_variable_name.rs +++ b/crates/mongodb-agent-common/src/query/query_variable_name.rs @@ -1,6 +1,7 @@ use std::borrow::Cow; use configuration::MongoScalarType; +use itertools::Itertools; use crate::{ mongo_query_plan::{ObjectType, Type}, @@ -28,13 +29,14 @@ fn type_name(input_type: &Type) -> Cow<'static, str> { Type::Object(obj) => object_type_name(obj).into(), Type::ArrayOf(t) => format!("[{}]", type_name(t)).into(), Type::Nullable(t) => format!("nullable({})", type_name(t)).into(), + Type::Tuple(ts) => format!("({})", ts.iter().map(type_name).join(", ")).into(), } } fn object_type_name(obj: &ObjectType) -> String { let mut output = "{".to_string(); for (key, t) in &obj.fields { - output.push_str(&format!("{key}:{}", type_name(t))); + output.push_str(&format!("{key}:{}", type_name(&t.r#type))); } output.push('}'); output diff --git a/crates/mongodb-agent-common/src/query/relations.rs b/crates/mongodb-agent-common/src/query/relations.rs index 39edbdc6..089b3caa 100644 --- a/crates/mongodb-agent-common/src/query/relations.rs +++ b/crates/mongodb-agent-common/src/query/relations.rs @@ -1,18 +1,16 @@ use std::collections::BTreeMap; use itertools::Itertools as _; -use mongodb::bson::{doc, Bson, Document}; +use mongodb::bson::{doc, Document}; +use mongodb_support::aggregate::{Pipeline, Stage}; use ndc_query_plan::Scope; +use nonempty::NonEmpty; use crate::mongo_query_plan::{MongoConfiguration, Query, QueryPlan}; -use crate::mongodb::sanitize::safe_name; -use crate::mongodb::Pipeline; use crate::query::column_ref::name_from_scope; -use crate::{ - interface_types::MongoAgentError, - mongodb::{sanitize::variable, Stage}, -}; +use crate::{interface_types::MongoAgentError, mongodb::sanitize::variable}; +use super::column_ref::ColumnRef; use super::pipeline::pipeline_for_non_foreach; use super::query_level::QueryLevel; @@ -47,13 +45,13 @@ pub fn pipeline_for_relations( QueryLevel::Relationship, )?; - make_lookup_stage( + Ok(make_lookup_stage( relationship.target_collection.clone(), &relationship.column_mapping, name.to_owned(), lookup_pipeline, scope.as_ref(), - ) + )) as Result<_> }) .try_collect()?; @@ -62,42 +60,53 @@ pub fn pipeline_for_relations( fn make_lookup_stage( from: ndc_models::CollectionName, - column_mapping: &BTreeMap, + column_mapping: &BTreeMap>, r#as: ndc_models::RelationshipName, lookup_pipeline: Pipeline, scope: Option<&Scope>, -) -> Result { - // If we are mapping a single field in the source collection to a single field in the target - // collection then we can use the correlated subquery syntax. 
- if column_mapping.len() == 1 { - // Safe to unwrap because we just checked the hashmap size - let (source_selector, target_selector) = column_mapping.iter().next().unwrap(); - single_column_mapping_lookup( +) -> Stage { + // If there is a single column mapping, and the source and target field references can be + // expressed as match keys (we don't need to escape field names), then we can use a concise + // correlated subquery. Otherwise we need to fall back to an uncorrelated subquery. + let single_mapping = if column_mapping.len() == 1 { + column_mapping.iter().next() + } else { + None + }; + let source_selector = single_mapping.map(|(field_name, _)| field_name); + let target_selector = single_mapping.map(|(_, target_path)| target_path); + + let source_key = + source_selector.and_then(|f| ColumnRef::from_field(f.as_ref()).into_match_key()); + let target_key = + target_selector.and_then(|path| ColumnRef::from_field_path(path.as_ref()).into_match_key()); + + match (source_key, target_key) { + (Some(source_key), Some(target_key)) => lookup_with_concise_correlated_subquery( from, - source_selector, - target_selector, + source_key.into_owned(), + target_key.into_owned(), r#as, lookup_pipeline, scope, - ) - } else { - multiple_column_mapping_lookup(from, column_mapping, r#as, lookup_pipeline, scope) + ), + + _ => lookup_with_uncorrelated_subquery(from, column_mapping, r#as, lookup_pipeline, scope), } } -// TODO: MDB-160 Replace uses of [safe_name] with [ColumnRef]. -fn single_column_mapping_lookup( +fn lookup_with_concise_correlated_subquery( from: ndc_models::CollectionName, - source_selector: &ndc_models::FieldName, - target_selector: &ndc_models::FieldName, + source_selector_key: String, + target_selector_key: String, r#as: ndc_models::RelationshipName, lookup_pipeline: Pipeline, scope: Option<&Scope>, -) -> Result { - Ok(Stage::Lookup { +) -> Stage { + Stage::Lookup { from: Some(from.to_string()), - local_field: Some(safe_name(source_selector.as_str())?.into_owned()), - foreign_field: Some(safe_name(target_selector.as_str())?.into_owned()), + local_field: Some(source_selector_key), + foreign_field: Some(target_selector_key), r#let: scope.map(|scope| { doc! { name_from_scope(scope): "$$ROOT" @@ -109,28 +118,32 @@ fn single_column_mapping_lookup( Some(lookup_pipeline) }, r#as: r#as.to_string(), - }) + } } -fn multiple_column_mapping_lookup( +/// The concise correlated subquery syntax with `localField` and `foreignField` only works when +/// joining on one field. To join on multiple fields it is necessary to bind variables to fields on +/// the left side of the join, and to emit a custom `$match` stage to filter the right side of the +/// join. This version also allows comparing arbitrary expressions for the join which we need for +/// cases like joining on field names that require escaping. 
+fn lookup_with_uncorrelated_subquery( from: ndc_models::CollectionName, - column_mapping: &BTreeMap, + column_mapping: &BTreeMap>, r#as: ndc_models::RelationshipName, lookup_pipeline: Pipeline, scope: Option<&Scope>, -) -> Result { +) -> Stage { let mut let_bindings: Document = column_mapping .keys() .map(|local_field| { - Ok(( + ( variable(local_field.as_str()), - Bson::String(format!( - "${}", - safe_name(local_field.as_str())?.into_owned() - )), - )) + ColumnRef::from_field(local_field.as_ref()) + .into_aggregate_expression() + .into_bson(), + ) }) - .collect::>()?; + .collect(); if let Some(scope) = scope { let_bindings.insert(name_from_scope(scope), "$$ROOT"); @@ -139,24 +152,20 @@ fn multiple_column_mapping_lookup( // Creating an intermediate Vec and sorting it is done just to help with testing. // A stable order for matchers makes it easier to assert equality between actual // and expected pipelines. - let mut column_pairs: Vec<(&ndc_models::FieldName, &ndc_models::FieldName)> = + let mut column_pairs: Vec<(&ndc_models::FieldName, &NonEmpty)> = column_mapping.iter().collect(); column_pairs.sort(); let matchers: Vec = column_pairs .into_iter() - .map(|(local_field, remote_field)| { - Ok(doc! { "$eq": [ - format!("$${}", variable(local_field.as_str())), - format!("${}", safe_name(remote_field.as_str())?) - ] }) + .map(|(local_field, remote_field_path)| { + doc! { "$eq": [ + ColumnRef::variable(variable(local_field.as_str())).into_aggregate_expression(), + ColumnRef::from_field_path(remote_field_path.as_ref()).into_aggregate_expression(), + ] } }) - .collect::>()?; + .collect(); - // Match only documents on the right side of the join that match the column-mapping - // criteria. In the case where we have only one column mapping using the $lookup stage's - // `local_field` and `foreign_field` shorthand would give better performance (~10%), but that - // locks us into MongoDB v5.0 or later. let mut pipeline = Pipeline::from_iter([Stage::Match(if matchers.len() == 1 { doc! 
{ "$expr": matchers.into_iter().next().unwrap() } } else { @@ -165,22 +174,23 @@ fn multiple_column_mapping_lookup( pipeline.append(lookup_pipeline); let pipeline: Option = pipeline.into(); - Ok(Stage::Lookup { + Stage::Lookup { from: Some(from.to_string()), local_field: None, foreign_field: None, r#let: let_bindings.into(), pipeline, r#as: r#as.to_string(), - }) + } } #[cfg(test)] mod tests { use configuration::Configuration; use mongodb::bson::{bson, Bson}; + use ndc_models::{FieldName, QueryResponse}; use ndc_test_helpers::{ - binop, collection, exists, field, named_type, object_type, query, query_request, + binop, collection, exists, field, named_type, object, object_type, query, query_request, relation_field, relationship, row_set, star_count_aggregate, target, value, }; use pretty_assertions::assert_eq; @@ -205,7 +215,7 @@ mod tests { ])) .relationships([( "class_students", - relationship("students", [("_id", "classId")]), + relationship("students", [("_id", &["classId"])]), )]) .into(); @@ -247,7 +257,7 @@ mod tests { "students": { "rows": { "$map": { - "input": { "$getField": { "$literal": "class_students" } }, + "input": "$class_students", "in": { "student_name": "$$this.student_name" } @@ -288,7 +298,7 @@ mod tests { ])) .relationships([( "student_class", - relationship("classes", [("classId", "_id")]), + relationship("classes", [("classId", &["_id"])]), )]) .into(); @@ -336,7 +346,7 @@ mod tests { "class": { "rows": { "$map": { - "input": { "$getField": { "$literal": "student_class" } }, + "input": "$student_class", "in": { "class_title": "$$this.class_title" } @@ -380,7 +390,10 @@ mod tests { ])) .relationships([( "students", - relationship("students", [("title", "class_title"), ("year", "year")]), + relationship( + "students", + [("title", &["class_title"]), ("year", &["year"])], + ), )]) .into(); @@ -430,7 +443,7 @@ mod tests { "students": { "rows": { "$map": { - "input": { "$getField": { "$literal": "students" } }, + "input": "$students", "in": { "student_name": "$$this.student_name" } @@ -459,6 +472,77 @@ mod tests { Ok(()) } + #[tokio::test] + async fn escapes_column_mappings_names_if_necessary() -> Result<(), anyhow::Error> { + let query_request = query_request() + .collection("weird_field_names") + .query(query().fields([ + field!("invalid_name" => "$invalid.name"), + relation_field!("join" => "join", query().fields([ + field!("invalid_name" => "$invalid.name") + ])), + ])) + .relationships([( + "join", + relationship("weird_field_names", [("$invalid.name", &["$invalid.name"])]), + )]) + .into(); + + let expected_pipeline = bson!([ + { + "$lookup": { + "from": "weird_field_names", + "let": { + "v_·24invalid·2ename": { "$getField": { "$literal": "$invalid.name" } }, + "scope_root": "$$ROOT", + }, + "pipeline": [ + { + "$match": { "$expr": { + "$eq": [ + "$$v_·24invalid·2ename", + { "$getField": { "$literal": "$invalid.name" } } + ] + } }, + }, + { + "$replaceWith": { + "invalid_name": { "$ifNull": [{ "$getField": { "$literal": "$invalid.name" } }, null] }, + }, + }, + ], + "as": "join", + }, + }, + { + "$replaceWith": { + "invalid_name": { "$ifNull": [{ "$getField": { "$literal": "$invalid.name" } }, null] }, + "join": { + "rows": { + "$map": { + "input": "$join", + "in": { + "invalid_name": "$$this.invalid_name", + } + } + } + }, + }, + }, + ]); + + let db = mock_collection_aggregate_response_for_pipeline( + "weird_field_names", + expected_pipeline, + bson!([]), + ); + + execute_query_request(db, &test_cases_config(), query_request).await?; + // 
assert_eq!(expected_response, result); + + Ok(()) + } + #[tokio::test] async fn makes_recursive_lookups_for_nested_relations() -> Result<(), anyhow::Error> { let query_request = query_request() @@ -473,10 +557,13 @@ mod tests { ])), ])) .relationships([ - ("students", relationship("students", [("_id", "class_id")])), + ( + "students", + relationship("students", [("_id", &["class_id"])]), + ), ( "assignments", - relationship("assignments", [("_id", "student_id")]), + relationship("assignments", [("_id", &["student_id"])]), ), ]) .into(); @@ -535,7 +622,7 @@ mod tests { }, { "$replaceWith": { - "assignments": { "$getField": { "$literal": "assignments" } }, + "assignments": "$assignments", "student_name": { "$ifNull": ["$name", null] }, }, }, @@ -549,7 +636,7 @@ mod tests { "students": { "rows": { "$map": { - "input": { "$getField": { "$literal": "students" } }, + "input": "$students", "in": { "assignments": "$$this.assignments", "student_name": "$$this.student_name", @@ -605,7 +692,10 @@ mod tests { star_count_aggregate!("aggregate_count") ])), ])) - .relationships([("students", relationship("students", [("_id", "classId")]))]) + .relationships([( + "students", + relationship("students", [("_id", &["classId"])]), + )]) .into(); let expected_response = row_set() @@ -630,27 +720,14 @@ mod tests { }, "pipeline": [ { - "$facet": { - "aggregate_count": [ - { "$count": "result" }, - ], + "$group": { + "_id": null, + "aggregate_count": { "$sum": 1 }, } }, { "$replaceWith": { - "aggregates": { - "aggregate_count": { - "$ifNull": [ - { - "$getField": { - "field": "result", - "input": { "$first": { "$getField": { "$literal": "aggregate_count" } } }, - }, - }, - 0, - ] - }, - }, + "aggregate_count": { "$ifNull": ["$aggregate_count", 0] }, }, } ], @@ -660,16 +737,16 @@ mod tests { { "$replaceWith": { "students_aggregate": { - "$let": { - "vars": { - "row_set": { "$first": { "$getField": { "$literal": "students" } } } - }, - "in": { - "aggregates": { - "aggregate_count": "$$row_set.aggregates.aggregate_count" + "aggregates": { + "$let": { + "vars": { + "aggregates": { "$first": "$students" } + }, + "in": { + "aggregate_count": { "$ifNull": ["$$aggregates.aggregate_count", 0] } } } - } + }, } }, }, @@ -711,6 +788,7 @@ mod tests { ndc_models::ExistsInCollection::Related { relationship: "movie".into(), arguments: Default::default(), + field_path: Default::default(), }, binop( "_eq", @@ -721,7 +799,7 @@ mod tests { ) .relationships([( "movie", - relationship("movies", [("movie_id", "_id")]).object_type(), + relationship("movies", [("movie_id", &["_id"])]).object_type(), )]) .into(); @@ -766,14 +844,14 @@ mod tests { } }, { - "$limit": Bson::Int64(50), + "$limit": Bson::Int32(50), }, { "$replaceWith": { "movie": { "rows": { "$map": { - "input": { "$getField": { "$literal": "movie" } }, + "input": "$movie", "in": { "year": "$$this.year", "title": "$$this.title", @@ -804,114 +882,126 @@ mod tests { Ok(()) } - // TODO: This test requires updated ndc_models that add `field_path` to - // [ndc::ComparisonTarget::Column] - // #[tokio::test] - // async fn filters_by_field_nested_in_object_in_related_collection() -> Result<(), anyhow::Error> - // { - // let query_request = query_request() - // .collection("comments") - // .query( - // query() - // .fields([relation_field!("movie" => "movie", query().fields([ - // field!("credits" => "credits", object!([ - // field!("director"), - // ])), - // ]))]) - // .limit(50) - // .predicate(exists( - // ndc_models::ExistsInCollection::Related { - // relationship: "movie".into(), 
- // arguments: Default::default(), - // }, - // binop( - // "_eq", - // target!("credits", field_path: ["director"]), - // value!("Martin Scorsese"), - // ), - // )), - // ) - // .relationships([("movie", relationship("movies", [("movie_id", "_id")]))]) - // .into(); - // - // let expected_response = row_set() - // .row([ - // ("name", "Beric Dondarrion"), - // ( - // "movie", - // json!({ "rows": [{ - // "credits": { - // "director": "Martin Scorsese", - // } - // }]}), - // ), - // ]) - // .into(); - // - // let expected_pipeline = bson!([ - // { - // "$lookup": { - // "from": "movies", - // "localField": "movie_id", - // "foreignField": "_id", - // "pipeline": [ - // { - // "$replaceWith": { - // "credits": { - // "$cond": { - // "if": "$credits", - // "then": { "director": { "$ifNull": ["$credits.director", null] } }, - // "else": null, - // } - // }, - // } - // } - // ], - // "as": "movie" - // } - // }, - // { - // "$match": { - // "movie.credits.director": { - // "$eq": "Martin Scorsese" - // } - // } - // }, - // { - // "$limit": Bson::Int64(50), - // }, - // { - // "$replaceWith": { - // "name": { "$ifNull": ["$name", null] }, - // "movie": { - // "rows": { - // "$getField": { - // "$literal": "movie" - // } - // } - // }, - // } - // }, - // ]); - // - // let db = mock_collection_aggregate_response_for_pipeline( - // "comments", - // expected_pipeline, - // bson!([{ - // "name": "Beric Dondarrion", - // "movie": { "rows": [{ - // "credits": { - // "director": "Martin Scorsese" - // } - // }] }, - // }]), - // ); - // - // let result = execute_query_request(db, &mflix_config(), query_request).await?; - // assert_eq!(expected_response, result); - // - // Ok(()) - // } + #[tokio::test] + async fn filters_by_field_nested_in_object_in_related_collection() -> Result<(), anyhow::Error> + { + let query_request = query_request() + .collection("comments") + .query( + query() + .fields([ + field!("name"), + relation_field!("movie" => "movie", query().fields([ + field!("credits" => "credits", object!([ + field!("director"), + ])), + ])), + ]) + .limit(50) + .predicate(exists( + ndc_models::ExistsInCollection::Related { + relationship: "movie".into(), + arguments: Default::default(), + field_path: Default::default(), + }, + binop( + "_eq", + target!("credits", field_path: [Some(FieldName::from("director"))]), + value!("Martin Scorsese"), + ), + )), + ) + .relationships([("movie", relationship("movies", [("movie_id", &["_id"])]))]) + .into(); + + let expected_response: QueryResponse = row_set() + .row([ + ("name", json!("Beric Dondarrion")), + ( + "movie", + json!({ "rows": [{ + "credits": { + "director": "Martin Scorsese", + } + }]}), + ), + ]) + .into(); + + let expected_pipeline = bson!([ + { + "$lookup": { + "from": "movies", + "localField": "movie_id", + "foreignField": "_id", + "let": { + "scope_root": "$$ROOT", + }, + "pipeline": [ + { + "$replaceWith": { + "credits": { + "$cond": { + "if": "$credits", + "then": { "director": { "$ifNull": ["$credits.director", null] } }, + "else": null, + } + }, + } + } + ], + "as": "movie" + } + }, + { + "$match": { + "movie": { + "$elemMatch": { + "credits.director": { + "$eq": "Martin Scorsese" + } + } + } + } + }, + { + "$limit": Bson::Int32(50), + }, + { + "$replaceWith": { + "name": { "$ifNull": ["$name", null] }, + "movie": { + "rows": { + "$map": { + "input": "$movie", + "in": { + "credits": "$$this.credits", + } + } + } + }, + } + }, + ]); + + let db = mock_collection_aggregate_response_for_pipeline( + "comments", + expected_pipeline, + 
bson!([{ + "name": "Beric Dondarrion", + "movie": { "rows": [{ + "credits": { + "director": "Martin Scorsese" + } + }] }, + }]), + ); + + let result = execute_query_request(db, &mflix_config(), query_request).await?; + assert_eq!(expected_response, result); + + Ok(()) + } fn students_config() -> MongoConfiguration { MongoConfiguration(Configuration { @@ -957,4 +1047,23 @@ mod tests { options: Default::default(), }) } + + fn test_cases_config() -> MongoConfiguration { + MongoConfiguration(Configuration { + collections: [collection("weird_field_names")].into(), + object_types: [( + "weird_field_names".into(), + object_type([ + ("_id", named_type("ObjectId")), + ("$invalid.name", named_type("Int")), + ]), + )] + .into(), + functions: Default::default(), + procedures: Default::default(), + native_mutations: Default::default(), + native_queries: Default::default(), + options: Default::default(), + }) + } } diff --git a/crates/mongodb-agent-common/src/query/response.rs b/crates/mongodb-agent-common/src/query/response.rs index cec6f1b8..f3068683 100644 --- a/crates/mongodb-agent-common/src/query/response.rs +++ b/crates/mongodb-agent-common/src/query/response.rs @@ -1,21 +1,27 @@ -use std::collections::BTreeMap; +use std::{borrow::Cow, collections::BTreeMap}; -use configuration::MongoScalarType; +use configuration::{ConfigurationSerializationOptions, MongoScalarType, OnResponseTypeMismatch}; use indexmap::IndexMap; use itertools::Itertools; -use mongodb::bson::{self, Bson}; -use mongodb_support::ExtendedJsonMode; -use ndc_models::{QueryResponse, RowFieldValue, RowSet}; -use serde::Deserialize; +use mongodb::bson::{self, doc, Bson}; +use ndc_models::{FieldName, Group, QueryResponse, RowFieldValue, RowSet}; +use serde_json::json; use thiserror::Error; use tracing::instrument; use crate::{ + constants::{ + BsonRowSet, GROUP_DIMENSIONS_KEY, ROW_SET_AGGREGATES_KEY, ROW_SET_GROUPS_KEY, + ROW_SET_ROWS_KEY, + }, mongo_query_plan::{ - Aggregate, Field, NestedArray, NestedField, NestedObject, ObjectType, Query, QueryPlan, - Type, + Aggregate, Dimension, Field, Grouping, NestedArray, NestedField, NestedObject, ObjectField, + ObjectType, Query, QueryPlan, Type, + }, + query::{ + is_response_faceted::ResponseFacets, + serialization::{bson_to_json, BsonToJsonError}, }, - query::serialization::{bson_to_json, BsonToJsonError}, }; use super::serialization::is_nullable; @@ -31,6 +37,9 @@ pub enum QueryResponseError { #[error("{0}")] BsonToJson(#[from] BsonToJsonError), + #[error("a group response is missing its '{GROUP_DIMENSIONS_KEY}' field")] + GroupMissingDimensions { path: Vec }, + #[error("expected a single response document from MongoDB, but did not get one")] ExpectedSingleDocument, @@ -40,17 +49,9 @@ pub enum QueryResponseError { type Result = std::result::Result; -#[derive(Debug, Deserialize)] -struct BsonRowSet { - #[serde(default)] - aggregates: Bson, - #[serde(default)] - rows: Vec, -} - #[instrument(name = "Serialize Query Response", skip_all, fields(internal.visibility = "user"))] pub fn serialize_query_response( - mode: ExtendedJsonMode, + options: &ConfigurationSerializationOptions, query_plan: &QueryPlan, response_documents: Vec, ) -> Result { @@ -61,38 +62,55 @@ pub fn serialize_query_response( .into_iter() .map(|document| { let row_set = bson::from_document(document)?; - serialize_row_set_with_aggregates( - mode, + serialize_row_set( + options, &[collection_name.as_str()], &query_plan.query, row_set, ) }) .try_collect() - } else if query_plan.query.has_aggregates() { - let row_set = 
parse_single_document(response_documents)?; - Ok(vec![serialize_row_set_with_aggregates( - mode, - &[], - &query_plan.query, - row_set, - )?]) } else { - Ok(vec![serialize_row_set_rows_only( - mode, - &[], - &query_plan.query, - response_documents, - )?]) + match ResponseFacets::from_query(&query_plan.query) { + ResponseFacets::Combination { .. } => { + let row_set = parse_single_document(response_documents)?; + Ok(vec![serialize_row_set( + options, + &[], + &query_plan.query, + row_set, + )?]) + } + ResponseFacets::AggregatesOnly(aggregates) => { + Ok(vec![serialize_row_set_aggregates_only( + options, + &[], + aggregates, + response_documents, + )?]) + } + ResponseFacets::FieldsOnly(_) => Ok(vec![serialize_row_set_rows_only( + options, + &[], + &query_plan.query, + response_documents, + )?]), + ResponseFacets::GroupsOnly(grouping) => Ok(vec![serialize_row_set_groups_only( + options, + &[], + grouping, + response_documents, + )?]), + } }?; let response = QueryResponse(row_sets); tracing::debug!(query_response = %serde_json::to_string(&response).unwrap()); Ok(response) } -// When there are no aggregates we expect a list of rows +// When there are no aggregates or groups we expect a list of rows fn serialize_row_set_rows_only( - mode: ExtendedJsonMode, + options: &ConfigurationSerializationOptions, path: &[&str], query: &Query, docs: Vec, @@ -100,19 +118,47 @@ fn serialize_row_set_rows_only( let rows = query .fields .as_ref() - .map(|fields| serialize_rows(mode, path, fields, docs)) + .map(|fields| serialize_rows(options, path, fields, docs)) .transpose()?; Ok(RowSet { aggregates: None, rows, + groups: None, + }) +} + +fn serialize_row_set_aggregates_only( + options: &ConfigurationSerializationOptions, + path: &[&str], + aggregates: &IndexMap, + docs: Vec, +) -> Result { + let doc = docs.first().cloned().unwrap_or(doc! {}); + Ok(RowSet { + aggregates: Some(serialize_aggregates(options, path, aggregates, doc)?), + rows: None, + groups: None, + }) +} + +fn serialize_row_set_groups_only( + options: &ConfigurationSerializationOptions, + path: &[&str], + grouping: &Grouping, + docs: Vec, +) -> Result { + Ok(RowSet { + aggregates: None, + rows: None, + groups: Some(serialize_groups(options, path, grouping, docs)?), }) } -// When there are aggregates we expect a single document with `rows` and `aggregates` -// fields -fn serialize_row_set_with_aggregates( - mode: ExtendedJsonMode, +// When a query includes some combination of aggregates, rows, or groups then the response is +// "faceted" to give us a single document with `rows`, `aggregates`, and `groups` fields. +fn serialize_row_set( + options: &ConfigurationSerializationOptions, path: &[&str], query: &Query, row_set: BsonRowSet, @@ -120,59 +166,140 @@ fn serialize_row_set_with_aggregates( let aggregates = query .aggregates .as_ref() - .map(|aggregates| serialize_aggregates(mode, path, aggregates, row_set.aggregates)) + .map(|aggregates| { + let aggregate_values = row_set.aggregates.unwrap_or_else(|| doc! 
{}); + serialize_aggregates(options, path, aggregates, aggregate_values) + }) + .transpose()?; + + let groups = query + .groups + .as_ref() + .map(|grouping| serialize_groups(options, path, grouping, row_set.groups)) .transpose()?; let rows = query .fields .as_ref() - .map(|fields| serialize_rows(mode, path, fields, row_set.rows)) + .map(|fields| serialize_rows(options, path, fields, row_set.rows)) .transpose()?; - Ok(RowSet { aggregates, rows }) + Ok(RowSet { + aggregates, + rows, + groups, + }) } fn serialize_aggregates( - mode: ExtendedJsonMode, - path: &[&str], + options: &ConfigurationSerializationOptions, + _path: &[&str], query_aggregates: &IndexMap, - value: Bson, + value: bson::Document, ) -> Result> { - let aggregates_type = type_for_aggregates(query_aggregates); - let json = bson_to_json(mode, &aggregates_type, value)?; - - // The NDC type uses an IndexMap for aggregate values; we need to convert the map - // underlying the Value::Object value to an IndexMap - let aggregate_values = match json { - serde_json::Value::Object(obj) => obj.into_iter().map(|(k, v)| (k.into(), v)).collect(), - _ => Err(QueryResponseError::AggregatesNotObject { - path: path_to_owned(path), - })?, - }; + // The NDC type uses an IndexMap for aggregate values; we need to convert the map underlying + // the Value::Object value to an IndexMap. + // + // We also need to fill in missing aggregate values. This can be an issue in a query that does + // not match any documents. In that case instead of an object with null aggregate values + // MongoDB does not return any documents, so this function gets an empty document. + let aggregate_values = query_aggregates + .iter() + .map(|(key, aggregate)| { + let json_value = match value.get(key.as_str()).cloned() { + Some(bson_value) => bson_to_json( + options.extended_json_mode, + &type_for_aggregate(aggregate), + bson_value, + )?, + None => { + if aggregate.is_count() { + json!(0) + } else { + json!(null) + } + } + }; + Ok((key.clone(), json_value)) + }) + .collect::>()?; Ok(aggregate_values) } fn serialize_rows( - mode: ExtendedJsonMode, + options: &ConfigurationSerializationOptions, path: &[&str], query_fields: &IndexMap, docs: Vec, ) -> Result>> { let row_type = type_for_row(path, query_fields)?; - docs.into_iter() - .map(|doc| { - let json = bson_to_json(mode, &row_type, doc.into())?; + let rows = docs + .into_iter() + .filter_map( + |doc| match bson_to_json(options.extended_json_mode, &row_type, doc.into()) { + Ok(json) => Some(Ok(json)), + Err(BsonToJsonError::TypeMismatch(_, _)) + if options.on_response_type_mismatch == OnResponseTypeMismatch::SkipRow => + { + None + } + Err(error) => Some(Err(error)), + }, + ) + .map_ok(|json| { // The NDC types use an IndexMap for each row value; we need to convert the map // underlying the Value::Object value to an IndexMap - let index_map = match json { + match json { serde_json::Value::Object(obj) => obj .into_iter() .map(|(key, value)| (key.into(), RowFieldValue(value))) .collect(), _ => unreachable!(), + } + }) + .try_collect()?; + Ok(rows) +} + +fn serialize_groups( + options: &ConfigurationSerializationOptions, + path: &[&str], + grouping: &Grouping, + docs: Vec, +) -> Result> { + docs.into_iter() + .map(|doc| { + let dimensions_field_value = doc.get(GROUP_DIMENSIONS_KEY).ok_or_else(|| { + QueryResponseError::GroupMissingDimensions { + path: path_to_owned(path), + } + })?; + + let dimensions_array = match dimensions_field_value { + Bson::Array(vec) => Cow::Borrowed(vec), + other_bson_value => 
Cow::Owned(vec![other_bson_value.clone()]), }; - Ok(index_map) + + let dimensions = grouping + .dimensions + .iter() + .zip(dimensions_array.iter()) + .map(|(dimension_definition, dimension_value)| { + Ok(bson_to_json( + options.extended_json_mode, + dimension_definition.value_type(), + dimension_value.clone(), + )?) + }) + .collect::>()?; + + let aggregates = serialize_aggregates(options, path, &grouping.aggregates, doc)?; + + Ok(Group { + dimensions, + aggregates, + }) }) .try_collect() } @@ -181,43 +308,87 @@ fn type_for_row_set( path: &[&str], aggregates: &Option>, fields: &Option>, + groups: &Option, ) -> Result { - let mut type_fields = BTreeMap::new(); + let mut object_fields = BTreeMap::new(); if let Some(aggregates) = aggregates { - type_fields.insert("aggregates".into(), type_for_aggregates(aggregates)); + object_fields.insert( + ROW_SET_AGGREGATES_KEY.into(), + ObjectField { + r#type: Type::Object(type_for_aggregates(aggregates)), + parameters: Default::default(), + }, + ); } if let Some(query_fields) = fields { let row_type = type_for_row(path, query_fields)?; - type_fields.insert("rows".into(), Type::ArrayOf(Box::new(row_type))); + object_fields.insert( + ROW_SET_ROWS_KEY.into(), + ObjectField { + r#type: Type::ArrayOf(Box::new(row_type)), + parameters: Default::default(), + }, + ); + } + + if let Some(grouping) = groups { + let dimension_types = grouping + .dimensions + .iter() + .map(Dimension::value_type) + .cloned() + .collect(); + let dimension_tuple_type = Type::Tuple(dimension_types); + let mut group_object_type = type_for_aggregates(&grouping.aggregates); + group_object_type + .fields + .insert(GROUP_DIMENSIONS_KEY.into(), dimension_tuple_type.into()); + object_fields.insert( + ROW_SET_GROUPS_KEY.into(), + ObjectField { + r#type: Type::array_of(Type::Object(group_object_type)), + parameters: Default::default(), + }, + ); } Ok(Type::Object(ObjectType { - fields: type_fields, + fields: object_fields, name: None, })) } -fn type_for_aggregates(query_aggregates: &IndexMap) -> Type { +fn type_for_aggregates( + query_aggregates: &IndexMap, +) -> ObjectType { let fields = query_aggregates .iter() .map(|(field_name, aggregate)| { + let result_type = type_for_aggregate(aggregate); ( field_name.to_string().into(), - match aggregate { - Aggregate::ColumnCount { .. } => { - Type::Scalar(MongoScalarType::Bson(mongodb_support::BsonScalarType::Int)) - } - Aggregate::StarCount => { - Type::Scalar(MongoScalarType::Bson(mongodb_support::BsonScalarType::Int)) - } - Aggregate::SingleColumn { result_type, .. } => result_type.clone(), + ObjectField { + r#type: result_type, + parameters: Default::default(), }, ) }) .collect(); - Type::Object(ObjectType { fields, name: None }) + ObjectType { fields, name: None } +} + +fn type_for_aggregate(aggregate: &Aggregate) -> Type { + match aggregate { + Aggregate::ColumnCount { .. } => { + Type::Scalar(MongoScalarType::Bson(mongodb_support::BsonScalarType::Int)) + } + Aggregate::StarCount => { + Type::Scalar(MongoScalarType::Bson(mongodb_support::BsonScalarType::Int)) + } + Aggregate::SingleColumn { result_type, .. 
} => result_type.clone(), + } } fn type_for_row( @@ -231,7 +402,11 @@ fn type_for_row( &append_to_path(path, [field_name.as_str()]), field_definition, )?; - Ok((field_name.clone(), field_type)) + let object_field = ObjectField { + r#type: field_type, + parameters: Default::default(), + }; + Ok((field_name.clone(), object_field)) }) .try_collect::<_, _, QueryResponseError>()?; Ok(Type::Object(ObjectType { fields, name: None })) @@ -250,8 +425,11 @@ fn type_for_field(path: &[&str], field_definition: &Field) -> Result { .. } => type_for_nested_field(path, column_type, nested_field)?, Field::Relationship { - aggregates, fields, .. - } => type_for_row_set(path, aggregates, fields)?, + aggregates, + fields, + groups, + .. + } => type_for_row_set(path, aggregates, fields, groups)?, }; Ok(field_type) } @@ -322,9 +500,12 @@ fn path_to_owned(path: &[&str]) -> Vec { mod tests { use std::str::FromStr; - use configuration::{Configuration, MongoScalarType}; + use configuration::{ + Configuration, ConfigurationOptions, ConfigurationSerializationOptions, MongoScalarType, + OnResponseTypeMismatch, + }; use mongodb::bson::{self, Bson}; - use mongodb_support::{BsonScalarType, ExtendedJsonMode}; + use mongodb_support::BsonScalarType; use ndc_models::{QueryRequest, QueryResponse, RowFieldValue, RowSet}; use ndc_query_plan::plan_for_query_request; use ndc_test_helpers::{ @@ -336,7 +517,7 @@ mod tests { use crate::{ mongo_query_plan::{MongoConfiguration, ObjectType, Type}, - test_helpers::make_nested_schema, + test_helpers::{chinook_config, chinook_relationships, make_nested_schema}, }; use super::{serialize_query_response, type_for_row_set}; @@ -364,7 +545,7 @@ mod tests { }]; let response = - serialize_query_response(ExtendedJsonMode::Canonical, &query_plan, response_documents)?; + serialize_query_response(&Default::default(), &query_plan, response_documents)?; assert_eq!( response, QueryResponse(vec![RowSet { @@ -379,6 +560,7 @@ mod tests { })) )] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -404,7 +586,7 @@ mod tests { }]; let response = - serialize_query_response(ExtendedJsonMode::Canonical, &query_plan, response_documents)?; + serialize_query_response(&Default::default(), &query_plan, response_documents)?; assert_eq!( response, QueryResponse(vec![RowSet { @@ -417,6 +599,7 @@ mod tests { ])) )] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -451,7 +634,7 @@ mod tests { }]; let response = - serialize_query_response(ExtendedJsonMode::Canonical, &query_plan, response_documents)?; + serialize_query_response(&Default::default(), &query_plan, response_documents)?; assert_eq!( response, QueryResponse(vec![RowSet { @@ -473,6 +656,7 @@ mod tests { ) ] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -509,8 +693,11 @@ mod tests { "price_extjson": Bson::Decimal128(bson::Decimal128::from_str("-4.9999999999").unwrap()), }]; - let response = - serialize_query_response(ExtendedJsonMode::Canonical, &query_plan, response_documents)?; + let response = serialize_query_response( + query_context.serialization_options(), + &query_plan, + response_documents, + )?; assert_eq!( response, QueryResponse(vec![RowSet { @@ -525,6 +712,7 @@ mod tests { ), ] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -567,8 +755,11 @@ mod tests { }, }]; - let response = - serialize_query_response(ExtendedJsonMode::Canonical, &query_plan, response_documents)?; + let response = serialize_query_response( + query_context.serialization_options(), + &query_plan, + response_documents, + )?; assert_eq!( response, 
QueryResponse(vec![RowSet { @@ -588,6 +779,7 @@ mod tests { })) )] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -602,11 +794,14 @@ mod tests { object_type([("value", named_type("ExtendedJSON"))]), )] .into(), - functions: Default::default(), - procedures: Default::default(), - native_mutations: Default::default(), - native_queries: Default::default(), - options: Default::default(), + options: ConfigurationOptions { + serialization_options: ConfigurationSerializationOptions { + extended_json_mode: mongodb_support::ExtendedJsonMode::Relaxed, + ..Default::default() + }, + ..Default::default() + }, + ..Default::default() }); let request = query_request() @@ -630,8 +825,11 @@ mod tests { }, }]; - let response = - serialize_query_response(ExtendedJsonMode::Relaxed, &query_plan, response_documents)?; + let response = serialize_query_response( + query_context.serialization_options(), + &query_plan, + response_documents, + )?; assert_eq!( response, QueryResponse(vec![RowSet { @@ -651,6 +849,7 @@ mod tests { })) )] .into()]), + groups: Default::default(), }]) ); Ok(()) @@ -661,7 +860,7 @@ mod tests { let collection_name = "appearances"; let request: QueryRequest = query_request() .collection(collection_name) - .relationships([("author", relationship("authors", [("authorId", "id")]))]) + .relationships([("author", relationship("authors", [("authorId", &["id"])]))]) .query( query().fields([relation_field!("presenter" => "author", query().fields([ field!("addr" => "address", object!([ @@ -684,49 +883,188 @@ mod tests { &path, &query_plan.query.aggregates, &query_plan.query.fields, + &query_plan.query.groups, )?; - let expected = Type::Object(ObjectType { - name: None, - fields: [ - ("rows".into(), Type::ArrayOf(Box::new(Type::Object(ObjectType { - name: None, - fields: [ - ("presenter".into(), Type::Object(ObjectType { - name: None, - fields: [ - ("rows".into(), Type::ArrayOf(Box::new(Type::Object(ObjectType { - name: None, - fields: [ - ("addr".into(), Type::Object(ObjectType { - name: None, - fields: [ - ("geocode".into(), Type::Nullable(Box::new(Type::Object(ObjectType { - name: None, - fields: [ - ("latitude".into(), Type::Scalar(MongoScalarType::Bson(BsonScalarType::Double))), - ("long".into(), Type::Scalar(MongoScalarType::Bson(BsonScalarType::Double))), - ].into(), - })))), - ("street".into(), Type::Scalar(MongoScalarType::Bson(BsonScalarType::String))), - ].into(), - })), - ("articles".into(), Type::ArrayOf(Box::new(Type::Object(ObjectType { - name: None, - fields: [ - ("article_title".into(), Type::Scalar(MongoScalarType::Bson(BsonScalarType::String))), - ].into(), - })))), - ].into(), - })))) - ].into(), - })) - ].into() - })))) - ].into(), - }); + let expected = Type::object([( + "rows", + Type::array_of(Type::Object(ObjectType::new([( + "presenter", + Type::object([( + "rows", + Type::array_of(Type::object([ + ( + "addr", + Type::object([ + ( + "geocode", + Type::nullable(Type::object([ + ( + "latitude", + Type::Scalar(MongoScalarType::Bson( + BsonScalarType::Double, + )), + ), + ( + "long", + Type::Scalar(MongoScalarType::Bson( + BsonScalarType::Double, + )), + ), + ])), + ), + ( + "street", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + ), + ]), + ), + ( + "articles", + Type::array_of(Type::object([( + "article_title", + Type::Scalar(MongoScalarType::Bson(BsonScalarType::String)), + )])), + ), + ])), + )]), + )]))), + )]); assert_eq!(row_set_type, expected); Ok(()) } + + #[test] + fn fails_on_response_type_mismatch() -> anyhow::Result<()> { + let options = 
ConfigurationSerializationOptions { + on_response_type_mismatch: OnResponseTypeMismatch::Fail, + ..Default::default() + }; + + let request = query_request() + .collection("Track") + .query(query().fields([field!("Milliseconds")])) + .into(); + + let query_plan = plan_for_query_request(&chinook_config(), request)?; + + let response_documents = vec![ + bson::doc! { "Milliseconds": 1 }, + bson::doc! { "Milliseconds": "two" }, + bson::doc! { "Milliseconds": 3 }, + ]; + + let response_result = serialize_query_response(&options, &query_plan, response_documents); + assert!( + response_result.is_err(), + "serialize_query_response returns an error" + ); + Ok(()) + } + + #[test] + fn skips_rows_with_unexpected_data_type() -> anyhow::Result<()> { + let options = ConfigurationSerializationOptions { + on_response_type_mismatch: OnResponseTypeMismatch::SkipRow, + ..Default::default() + }; + + let request = query_request() + .collection("Track") + .query(query().fields([field!("Milliseconds")])) + .into(); + + let query_plan = plan_for_query_request(&chinook_config(), request)?; + + let response_documents = vec![ + bson::doc! { "Milliseconds": 1 }, + bson::doc! { "Milliseconds": "two" }, + bson::doc! { "Milliseconds": 3 }, + ]; + + let response = serialize_query_response(&options, &query_plan, response_documents)?; + assert_eq!( + response, + QueryResponse(vec![RowSet { + aggregates: Default::default(), + rows: Some(vec![ + [("Milliseconds".into(), RowFieldValue(json!(1)))].into(), + [("Milliseconds".into(), RowFieldValue(json!(3)))].into(), + ]), + groups: Default::default(), + }]) + ); + Ok(()) + } + + #[test] + fn fails_on_response_type_mismatch_in_related_collection() -> anyhow::Result<()> { + let options = ConfigurationSerializationOptions { + on_response_type_mismatch: OnResponseTypeMismatch::Fail, + ..Default::default() + }; + + let request = query_request() + .collection("Album") + .query( + query().fields([relation_field!("Tracks" => "Tracks", query().fields([ + field!("Milliseconds") + ]))]), + ) + .relationships(chinook_relationships()) + .into(); + + let query_plan = plan_for_query_request(&chinook_config(), request)?; + + let response_documents = vec![bson::doc! { "Tracks": { "rows": [ + bson::doc! { "Milliseconds": 1 }, + bson::doc! { "Milliseconds": "two" }, + bson::doc! { "Milliseconds": 3 }, + ] } }]; + + let response_result = serialize_query_response(&options, &query_plan, response_documents); + assert!( + response_result.is_err(), + "serialize_query_response returns an error" + ); + Ok(()) + } + + #[test] + fn skips_rows_with_unexpected_data_type_in_related_collection() -> anyhow::Result<()> { + let options = ConfigurationSerializationOptions { + on_response_type_mismatch: OnResponseTypeMismatch::SkipRow, + ..Default::default() + }; + + let request = query_request() + .collection("Album") + .query( + query().fields([relation_field!("Tracks" => "Tracks", query().fields([ + field!("Milliseconds") + ]))]), + ) + .relationships(chinook_relationships()) + .into(); + + let query_plan = plan_for_query_request(&chinook_config(), request)?; + + let response_documents = vec![bson::doc! { "Tracks": { "rows": [ + bson::doc! { "Milliseconds": 1 }, + bson::doc! { "Milliseconds": "two" }, + bson::doc! 
{ "Milliseconds": 3 }, + ] } }]; + + let response = serialize_query_response(&options, &query_plan, response_documents)?; + assert_eq!( + response, + QueryResponse(vec![RowSet { + aggregates: Default::default(), + rows: Some(vec![]), + groups: Default::default(), + }]) + ); + Ok(()) + } } diff --git a/crates/mongodb-agent-common/src/mongodb/selection.rs b/crates/mongodb-agent-common/src/query/selection.rs similarity index 52% rename from crates/mongodb-agent-common/src/mongodb/selection.rs rename to crates/mongodb-agent-common/src/query/selection.rs index 4c8c2ee8..e65f8c78 100644 --- a/crates/mongodb-agent-common/src/mongodb/selection.rs +++ b/crates/mongodb-agent-common/src/query/selection.rs @@ -1,82 +1,74 @@ use indexmap::IndexMap; -use mongodb::bson::{self, doc, Bson, Document}; -use serde::{Deserialize, Serialize}; +use mongodb::bson::{doc, Bson, Document}; +use mongodb_support::aggregate::Selection; +use ndc_models::FieldName; +use nonempty::NonEmpty; use crate::{ + constants::{ + GROUP_DIMENSIONS_KEY, ROW_SET_AGGREGATES_KEY, ROW_SET_GROUPS_KEY, ROW_SET_ROWS_KEY, + }, interface_types::MongoAgentError, - mongo_query_plan::{Field, NestedArray, NestedField, NestedObject, QueryPlan}, - mongodb::sanitize::get_field, + mongo_query_plan::{Aggregate, Field, Grouping, NestedArray, NestedField, NestedObject}, + query::column_ref::ColumnRef, }; -/// Wraps a BSON document that represents a MongoDB "expression" that constructs a document based -/// on the output of a previous aggregation pipeline stage. A Selection value is intended to be -/// used as the argument to a $replaceWith pipeline stage. -/// -/// When we compose pipelines, we can pair each Pipeline with a Selection that extracts the data we -/// want, in the format we want it to provide to HGE. We can collect Selection values and merge -/// them to form one stage after all of the composed pipelines. -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -#[serde(transparent)] -pub struct Selection(pub bson::Document); +use super::{aggregates::replace_missing_aggregate_value, is_response_faceted::ResponseFacets}; -impl Selection { - pub fn from_doc(doc: bson::Document) -> Self { - Selection(doc) - } - - pub fn from_query_request(query_request: &QueryPlan) -> Result { - // let fields = (&query_request.query.fields).flatten().unwrap_or_default(); - let empty_map = IndexMap::new(); - let fields = if let Some(fs) = &query_request.query.fields { - fs - } else { - &empty_map - }; - let doc = from_query_request_helper(&[], fields)?; - Ok(Selection(doc)) - } +/// Creates a document to use in a $replaceWith stage to limit query results to the specific fields +/// requested. Assumes that only fields are requested. +pub fn selection_for_fields( + fields: Option<&IndexMap>, +) -> Result { + let empty_map = IndexMap::new(); + let fields = if let Some(fs) = fields { + fs + } else { + &empty_map + }; + let doc = for_fields_helper(None, fields)?; + Ok(Selection::new(doc)) } -fn from_query_request_helper( - parent_columns: &[&str], +fn for_fields_helper( + parent: Option>, field_selection: &IndexMap, ) -> Result { field_selection .iter() - .map(|(key, value)| Ok((key.to_string(), selection_for_field(parent_columns, value)?))) + .map(|(key, value)| Ok((key.to_string(), selection_for_field(parent.clone(), value)?))) .collect() } /// Wraps column reference with an `$isNull` check. That catches cases where a field is missing /// from a document, and substitutes a concrete null value. 
Otherwise the field would be omitted /// from query results which leads to an error in the engine. -fn value_or_null(col_path: String) -> Bson { - doc! { "$ifNull": [col_path, Bson::Null] }.into() +fn value_or_null(value: Bson) -> Bson { + doc! { "$ifNull": [value, Bson::Null] }.into() } -fn selection_for_field(parent_columns: &[&str], field: &Field) -> Result { +fn selection_for_field( + parent: Option>, + field: &Field, +) -> Result { match field { Field::Column { column, fields: None, .. } => { - let col_path = match parent_columns { - [] => format!("${column}"), - _ => format!("${}.{}", parent_columns.join("."), column), - }; - let bson_col_path = value_or_null(col_path); - Ok(bson_col_path) + let col_ref = nested_column_reference(parent, column); + let col_ref_or_null = value_or_null(col_ref.into_aggregate_expression().into_bson()); + Ok(col_ref_or_null) } Field::Column { column, fields: Some(NestedField::Object(NestedObject { fields })), .. } => { - let nested_parent_columns = append_to_path(parent_columns, column.as_str()); - let nested_parent_col_path = format!("${}", nested_parent_columns.join(".")); - let nested_selection = from_query_request_helper(&nested_parent_columns, fields)?; - Ok(doc! {"$cond": {"if": nested_parent_col_path, "then": nested_selection, "else": Bson::Null}}.into()) + let col_ref = nested_column_reference(parent, column); + let nested_selection = for_fields_helper(Some(col_ref.clone()), fields)?; + Ok(doc! {"$cond": {"if": col_ref.into_aggregate_expression(), "then": nested_selection, "else": Bson::Null}}.into()) } Field::Column { column, @@ -85,127 +77,192 @@ fn selection_for_field(parent_columns: &[&str], field: &Field) -> Result selection_for_array( - &append_to_path(parent_columns, column.as_str()), - nested_field, - 0, - ), + } => selection_for_array(nested_column_reference(parent, column), nested_field, 0), Field::Relationship { relationship, aggregates, fields, + groups, .. } => { + // TODO: ENG-1569 If we get a unification of two relationship references where one + // selects only fields, and the other selects only groups, we may end up in a broken + // state where the response should be faceted but is not. Data will be populated + // correctly - the issue is only here where we need to figure out whether to write + // a selection for faceted data or not. Instead of referencing the + // [Field::Relationship] value to determine faceting we need to reference the + // [Relationship] attached to the [Query] that populated it. + // The pipeline for the relationship has already selected the requested fields with the // appropriate aliases. At this point all we need to do is to prune the selection down // to requested fields, omitting fields of the relationship that were selected for // filtering and sorting. 
- let field_selection: Option = fields.as_ref().map(|fields| { + fn field_selection(fields: &IndexMap) -> Document { fields .iter() .map(|(field_name, _)| { ( field_name.to_string(), - format!("$$this.{field_name}").into(), + ColumnRef::variable("this") + .into_nested_field(field_name.as_ref()) + .into_aggregate_expression() + .into_bson(), ) }) .collect() - }); + } - if let Some(aggregates) = aggregates { - let aggregate_selecion: Document = aggregates - .iter() - .map(|(aggregate_name, _)| { - ( - aggregate_name.to_string(), - format!("$$row_set.aggregates.{aggregate_name}").into(), - ) + fn aggregates_selection( + from: ColumnRef<'_>, + aggregates: &IndexMap, + check_for_null: bool, + ) -> Document { + aggregates + .into_iter() + .map(|(aggregate_name, aggregate)| { + let value_ref = from + .clone() + .into_nested_field(aggregate_name.as_ref()) + .into_aggregate_expression() + .into_bson(); + let value_ref = if check_for_null { + replace_missing_aggregate_value(value_ref, aggregate.is_count()) + } else { + value_ref + }; + (aggregate_name.to_string(), value_ref) }) - .collect(); - let mut new_row_set = doc! { "aggregates": aggregate_selecion }; + .collect() + } - if let Some(field_selection) = field_selection { - new_row_set.insert( - "rows", - doc! { - "$map": { - "input": "$$row_set.rows", - "in": field_selection, - } - }, - ); - } + fn group_selection(from: ColumnRef<'_>, grouping: &Grouping) -> Document { + let mut selection = aggregates_selection(from, &grouping.aggregates, false); + selection.insert( + GROUP_DIMENSIONS_KEY, + ColumnRef::variable("this") + .into_nested_field(GROUP_DIMENSIONS_KEY) + .into_aggregate_expression(), + ); + selection + } - Ok(doc! { - "$let": { - "vars": { "row_set": { "$first": get_field(relationship.as_str()) } }, - "in": new_row_set, + // Field of the incoming pipeline document that contains data fetched for the + // relationship. + let relationship_field = ColumnRef::from_field(relationship.as_ref()); + + let doc = match ResponseFacets::from_parameters( + aggregates.as_ref(), + fields.as_ref(), + groups.as_ref(), + ) { + ResponseFacets::Combination { + aggregates, + fields, + groups, + } => { + let mut new_row_set = Document::new(); + + if let Some(aggregates) = aggregates { + new_row_set.insert( + ROW_SET_AGGREGATES_KEY, + aggregates_selection( + ColumnRef::variable("row_set") + .into_nested_field(ROW_SET_AGGREGATES_KEY), + aggregates, + false, + ), + ); + } + + if let Some(fields) = fields { + new_row_set.insert( + ROW_SET_ROWS_KEY, + doc! { + "$map": { + "input": ColumnRef::variable("row_set").into_nested_field(ROW_SET_ROWS_KEY).into_aggregate_expression(), + "in": field_selection(fields), + } + }, + ); + } + + if let Some(grouping) = groups { + new_row_set.insert( + ROW_SET_GROUPS_KEY, + doc! { + "$map": { + "input": ColumnRef::variable("row_set").into_nested_field(ROW_SET_GROUPS_KEY).into_aggregate_expression(), + "in": group_selection(ColumnRef::variable("this"), grouping), + } + }, + ); + } + + doc! { + "$let": { + "vars": { "row_set": { "$first": relationship_field.into_aggregate_expression() } }, + "in": new_row_set, + } } } - .into()) - } else if let Some(field_selection) = field_selection { - Ok(doc! { - "rows": { + ResponseFacets::AggregatesOnly(aggregates) => doc! { + ROW_SET_AGGREGATES_KEY: { + "$let": { + "vars": { "aggregates": { "$first": relationship_field.into_aggregate_expression() } }, + "in": aggregates_selection(ColumnRef::variable("aggregates"), aggregates, true), + } + } + }, + ResponseFacets::FieldsOnly(fields) => doc! 
{ + ROW_SET_ROWS_KEY: { "$map": { - "input": get_field(relationship.as_str()), - "in": field_selection, + "input": relationship_field.into_aggregate_expression(), + "in": field_selection(fields), } } - } - .into()) - } else { - Ok(doc! { "rows": [] }.into()) - } + }, + ResponseFacets::GroupsOnly(grouping) => doc! { + ROW_SET_GROUPS_KEY: { + "$map": { + "input": relationship_field.into_aggregate_expression(), + "in": group_selection(ColumnRef::variable("this"), grouping), + } + } + }, + }; + Ok(doc.into()) } } } fn selection_for_array( - parent_columns: &[&str], + parent: ColumnRef<'_>, field: &NestedField, array_nesting_level: usize, ) -> Result { match field { NestedField::Object(NestedObject { fields }) => { - let nested_parent_col_path = format!("${}", parent_columns.join(".")); - let mut nested_selection = from_query_request_helper(&["$this"], fields)?; + let mut nested_selection = + for_fields_helper(Some(ColumnRef::variable("this")), fields)?; for _ in 0..array_nesting_level { nested_selection = doc! {"$map": {"input": "$$this", "in": nested_selection}} } - let map_expression = - doc! {"$map": {"input": &nested_parent_col_path, "in": nested_selection}}; - Ok(doc! {"$cond": {"if": &nested_parent_col_path, "then": map_expression, "else": Bson::Null}}.into()) + let map_expression = doc! {"$map": {"input": parent.clone().into_aggregate_expression(), "in": nested_selection}}; + Ok(doc! {"$cond": {"if": parent.into_aggregate_expression(), "then": map_expression, "else": Bson::Null}}.into()) } NestedField::Array(NestedArray { fields: nested_field, - }) => selection_for_array(parent_columns, nested_field, array_nesting_level + 1), - } -} -fn append_to_path<'a, 'b, 'c>(parent_columns: &'a [&'b str], column: &'c str) -> Vec<&'c str> -where - 'b: 'c, -{ - parent_columns.iter().copied().chain(Some(column)).collect() -} - -/// The extend implementation provides a shallow merge. -impl Extend<(String, Bson)> for Selection { - fn extend>(&mut self, iter: T) { - self.0.extend(iter); + }) => selection_for_array(parent, nested_field, array_nesting_level + 1), } } -impl From for bson::Document { - fn from(value: Selection) -> Self { - value.0 - } -} - -// This won't fail, but it might in the future if we add some sort of validation or parsing. -impl TryFrom for Selection { - type Error = anyhow::Error; - fn try_from(value: bson::Document) -> Result { - Ok(Selection(value)) +fn nested_column_reference<'a>( + parent: Option>, + column: &'a FieldName, +) -> ColumnRef<'a> { + match parent { + Some(parent) => parent.into_nested_field(column.as_ref()), + None => ColumnRef::from_field_path(NonEmpty::singleton(column)), } } @@ -222,7 +279,7 @@ mod tests { use crate::mongo_query_plan::MongoConfiguration; - use super::Selection; + use super::*; #[test] fn calculates_selection_for_query_request() -> Result<(), anyhow::Error> { @@ -250,7 +307,7 @@ mod tests { let query_plan = plan_for_query_request(&foo_config(), query_request)?; - let selection = Selection::from_query_request(&query_plan)?; + let selection = selection_for_fields(query_plan.query.fields.as_ref())?; assert_eq!( Into::::into(selection), doc! 
{ @@ -282,7 +339,11 @@ mod tests { "then": { "$map": { "input": "$os", - "in": {"cat": { "$ifNull": ["$$this.cat", null] }} + "in": { + "cat": { + "$ifNull": ["$$this.cat", null] + } + } } }, "else": null @@ -297,7 +358,11 @@ mod tests { "in": { "$map": { "input": "$$this", - "in": {"cat": { "$ifNull": ["$$this.cat", null] }} + "in": { + "cat": { + "$ifNull": ["$$this.cat", null] + } + } } } } @@ -324,7 +389,7 @@ mod tests { ])) .relationships([( "class_students", - relationship("students", [("_id", "classId")]), + relationship("students", [("_id", &["classId"])]), )]) .into(); @@ -334,14 +399,14 @@ mod tests { // twice (once with the key `class_students`, and then with the key `class_students_0`). // This is because the queries on the two relationships have different scope names. The // query would work with just one lookup. Can we do that optimization? - let selection = Selection::from_query_request(&query_plan)?; + let selection = selection_for_fields(query_plan.query.fields.as_ref())?; assert_eq!( Into::::into(selection), doc! { "class_students": { "rows": { "$map": { - "input": { "$getField": { "$literal": "class_students" } }, + "input": "$class_students", "in": { "name": "$$this.name" }, @@ -351,7 +416,7 @@ mod tests { "students": { "rows": { "$map": { - "input": { "$getField": { "$literal": "class_students_0" } }, + "input": "$class_students_0", "in": { "student_name": "$$this.student_name" }, diff --git a/crates/mongodb-agent-common/src/query/serialization/bson_to_json.rs b/crates/mongodb-agent-common/src/query/serialization/bson_to_json.rs index ead29d93..7cc80e02 100644 --- a/crates/mongodb-agent-common/src/query/serialization/bson_to_json.rs +++ b/crates/mongodb-agent-common/src/query/serialization/bson_to_json.rs @@ -18,14 +18,17 @@ pub enum BsonToJsonError { #[error("error converting 64-bit floating point number from BSON to JSON: {0}")] DoubleConversion(f64), - #[error("input object of type {0:?} is missing a field, \"{1}\"")] + #[error("error converting UUID from BSON to JSON: {0}")] + UuidConversion(#[from] bson::uuid::Error), + + #[error("input object of type {0} is missing a field, \"{1}\"")] MissingObjectField(Type, String), #[error("error converting value to JSON: {0}")] Serde(#[from] serde_json::Error), // TODO: It would be great if we could capture a path into the larger BSON value here - #[error("expected a value of type {0:?}, but got {1}")] + #[error("expected a value of type {0}, but got {1}")] TypeMismatch(Type, Bson), #[error("unknown object type, \"{0}\"")] @@ -49,6 +52,7 @@ pub fn bson_to_json(mode: ExtendedJsonMode, expected_type: &Type, value: Bson) - } Type::Object(object_type) => convert_object(mode, object_type, value), Type::ArrayOf(element_type) => convert_array(mode, element_type, value), + Type::Tuple(element_types) => convert_tuple(mode, element_types, value), Type::Nullable(t) => convert_nullable(mode, t, value), } } @@ -71,7 +75,9 @@ fn bson_scalar_to_json( (BsonScalarType::Double, v) => convert_small_number(expected_type, v), (BsonScalarType::Int, v) => convert_small_number(expected_type, v), (BsonScalarType::Long, Bson::Int64(n)) => Ok(Value::String(n.to_string())), + (BsonScalarType::Long, Bson::Int32(n)) => Ok(Value::String(n.to_string())), (BsonScalarType::Decimal, Bson::Decimal128(n)) => Ok(Value::String(n.to_string())), + (BsonScalarType::Decimal, Bson::Double(n)) => Ok(Value::String(n.to_string())), (BsonScalarType::String, Bson::String(s)) => Ok(Value::String(s)), (BsonScalarType::Symbol, Bson::Symbol(s)) => Ok(Value::String(s)), 
(BsonScalarType::Date, Bson::DateTime(date)) => convert_date(date), @@ -85,6 +91,7 @@ fn bson_scalar_to_json( (BsonScalarType::Timestamp, Bson::Timestamp(v)) => { Ok(to_value::(v.into())?) } + (BsonScalarType::UUID, Bson::Binary(b)) => Ok(serde_json::to_value(b.to_uuid()?)?), (BsonScalarType::BinData, Bson::Binary(b)) => { Ok(to_value::(b.into())?) } @@ -112,6 +119,22 @@ fn convert_array(mode: ExtendedJsonMode, element_type: &Type, value: Bson) -> Re Ok(Value::Array(json_array)) } +fn convert_tuple(mode: ExtendedJsonMode, element_types: &[Type], value: Bson) -> Result { + let values = match value { + Bson::Array(values) => Ok(values), + _ => Err(BsonToJsonError::TypeMismatch( + Type::Tuple(element_types.to_vec()), + value, + )), + }?; + let json_array = element_types + .iter() + .zip(values) + .map(|(element_type, value)| bson_to_json(mode, element_type, value)) + .try_collect()?; + Ok(Value::Array(json_array)) +} + fn convert_object(mode: ExtendedJsonMode, object_type: &ObjectType, value: Bson) -> Result { let input_doc = match value { Bson::Document(fields) => Ok(fields), @@ -230,16 +253,13 @@ mod tests { #[test] fn serializes_document_with_missing_nullable_field() -> anyhow::Result<()> { - let expected_type = Type::Object(ObjectType { - name: Some("test_object".into()), - fields: [( - "field".into(), - Type::Nullable(Box::new(Type::Scalar(MongoScalarType::Bson( - BsonScalarType::String, - )))), - )] - .into(), - }); + let expected_type = Type::named_object( + "test_object", + [( + "field", + Type::nullable(Type::Scalar(MongoScalarType::Bson(BsonScalarType::String))), + )], + ); let value = bson::doc! {}; let actual = bson_to_json(ExtendedJsonMode::Canonical, &expected_type, value.into())?; assert_eq!(actual, json!({})); diff --git a/crates/mongodb-agent-common/src/query/serialization/json_formats.rs b/crates/mongodb-agent-common/src/query/serialization/json_formats.rs index 9ab6c8d0..85a435f9 100644 --- a/crates/mongodb-agent-common/src/query/serialization/json_formats.rs +++ b/crates/mongodb-agent-common/src/query/serialization/json_formats.rs @@ -6,6 +6,25 @@ use mongodb::bson::{self, Bson}; use serde::{Deserialize, Serialize}; use serde_with::{base64::Base64, hex::Hex, serde_as}; +#[derive(Debug, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Either { + Left(T), + Right(U), +} + +impl Either { + pub fn into_left(self) -> T + where + T: From, + { + match self { + Either::Left(l) => l, + Either::Right(r) => r.into(), + } + } +} + #[serde_as] #[derive(Deserialize, Serialize)] #[serde(rename_all = "camelCase")] @@ -84,6 +103,15 @@ impl From for Regex { } } +impl From for Regex { + fn from(value: String) -> Self { + Regex { + pattern: value, + options: String::new(), + } + } +} + #[derive(Deserialize, Serialize)] pub struct Timestamp { t: u32, diff --git a/crates/mongodb-agent-common/src/query/serialization/json_to_bson.rs b/crates/mongodb-agent-common/src/query/serialization/json_to_bson.rs index 5dff0be0..7c04b91a 100644 --- a/crates/mongodb-agent-common/src/query/serialization/json_to_bson.rs +++ b/crates/mongodb-agent-common/src/query/serialization/json_to_bson.rs @@ -66,16 +66,18 @@ pub fn json_to_bson(expected_type: &Type, value: Value) -> Result { Type::Object(object_type) => convert_object(object_type, value), Type::ArrayOf(element_type) => convert_array(element_type, value), Type::Nullable(t) => convert_nullable(t, value), + Type::Tuple(element_types) => convert_tuple(element_types, value), } } /// Works like json_to_bson, but only converts BSON scalar types. 
pub fn json_to_bson_scalar(expected_type: BsonScalarType, value: Value) -> Result { + use BsonScalarType as S; let result = match expected_type { - BsonScalarType::Double => Bson::Double(deserialize(expected_type, value)?), - BsonScalarType::Int => Bson::Int32(deserialize(expected_type, value)?), - BsonScalarType::Long => convert_long(&from_string(expected_type, value)?)?, - BsonScalarType::Decimal => Bson::Decimal128( + S::Double => Bson::Double(deserialize(expected_type, value)?), + S::Int => Bson::Int32(deserialize(expected_type, value)?), + S::Long => convert_long(&from_string(expected_type, value)?)?, + S::Decimal => Bson::Decimal128( Decimal128::from_str(&from_string(expected_type, value.clone())?).map_err(|err| { JsonToBsonError::ConversionErrorWithContext( Type::Scalar(MongoScalarType::Bson(expected_type)), @@ -84,37 +86,38 @@ pub fn json_to_bson_scalar(expected_type: BsonScalarType, value: Value) -> Resul ) })?, ), - BsonScalarType::String => Bson::String(deserialize(expected_type, value)?), - BsonScalarType::Date => convert_date(&from_string(expected_type, value)?)?, - BsonScalarType::Timestamp => { - deserialize::(expected_type, value)?.into() - } - BsonScalarType::BinData => { - deserialize::(expected_type, value)?.into() - } - BsonScalarType::ObjectId => Bson::ObjectId(deserialize(expected_type, value)?), - BsonScalarType::Bool => match value { + S::String => Bson::String(deserialize(expected_type, value)?), + S::Date => convert_date(&from_string(expected_type, value)?)?, + S::Timestamp => deserialize::(expected_type, value)?.into(), + S::BinData => deserialize::(expected_type, value)?.into(), + S::UUID => convert_uuid(&from_string(expected_type, value)?)?, + S::ObjectId => Bson::ObjectId(deserialize(expected_type, value)?), + S::Bool => match value { Value::Bool(b) => Bson::Boolean(b), - _ => incompatible_scalar_type(BsonScalarType::Bool, value)?, + _ => incompatible_scalar_type(S::Bool, value)?, }, - BsonScalarType::Null => match value { + S::Null => match value { Value::Null => Bson::Null, - _ => incompatible_scalar_type(BsonScalarType::Null, value)?, + _ => incompatible_scalar_type(S::Null, value)?, }, - BsonScalarType::Undefined => match value { + S::Undefined => match value { Value::Null => Bson::Undefined, - _ => incompatible_scalar_type(BsonScalarType::Undefined, value)?, + _ => incompatible_scalar_type(S::Undefined, value)?, }, - BsonScalarType::Regex => deserialize::(expected_type, value)?.into(), - BsonScalarType::Javascript => Bson::JavaScriptCode(deserialize(expected_type, value)?), - BsonScalarType::JavascriptWithScope => { + S::Regex => { + deserialize::>(expected_type, value)? 
+ .into_left() + .into() + } + S::Javascript => Bson::JavaScriptCode(deserialize(expected_type, value)?), + S::JavascriptWithScope => { deserialize::(expected_type, value)?.into() } - BsonScalarType::MinKey => Bson::MinKey, - BsonScalarType::MaxKey => Bson::MaxKey, - BsonScalarType::Symbol => Bson::Symbol(deserialize(expected_type, value)?), + S::MinKey => Bson::MinKey, + S::MaxKey => Bson::MaxKey, + S::Symbol => Bson::Symbol(deserialize(expected_type, value)?), // dbPointer is deprecated - BsonScalarType::DbPointer => Err(JsonToBsonError::NotImplemented(expected_type))?, + S::DbPointer => Err(JsonToBsonError::NotImplemented(expected_type))?, }; Ok(result) } @@ -128,6 +131,16 @@ fn convert_array(element_type: &Type, value: Value) -> Result { Ok(Bson::Array(bson_array)) } +fn convert_tuple(element_types: &[Type], value: Value) -> Result { + let input_elements: Vec = serde_json::from_value(value)?; + let bson_array = element_types + .iter() + .zip(input_elements) + .map(|(element_type, v)| json_to_bson(element_type, v)) + .try_collect()?; + Ok(Bson::Array(bson_array)) +} + fn convert_object(object_type: &ObjectType, value: Value) -> Result { let input_fields: BTreeMap = serde_json::from_value(value)?; let bson_doc: bson::Document = object_type @@ -191,6 +204,17 @@ fn convert_long(value: &str) -> Result { Ok(Bson::Int64(n)) } +fn convert_uuid(value: &str) -> Result { + let uuid = bson::Uuid::parse_str(value).map_err(|err| { + JsonToBsonError::ConversionErrorWithContext( + Type::Scalar(MongoScalarType::Bson(BsonScalarType::UUID)), + value.into(), + err.into(), + ) + })?; + Ok(bson::binary::Binary::from_uuid(uuid).into()) +} + fn deserialize(expected_type: BsonScalarType, value: Value) -> Result where T: DeserializeOwned, @@ -236,35 +260,32 @@ mod tests { use super::json_to_bson; + use BsonScalarType as S; + #[test] #[allow(clippy::approx_constant)] fn deserializes_specialized_scalar_types() -> anyhow::Result<()> { - let object_type = ObjectType { - name: Some("scalar_test".into()), - fields: [ - ("double", BsonScalarType::Double), - ("int", BsonScalarType::Int), - ("long", BsonScalarType::Long), - ("decimal", BsonScalarType::Decimal), - ("string", BsonScalarType::String), - ("date", BsonScalarType::Date), - ("timestamp", BsonScalarType::Timestamp), - ("binData", BsonScalarType::BinData), - ("objectId", BsonScalarType::ObjectId), - ("bool", BsonScalarType::Bool), - ("null", BsonScalarType::Null), - ("undefined", BsonScalarType::Undefined), - ("regex", BsonScalarType::Regex), - ("javascript", BsonScalarType::Javascript), - ("javascriptWithScope", BsonScalarType::JavascriptWithScope), - ("minKey", BsonScalarType::MinKey), - ("maxKey", BsonScalarType::MaxKey), - ("symbol", BsonScalarType::Symbol), - ] - .into_iter() - .map(|(name, t)| (name.into(), Type::Scalar(MongoScalarType::Bson(t)))) - .collect(), - }; + let object_type = ObjectType::new([ + ("double", Type::scalar(S::Double)), + ("int", Type::scalar(S::Int)), + ("long", Type::scalar(S::Long)), + ("decimal", Type::scalar(S::Decimal)), + ("string", Type::scalar(S::String)), + ("date", Type::scalar(S::Date)), + ("timestamp", Type::scalar(S::Timestamp)), + ("binData", Type::scalar(S::BinData)), + ("objectId", Type::scalar(S::ObjectId)), + ("bool", Type::scalar(S::Bool)), + ("null", Type::scalar(S::Null)), + ("undefined", Type::scalar(S::Undefined)), + ("regex", Type::scalar(S::Regex)), + ("javascript", Type::scalar(S::Javascript)), + ("javascriptWithScope", Type::scalar(S::JavascriptWithScope)), + ("minKey", Type::scalar(S::MinKey)), + 
("maxKey", Type::scalar(S::MaxKey)), + ("symbol", Type::scalar(S::Symbol)), + ]) + .named("scalar_test"); let input = json!({ "double": 3.14159, @@ -367,16 +388,13 @@ mod tests { #[test] fn deserializes_object_with_missing_nullable_field() -> anyhow::Result<()> { - let expected_type = Type::Object(ObjectType { - name: Some("test_object".into()), - fields: [( - "field".into(), - Type::Nullable(Box::new(Type::Scalar(MongoScalarType::Bson( - BsonScalarType::String, - )))), - )] - .into(), - }); + let expected_type = Type::named_object( + "test_object", + [( + "field", + Type::nullable(Type::scalar(BsonScalarType::String)), + )], + ); let value = json!({}); let actual = json_to_bson(&expected_type, value)?; assert_eq!(actual, bson!({})); diff --git a/crates/mongodb-agent-common/src/scalar_types_capabilities.rs b/crates/mongodb-agent-common/src/scalar_types_capabilities.rs index 34b08b12..c5edbd37 100644 --- a/crates/mongodb-agent-common/src/scalar_types_capabilities.rs +++ b/crates/mongodb-agent-common/src/scalar_types_capabilities.rs @@ -10,6 +10,7 @@ use ndc_models::{ use crate::aggregation_function::{AggregationFunction, AggregationFunction as A}; use crate::comparison_function::{ComparisonFunction, ComparisonFunction as C}; +use crate::mongo_query_plan as plan; use BsonScalarType as S; @@ -25,12 +26,68 @@ pub fn scalar_types() -> BTreeMap { } fn extended_json_scalar_type() -> (ndc_models::ScalarTypeName, ScalarType) { + // Extended JSON could be anything, so allow all aggregation functions + let aggregation_functions = enum_iterator::all::(); + + // Extended JSON could be anything, so allow all comparison operators + let comparison_operators = enum_iterator::all::(); + + let ext_json_type = Type::Named { + name: mongodb_support::EXTENDED_JSON_TYPE_NAME.into(), + }; + ( mongodb_support::EXTENDED_JSON_TYPE_NAME.into(), ScalarType { - representation: Some(TypeRepresentation::JSON), - aggregate_functions: BTreeMap::new(), - comparison_operators: BTreeMap::new(), + representation: TypeRepresentation::JSON, + aggregate_functions: aggregation_functions + .into_iter() + .map(|aggregation_function| { + use AggregateFunctionDefinition as NDC; + use AggregationFunction as Plan; + let name = aggregation_function.graphql_name().into(); + let definition = match aggregation_function { + // Using custom instead of standard aggregations because we want the result + // types to be ExtendedJSON instead of specific numeric types + Plan::Avg => NDC::Custom { + result_type: Type::Named { + name: mongodb_support::EXTENDED_JSON_TYPE_NAME.into(), + }, + }, + Plan::Min => NDC::Min, + Plan::Max => NDC::Max, + Plan::Sum => NDC::Custom { + result_type: Type::Named { + name: mongodb_support::EXTENDED_JSON_TYPE_NAME.into(), + }, + }, + }; + (name, definition) + }) + .collect(), + comparison_operators: comparison_operators + .into_iter() + .map(|comparison_fn| { + let name = comparison_fn.graphql_name().into(); + let ndc_definition = comparison_fn.ndc_definition(|func| match func { + C::Equal => ext_json_type.clone(), + C::In => Type::Array { + element_type: Box::new(ext_json_type.clone()), + }, + C::LessThan => ext_json_type.clone(), + C::LessThanOrEqual => ext_json_type.clone(), + C::GreaterThan => ext_json_type.clone(), + C::GreaterThanOrEqual => ext_json_type.clone(), + C::NotEqual => ext_json_type.clone(), + C::NotIn => Type::Array { + element_type: Box::new(ext_json_type.clone()), + }, + C::Regex | C::IRegex => bson_to_named_type(S::Regex), + }); + (name, ndc_definition) + }) + .collect(), + extraction_functions: 
Default::default(), }, ) } @@ -41,31 +98,34 @@ fn make_scalar_type(bson_scalar_type: BsonScalarType) -> (ndc_models::ScalarType representation: bson_scalar_type_representation(bson_scalar_type), aggregate_functions: bson_aggregation_functions(bson_scalar_type), comparison_operators: bson_comparison_operators(bson_scalar_type), + extraction_functions: Default::default(), }; (scalar_type_name.into(), scalar_type) } -fn bson_scalar_type_representation(bson_scalar_type: BsonScalarType) -> Option { +fn bson_scalar_type_representation(bson_scalar_type: BsonScalarType) -> TypeRepresentation { + use TypeRepresentation as R; match bson_scalar_type { - BsonScalarType::Double => Some(TypeRepresentation::Float64), - BsonScalarType::Decimal => Some(TypeRepresentation::BigDecimal), // Not quite.... Mongo Decimal is 128-bit, BigDecimal is unlimited - BsonScalarType::Int => Some(TypeRepresentation::Int32), - BsonScalarType::Long => Some(TypeRepresentation::Int64), - BsonScalarType::String => Some(TypeRepresentation::String), - BsonScalarType::Date => Some(TypeRepresentation::Timestamp), // Mongo Date is milliseconds since unix epoch - BsonScalarType::Timestamp => None, // Internal Mongo timestamp type - BsonScalarType::BinData => None, - BsonScalarType::ObjectId => Some(TypeRepresentation::String), // Mongo ObjectId is usually expressed as a 24 char hex string (12 byte number) - BsonScalarType::Bool => Some(TypeRepresentation::Boolean), - BsonScalarType::Null => None, - BsonScalarType::Regex => None, - BsonScalarType::Javascript => None, - BsonScalarType::JavascriptWithScope => None, - BsonScalarType::MinKey => None, - BsonScalarType::MaxKey => None, - BsonScalarType::Undefined => None, - BsonScalarType::DbPointer => None, - BsonScalarType::Symbol => None, + S::Double => R::Float64, + S::Decimal => R::BigDecimal, // Not quite.... 
Mongo Decimal is 128-bit, BigDecimal is unlimited + S::Int => R::Int32, + S::Long => R::Int64, + S::String => R::String, + S::Date => R::TimestampTZ, // Mongo Date is milliseconds since unix epoch, but we serialize to JSON as an ISO string + S::Timestamp => R::JSON, // Internal Mongo timestamp type + S::BinData => R::JSON, + S::UUID => R::String, + S::ObjectId => R::String, // Mongo ObjectId is usually expressed as a 24 char hex string (12 byte number) - not using R::Bytes because that expects base64 + S::Bool => R::Boolean, + S::Null => R::JSON, + S::Regex => R::JSON, + S::Javascript => R::String, + S::JavascriptWithScope => R::JSON, + S::MinKey => R::JSON, + S::MaxKey => R::JSON, + S::Undefined => R::JSON, + S::DbPointer => R::JSON, + S::Symbol => R::String, } } @@ -73,17 +133,9 @@ fn bson_comparison_operators( bson_scalar_type: BsonScalarType, ) -> BTreeMap { comparison_operators(bson_scalar_type) - .map(|(comparison_fn, arg_type)| { + .map(|(comparison_fn, argument_type)| { let fn_name = comparison_fn.graphql_name().into(); - match comparison_fn { - ComparisonFunction::Equal => (fn_name, ComparisonOperatorDefinition::Equal), - _ => ( - fn_name, - ComparisonOperatorDefinition::Custom { - argument_type: bson_to_named_type(arg_type), - }, - ), - } + (fn_name, comparison_fn.ndc_definition(|_| argument_type)) }) .collect() } @@ -92,10 +144,7 @@ fn bson_aggregation_functions( bson_scalar_type: BsonScalarType, ) -> BTreeMap { aggregate_functions(bson_scalar_type) - .map(|(fn_name, result_type)| { - let aggregation_definition = AggregateFunctionDefinition { - result_type: bson_to_named_type(result_type), - }; + .map(|(fn_name, aggregation_definition)| { (fn_name.graphql_name().into(), aggregation_definition) }) .collect() @@ -107,31 +156,69 @@ fn bson_to_named_type(bson_scalar_type: BsonScalarType) -> Type { } } -pub fn aggregate_functions( +fn bson_to_scalar_type_name(bson_scalar_type: BsonScalarType) -> ndc_models::ScalarTypeName { + bson_scalar_type.graphql_name().into() +} + +fn aggregate_functions( scalar_type: BsonScalarType, -) -> impl Iterator { - [(A::Count, S::Int)] - .into_iter() - .chain(iter_if( - scalar_type.is_orderable(), - [A::Min, A::Max] - .into_iter() - .map(move |op| (op, scalar_type)), - )) - .chain(iter_if( - scalar_type.is_numeric(), - [A::Avg, A::Sum] - .into_iter() - .map(move |op| (op, scalar_type)), - )) +) -> impl Iterator { + use AggregateFunctionDefinition as NDC; + iter_if( + scalar_type.is_orderable(), + [(A::Min, NDC::Min), (A::Max, NDC::Max)].into_iter(), + ) + .chain(iter_if( + scalar_type.is_numeric(), + [ + ( + A::Avg, + NDC::Average { + result_type: bson_to_scalar_type_name( + A::expected_result_type(A::Avg, &plan::Type::scalar(scalar_type)) + .expect("average result type is defined"), + // safety: this expect is checked in integration tests + ), + }, + ), + ( + A::Sum, + NDC::Sum { + result_type: bson_to_scalar_type_name( + A::expected_result_type(A::Sum, &plan::Type::scalar(scalar_type)) + .expect("sum result type is defined"), + // safety: this expect is checked in integration tests + ), + }, + ), + ] + .into_iter(), + )) } pub fn comparison_operators( scalar_type: BsonScalarType, -) -> impl Iterator { +) -> impl Iterator { iter_if( scalar_type.is_comparable(), - [(C::Equal, scalar_type), (C::NotEqual, scalar_type)].into_iter(), + [ + (C::Equal, bson_to_named_type(scalar_type)), + (C::NotEqual, bson_to_named_type(scalar_type)), + ( + C::In, + Type::Array { + element_type: Box::new(bson_to_named_type(scalar_type)), + }, + ), + ( + C::NotIn, + 
Type::Array { + element_type: Box::new(bson_to_named_type(scalar_type)), + }, + ), + (C::NotEqual, bson_to_named_type(scalar_type)), + ] + .into_iter(), ) .chain(iter_if( scalar_type.is_orderable(), @@ -142,11 +229,17 @@ pub fn comparison_operators( C::GreaterThanOrEqual, ] .into_iter() - .map(move |op| (op, scalar_type)), + .map(move |op| (op, bson_to_named_type(scalar_type))), )) .chain(match scalar_type { - S::String => Box::new([(C::Regex, S::String), (C::IRegex, S::String)].into_iter()), - _ => Box::new(std::iter::empty()) as Box>, + S::String => Box::new( + [ + (C::Regex, bson_to_named_type(S::Regex)), + (C::IRegex, bson_to_named_type(S::Regex)), + ] + .into_iter(), + ), + _ => Box::new(std::iter::empty()) as Box>, }) } diff --git a/crates/mongodb-agent-common/src/schema.rs b/crates/mongodb-agent-common/src/schema.rs index 26fd6845..e475eb7f 100644 --- a/crates/mongodb-agent-common/src/schema.rs +++ b/crates/mongodb-agent-common/src/schema.rs @@ -18,28 +18,28 @@ pub struct ValidatorSchema { #[derive(Clone, Debug, Deserialize)] #[cfg_attr(test, derive(PartialEq))] -#[serde(untagged)] +#[serde(tag = "bsonType", rename_all = "camelCase")] pub enum Property { Object { - #[serde(rename = "bsonType", default = "default_bson_type")] - #[allow(dead_code)] - bson_type: BsonType, #[serde(skip_serializing_if = "Option::is_none")] description: Option, #[serde(skip_serializing_if = "Vec::is_empty", default)] required: Vec, - properties: IndexMap, + #[serde(skip_serializing_if = "Option::is_none")] + properties: Option>, }, Array { - #[serde(rename = "bsonType", default = "default_bson_type")] - #[allow(dead_code)] - bson_type: BsonType, #[serde(skip_serializing_if = "Option::is_none")] description: Option, items: Box, }, + #[serde(untagged)] Scalar { - #[serde(rename = "bsonType", default = "default_bson_scalar_type")] + #[serde( + rename = "bsonType", + deserialize_with = "deserialize_scalar_bson_type", + default = "default_bson_scalar_type" + )] bson_type: BsonScalarType, #[serde(skip_serializing_if = "Option::is_none")] description: Option, @@ -49,13 +49,11 @@ pub enum Property { pub fn get_property_description(p: &Property) -> Option { match p { Property::Object { - bson_type: _, description, required: _, properties: _, } => description.clone(), Property::Array { - bson_type: _, description, items: _, } => description.clone(), @@ -66,6 +64,15 @@ pub fn get_property_description(p: &Property) -> Option { } } +fn deserialize_scalar_bson_type<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + use serde::de::Error; + let value = BsonType::deserialize(deserializer)?; + value.try_into().map_err(D::Error::custom) +} + fn default_bson_scalar_type() -> BsonScalarType { BsonScalarType::Undefined } @@ -78,8 +85,8 @@ fn default_bson_type() -> BsonType { mod test { use indexmap::IndexMap; use mongodb::bson::{bson, from_bson}; - use mongodb_support::{BsonScalarType, BsonType}; + use pretty_assertions::assert_eq; use super::{Property, ValidatorSchema}; @@ -122,10 +129,9 @@ mod test { assert_eq!( from_bson::(input)?, Property::Object { - bson_type: BsonType::Object, description: Some("Name of places".to_owned()), required: vec!["name".to_owned(), "description".to_owned()], - properties: IndexMap::from([ + properties: Some(IndexMap::from([ ( "name".to_owned(), Property::Scalar { @@ -142,7 +148,7 @@ mod test { ) } ) - ]) + ])) } ); @@ -165,13 +171,11 @@ mod test { assert_eq!( from_bson::(input)?, Property::Array { - bson_type: BsonType::Array, description: Some("Location must be an 
array of objects".to_owned()), items: Box::new(Property::Object { - bson_type: BsonType::Object, description: None, required: vec!["name".to_owned(), "size".to_owned()], - properties: IndexMap::from([ + properties: Some(IndexMap::from([ ( "name".to_owned(), Property::Scalar { @@ -186,7 +190,7 @@ mod test { description: None } ) - ]) + ])) }), } ); @@ -250,10 +254,9 @@ mod test { properties: IndexMap::from([( "counts".to_owned(), Property::Object { - bson_type: BsonType::Object, description: None, required: vec!["xs".to_owned()], - properties: IndexMap::from([ + properties: Some(IndexMap::from([ ( "xs".to_owned(), Property::Scalar { @@ -268,7 +271,7 @@ mod test { description: None } ), - ]) + ])) } )]) } @@ -300,7 +303,7 @@ mod test { "description": "\"gpa\" must be a double if the field exists" }, "address": { - "bsonType": ["object"], + "bsonType": "object", "properties": { "city": { "bsonType": "string" }, "street": { "bsonType": "string" } @@ -350,10 +353,9 @@ mod test { ( "address".to_owned(), Property::Object { - bson_type: BsonType::Object, description: None, required: vec![], - properties: IndexMap::from([ + properties: Some(IndexMap::from([ ( "city".to_owned(), Property::Scalar { @@ -368,7 +370,7 @@ mod test { description: None, } ) - ]) + ])) } ) ]), diff --git a/crates/mongodb-agent-common/src/state.rs b/crates/mongodb-agent-common/src/state.rs index 7875c7ab..07fae77d 100644 --- a/crates/mongodb-agent-common/src/state.rs +++ b/crates/mongodb-agent-common/src/state.rs @@ -25,13 +25,18 @@ impl ConnectorState { pub async fn try_init_state() -> Result> { // Splitting this out of the `Connector` impl makes error translation easier let database_uri = env::var(DATABASE_URI_ENV_VAR)?; - try_init_state_from_uri(&database_uri).await + let state = try_init_state_from_uri(Some(&database_uri)).await?; + Ok(state) } pub async fn try_init_state_from_uri( - database_uri: &str, -) -> Result> { - let client = get_mongodb_client(database_uri).await?; + database_uri: Option<&impl AsRef>, +) -> anyhow::Result { + let database_uri = database_uri.ok_or(anyhow!( + "Missing environment variable {}", + DATABASE_URI_ENV_VAR + ))?; + let client = get_mongodb_client(database_uri.as_ref()).await?; let database_name = match client.default_database() { Some(database) => Ok(database.name().to_owned()), None => Err(anyhow!( diff --git a/crates/mongodb-agent-common/src/test_helpers.rs b/crates/mongodb-agent-common/src/test_helpers.rs index cc78a049..c265c915 100644 --- a/crates/mongodb-agent-common/src/test_helpers.rs +++ b/crates/mongodb-agent-common/src/test_helpers.rs @@ -20,7 +20,7 @@ pub fn make_nested_schema() -> MongoConfiguration { collection_type: "Author".into(), arguments: Default::default(), uniqueness_constraints: make_primary_key_uniqueness_constraint("authors"), - foreign_keys: Default::default(), + relational_mutations: None, }, ), collection("appearances"), // new helper gives more concise syntax @@ -87,6 +87,7 @@ pub fn make_nested_schema() -> MongoConfiguration { } /// Configuration for a MongoDB database with Chinook test data +#[allow(dead_code)] pub fn chinook_config() -> MongoConfiguration { MongoConfiguration(Configuration { collections: [ @@ -139,19 +140,20 @@ pub fn chinook_config() -> MongoConfiguration { }) } +#[allow(dead_code)] pub fn chinook_relationships() -> BTreeMap { [ ( "Albums", - ndc_test_helpers::relationship("Album", [("ArtistId", "ArtistId")]), + ndc_test_helpers::relationship("Album", [("ArtistId", &["ArtistId"])]), ), ( "Tracks", - ndc_test_helpers::relationship("Track", 
[("AlbumId", "AlbumId")]), + ndc_test_helpers::relationship("Track", [("AlbumId", &["AlbumId"])]), ), ( "Genre", - ndc_test_helpers::relationship("Genre", [("GenreId", "GenreId")]).object_type(), + ndc_test_helpers::relationship("Genre", [("GenreId", &["GenreId"])]).object_type(), ), ] .into_iter() @@ -161,36 +163,5 @@ pub fn chinook_relationships() -> BTreeMap { /// Configuration for a MongoDB database that resembles MongoDB's sample_mflix test data set. pub fn mflix_config() -> MongoConfiguration { - MongoConfiguration(Configuration { - collections: [collection("comments"), collection("movies")].into(), - object_types: [ - ( - "comments".into(), - object_type([ - ("_id", named_type("ObjectId")), - ("movie_id", named_type("ObjectId")), - ("name", named_type("String")), - ]), - ), - ( - "credits".into(), - object_type([("director", named_type("String"))]), - ), - ( - "movies".into(), - object_type([ - ("_id", named_type("ObjectId")), - ("credits", named_type("credits")), - ("title", named_type("String")), - ("year", named_type("Int")), - ]), - ), - ] - .into(), - functions: Default::default(), - procedures: Default::default(), - native_mutations: Default::default(), - native_queries: Default::default(), - options: Default::default(), - }) + MongoConfiguration(test_helpers::configuration::mflix_config()) } diff --git a/crates/mongodb-connector/Cargo.toml b/crates/mongodb-connector/Cargo.toml index 65de56c5..8cfb001f 100644 --- a/crates/mongodb-connector/Cargo.toml +++ b/crates/mongodb-connector/Cargo.toml @@ -19,12 +19,12 @@ itertools = { workspace = true } mongodb = { workspace = true } ndc-sdk = { workspace = true } prometheus = "*" # share version from ndc-sdk -serde = { version = "1.0", features = ["derive"] } -serde_json = { version = "1.0", features = ["preserve_order"] } +serde = { workspace = true } +serde_json = { workspace = true } thiserror = "1" tokio = { version = "1.28.1", features = ["full"] } tracing = "0.1" [dev-dependencies] ndc-test-helpers = { path = "../ndc-test-helpers" } -pretty_assertions = "1" +pretty_assertions = "1.4" diff --git a/crates/mongodb-connector/src/capabilities.rs b/crates/mongodb-connector/src/capabilities.rs index 460be3cd..ce739614 100644 --- a/crates/mongodb-connector/src/capabilities.rs +++ b/crates/mongodb-connector/src/capabilities.rs @@ -1,18 +1,38 @@ use ndc_sdk::models::{ - Capabilities, LeafCapability, NestedFieldCapabilities, QueryCapabilities, - RelationshipCapabilities, + AggregateCapabilities, Capabilities, ExistsCapabilities, GroupByCapabilities, LeafCapability, + NestedArrayFilterByCapabilities, NestedFieldCapabilities, NestedFieldFilterByCapabilities, + QueryCapabilities, RelationshipCapabilities, }; pub fn mongo_capabilities() -> Capabilities { Capabilities { query: QueryCapabilities { - aggregates: Some(LeafCapability {}), + aggregates: Some(AggregateCapabilities { + filter_by: None, + group_by: Some(GroupByCapabilities { + filter: None, + order: None, + paginate: None, + }), + }), variables: Some(LeafCapability {}), explain: Some(LeafCapability {}), nested_fields: NestedFieldCapabilities { - filter_by: Some(LeafCapability {}), + filter_by: Some(NestedFieldFilterByCapabilities { + nested_arrays: Some(NestedArrayFilterByCapabilities { + contains: Some(LeafCapability {}), + is_empty: Some(LeafCapability {}), + }), + }), order_by: Some(LeafCapability {}), - aggregates: None, + aggregates: Some(LeafCapability {}), + nested_collections: None, // TODO: ENG-1464 + }, + exists: ExistsCapabilities { + named_scopes: None, // TODO: ENG-1487 + 
unrelated: Some(LeafCapability {}), + nested_collections: Some(LeafCapability {}), + nested_scalar_collections: None, // TODO: ENG-1488 }, }, mutation: ndc_sdk::models::MutationCapabilities { @@ -22,6 +42,9 @@ pub fn mongo_capabilities() -> Capabilities { relationships: Some(RelationshipCapabilities { relation_comparisons: Some(LeafCapability {}), order_by_aggregate: None, + nested: None, // TODO: ENG-1490 }), + relational_mutation: None, + relational_query: None, } } diff --git a/crates/mongodb-connector/src/error_mapping.rs b/crates/mongodb-connector/src/error_mapping.rs deleted file mode 100644 index 6db47afc..00000000 --- a/crates/mongodb-connector/src/error_mapping.rs +++ /dev/null @@ -1,43 +0,0 @@ -use http::StatusCode; -use mongodb_agent_common::interface_types::{ErrorResponse, MongoAgentError}; -use ndc_sdk::{ - connector::{ExplainError, QueryError}, - models, -}; -use serde_json::Value; - -pub fn mongo_agent_error_to_query_error(error: MongoAgentError) -> QueryError { - if let MongoAgentError::NotImplemented(e) = error { - return QueryError::UnsupportedOperation(error_response(e.to_owned())); - } - let (status, err) = error.status_and_error_response(); - match status { - StatusCode::BAD_REQUEST => QueryError::UnprocessableContent(convert_error_response(err)), - _ => QueryError::Other(Box::new(error), Value::Object(Default::default())), - } -} - -pub fn mongo_agent_error_to_explain_error(error: MongoAgentError) -> ExplainError { - if let MongoAgentError::NotImplemented(e) = error { - return ExplainError::UnsupportedOperation(error_response(e.to_owned())); - } - let (status, err) = error.status_and_error_response(); - match status { - StatusCode::BAD_REQUEST => ExplainError::UnprocessableContent(convert_error_response(err)), - _ => ExplainError::Other(Box::new(error), Value::Object(Default::default())), - } -} - -pub fn error_response(message: String) -> models::ErrorResponse { - models::ErrorResponse { - message, - details: serde_json::Value::Object(Default::default()), - } -} - -pub fn convert_error_response(err: ErrorResponse) -> models::ErrorResponse { - models::ErrorResponse { - message: err.message, - details: Value::Object(err.details.unwrap_or_default().into_iter().collect()), - } -} diff --git a/crates/mongodb-connector/src/main.rs b/crates/mongodb-connector/src/main.rs index abcab866..bc9ed2a9 100644 --- a/crates/mongodb-connector/src/main.rs +++ b/crates/mongodb-connector/src/main.rs @@ -1,14 +1,11 @@ mod capabilities; -mod error_mapping; mod mongo_connector; mod mutation; mod schema; -use std::error::Error; - use mongo_connector::MongoConnector; #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> ndc_sdk::connector::Result<()> { ndc_sdk::default_main::default_main::().await } diff --git a/crates/mongodb-connector/src/mongo_connector.rs b/crates/mongodb-connector/src/mongo_connector.rs index 5df795a3..41ffd845 100644 --- a/crates/mongodb-connector/src/mongo_connector.rs +++ b/crates/mongodb-connector/src/mongo_connector.rs @@ -1,29 +1,23 @@ use std::path::Path; -use anyhow::anyhow; use async_trait::async_trait; use configuration::Configuration; +use http::StatusCode; use mongodb_agent_common::{ - explain::explain_query, health::check_health, mongo_query_plan::MongoConfiguration, + explain::explain_query, interface_types::MongoAgentError, mongo_query_plan::MongoConfiguration, query::handle_query_request, state::ConnectorState, }; use ndc_sdk::{ - connector::{ - Connector, ConnectorSetup, ExplainError, FetchMetricsError, HealthError, - 
InitializationError, MutationError, ParseError, QueryError, SchemaError, - }, + connector::{self, Connector, ConnectorSetup, ErrorResponse}, json_response::JsonResponse, models::{ Capabilities, ExplainResponse, MutationRequest, MutationResponse, QueryRequest, QueryResponse, SchemaResponse, }, }; -use serde_json::Value; +use serde_json::json; use tracing::instrument; -use crate::error_mapping::{ - error_response, mongo_agent_error_to_explain_error, mongo_agent_error_to_query_error, -}; use crate::{capabilities::mongo_capabilities, mutation::handle_mutation_request}; #[derive(Clone, Default)] @@ -37,11 +31,18 @@ impl ConnectorSetup for MongoConnector { #[instrument(err, skip_all)] async fn parse_configuration( &self, - configuration_dir: impl AsRef + Send, - ) -> Result { + configuration_dir: &Path, + ) -> connector::Result { let configuration = Configuration::parse_configuration(configuration_dir) .await - .map_err(|err| ParseError::Other(err.into()))?; + .map_err(|err| { + ErrorResponse::new( + StatusCode::INTERNAL_SERVER_ERROR, + format!("{err:#}"), // alternate selector (:#) includes root cause in string + json!({}), + ) + })?; + tracing::debug!(?configuration); Ok(MongoConfiguration(configuration)) } @@ -54,7 +55,7 @@ impl ConnectorSetup for MongoConnector { &self, _configuration: &MongoConfiguration, _metrics: &mut prometheus::Registry, - ) -> Result { + ) -> connector::Result { let state = mongodb_agent_common::state::try_init_state().await?; Ok(state) } @@ -66,31 +67,22 @@ impl Connector for MongoConnector { type Configuration = MongoConfiguration; type State = ConnectorState; + fn connector_name() -> &'static str { + "ndc_mongodb" + } + + fn connector_version() -> &'static str { + env!("CARGO_PKG_VERSION") + } + #[instrument(err, skip_all)] fn fetch_metrics( _configuration: &Self::Configuration, _state: &Self::State, - ) -> Result<(), FetchMetricsError> { + ) -> connector::Result<()> { Ok(()) } - #[instrument(err, skip_all)] - async fn health_check( - _configuration: &Self::Configuration, - state: &Self::State, - ) -> Result<(), HealthError> { - let status = check_health(state) - .await - .map_err(|e| HealthError::Other(e.into(), Value::Object(Default::default())))?; - match status.as_u16() { - 200..=299 => Ok(()), - s => Err(HealthError::Other( - anyhow!("unhealthy status: {s}").into(), - Value::Object(Default::default()), - )), - } - } - async fn get_capabilities() -> Capabilities { mongo_capabilities() } @@ -98,7 +90,7 @@ impl Connector for MongoConnector { #[instrument(err, skip_all)] async fn get_schema( configuration: &Self::Configuration, - ) -> Result, SchemaError> { + ) -> connector::Result> { let response = crate::schema::get_schema(configuration).await?; Ok(response.into()) } @@ -108,10 +100,10 @@ impl Connector for MongoConnector { configuration: &Self::Configuration, state: &Self::State, request: QueryRequest, - ) -> Result, ExplainError> { + ) -> connector::Result> { let response = explain_query(configuration, state, request) .await - .map_err(mongo_agent_error_to_explain_error)?; + .map_err(map_mongo_agent_error)?; Ok(response.into()) } @@ -120,10 +112,12 @@ impl Connector for MongoConnector { _configuration: &Self::Configuration, _state: &Self::State, _request: MutationRequest, - ) -> Result, ExplainError> { - Err(ExplainError::UnsupportedOperation(error_response( - "Explain for mutations is not implemented yet".to_owned(), - ))) + ) -> connector::Result> { + Err(ErrorResponse::new( + StatusCode::NOT_IMPLEMENTED, + "Explain for mutations is not implemented 
yet".to_string(), + json!({}), + )) } #[instrument(err, skip_all)] @@ -131,8 +125,9 @@ impl Connector for MongoConnector { configuration: &Self::Configuration, state: &Self::State, request: MutationRequest, - ) -> Result, MutationError> { - handle_mutation_request(configuration, state, request).await + ) -> connector::Result> { + let response = handle_mutation_request(configuration, state, request).await?; + Ok(response) } #[instrument(name = "/query", err, skip_all, fields(internal.visibility = "user"))] @@ -140,10 +135,19 @@ impl Connector for MongoConnector { configuration: &Self::Configuration, state: &Self::State, request: QueryRequest, - ) -> Result, QueryError> { + ) -> connector::Result> { let response = handle_query_request(configuration, state, request) .await - .map_err(mongo_agent_error_to_query_error)?; + .map_err(map_mongo_agent_error)?; Ok(response.into()) } } + +fn map_mongo_agent_error(err: MongoAgentError) -> ErrorResponse { + let (status_code, err_response) = err.status_and_error_response(); + let details = match err_response.details { + Some(details) => details.into_iter().collect(), + None => json!({}), + }; + ErrorResponse::new(status_code, err_response.message, details) +} diff --git a/crates/mongodb-connector/src/mutation.rs b/crates/mongodb-connector/src/mutation.rs index e517dbb4..7082f9e2 100644 --- a/crates/mongodb-connector/src/mutation.rs +++ b/crates/mongodb-connector/src/mutation.rs @@ -17,10 +17,9 @@ use ndc_query_plan::plan_for_mutation_request; use ndc_sdk::{ connector::MutationError, json_response::JsonResponse, - models::{MutationOperationResults, MutationRequest, MutationResponse}, + models::{ErrorResponse, MutationOperationResults, MutationRequest, MutationResponse}, }; - -use crate::error_mapping::error_response; +use serde_json::json; pub async fn handle_mutation_request( config: &MongoConfiguration, @@ -29,10 +28,10 @@ pub async fn handle_mutation_request( ) -> Result, MutationError> { tracing::debug!(?config, mutation_request = %serde_json::to_string(&mutation_request).unwrap(), "executing mutation"); let mutation_plan = plan_for_mutation_request(config, mutation_request).map_err(|err| { - MutationError::UnprocessableContent(error_response(format!( - "error processing mutation request: {}", - err - ))) + MutationError::UnprocessableContent(ErrorResponse { + message: format!("error processing mutation request: {}", err), + details: json!({}), + }) })?; let database = state.database(); let jobs = look_up_procedures(config, &mutation_plan)?; @@ -71,12 +70,13 @@ fn look_up_procedures<'a, 'b>( .partition_result(); if !not_found.is_empty() { - return Err(MutationError::UnprocessableContent(error_response( - format!( + return Err(MutationError::UnprocessableContent(ErrorResponse { + message: format!( "request includes unknown mutations: {}", not_found.join(", ") ), - ))); + details: json!({}), + })); } Ok(procedures) @@ -88,26 +88,37 @@ async fn execute_procedure( procedure: Procedure<'_>, requested_fields: Option<&NestedField>, ) -> Result { - let (result, result_type) = procedure - .execute(database.clone()) - .await - .map_err(|err| MutationError::UnprocessableContent(error_response(err.to_string())))?; + let (result, result_type) = procedure.execute(database.clone()).await.map_err(|err| { + MutationError::UnprocessableContent(ErrorResponse { + message: err.to_string(), + details: json!({}), + }) + })?; let rewritten_result = rewrite_response(requested_fields, result.into())?; let requested_result_type = if let Some(fields) = requested_fields { - 
type_for_nested_field(&[], &result_type, fields) - .map_err(|err| MutationError::UnprocessableContent(error_response(err.to_string())))? + type_for_nested_field(&[], &result_type, fields).map_err(|err| { + MutationError::UnprocessableContent(ErrorResponse { + message: err.to_string(), + details: json!({}), + }) + })? } else { result_type }; let json_result = bson_to_json( - config.extended_json_mode(), + config.serialization_options().extended_json_mode, &requested_result_type, rewritten_result, ) - .map_err(|err| MutationError::UnprocessableContent(error_response(err.to_string())))?; + .map_err(|err| { + MutationError::UnprocessableContent(ErrorResponse { + message: err.to_string(), + details: json!({}), + }) + })?; Ok(MutationOperationResults::Procedure { result: json_result, @@ -130,12 +141,18 @@ fn rewrite_response( Ok(rewrite_array(fields, values)?.into()) } - (Some(NestedField::Object(_)), _) => Err(MutationError::UnprocessableContent( - error_response("expected an object".to_owned()), - )), - (Some(NestedField::Array(_)), _) => Err(MutationError::UnprocessableContent( - error_response("expected an array".to_owned()), - )), + (Some(NestedField::Object(_)), _) => { + Err(MutationError::UnprocessableContent(ErrorResponse { + message: "expected an object".to_owned(), + details: json!({}), + })) + } + (Some(NestedField::Array(_)), _) => { + Err(MutationError::UnprocessableContent(ErrorResponse { + message: "expected an array".to_owned(), + details: json!({}), + })) + } } } @@ -154,15 +171,18 @@ fn rewrite_doc( fields, } => { let orig_value = doc.remove(column.as_str()).ok_or_else(|| { - MutationError::UnprocessableContent(error_response(format!( - "missing expected field from response: {name}" - ))) + MutationError::UnprocessableContent(ErrorResponse { + message: format!("missing expected field from response: {name}"), + details: json!({}), + }) })?; rewrite_response(fields.as_ref(), orig_value) } Field::Relationship { .. 
} => Err(MutationError::UnsupportedOperation( - error_response("The MongoDB connector does not support relationship references in mutations" - .to_owned()), + ErrorResponse { + message: "The MongoDB connector does not support relationship references in mutations".to_owned(), + details: json!({}), + }, )), }?; diff --git a/crates/mongodb-connector/src/schema.rs b/crates/mongodb-connector/src/schema.rs index d24c8d5e..6e6add5c 100644 --- a/crates/mongodb-connector/src/schema.rs +++ b/crates/mongodb-connector/src/schema.rs @@ -1,11 +1,12 @@ use mongodb_agent_common::{ mongo_query_plan::MongoConfiguration, scalar_types_capabilities::SCALAR_TYPES, }; +use mongodb_support::BsonScalarType; use ndc_query_plan::QueryContext as _; -use ndc_sdk::{connector::SchemaError, models as ndc}; +use ndc_sdk::{connector, models as ndc}; -pub async fn get_schema(config: &MongoConfiguration) -> Result { - Ok(ndc::SchemaResponse { +pub async fn get_schema(config: &MongoConfiguration) -> connector::Result { + let schema = ndc::SchemaResponse { collections: config.collections().values().cloned().collect(), functions: config .functions() @@ -20,5 +21,15 @@ pub async fn get_schema(config: &MongoConfiguration) -> Result, @@ -32,6 +34,26 @@ impl Pipeline { } } +impl AsRef<[Stage]> for Pipeline { + fn as_ref(&self) -> &[Stage] { + &self.stages + } +} + +impl Borrow<[Stage]> for Pipeline { + fn borrow(&self) -> &[Stage] { + &self.stages + } +} + +impl Deref for Pipeline { + type Target = [Stage]; + + fn deref(&self) -> &Self::Target { + &self.stages + } +} + /// This impl allows passing a [Pipeline] as the first argument to [mongodb::Collection::aggregate]. impl IntoIterator for Pipeline { type Item = bson::Document; @@ -57,3 +79,9 @@ impl FromIterator for Pipeline { } } } + +impl From for Vec { + fn from(value: Pipeline) -> Self { + value.into_iter().collect() + } +} diff --git a/crates/mongodb-support/src/aggregate/selection.rs b/crates/mongodb-support/src/aggregate/selection.rs new file mode 100644 index 00000000..8d6fbf28 --- /dev/null +++ b/crates/mongodb-support/src/aggregate/selection.rs @@ -0,0 +1,63 @@ +use mongodb::bson::{self, Bson}; +use serde::{Deserialize, Serialize}; + +/// Wraps a BSON document that represents a MongoDB "expression" that constructs a document based +/// on the output of a previous aggregation pipeline stage. A Selection value is intended to be +/// used as the argument to a $replaceWith pipeline stage. +/// +/// When we compose pipelines, we can pair each Pipeline with a Selection that extracts the data we +/// want, in the format we want it to provide to HGE. We can collect Selection values and merge +/// them to form one stage after all of the composed pipelines. +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct Selection(bson::Document); + +impl Selection { + pub fn new(doc: bson::Document) -> Self { + Self(doc) + } + + /// Transform the contained BSON document in a callback. This may return an error on invariant + /// violations in the future. + pub fn try_map_document(self, callback: F) -> Result + where + F: FnOnce(bson::Document) -> bson::Document, + { + let doc = self.into(); + let updated_doc = callback(doc); + Ok(Self::new(updated_doc)) + } +} + +/// The extend implementation provides a shallow merge. 
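To make the intended use of Selection concrete, here is a small illustrative sketch (an editorial aside, not part of the patch). It assumes the Selection and Stage types added in this diff are re-exported from mongodb_support::aggregate, and it relies on the Extend impl that follows for the shallow merge:

```rust
use mongodb::bson::doc;
// Assumed re-export paths for the types introduced in this diff.
use mongodb_support::aggregate::{Selection, Stage};

fn selection_stage() -> Stage {
    // Each composed pipeline contributes the fields it wants in the final output.
    let mut selection = Selection::new(doc! { "title": "$title" });
    // Shallow-merge additional (key, value) pairs collected from other pipelines.
    selection.extend([("year".to_owned(), "$year".into())]);
    // The merged selection becomes the argument of a single $replaceWith stage.
    Stage::ReplaceWith(selection)
}
```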
+impl Extend<(String, Bson)> for Selection { + fn extend>(&mut self, iter: T) { + self.0.extend(iter); + } +} + +impl From for Bson { + fn from(value: Selection) -> Self { + value.0.into() + } +} + +impl From for bson::Document { + fn from(value: Selection) -> Self { + value.0 + } +} + +impl<'a> From<&'a Selection> for &'a bson::Document { + fn from(value: &'a Selection) -> Self { + &value.0 + } +} + +// This won't fail, but it might in the future if we add some sort of validation or parsing. +impl TryFrom for Selection { + type Error = anyhow::Error; + fn try_from(value: bson::Document) -> Result { + Ok(Selection(value)) + } +} diff --git a/crates/mongodb-support/src/aggregate/sort_document.rs b/crates/mongodb-support/src/aggregate/sort_document.rs new file mode 100644 index 00000000..37756cb2 --- /dev/null +++ b/crates/mongodb-support/src/aggregate/sort_document.rs @@ -0,0 +1,14 @@ +use mongodb::bson; +use serde::{Deserialize, Serialize}; + +/// Wraps a BSON document that represents a set of sort criteria. A SortDocument value is intended +/// to be used as the argument to a $sort pipeline stage. +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct SortDocument(pub bson::Document); + +impl SortDocument { + pub fn from_doc(doc: bson::Document) -> Self { + SortDocument(doc) + } +} diff --git a/crates/mongodb-agent-common/src/mongodb/stage.rs b/crates/mongodb-support/src/aggregate/stage.rs similarity index 72% rename from crates/mongodb-agent-common/src/mongodb/stage.rs rename to crates/mongodb-support/src/aggregate/stage.rs index 9845f922..635e2c2e 100644 --- a/crates/mongodb-agent-common/src/mongodb/stage.rs +++ b/crates/mongodb-support/src/aggregate/stage.rs @@ -1,16 +1,23 @@ use std::collections::BTreeMap; -use mongodb::bson; -use serde::Serialize; +use mongodb::bson::{self, Bson}; +use serde::{Deserialize, Serialize}; -use super::{accumulator::Accumulator, pipeline::Pipeline, Selection}; +use super::{Accumulator, Pipeline, Selection, SortDocument}; /// Aggergation Pipeline Stage. This is a work-in-progress - we are adding enum variants to match /// MongoDB pipeline stage types as we need them in this app. For documentation on all stage types /// see, /// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#std-label-aggregation-pipeline-operator-reference -#[derive(Clone, Debug, PartialEq, Serialize)] +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub enum Stage { + /// Adds new fields to documents. $addFields outputs documents that contain all existing fields + /// from the input documents and newly added fields. + /// + /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/addFields/ + #[serde(rename = "$addFields")] + AddFields(bson::Document), + /// Returns literal documents from input expressions. /// /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/documents/#mongodb-pipeline-pipe.-documents @@ -35,7 +42,7 @@ pub enum Stage { /// /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/sort/#mongodb-pipeline-pipe.-sort #[serde(rename = "$sort")] - Sort(bson::Document), + Sort(SortDocument), /// Passes the first n documents unmodified to the pipeline where n is the specified limit. 
For /// each input document, outputs either one document (for the first n documents) or zero @@ -43,7 +50,7 @@ pub enum Stage { /// /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/limit/#mongodb-pipeline-pipe.-limit #[serde(rename = "$limit")] - Limit(u32), + Limit(Bson), /// Performs a left outer join to another collection in the same database to filter in /// documents from the "joined" collection for processing. @@ -62,6 +69,9 @@ pub enum Stage { /// /// If a local document does not contain a localField value, the $lookup uses a null value /// for the match. + /// + /// Must be a string. Does not begin with a dollar sign. May contain dots to select nested + /// fields. #[serde(skip_serializing_if = "Option::is_none")] local_field: Option, /// Specifies the foreign documents' foreignField to perform an equality match with the @@ -69,6 +79,9 @@ pub enum Stage { /// /// If a foreign document does not contain a foreignField value, the $lookup uses a null /// value for the match. + /// + /// Must be a string. Does not begin with a dollar sign. May contain dots to select nested + /// fields. #[serde(skip_serializing_if = "Option::is_none")] foreign_field: Option, /// Optional. Specifies the variables to use in the pipeline stages. Use the variable @@ -101,7 +114,7 @@ pub enum Stage { /// /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/skip/#mongodb-pipeline-pipe.-skip #[serde(rename = "$skip")] - Skip(u32), + Skip(Bson), /// Groups input documents by a specified identifier expression and applies the accumulator /// expression(s), if specified, to each group. Consumes all input documents and outputs one @@ -139,6 +152,25 @@ pub enum Stage { #[serde(rename = "$count")] Count(String), + /// Reshapes each document in the stream, such as by adding new fields or removing existing + /// fields. For each input document, outputs one document. + /// + /// See also $unset for removing existing fields. + /// + /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/project/#mongodb-pipeline-pipe.-project + #[serde(rename = "$project")] + Project(bson::Document), + + /// Replaces a document with the specified embedded document. The operation replaces all + /// existing fields in the input document, including the _id field. Specify a document embedded + /// in the input document to promote the embedded document to the top level. + /// + /// $replaceWith is an alias for $replaceRoot stage. + /// + /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/replaceRoot/#mongodb-pipeline-pipe.-replaceRoot + #[serde(rename = "$replaceRoot", rename_all = "camelCase")] + ReplaceRoot { new_root: Selection }, + /// Replaces a document with the specified embedded document. The operation replaces all /// existing fields in the input document, including the _id field. Specify a document embedded /// in the input document to promote the embedded document to the top level. @@ -149,6 +181,32 @@ pub enum Stage { #[serde(rename = "$replaceWith")] ReplaceWith(Selection), + /// Deconstructs an array field from the input documents to output a document for each element. + /// Each output document is the input document with the value of the array field replaced by + /// the element. + /// + /// See https://www.mongodb.com/docs/manual/reference/operator/aggregation/unwind/ + #[serde(rename = "$unwind", rename_all = "camelCase")] + Unwind { + /// Field path to an array field. 
To specify a field path, prefix the field name with + /// a dollar sign $ and enclose in quotes. + path: String, + + /// Optional. The name of a new field to hold the array index of the element. The name + /// cannot start with a dollar sign $. + #[serde(default, skip_serializing_if = "Option::is_none")] + include_array_index: Option, + + /// Optional. + /// + /// - If true, if the path is null, missing, or an empty array, $unwind outputs the document. + /// - If false, if path is null, missing, or an empty array, $unwind does not output a document. + /// + /// The default value is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + preserve_null_and_empty_arrays: Option, + }, + /// For cases where we receive pipeline stages from an external source, such as a native query, /// and we don't want to attempt to parse it we store the stage BSON document unaltered. #[serde(untagged)] diff --git a/crates/mongodb-support/src/align.rs b/crates/mongodb-support/src/align.rs index 89ecf741..468487d0 100644 --- a/crates/mongodb-support/src/align.rs +++ b/crates/mongodb-support/src/align.rs @@ -4,15 +4,15 @@ use std::hash::Hash; pub fn align( ts: IndexMap, mut us: IndexMap, - ft: FT, - fu: FU, - ftu: FTU, + mut ft: FT, + mut fu: FU, + mut ftu: FTU, ) -> IndexMap where K: Hash + Eq, - FT: Fn(T) -> V, - FU: Fn(U) -> V, - FTU: Fn(T, U) -> V, + FT: FnMut(T) -> V, + FU: FnMut(U) -> V, + FTU: FnMut(T, U) -> V, { let mut result: IndexMap = IndexMap::new(); @@ -28,3 +28,31 @@ where } result } + +pub fn try_align( + ts: IndexMap, + mut us: IndexMap, + mut ft: FT, + mut fu: FU, + mut ftu: FTU, +) -> Result, E> +where + K: Hash + Eq, + FT: FnMut(T) -> Result, + FU: FnMut(U) -> Result, + FTU: FnMut(T, U) -> Result, +{ + let mut result: IndexMap = IndexMap::new(); + + for (k, t) in ts { + match us.swap_remove(&k) { + None => result.insert(k, ft(t)?), + Some(u) => result.insert(k, ftu(t, u)?), + }; + } + + for (k, u) in us { + result.insert(k, fu(u)?); + } + Ok(result) +} diff --git a/crates/mongodb-support/src/bson_type.rs b/crates/mongodb-support/src/bson_type.rs index 5024a2cf..adf5673f 100644 --- a/crates/mongodb-support/src/bson_type.rs +++ b/crates/mongodb-support/src/bson_type.rs @@ -80,8 +80,7 @@ impl<'de> Deserialize<'de> for BsonType { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Sequence, Serialize, Deserialize, JsonSchema)] -#[serde(try_from = "BsonType", rename_all = "camelCase")] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Sequence, JsonSchema)] pub enum BsonScalarType { // numeric Double, @@ -96,6 +95,10 @@ pub enum BsonScalarType { Date, Timestamp, + // binary subtypes - these are stored in BSON using the BinData type, but there are multiple + // binary subtype codes, and it's useful to have first-class representations for those + UUID, // subtype 4 + // other BinData, ObjectId, @@ -137,6 +140,7 @@ impl BsonScalarType { S::Undefined => "undefined", S::DbPointer => "dbPointer", S::Symbol => "symbol", + S::UUID => "uuid", } } @@ -161,6 +165,7 @@ impl BsonScalarType { S::Undefined => "Undefined", S::DbPointer => "DbPointer", S::Symbol => "Symbol", + S::UUID => "UUID", } } @@ -177,6 +182,31 @@ impl BsonScalarType { scalar_type.ok_or_else(|| Error::UnknownScalarType(name.to_owned())) } + pub fn is_binary(self) -> bool { + match self { + S::BinData => true, + S::UUID => true, + S::Double => false, + S::Decimal => false, + S::Int => false, + S::Long => false, + S::String => false, + S::Date => false, + S::Timestamp => false, + S::ObjectId => false, + S::Bool => 
false, + S::Null => false, + S::Regex => false, + S::Javascript => false, + S::JavascriptWithScope => false, + S::MinKey => false, + S::MaxKey => false, + S::Undefined => false, + S::DbPointer => false, + S::Symbol => false, + } + } + pub fn is_orderable(self) -> bool { match self { S::Double => true, @@ -198,6 +228,7 @@ impl BsonScalarType { S::Undefined => false, S::DbPointer => false, S::Symbol => false, + S::UUID => false, } } @@ -222,6 +253,32 @@ impl BsonScalarType { S::Undefined => false, S::DbPointer => false, S::Symbol => false, + S::UUID => false, + } + } + + pub fn is_fractional(self) -> bool { + match self { + S::Double => true, + S::Decimal => true, + S::Int => false, + S::Long => false, + S::String => false, + S::Date => false, + S::Timestamp => false, + S::BinData => false, + S::UUID => false, + S::ObjectId => false, + S::Bool => false, + S::Null => false, + S::Regex => false, + S::Javascript => false, + S::JavascriptWithScope => false, + S::MinKey => false, + S::MaxKey => false, + S::Undefined => false, + S::DbPointer => false, + S::Symbol => false, } } @@ -246,8 +303,61 @@ impl BsonScalarType { S::Undefined => true, S::DbPointer => true, S::Symbol => true, + S::UUID => true, } } + + /// True iff we consider a to be a supertype of b. + /// + /// Note that if you add more supertypes here then it is important to also update the custom + /// equality check in our tests in mongodb_agent_common::query::serialization::tests. Equality + /// needs to be transitive over supertypes, so for example if we have, + /// + /// (Double, Int), (Decimal, Double) + /// + /// then in addition to comparing ints to doubles, and doubles to decimals, we also need to compare + /// decimals to ints. + pub fn is_supertype(a: Self, b: Self) -> bool { + Self::common_supertype(a, b).is_some_and(|c| c == a) + } + + /// If there is a BSON scalar type that encompasses both a and b, return it. This does not + /// require a and b to overlap. The returned type may be equal to a or b if one is a supertype of + /// the other.
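As a quick illustration of the supertype rules above (an aside, not part of the patch; it assumes BsonScalarType is re-exported from mongodb_support, as it is used elsewhere in this diff):

```rust
use mongodb_support::BsonScalarType as S;

fn main() {
    // Double is a supertype of Int, and BinData covers both binary subtypes.
    assert!(S::is_supertype(S::Double, S::Int));
    assert!(S::is_supertype(S::BinData, S::UUID));
    // The relation is directional: Int is not a supertype of Double.
    assert!(!S::is_supertype(S::Int, S::Double));
}
```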
+ pub fn common_supertype(a: BsonScalarType, b: BsonScalarType) -> Option { + fn helper(a: BsonScalarType, b: BsonScalarType) -> Option { + if a == b { + Some(a) + } else if a.is_binary() && b.is_binary() { + Some(S::BinData) + } else { + match (a, b) { + (S::Double, S::Int) => Some(S::Double), + _ => None, + } + } + } + helper(a, b).or_else(|| helper(b, a)) + } +} + +impl Serialize for BsonScalarType { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(self.bson_name()) + } +} + +impl<'de> Deserialize<'de> for BsonScalarType { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + BsonScalarType::from_bson_name(&s).map_err(serde::de::Error::custom) + } } impl std::fmt::Display for BsonScalarType { @@ -316,4 +426,22 @@ mod tests { assert_eq!(t, BsonType::Scalar(BsonScalarType::Double)); Ok(()) } + + #[test] + fn unifies_double_and_int() { + use BsonScalarType as S; + let t1 = S::common_supertype(S::Double, S::Int); + let t2 = S::common_supertype(S::Int, S::Double); + assert_eq!(t1, Some(S::Double)); + assert_eq!(t2, Some(S::Double)); + } + + #[test] + fn unifies_bin_data_and_uuid() { + use BsonScalarType as S; + let t1 = S::common_supertype(S::BinData, S::UUID); + let t2 = S::common_supertype(S::UUID, S::BinData); + assert_eq!(t1, Some(S::BinData)); + assert_eq!(t2, Some(S::BinData)); + } } diff --git a/crates/mongodb-support/src/lib.rs b/crates/mongodb-support/src/lib.rs index 2f45f8de..f8113b81 100644 --- a/crates/mongodb-support/src/lib.rs +++ b/crates/mongodb-support/src/lib.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod align; mod bson_type; pub mod error; diff --git a/crates/ndc-query-plan/Cargo.toml b/crates/ndc-query-plan/Cargo.toml index 39110ce2..66d42939 100644 --- a/crates/ndc-query-plan/Cargo.toml +++ b/crates/ndc-query-plan/Cargo.toml @@ -9,8 +9,8 @@ indent = "^0.1" indexmap = { workspace = true } itertools = { workspace = true } ndc-models = { workspace = true } -nonempty = "^0.10" -serde_json = "1" +nonempty = { workspace = true } +serde_json = { workspace = true } thiserror = "1" ref-cast = { workspace = true } @@ -20,4 +20,4 @@ ndc-test-helpers = { path = "../ndc-test-helpers" } anyhow = "1" enum-iterator = "2" lazy_static = "1" -pretty_assertions = "1" +pretty_assertions = "1.4" diff --git a/crates/ndc-query-plan/src/lib.rs b/crates/ndc-query-plan/src/lib.rs index f7b6b1b5..000e7e5b 100644 --- a/crates/ndc-query-plan/src/lib.rs +++ b/crates/ndc-query-plan/src/lib.rs @@ -6,11 +6,11 @@ pub mod vec_set; pub use mutation_plan::*; pub use plan_for_query_request::{ + plan_for_mutation_request::plan_for_mutation_request, plan_for_query_request, query_context::QueryContext, query_plan_error::QueryPlanError, - plan_for_mutation_request, type_annotated_field::{type_annotated_field, type_annotated_nested_field}, }; pub use query_plan::*; -pub use type_system::{inline_object_types, ObjectType, Type}; +pub use type_system::{inline_object_types, ObjectField, ObjectType, Type}; diff --git a/crates/ndc-query-plan/src/plan_for_query_request/helpers.rs b/crates/ndc-query-plan/src/plan_for_query_request/helpers.rs index 8dcf8edf..11abe277 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/helpers.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/helpers.rs @@ -1,8 +1,9 @@ use std::collections::BTreeMap; -use ndc_models as ndc; +use itertools::Itertools as _; +use ndc_models::{self as ndc}; -use crate as plan; +use crate::{self as 
plan}; use super::query_plan_error::QueryPlanError; @@ -11,7 +12,7 @@ type Result = std::result::Result; pub fn find_object_field<'a, S>( object_type: &'a plan::ObjectType, field_name: &ndc::FieldName, -) -> Result<&'a plan::Type> { +) -> Result<&'a plan::ObjectField> { object_type.fields.get(field_name).ok_or_else(|| { QueryPlanError::UnknownObjectTypeField { object_type: object_type.name.clone(), @@ -21,28 +22,29 @@ }) } -pub fn find_object_field_path<'a, S>( +pub fn get_object_field_by_path<'a, S>( object_type: &'a plan::ObjectType, field_name: &ndc::FieldName, - field_path: &Option>, -) -> Result<&'a plan::Type> { + field_path: Option<&[ndc::FieldName]>, +) -> Result<&'a plan::ObjectField> { match field_path { None => find_object_field(object_type, field_name), - Some(field_path) => find_object_field_path_helper(object_type, field_name, field_path), + Some(field_path) => get_object_field_by_path_helper(object_type, field_name, field_path), } } -fn find_object_field_path_helper<'a, S>( +fn get_object_field_by_path_helper<'a, S>( object_type: &'a plan::ObjectType, field_name: &ndc::FieldName, field_path: &[ndc::FieldName], -) -> Result<&'a plan::Type> { - let field_type = find_object_field(object_type, field_name)?; +) -> Result<&'a plan::ObjectField> { + let object_field = find_object_field(object_type, field_name)?; + let field_type = &object_field.r#type; match field_path { - [] => Ok(field_type), + [] => Ok(object_field), [nested_field_name, rest @ ..] => { let o = find_object_type(field_type, &object_type.name, field_name)?; - find_object_field_path_helper(o, nested_field_name, rest) + get_object_field_by_path_helper(o, nested_field_name, rest) } } } @@ -65,9 +67,61 @@ fn find_object_type<'a, S>( }), crate::Type::Nullable(t) => find_object_type(t, parent_type, field_name), crate::Type::Object(object_type) => Ok(object_type), + crate::Type::Tuple(ts) => { + let object_types = ts + .iter() + .flat_map(|t| find_object_type(t, parent_type, field_name)) + .collect_vec(); + if object_types.len() == 1 { + Ok(object_types[0]) + } else { + Err(QueryPlanError::ExpectedObjectTypeAtField { + parent_type: parent_type.to_owned(), + field_name: field_name.to_owned(), + got: "array".to_owned(), + }) + } + } } } +/// Given the type of a collection and a field path, returns the type of the nested values in an +/// array field at that path. +pub fn find_nested_collection_type( + collection_object_type: plan::ObjectType, + field_path: &[ndc::FieldName], +) -> Result> +where + S: Clone + std::fmt::Debug, +{ + let nested_field = match field_path { + [field_name] => get_object_field_by_path(&collection_object_type, field_name, None), + [field_name, rest_of_path @ ..] => { + get_object_field_by_path(&collection_object_type, field_name, Some(rest_of_path)) + } + [] => Err(QueryPlanError::UnknownCollection(field_path.join("."))), + }?; + let element_type = nested_field.r#type.clone().into_array_element_type()?; + Ok(element_type) +} + +/// Given the type of a collection and a field path, returns the object type of the nested object at +/// that path. +/// +/// This function differs from [find_nested_collection_type] in that this one returns +/// [plan::ObjectType] instead of [plan::Type], and returns an error if the nested type is not an +/// object type.
+pub fn find_nested_collection_object_type( + collection_object_type: plan::ObjectType, + field_path: &[ndc::FieldName], +) -> Result> +where + S: Clone + std::fmt::Debug, +{ + let collection_element_type = find_nested_collection_type(collection_object_type, field_path)?; + collection_element_type.into_object_type() +} + pub fn lookup_relationship<'a>( relationships: &'a BTreeMap, relationship: &ndc::RelationshipName, diff --git a/crates/ndc-query-plan/src/plan_for_query_request/mod.rs b/crates/ndc-query-plan/src/plan_for_query_request/mod.rs index 4da4fb04..f5d87585 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/mod.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/mod.rs @@ -1,6 +1,9 @@ mod helpers; mod plan_for_arguments; -mod plan_for_mutation_request; +mod plan_for_expression; +mod plan_for_grouping; +pub mod plan_for_mutation_request; +mod plan_for_relationship; pub mod query_context; pub mod query_plan_error; mod query_plan_state; @@ -12,23 +15,22 @@ mod plan_test_helpers; #[cfg(test)] mod tests; -use std::collections::VecDeque; - -use crate::{self as plan, type_annotated_field, ObjectType, QueryPlan, Scope}; +use crate::{self as plan, type_annotated_field, QueryPlan, Scope}; use indexmap::IndexMap; use itertools::Itertools; -use ndc::{ExistsInCollection, QueryRequest}; -use ndc_models as ndc; +use ndc_models::{self as ndc, QueryRequest}; +use plan_for_relationship::plan_for_relationship_path; use query_plan_state::QueryPlanInfo; use self::{ - helpers::{find_object_field, find_object_field_path, lookup_relationship}, - plan_for_arguments::plan_for_arguments, + helpers::{find_object_field, get_object_field_by_path}, + plan_for_arguments::{plan_arguments_from_plan_parameters, plan_for_arguments}, + plan_for_expression::plan_for_expression, + plan_for_grouping::plan_for_grouping, query_context::QueryContext, query_plan_error::QueryPlanError, query_plan_state::QueryPlanState, }; -pub use self::plan_for_mutation_request::plan_for_mutation_request; type Result = std::result::Result; @@ -98,8 +100,10 @@ pub fn plan_for_query( ) -> Result> { let mut plan_state = plan_state.state_for_subquery(); - let aggregates = - plan_for_aggregates(plan_state.context, collection_object_type, query.aggregates)?; + let aggregates = query + .aggregates + .map(|aggregates| plan_for_aggregates(&mut plan_state, collection_object_type, aggregates)) + .transpose()?; let fields = plan_for_fields( &mut plan_state, root_collection_object_type, @@ -134,61 +138,95 @@ pub fn plan_for_query( }) .transpose()?; + let groups = query + .groups + .map(|grouping| { + plan_for_grouping( + &mut plan_state, + root_collection_object_type, + collection_object_type, + grouping, + ) + }) + .transpose()?; + Ok(plan::Query { aggregates, - aggregates_limit: limit, fields, order_by, limit, offset, predicate, + groups, relationships: plan_state.into_relationships(), scope: None, }) } fn plan_for_aggregates( - context: &T, + plan_state: &mut QueryPlanState<'_, T>, collection_object_type: &plan::ObjectType, - ndc_aggregates: Option>, -) -> Result>>> { + ndc_aggregates: IndexMap, +) -> Result>> { ndc_aggregates - .map(|aggregates| -> Result<_> { - aggregates - .into_iter() - .map(|(name, aggregate)| { - Ok(( - name, - plan_for_aggregate(context, collection_object_type, aggregate)?, - )) - }) - .collect() + .into_iter() + .map(|(name, aggregate)| { + Ok(( + name, + plan_for_aggregate(plan_state, collection_object_type, aggregate)?, + )) }) - .transpose() + .collect() } fn plan_for_aggregate( - context: &T, + 
plan_state: &mut QueryPlanState<'_, T>, collection_object_type: &plan::ObjectType, aggregate: ndc::Aggregate, ) -> Result> { match aggregate { ndc::Aggregate::ColumnCount { column, + arguments, distinct, - field_path: _, - } => Ok(plan::Aggregate::ColumnCount { column, distinct }), + field_path, + } => { + let object_field = collection_object_type.get(&column)?; + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, + )?; + Ok(plan::Aggregate::ColumnCount { + column, + arguments: plan_arguments, + distinct, + field_path, + }) + } ndc::Aggregate::SingleColumn { column, + arguments, function, - field_path: _, + field_path, } => { - let object_type_field_type = find_object_field(collection_object_type, &column)?; - // let column_scalar_type_name = get_scalar_type_name(&object_type_field.r#type)?; - let (function, definition) = - context.find_aggregation_function_definition(object_type_field_type, &function)?; + let nested_object_field = + get_object_field_by_path(collection_object_type, &column, field_path.as_deref())?; + let column_type = &nested_object_field.r#type; + let object_field = collection_object_type.get(&column)?; + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, + )?; + let (function, definition) = plan_state + .context + .find_aggregation_function_definition(column_type, &function)?; Ok(plan::Aggregate::SingleColumn { column, + column_type: column_type.clone(), + arguments: plan_arguments, + field_path, function, result_type: definition.result_type.clone(), }) @@ -253,455 +291,133 @@ fn plan_for_order_by_element( ) -> Result> { let target = match element.target { ndc::OrderByTarget::Column { - name, - field_path, path, - } => plan::OrderByTarget::Column { - name: name.clone(), + name, + arguments, field_path, - path: plan_for_relationship_path( - plan_state, - root_collection_object_type, - object_type, - path, - vec![name], - )? 
- .0, - }, - ndc::OrderByTarget::SingleColumnAggregate { - column, - function, - path, - field_path: _, } => { - let (plan_path, target_object_type) = plan_for_relationship_path( + let (relationship_names, collection_object_type) = plan_for_relationship_path( plan_state, root_collection_object_type, object_type, path, - vec![], // TODO: MDB-156 propagate requested aggregate to relationship query + vec![name.clone()], )?; - let column_type = find_object_field(&target_object_type, &column)?; - let (function, function_definition) = plan_state - .context - .find_aggregation_function_definition(column_type, &function)?; + let object_field = collection_object_type.get(&name)?; - plan::OrderByTarget::SingleColumnAggregate { - column, - function, - result_type: function_definition.result_type.clone(), - path: plan_path, + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, + )?; + + plan::OrderByTarget::Column { + path: relationship_names, + name: name.clone(), + arguments: plan_arguments, + field_path, } } - ndc::OrderByTarget::StarCountAggregate { path } => { - let (plan_path, _) = plan_for_relationship_path( + ndc::OrderByTarget::Aggregate { + path, + aggregate: + ndc::Aggregate::ColumnCount { + column, + arguments, + field_path, + distinct, + }, + } => { + let (plan_path, collection_object_type) = plan_for_relationship_path( plan_state, root_collection_object_type, object_type, path, - vec![], // TODO: MDB-157 propagate requested aggregate to relationship query + vec![], // TODO: ENG-1019 propagate requested aggregate to relationship query )?; - plan::OrderByTarget::StarCountAggregate { path: plan_path } - } - }; - Ok(plan::OrderByElement { - order_direction: element.order_direction, - target, - }) -} + let object_field = collection_object_type.get(&column)?; -/// Returns list of aliases for joins to traverse, plus the object type of the final collection in -/// the path. 
-fn plan_for_relationship_path( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - object_type: &plan::ObjectType, - relationship_path: Vec, - requested_columns: Vec, // columns to select from last path element -) -> Result<(Vec, ObjectType)> { - let end_of_relationship_path_object_type = relationship_path - .last() - .map(|last_path_element| { - let relationship = lookup_relationship( - plan_state.collection_relationships, - &last_path_element.relationship, + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, )?; - plan_state - .context - .find_collection_object_type(&relationship.target_collection) - }) - .transpose()?; - let target_object_type = end_of_relationship_path_object_type.unwrap_or(object_type.clone()); - - let reversed_relationship_path = { - let mut path = relationship_path; - path.reverse(); - path - }; - - let vec_deque = plan_for_relationship_path_helper( - plan_state, - root_collection_object_type, - reversed_relationship_path, - requested_columns, - )?; - let aliases = vec_deque.into_iter().collect(); - - Ok((aliases, target_object_type)) -} - -fn plan_for_relationship_path_helper( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - mut reversed_relationship_path: Vec, - requested_columns: Vec, // columns to select from last path element -) -> Result> { - if reversed_relationship_path.is_empty() { - return Ok(VecDeque::new()); - } - // safety: we just made an early return if the path is empty - let head = reversed_relationship_path.pop().unwrap(); - let tail = reversed_relationship_path; - let is_last = tail.is_empty(); - - let ndc::PathElement { - relationship, - arguments, - predicate, - } = head; - - let relationship_def = lookup_relationship(plan_state.collection_relationships, &relationship)?; - let related_collection_type = plan_state - .context - .find_collection_object_type(&relationship_def.target_collection)?; - let mut nested_state = plan_state.state_for_subquery(); - - // If this is the last path element then we need to apply the requested fields to the - // relationship query. Otherwise we need to recursively process the rest of the path. Both - // cases take ownership of `requested_columns` so we group them together. 
- let (mut rest_path, fields) = if is_last { - let fields = requested_columns - .into_iter() - .map(|column_name| { - let column_type = - find_object_field(&related_collection_type, &column_name)?.clone(); - Ok(( - column_name.clone(), - plan::Field::Column { - column: column_name, - fields: None, - column_type, - }, - )) - }) - .collect::>()?; - (VecDeque::new(), Some(fields)) - } else { - let rest = plan_for_relationship_path_helper( - &mut nested_state, - root_collection_object_type, - tail, - requested_columns, - )?; - (rest, None) - }; - - let predicate_plan = predicate - .map(|p| { - plan_for_expression( - &mut nested_state, - root_collection_object_type, - &related_collection_type, - *p, - ) - }) - .transpose()?; - - let nested_relationships = nested_state.into_relationships(); - - let relationship_query = plan::Query { - predicate: predicate_plan, - relationships: nested_relationships, - fields, - ..Default::default() - }; - - let relation_key = - plan_state.register_relationship(relationship, arguments, relationship_query)?; - - rest_path.push_front(relation_key); - Ok(rest_path) -} - -fn plan_for_expression( - plan_state: &mut QueryPlanState, - root_collection_object_type: &plan::ObjectType, - object_type: &plan::ObjectType, - expression: ndc::Expression, -) -> Result> { - match expression { - ndc::Expression::And { expressions } => Ok(plan::Expression::And { - expressions: expressions - .into_iter() - .map(|expr| { - plan_for_expression(plan_state, root_collection_object_type, object_type, expr) - }) - .collect::>()?, - }), - ndc::Expression::Or { expressions } => Ok(plan::Expression::Or { - expressions: expressions - .into_iter() - .map(|expr| { - plan_for_expression(plan_state, root_collection_object_type, object_type, expr) - }) - .collect::>()?, - }), - ndc::Expression::Not { expression } => Ok(plan::Expression::Not { - expression: Box::new(plan_for_expression( - plan_state, - root_collection_object_type, - object_type, - *expression, - )?), - }), - ndc::Expression::UnaryComparisonOperator { column, operator } => { - Ok(plan::Expression::UnaryComparisonOperator { - column: plan_for_comparison_target( - plan_state, - root_collection_object_type, - object_type, + plan::OrderByTarget::Aggregate { + path: plan_path, + aggregate: plan::Aggregate::ColumnCount { column, - )?, - operator, - }) - } - ndc::Expression::BinaryComparisonOperator { - column, - operator, - value, - } => plan_for_binary_comparison( - plan_state, - root_collection_object_type, - object_type, - column, - operator, - value, - ), - ndc::Expression::Exists { - in_collection, - predicate, - } => plan_for_exists( - plan_state, - root_collection_object_type, - in_collection, - predicate, - ), - } -} - -fn plan_for_binary_comparison( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - object_type: &plan::ObjectType, - column: ndc::ComparisonTarget, - operator: ndc::ComparisonOperatorName, - value: ndc::ComparisonValue, -) -> Result> { - let comparison_target = - plan_for_comparison_target(plan_state, root_collection_object_type, object_type, column)?; - let (operator, operator_definition) = plan_state - .context - .find_comparison_operator(comparison_target.get_field_type(), &operator)?; - let value_type = match operator_definition { - plan::ComparisonOperatorDefinition::Equal => comparison_target.get_field_type().clone(), - plan::ComparisonOperatorDefinition::In => { - plan::Type::ArrayOf(Box::new(comparison_target.get_field_type().clone())) + arguments: plan_arguments, + 
field_path, + distinct, + }, + } } - plan::ComparisonOperatorDefinition::Custom { argument_type } => argument_type.clone(), - }; - Ok(plan::Expression::BinaryComparisonOperator { - operator, - value: plan_for_comparison_value( - plan_state, - root_collection_object_type, - object_type, - value_type, - value, - )?, - column: comparison_target, - }) -} - -fn plan_for_comparison_target( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - object_type: &plan::ObjectType, - target: ndc::ComparisonTarget, -) -> Result> { - match target { - ndc::ComparisonTarget::Column { - name, - field_path, + ndc::OrderByTarget::Aggregate { path, + aggregate: + ndc::Aggregate::SingleColumn { + column, + arguments, + field_path, + function, + }, } => { - let requested_columns = vec![name.clone()]; - let (path, target_object_type) = plan_for_relationship_path( + let (plan_path, collection_object_type) = plan_for_relationship_path( plan_state, root_collection_object_type, object_type, path, - requested_columns, + vec![], // TODO: ENG-1019 propagate requested aggregate to relationship query )?; - let field_type = - find_object_field_path(&target_object_type, &name, &field_path)?.clone(); - Ok(plan::ComparisonTarget::Column { - name, - field_path, - path, - field_type, - }) - } - ndc::ComparisonTarget::RootCollectionColumn { name, field_path } => { - let field_type = - find_object_field_path(root_collection_object_type, &name, &field_path)?.clone(); - Ok(plan::ComparisonTarget::ColumnInScope { - name, - field_path, - field_type, - scope: plan_state.scope.clone(), - }) - } - } -} -fn plan_for_comparison_value( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - object_type: &plan::ObjectType, - expected_type: plan::Type, - value: ndc::ComparisonValue, -) -> Result> { - match value { - ndc::ComparisonValue::Column { column } => Ok(plan::ComparisonValue::Column { - column: plan_for_comparison_target( + let object_field = collection_object_type.get(&column)?; + + let plan_arguments = plan_arguments_from_plan_parameters( plan_state, - root_collection_object_type, - object_type, - column, - )?, - }), - ndc::ComparisonValue::Scalar { value } => Ok(plan::ComparisonValue::Scalar { - value, - value_type: expected_type, - }), - ndc::ComparisonValue::Variable { name } => { - plan_state.register_variable_use(&name, expected_type.clone()); - Ok(plan::ComparisonValue::Variable { - name, - variable_type: expected_type, - }) - } - } -} + &object_field.parameters, + arguments, + )?; -fn plan_for_exists( - plan_state: &mut QueryPlanState<'_, T>, - root_collection_object_type: &plan::ObjectType, - in_collection: ExistsInCollection, - predicate: Option>, -) -> Result> { - let mut nested_state = plan_state.state_for_subquery(); - - let (in_collection, predicate) = match in_collection { - ndc::ExistsInCollection::Related { - relationship, - arguments, - } => { - let ndc_relationship = - lookup_relationship(plan_state.collection_relationships, &relationship)?; - let collection_object_type = plan_state + let object_field = find_object_field(&collection_object_type, &column)?; + let column_type = &object_field.r#type; + let (function, function_definition) = plan_state .context - .find_collection_object_type(&ndc_relationship.target_collection)?; - - let predicate = predicate - .map(|expression| { - plan_for_expression( - &mut nested_state, - root_collection_object_type, - &collection_object_type, - *expression, - ) - }) - .transpose()?; - - let fields = 
predicate.as_ref().map(|p| { - p.query_local_comparison_targets() - .map(|comparison_target| { - ( - comparison_target.column_name().to_owned(), - plan::Field::Column { - column: comparison_target.column_name().clone(), - column_type: comparison_target.get_field_type().clone(), - fields: None, - }, - ) - }) - .collect() - }); - - let relationship_query = plan::Query { - fields, - relationships: nested_state.into_relationships(), - ..Default::default() - }; - - let relationship_key = - plan_state.register_relationship(relationship, arguments, relationship_query)?; - - let in_collection = plan::ExistsInCollection::Related { - relationship: relationship_key, - }; + .find_aggregation_function_definition(column_type, &function)?; - Ok((in_collection, predicate)) as Result<_> + plan::OrderByTarget::Aggregate { + path: plan_path, + aggregate: plan::Aggregate::SingleColumn { + column, + column_type: column_type.clone(), + arguments: plan_arguments, + field_path, + function, + result_type: function_definition.result_type.clone(), + }, + } } - ndc::ExistsInCollection::Unrelated { - collection, - arguments, + ndc::OrderByTarget::Aggregate { + path, + aggregate: ndc::Aggregate::StarCount {}, } => { - let collection_object_type = plan_state - .context - .find_collection_object_type(&collection)?; - - let predicate = predicate - .map(|expression| { - plan_for_expression( - &mut nested_state, - root_collection_object_type, - &collection_object_type, - *expression, - ) - }) - .transpose()?; - - let join_query = plan::Query { - predicate: predicate.clone(), - relationships: nested_state.into_relationships(), - ..Default::default() - }; - - let join_key = plan_state.register_unrelated_join(collection, arguments, join_query)?; - - let in_collection = plan::ExistsInCollection::Unrelated { - unrelated_collection: join_key, - }; - Ok((in_collection, predicate)) + let (plan_path, _) = plan_for_relationship_path( + plan_state, + root_collection_object_type, + object_type, + path, + vec![], // TODO: ENG-1019 propagate requested aggregate to relationship query + )?; + plan::OrderByTarget::Aggregate { + path: plan_path, + aggregate: plan::Aggregate::StarCount, + } } - }?; + }; - Ok(plan::Expression::Exists { - in_collection, - predicate: predicate.map(Box::new), + Ok(plan::OrderByElement { + order_direction: element.order_direction, + target, }) } diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_for_arguments.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_arguments.rs index 6f485448..b15afb1c 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/plan_for_arguments.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_arguments.rs @@ -44,7 +44,7 @@ pub fn plan_for_mutation_procedure_arguments( ) } -/// Convert maps of [ndc::Argument] values to maps of [plan::Argument] +/// Convert maps of [ndc::RelationshipArgument] values to maps of [plan::RelationshipArgument] pub fn plan_for_relationship_arguments( plan_state: &mut QueryPlanState<'_, T>, parameters: &BTreeMap, @@ -70,17 +70,54 @@ pub fn plan_for_relationship_arguments( Ok(arguments) } +/// Create a map of plan arguments when we already have plan types for parameters. 
+pub fn plan_arguments_from_plan_parameters( + plan_state: &mut QueryPlanState<'_, T>, + parameters: &BTreeMap>, + arguments: BTreeMap, +) -> Result>> { + let arguments = plan_for_arguments_generic( + plan_state, + parameters, + arguments, + |_plan_state, plan_type, argument| match argument { + ndc::Argument::Variable { name } => Ok(plan::Argument::Variable { + name, + argument_type: plan_type.clone(), + }), + ndc::Argument::Literal { value } => Ok(plan::Argument::Literal { + value, + argument_type: plan_type.clone(), + }), + }, + )?; + + for argument in arguments.values() { + if let plan::Argument::Variable { + name, + argument_type, + } = argument + { + plan_state.register_variable_use(name, argument_type.clone()) + } + } + + Ok(arguments) +} + fn plan_for_argument( plan_state: &mut QueryPlanState<'_, T>, - parameter_type: &ndc::Type, + argument_info: &ndc::ArgumentInfo, argument: ndc::Argument, ) -> Result> { match argument { ndc::Argument::Variable { name } => Ok(plan::Argument::Variable { name, - argument_type: plan_state.context.ndc_to_plan_type(parameter_type)?, + argument_type: plan_state + .context + .ndc_to_plan_type(&argument_info.argument_type)?, }), - ndc::Argument::Literal { value } => match parameter_type { + ndc::Argument::Literal { value } => match &argument_info.argument_type { ndc::Type::Predicate { object_type_name } => Ok(plan::Argument::Predicate { expression: plan_for_predicate(plan_state, object_type_name, value)?, }), @@ -94,10 +131,10 @@ fn plan_for_argument( fn plan_for_mutation_procedure_argument( plan_state: &mut QueryPlanState<'_, T>, - parameter_type: &ndc::Type, + argument_info: &ndc::ArgumentInfo, value: serde_json::Value, ) -> Result> { - match parameter_type { + match &argument_info.argument_type { ndc::Type::Predicate { object_type_name } => { Ok(plan::MutationProcedureArgument::Predicate { expression: plan_for_predicate(plan_state, object_type_name, value)?, @@ -112,19 +149,20 @@ fn plan_for_mutation_procedure_argument( fn plan_for_relationship_argument( plan_state: &mut QueryPlanState<'_, T>, - parameter_type: &ndc::Type, + argument_info: &ndc::ArgumentInfo, argument: ndc::RelationshipArgument, ) -> Result> { + let argument_type = &argument_info.argument_type; match argument { ndc::RelationshipArgument::Variable { name } => Ok(plan::RelationshipArgument::Variable { name, - argument_type: plan_state.context.ndc_to_plan_type(parameter_type)?, + argument_type: plan_state.context.ndc_to_plan_type(argument_type)?, }), ndc::RelationshipArgument::Column { name } => Ok(plan::RelationshipArgument::Column { name, - argument_type: plan_state.context.ndc_to_plan_type(parameter_type)?, + argument_type: plan_state.context.ndc_to_plan_type(argument_type)?, }), - ndc::RelationshipArgument::Literal { value } => match parameter_type { + ndc::RelationshipArgument::Literal { value } => match argument_type { ndc::Type::Predicate { object_type_name } => { Ok(plan::RelationshipArgument::Predicate { expression: plan_for_predicate(plan_state, object_type_name, value)?, @@ -151,19 +189,19 @@ fn plan_for_predicate( /// Convert maps of [ndc::Argument] or [ndc::RelationshipArgument] values to [plan::Argument] or /// [plan::RelationshipArgument] respectively. 
-fn plan_for_arguments_generic( +fn plan_for_arguments_generic( plan_state: &mut QueryPlanState<'_, T>, - parameters: &BTreeMap, + parameters: &BTreeMap, mut arguments: BTreeMap, convert_argument: F, ) -> Result> where - F: Fn(&mut QueryPlanState<'_, T>, &ndc::Type, NdcArgument) -> Result, + F: Fn(&mut QueryPlanState<'_, T>, &Parameter, NdcArgument) -> Result, { validate_no_excess_arguments(parameters, &arguments)?; let (arguments, missing): ( - Vec<(ndc::ArgumentName, NdcArgument, &ndc::ArgumentInfo)>, + Vec<(ndc::ArgumentName, NdcArgument, &Parameter)>, Vec, ) = parameters .iter() @@ -185,7 +223,7 @@ where ) = arguments .into_iter() .map(|(name, argument, argument_info)| { - match convert_argument(plan_state, &argument_info.argument_type, argument) { + match convert_argument(plan_state, argument_info, argument) { Ok(argument) => Ok((name, argument)), Err(err) => Err((name, err)), } @@ -198,8 +236,8 @@ where Ok(resolved) } -pub fn validate_no_excess_arguments( - parameters: &BTreeMap, +pub fn validate_no_excess_arguments( + parameters: &BTreeMap, arguments: &BTreeMap, ) -> Result<()> { let excess: Vec = arguments diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_for_expression.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_expression.rs new file mode 100644 index 00000000..8c30d984 --- /dev/null +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_expression.rs @@ -0,0 +1,431 @@ +use std::iter::once; + +use indexmap::IndexMap; +use itertools::Itertools as _; +use ndc_models::{self as ndc, ExistsInCollection}; + +use crate::{self as plan, QueryContext, QueryPlanError}; + +use super::{ + helpers::{ + find_nested_collection_object_type, find_nested_collection_type, + get_object_field_by_path, lookup_relationship, + }, + plan_for_arguments::plan_arguments_from_plan_parameters, + plan_for_relationship::plan_for_relationship_path, + query_plan_state::QueryPlanState, +}; + +type Result = std::result::Result; + +pub fn plan_for_expression( + plan_state: &mut QueryPlanState, + root_collection_object_type: &plan::ObjectType, + object_type: &plan::ObjectType, + expression: ndc::Expression, +) -> Result> { + match expression { + ndc::Expression::And { expressions } => Ok(plan::Expression::And { + expressions: expressions + .into_iter() + .map(|expr| { + plan_for_expression(plan_state, root_collection_object_type, object_type, expr) + }) + .collect::>()?, + }), + ndc::Expression::Or { expressions } => Ok(plan::Expression::Or { + expressions: expressions + .into_iter() + .map(|expr| { + plan_for_expression(plan_state, root_collection_object_type, object_type, expr) + }) + .collect::>()?, + }), + ndc::Expression::Not { expression } => Ok(plan::Expression::Not { + expression: Box::new(plan_for_expression( + plan_state, + root_collection_object_type, + object_type, + *expression, + )?), + }), + ndc::Expression::UnaryComparisonOperator { column, operator } => { + Ok(plan::Expression::UnaryComparisonOperator { + column: plan_for_comparison_target(plan_state, object_type, column)?, + operator, + }) + } + ndc::Expression::BinaryComparisonOperator { + column, + operator, + value, + } => plan_for_binary_comparison( + plan_state, + root_collection_object_type, + object_type, + column, + operator, + value, + ), + ndc::Expression::ArrayComparison { column, comparison } => plan_for_array_comparison( + plan_state, + root_collection_object_type, + object_type, + column, + comparison, + ), + ndc::Expression::Exists { + in_collection, + predicate, + } => plan_for_exists( + 
plan_state, + root_collection_object_type, + in_collection, + predicate, + ), + } +} + +fn plan_for_binary_comparison( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + object_type: &plan::ObjectType, + column: ndc::ComparisonTarget, + operator: ndc::ComparisonOperatorName, + value: ndc::ComparisonValue, +) -> Result> { + let comparison_target = plan_for_comparison_target(plan_state, object_type, column)?; + let (operator, operator_definition) = plan_state + .context + .find_comparison_operator(comparison_target.target_type(), &operator)?; + let value_type = operator_definition.argument_type(comparison_target.target_type()); + Ok(plan::Expression::BinaryComparisonOperator { + operator, + value: plan_for_comparison_value( + plan_state, + root_collection_object_type, + object_type, + value_type, + value, + )?, + column: comparison_target, + }) +} + +fn plan_for_array_comparison( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + object_type: &plan::ObjectType, + column: ndc::ComparisonTarget, + comparison: ndc::ArrayComparison, +) -> Result> { + let comparison_target = plan_for_comparison_target(plan_state, object_type, column)?; + let plan_comparison = match comparison { + ndc::ArrayComparison::Contains { value } => { + let array_element_type = comparison_target + .target_type() + .clone() + .into_array_element_type()?; + let value = plan_for_comparison_value( + plan_state, + root_collection_object_type, + object_type, + array_element_type, + value, + )?; + plan::ArrayComparison::Contains { value } + } + ndc::ArrayComparison::IsEmpty => plan::ArrayComparison::IsEmpty, + }; + Ok(plan::Expression::ArrayComparison { + column: comparison_target, + comparison: plan_comparison, + }) +} + +fn plan_for_comparison_target( + plan_state: &mut QueryPlanState<'_, T>, + object_type: &plan::ObjectType, + target: ndc::ComparisonTarget, +) -> Result> { + match target { + ndc::ComparisonTarget::Column { + name, + arguments, + field_path, + } => { + let object_field = + get_object_field_by_path(object_type, &name, field_path.as_deref())?.clone(); + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, + )?; + Ok(plan::ComparisonTarget::Column { + name, + arguments: plan_arguments, + field_path, + field_type: object_field.r#type, + }) + } + ndc::ComparisonTarget::Aggregate { .. 
} => { + // TODO: ENG-1457 implement query.aggregates.filter_by + Err(QueryPlanError::NotImplemented( + "filter by aggregate".to_string(), + )) + } + } +} + +fn plan_for_comparison_value( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + object_type: &plan::ObjectType, + expected_type: plan::Type, + value: ndc::ComparisonValue, +) -> Result> { + match value { + ndc::ComparisonValue::Column { + path, + name, + arguments, + field_path, + scope, + } => { + let (plan_path, collection_object_type) = plan_for_relationship_path( + plan_state, + root_collection_object_type, + object_type, + path, + vec![name.clone()], + )?; + let object_field = collection_object_type.get(&name)?; + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &object_field.parameters, + arguments, + )?; + Ok(plan::ComparisonValue::Column { + path: plan_path, + name, + arguments: plan_arguments, + field_path, + field_type: object_field.r#type.clone(), + scope, + }) + } + ndc::ComparisonValue::Scalar { value } => Ok(plan::ComparisonValue::Scalar { + value, + value_type: expected_type, + }), + ndc::ComparisonValue::Variable { name } => { + plan_state.register_variable_use(&name, expected_type.clone()); + Ok(plan::ComparisonValue::Variable { + name, + variable_type: expected_type, + }) + } + } +} + +fn plan_for_exists( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + in_collection: ExistsInCollection, + predicate: Option>, +) -> Result> { + let mut nested_state = plan_state.state_for_subquery(); + + let (in_collection, predicate) = match in_collection { + ndc::ExistsInCollection::Related { + relationship, + arguments, + field_path: _, // TODO: ENG-1490 requires propagating this, probably through the `register_relationship` call + } => { + let ndc_relationship = + lookup_relationship(plan_state.collection_relationships, &relationship)?; + let collection_object_type = plan_state + .context + .find_collection_object_type(&ndc_relationship.target_collection)?; + + let predicate = predicate + .map(|expression| { + plan_for_expression( + &mut nested_state, + root_collection_object_type, + &collection_object_type, + *expression, + ) + }) + .transpose()?; + + // TODO: ENG-1457 When we implement query.aggregates.filter_by we'll need to collect aggregates + // here as well as fields. + let fields = predicate.as_ref().map(|p| { + let mut fields = IndexMap::new(); + for comparison_target in p.query_local_comparison_targets() { + match comparison_target.into_owned() { + plan::ComparisonTarget::Column { + name, + arguments: _, + field_type, + .. 
+ } => fields.insert( + name.clone(), + plan::Field::Column { + column: name, + fields: None, + column_type: field_type, + }, + ), + }; + } + fields + }); + + let relationship_query = plan::Query { + fields, + relationships: nested_state.into_relationships(), + ..Default::default() + }; + + let relationship_key = + plan_state.register_relationship(relationship, arguments, relationship_query)?; + + let in_collection = plan::ExistsInCollection::Related { + relationship: relationship_key, + }; + + Ok((in_collection, predicate)) as Result<_> + } + ndc::ExistsInCollection::Unrelated { + collection, + arguments, + } => { + let collection_object_type = plan_state + .context + .find_collection_object_type(&collection)?; + + let predicate = predicate + .map(|expression| { + plan_for_expression( + &mut nested_state, + root_collection_object_type, + &collection_object_type, + *expression, + ) + }) + .transpose()?; + + let join_query = plan::Query { + predicate: predicate.clone(), + relationships: nested_state.into_relationships(), + ..Default::default() + }; + + let join_key = plan_state.register_unrelated_join(collection, arguments, join_query)?; + + let in_collection = plan::ExistsInCollection::Unrelated { + unrelated_collection: join_key, + }; + Ok((in_collection, predicate)) + } + ndc::ExistsInCollection::NestedCollection { + column_name, + arguments, + field_path, + } => { + let object_field = root_collection_object_type.get(&column_name)?; + let plan_arguments = plan_arguments_from_plan_parameters( + &mut nested_state, + &object_field.parameters, + arguments, + )?; + + let nested_collection_type = find_nested_collection_object_type( + root_collection_object_type.clone(), + &field_path + .clone() + .into_iter() + .chain(once(column_name.clone())) + .collect_vec(), + )?; + + let in_collection = plan::ExistsInCollection::NestedCollection { + column_name, + arguments: plan_arguments, + field_path, + }; + + let predicate = predicate + .map(|expression| { + plan_for_expression( + &mut nested_state, + root_collection_object_type, + &nested_collection_type, + *expression, + ) + }) + .transpose()?; + + Ok((in_collection, predicate)) + } + ExistsInCollection::NestedScalarCollection { + column_name, + arguments, + field_path, + } => { + let object_field = root_collection_object_type.get(&column_name)?; + let plan_arguments = plan_arguments_from_plan_parameters( + &mut nested_state, + &object_field.parameters, + arguments, + )?; + + let nested_collection_type = find_nested_collection_type( + root_collection_object_type.clone(), + &field_path + .clone() + .into_iter() + .chain(once(column_name.clone())) + .collect_vec(), + )?; + + let virtual_object_type = plan::ObjectType { + name: None, + fields: [( + "__value".into(), + plan::ObjectField { + r#type: nested_collection_type, + parameters: Default::default(), + }, + )] + .into(), + }; + + let in_collection = plan::ExistsInCollection::NestedScalarCollection { + column_name, + arguments: plan_arguments, + field_path, + }; + + let predicate = predicate + .map(|expression| { + plan_for_expression( + &mut nested_state, + root_collection_object_type, + &virtual_object_type, + *expression, + ) + }) + .transpose()?; + + Ok((in_collection, predicate)) + } + }?; + + Ok(plan::Expression::Exists { + in_collection, + predicate: predicate.map(Box::new), + }) +} diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_for_grouping.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_grouping.rs new file mode 100644 index 00000000..80b7a3cb --- 
/dev/null +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_grouping.rs @@ -0,0 +1,234 @@ +use ndc_models::{self as ndc}; + +use crate::{self as plan, ConnectorTypes, QueryContext, QueryPlanError}; + +use super::{ + helpers::get_object_field_by_path, plan_for_aggregate, plan_for_aggregates, + plan_for_arguments::plan_arguments_from_plan_parameters, + plan_for_relationship::plan_for_relationship_path, query_plan_state::QueryPlanState, +}; + +type Result = std::result::Result; + +pub fn plan_for_grouping( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + collection_object_type: &plan::ObjectType, + grouping: ndc::Grouping, +) -> Result> { + let dimensions = grouping + .dimensions + .into_iter() + .map(|d| { + plan_for_dimension( + plan_state, + root_collection_object_type, + collection_object_type, + d, + ) + }) + .collect::>()?; + + let aggregates = plan_for_aggregates(plan_state, collection_object_type, grouping.aggregates)?; + + let predicate = grouping + .predicate + .map(|predicate| plan_for_group_expression(plan_state, collection_object_type, predicate)) + .transpose()?; + + let order_by = grouping + .order_by + .map(|order_by| plan_for_group_order_by(plan_state, collection_object_type, order_by)) + .transpose()?; + + let plan_grouping = plan::Grouping { + dimensions, + aggregates, + predicate, + order_by, + limit: grouping.limit, + offset: grouping.offset, + }; + Ok(plan_grouping) +} + +fn plan_for_dimension( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + collection_object_type: &plan::ObjectType, + dimension: ndc::Dimension, +) -> Result> { + let plan_dimension = match dimension { + ndc_models::Dimension::Column { + path, + column_name, + arguments, + field_path, + .. + } => { + let (relationship_path, collection_type) = plan_for_relationship_path( + plan_state, + root_collection_object_type, + collection_object_type, + path, + vec![column_name.clone()], + )?; + + let plan_arguments = plan_arguments_from_plan_parameters( + plan_state, + &collection_type.get(&column_name)?.parameters, + arguments, + )?; + + let object_field = + get_object_field_by_path(&collection_type, &column_name, field_path.as_deref())? 
+ .clone(); + + let references_relationship = !relationship_path.is_empty(); + let field_type = if references_relationship { + plan::Type::array_of(object_field.r#type) + } else { + object_field.r#type + }; + + plan::Dimension::Column { + path: relationship_path, + column_name, + arguments: plan_arguments, + field_path, + field_type, + } + } + }; + Ok(plan_dimension) +} + +fn plan_for_group_expression( + plan_state: &mut QueryPlanState, + object_type: &plan::ObjectType, + expression: ndc::GroupExpression, +) -> Result> { + match expression { + ndc::GroupExpression::And { expressions } => Ok(plan::GroupExpression::And { + expressions: expressions + .into_iter() + .map(|expr| plan_for_group_expression(plan_state, object_type, expr)) + .collect::>()?, + }), + ndc::GroupExpression::Or { expressions } => Ok(plan::GroupExpression::Or { + expressions: expressions + .into_iter() + .map(|expr| plan_for_group_expression(plan_state, object_type, expr)) + .collect::>()?, + }), + ndc::GroupExpression::Not { expression } => Ok(plan::GroupExpression::Not { + expression: Box::new(plan_for_group_expression( + plan_state, + object_type, + *expression, + )?), + }), + ndc::GroupExpression::UnaryComparisonOperator { target, operator } => { + Ok(plan::GroupExpression::UnaryComparisonOperator { + target: plan_for_group_comparison_target(plan_state, object_type, target)?, + operator, + }) + } + ndc::GroupExpression::BinaryComparisonOperator { + target, + operator, + value, + } => { + let target = plan_for_group_comparison_target(plan_state, object_type, target)?; + let (operator, operator_definition) = plan_state + .context + .find_comparison_operator(&target.result_type(), &operator)?; + let value_type = operator_definition.argument_type(&target.result_type()); + Ok(plan::GroupExpression::BinaryComparisonOperator { + target, + operator, + value: plan_for_group_comparison_value(plan_state, value_type, value)?, + }) + } + } +} + +fn plan_for_group_comparison_target( + plan_state: &mut QueryPlanState, + object_type: &plan::ObjectType, + target: ndc::GroupComparisonTarget, +) -> Result> { + let plan_target = match target { + ndc::GroupComparisonTarget::Aggregate { aggregate } => { + let target_aggregate = plan_for_aggregate(plan_state, object_type, aggregate)?; + plan::GroupComparisonTarget::Aggregate { + aggregate: target_aggregate, + } + } + }; + Ok(plan_target) +} + +fn plan_for_group_comparison_value( + plan_state: &mut QueryPlanState, + expected_type: plan::Type, + value: ndc::GroupComparisonValue, +) -> Result> { + match value { + ndc::GroupComparisonValue::Scalar { value } => Ok(plan::GroupComparisonValue::Scalar { + value, + value_type: expected_type, + }), + ndc::GroupComparisonValue::Variable { name } => { + plan_state.register_variable_use(&name, expected_type.clone()); + Ok(plan::GroupComparisonValue::Variable { + name, + variable_type: expected_type, + }) + } + } +} + +fn plan_for_group_order_by( + plan_state: &mut QueryPlanState<'_, T>, + collection_object_type: &plan::ObjectType, + order_by: ndc::GroupOrderBy, +) -> Result> { + Ok(plan::GroupOrderBy { + elements: order_by + .elements + .into_iter() + .map(|elem| plan_for_group_order_by_element(plan_state, collection_object_type, elem)) + .collect::>()?, + }) +} + +fn plan_for_group_order_by_element( + plan_state: &mut QueryPlanState<'_, T>, + collection_object_type: &plan::ObjectType<::ScalarType>, + element: ndc::GroupOrderByElement, +) -> Result> { + Ok(plan::GroupOrderByElement { + order_direction: element.order_direction, + target: 
plan_for_group_order_by_target(plan_state, collection_object_type, element.target)?, + }) +} + +fn plan_for_group_order_by_target( + plan_state: &mut QueryPlanState<'_, T>, + collection_object_type: &plan::ObjectType, + target: ndc::GroupOrderByTarget, +) -> Result> { + match target { + ndc::GroupOrderByTarget::Dimension { index } => { + Ok(plan::GroupOrderByTarget::Dimension { index }) + } + ndc::GroupOrderByTarget::Aggregate { aggregate } => { + let target_aggregate = + plan_for_aggregate(plan_state, collection_object_type, aggregate)?; + Ok(plan::GroupOrderByTarget::Aggregate { + aggregate: target_aggregate, + }) + } + } +} diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_for_relationship.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_relationship.rs new file mode 100644 index 00000000..de98e178 --- /dev/null +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_for_relationship.rs @@ -0,0 +1,137 @@ +use std::collections::VecDeque; + +use crate::{self as plan, ObjectType, QueryContext, QueryPlanError}; +use ndc_models::{self as ndc}; + +use super::{ + helpers::{find_object_field, lookup_relationship}, + plan_for_expression, + query_plan_state::QueryPlanState, +}; + +type Result = std::result::Result; + +/// Returns list of aliases for joins to traverse, plus the object type of the final collection in +/// the path. +pub fn plan_for_relationship_path( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + object_type: &plan::ObjectType, + relationship_path: Vec, + requested_columns: Vec, // columns to select from last path element +) -> Result<(Vec, ObjectType)> { + let end_of_relationship_path_object_type = relationship_path + .last() + .map(|last_path_element| { + let relationship = lookup_relationship( + plan_state.collection_relationships, + &last_path_element.relationship, + )?; + plan_state + .context + .find_collection_object_type(&relationship.target_collection) + }) + .transpose()?; + let target_object_type = end_of_relationship_path_object_type.unwrap_or(object_type.clone()); + + let reversed_relationship_path = { + let mut path = relationship_path; + path.reverse(); + path + }; + + let vec_deque = plan_for_relationship_path_helper( + plan_state, + root_collection_object_type, + reversed_relationship_path, + requested_columns, + )?; + let aliases = vec_deque.into_iter().collect(); + + Ok((aliases, target_object_type)) +} + +fn plan_for_relationship_path_helper( + plan_state: &mut QueryPlanState<'_, T>, + root_collection_object_type: &plan::ObjectType, + mut reversed_relationship_path: Vec, + requested_columns: Vec, // columns to select from last path element +) -> Result> { + if reversed_relationship_path.is_empty() { + return Ok(VecDeque::new()); + } + + // safety: we just made an early return if the path is empty + let head = reversed_relationship_path.pop().unwrap(); + let tail = reversed_relationship_path; + let is_last = tail.is_empty(); + + let ndc::PathElement { + field_path: _, // TODO: ENG-1458 support nested relationships + relationship, + arguments, + predicate, + } = head; + + let relationship_def = lookup_relationship(plan_state.collection_relationships, &relationship)?; + let related_collection_type = plan_state + .context + .find_collection_object_type(&relationship_def.target_collection)?; + let mut nested_state = plan_state.state_for_subquery(); + + // If this is the last path element then we need to apply the requested fields to the + // relationship query. 
Otherwise we need to recursively process the rest of the path. Both + // cases take ownership of `requested_columns` so we group them together. + let (mut rest_path, fields) = if is_last { + let fields = requested_columns + .into_iter() + .map(|column_name| { + let object_field = + find_object_field(&related_collection_type, &column_name)?.clone(); + Ok(( + column_name.clone(), + plan::Field::Column { + column: column_name, + fields: None, + column_type: object_field.r#type, + }, + )) + }) + .collect::>()?; + (VecDeque::new(), Some(fields)) + } else { + let rest = plan_for_relationship_path_helper( + &mut nested_state, + root_collection_object_type, + tail, + requested_columns, + )?; + (rest, None) + }; + + let predicate_plan = predicate + .map(|p| { + plan_for_expression( + &mut nested_state, + root_collection_object_type, + &related_collection_type, + *p, + ) + }) + .transpose()?; + + let nested_relationships = nested_state.into_relationships(); + + let relationship_query = plan::Query { + predicate: predicate_plan, + relationships: nested_relationships, + fields, + ..Default::default() + }; + + let relation_key = + plan_state.register_relationship(relationship, arguments, relationship_query)?; + + rest_path.push_front(relation_key); + Ok(rest_path) +} diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/mod.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/mod.rs index 8518fd90..78562b1a 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/mod.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/mod.rs @@ -15,11 +15,10 @@ use ndc_test_helpers::{ use crate::{ConnectorTypes, QueryContext, QueryPlanError, Type}; -#[allow(unused_imports)] pub use self::{ - query::{query, QueryBuilder}, - relationships::{relationship, RelationshipBuilder}, - type_helpers::{date, double, int, object_type, string}, + query::QueryBuilder, + relationships::relationship, + type_helpers::{date, double, int, string}, }; #[derive(Clone, Debug, Default)] @@ -34,6 +33,14 @@ impl ConnectorTypes for TestContext { type AggregateFunction = AggregateFunction; type ComparisonOperator = ComparisonOperator; type ScalarType = ScalarType; + + fn count_aggregate_type() -> Type { + int() + } + + fn string_type() -> Type { + string() + } } impl QueryContext for TestContext { @@ -95,7 +102,7 @@ impl QueryContext for TestContext { } } -#[derive(Clone, Copy, Debug, PartialEq, Sequence)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Sequence)] pub enum AggregateFunction { Average, } @@ -108,7 +115,7 @@ impl NamedEnum for AggregateFunction { } } -#[derive(Clone, Copy, Debug, PartialEq, Sequence)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Sequence)] pub enum ComparisonOperator { Equal, Regex, @@ -123,7 +130,7 @@ impl NamedEnum for ComparisonOperator { } } -#[derive(Clone, Copy, Debug, PartialEq, Eq, Sequence)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Sequence)] pub enum ScalarType { Bool, Date, @@ -173,13 +180,11 @@ fn scalar_types() -> BTreeMap { ( ScalarType::Double.name().to_owned(), ndc::ScalarType { - representation: Some(TypeRepresentation::Float64), + representation: TypeRepresentation::Float64, aggregate_functions: [( AggregateFunction::Average.name().into(), - ndc::AggregateFunctionDefinition { - result_type: ndc::Type::Named { - name: ScalarType::Double.name().into(), - }, + ndc::AggregateFunctionDefinition::Average { + result_type: ScalarType::Double.name().into(), }, )] .into(), @@ -188,18 +193,17 @@ fn 
scalar_types() -> BTreeMap { ndc::ComparisonOperatorDefinition::Equal, )] .into(), + extraction_functions: Default::default(), }, ), ( ScalarType::Int.name().to_owned(), ndc::ScalarType { - representation: Some(TypeRepresentation::Int32), + representation: TypeRepresentation::Int32, aggregate_functions: [( AggregateFunction::Average.name().into(), - ndc::AggregateFunctionDefinition { - result_type: ndc::Type::Named { - name: ScalarType::Double.name().into(), - }, + ndc::AggregateFunctionDefinition::Average { + result_type: ScalarType::Double.name().into(), }, )] .into(), @@ -208,12 +212,13 @@ fn scalar_types() -> BTreeMap { ndc::ComparisonOperatorDefinition::Equal, )] .into(), + extraction_functions: Default::default(), }, ), ( ScalarType::String.name().to_owned(), ndc::ScalarType { - representation: Some(TypeRepresentation::String), + representation: TypeRepresentation::String, aggregate_functions: Default::default(), comparison_operators: [ ( @@ -228,6 +233,7 @@ fn scalar_types() -> BTreeMap { ), ] .into(), + extraction_functions: Default::default(), }, ), ] @@ -249,7 +255,7 @@ pub fn make_flat_schema() -> TestContext { collection_type: "Author".into(), arguments: Default::default(), uniqueness_constraints: make_primary_key_uniqueness_constraint("authors"), - foreign_keys: Default::default(), + relational_mutations: None, }, ), ( @@ -260,7 +266,7 @@ pub fn make_flat_schema() -> TestContext { collection_type: "Article".into(), arguments: Default::default(), uniqueness_constraints: make_primary_key_uniqueness_constraint("articles"), - foreign_keys: Default::default(), + relational_mutations: None, }, ), ]), @@ -297,7 +303,7 @@ pub fn make_nested_schema() -> TestContext { collection_type: "Author".into(), arguments: Default::default(), uniqueness_constraints: make_primary_key_uniqueness_constraint("authors"), - foreign_keys: Default::default(), + relational_mutations: None, }, ), collection("appearances"), // new helper gives more concise syntax diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/query.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/query.rs index ddb9df8c..444870b4 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/query.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/query.rs @@ -1,8 +1,7 @@ use indexmap::IndexMap; use crate::{ - Aggregate, ConnectorTypes, Expression, Field, OrderBy, OrderByElement, Query, Relationships, - Scope, + Aggregate, ConnectorTypes, Expression, Field, Grouping, OrderBy, OrderByElement, Query, Relationships, Scope }; #[derive(Clone, Debug, Default)] @@ -10,10 +9,10 @@ pub struct QueryBuilder { aggregates: Option>>, fields: Option>>, limit: Option, - aggregates_limit: Option, offset: Option, order_by: Option>, predicate: Option>, + groups: Option>, relationships: Relationships, scope: Option, } @@ -29,10 +28,10 @@ impl QueryBuilder { fields: None, aggregates: Default::default(), limit: None, - aggregates_limit: None, offset: None, order_by: None, predicate: None, + groups: None, relationships: Default::default(), scope: None, } @@ -88,10 +87,10 @@ impl From> for Query { aggregates: value.aggregates, fields: value.fields, limit: value.limit, - aggregates_limit: value.aggregates_limit, offset: value.offset, order_by: value.order_by, predicate: value.predicate, + groups: value.groups, relationships: value.relationships, scope: value.scope, } diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/relationships.rs 
b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/relationships.rs index 0ab7cfbd..ab8f3226 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/relationships.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/relationships.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; -use ndc_models::RelationshipType; +use ndc_models::{FieldName, RelationshipType}; +use nonempty::NonEmpty; use crate::{ConnectorTypes, Field, Relationship, RelationshipArgument}; @@ -8,7 +9,7 @@ use super::QueryBuilder; #[derive(Clone, Debug)] pub struct RelationshipBuilder { - column_mapping: BTreeMap, + column_mapping: BTreeMap>, relationship_type: RelationshipType, target_collection: ndc_models::CollectionName, arguments: BTreeMap>, @@ -42,11 +43,22 @@ impl RelationshipBuilder { pub fn column_mapping( mut self, - column_mapping: impl IntoIterator, + column_mapping: impl IntoIterator< + Item = ( + impl Into, + impl IntoIterator>, + ), + >, ) -> Self { self.column_mapping = column_mapping .into_iter() - .map(|(source, target)| (source.to_string().into(), target.to_string().into())) + .map(|(source, target)| { + ( + source.into(), + NonEmpty::collect(target.into_iter().map(Into::into)) + .expect("target path in relationship column mapping may not be empty"), + ) + }) .collect(); self } diff --git a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/type_helpers.rs b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/type_helpers.rs index 7d0dc453..05875471 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/type_helpers.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/plan_test_helpers/type_helpers.rs @@ -1,4 +1,4 @@ -use crate::{ObjectType, Type}; +use crate::Type; use super::ScalarType; @@ -17,15 +17,3 @@ pub fn int() -> Type { pub fn string() -> Type { Type::Scalar(ScalarType::String) } - -pub fn object_type( - fields: impl IntoIterator>)>, -) -> Type { - Type::Object(ObjectType { - name: None, - fields: fields - .into_iter() - .map(|(name, field)| (name.to_string().into(), field.into())) - .collect(), - }) -} diff --git a/crates/ndc-query-plan/src/plan_for_query_request/query_context.rs b/crates/ndc-query-plan/src/plan_for_query_request/query_context.rs index 64a947e1..eb180b43 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/query_context.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/query_context.rs @@ -54,11 +54,32 @@ pub trait QueryContext: ConnectorTypes { Ok(( func, plan::AggregateFunctionDefinition { - result_type: self.ndc_to_plan_type(&definition.result_type)?, + result_type: self.aggregate_function_result_type(definition, input_type)?, }, )) } + fn aggregate_function_result_type( + &self, + definition: &ndc::AggregateFunctionDefinition, + input_type: &plan::Type, + ) -> Result> { + let t = match definition { + ndc::AggregateFunctionDefinition::Min => input_type.clone().into_nullable(), + ndc::AggregateFunctionDefinition::Max => input_type.clone().into_nullable(), + ndc::AggregateFunctionDefinition::Sum { result_type } + | ndc::AggregateFunctionDefinition::Average { result_type } => { + let scalar_type = Self::lookup_scalar_type(result_type) + .ok_or_else(|| QueryPlanError::UnknownScalarType(result_type.clone()))?; + plan::Type::Scalar(scalar_type).into_nullable() + } + ndc::AggregateFunctionDefinition::Custom { result_type } => { + self.ndc_to_plan_type(result_type)? 
+ } + }; + Ok(t) + } + fn find_comparison_operator( &self, left_operand_type: &Type, @@ -72,15 +93,10 @@ pub trait QueryContext: ConnectorTypes { { let (operator, definition) = Self::lookup_comparison_operator(self, left_operand_type, op_name)?; - let plan_def = match definition { - ndc::ComparisonOperatorDefinition::Equal => plan::ComparisonOperatorDefinition::Equal, - ndc::ComparisonOperatorDefinition::In => plan::ComparisonOperatorDefinition::In, - ndc::ComparisonOperatorDefinition::Custom { argument_type } => { - plan::ComparisonOperatorDefinition::Custom { - argument_type: self.ndc_to_plan_type(argument_type)?, - } - } - }; + let plan_def = + plan::ComparisonOperatorDefinition::from_ndc_definition(definition, |ndc_type| { + self.ndc_to_plan_type(ndc_type) + })?; Ok((operator, plan_def)) } diff --git a/crates/ndc-query-plan/src/plan_for_query_request/query_plan_error.rs b/crates/ndc-query-plan/src/plan_for_query_request/query_plan_error.rs index e0d0ffc0..2283ed1f 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/query_plan_error.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/query_plan_error.rs @@ -26,6 +26,14 @@ pub enum QueryPlanError { #[error("missing arguments: {}", .0.join(", "))] MissingArguments(Vec), + #[error("not implemented: {}", .0)] + NotImplemented(String), + + #[error("relationship, {relationship_name}, has an empty target path")] + RelationshipEmptyTarget { + relationship_name: ndc::RelationshipName, + }, + #[error("{0}")] RelationshipUnification(#[from] RelationshipUnificationError), diff --git a/crates/ndc-query-plan/src/plan_for_query_request/query_plan_state.rs b/crates/ndc-query-plan/src/plan_for_query_request/query_plan_state.rs index d82e5183..89ccefb7 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/query_plan_state.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/query_plan_state.rs @@ -5,6 +5,7 @@ use std::{ }; use ndc_models as ndc; +use nonempty::NonEmpty; use crate::{ plan_for_query_request::helpers::lookup_relationship, @@ -96,8 +97,23 @@ impl QueryPlanState<'_, T> { Default::default() }; + let column_mapping = ndc_relationship + .column_mapping + .iter() + .map(|(source, target_path)| { + Ok(( + source.clone(), + NonEmpty::collect(target_path.iter().cloned()).ok_or_else(|| { + QueryPlanError::RelationshipEmptyTarget { + relationship_name: ndc_relationship_name.clone(), + } + })?, + )) + }) + .collect::>>()?; + let relationship = Relationship { - column_mapping: ndc_relationship.column_mapping.clone(), + column_mapping, relationship_type: ndc_relationship.relationship_type, target_collection: ndc_relationship.target_collection.clone(), arguments, diff --git a/crates/ndc-query-plan/src/plan_for_query_request/tests.rs b/crates/ndc-query-plan/src/plan_for_query_request/tests.rs index 1d5d1c6e..6e2251b8 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/tests.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/tests.rs @@ -1,507 +1,517 @@ use ndc_models::{self as ndc, OrderByTarget, OrderDirection, RelationshipType}; use ndc_test_helpers::*; +use nonempty::NonEmpty; use pretty_assertions::assert_eq; -use serde_json::json; use crate::{ self as plan, - plan_for_query_request::plan_test_helpers::{ - self, make_flat_schema, make_nested_schema, TestContext, - }, - query_plan::UnrelatedJoin, - ExistsInCollection, Expression, Field, OrderBy, Query, QueryContext, QueryPlan, Relationship, + plan_for_query_request::plan_test_helpers::{self, make_flat_schema, make_nested_schema}, + QueryContext, QueryPlan, 
Type, }; use super::plan_for_query_request; -#[test] -fn translates_query_request_relationships() -> Result<(), anyhow::Error> { - let request = query_request() - .collection("schools") - .relationships([ - ( - "school_classes", - relationship("classes", [("_id", "school_id")]), - ), - ( - "class_students", - relationship("students", [("_id", "class_id")]), - ), - ( - "class_department", - relationship("departments", [("department_id", "_id")]).object_type(), - ), - ( - "school_directory", - relationship("directory", [("_id", "school_id")]).object_type(), - ), - ( - "student_advisor", - relationship("advisors", [("advisor_id", "_id")]).object_type(), - ), - ( - "existence_check", - relationship("some_collection", [("some_id", "_id")]), - ), - ]) - .query( - query() - .fields([relation_field!("class_name" => "school_classes", query() - .fields([ - relation_field!("student_name" => "class_students") - ]) - )]) - .order_by(vec![ndc::OrderByElement { - order_direction: OrderDirection::Asc, - target: OrderByTarget::Column { - name: "advisor_name".into(), - field_path: None, - path: vec![ - path_element("school_classes".into()) - .predicate(binop( - "Equal", - target!( - "_id", - relations: [ - // path_element("school_classes"), - path_element("class_department".into()), - ], - ), - column_value!( - "math_department_id", - relations: [path_element("school_directory".into())], - ), - )) - .into(), - path_element("class_students".into()).into(), - path_element("student_advisor".into()).into(), - ], - }, - }]) - // The `And` layer checks that we properly recursive into Expressions - .predicate(and([ndc::Expression::Exists { - in_collection: related!("existence_check"), - predicate: None, - }])), - ) - .into(); +// TODO: ENG-1487 we need named scopes to define this query in ndc-spec 0.2 +// #[test] +// fn translates_query_request_relationships() -> Result<(), anyhow::Error> { +// let request = query_request() +// .collection("schools") +// .relationships([ +// ( +// "school_classes", +// relationship("classes", [("_id", &["school_id"])]), +// ), +// ( +// "class_students", +// relationship("students", [("_id", &["class_id"])]), +// ), +// ( +// "class_department", +// relationship("departments", [("department_id", &["_id"])]).object_type(), +// ), +// ( +// "school_directory", +// relationship("directory", [("_id", &["school_id"])]).object_type(), +// ), +// ( +// "student_advisor", +// relationship("advisors", [("advisor_id", &["_id"])]).object_type(), +// ), +// ( +// "existence_check", +// relationship("some_collection", [("some_id", &["_id"])]), +// ), +// ]) +// .query( +// query() +// .fields([relation_field!("class_name" => "school_classes", query() +// .fields([ +// relation_field!("student_name" => "class_students") +// ]) +// )]) +// .order_by(vec![ndc::OrderByElement { +// order_direction: OrderDirection::Asc, +// target: OrderByTarget::Column { +// name: "advisor_name".into(), +// arguments: Default::default(), +// field_path: None, +// path: vec![ +// path_element("school_classes") +// .predicate( +// exists( +// in_related("class_department"), +// binop( +// "Equal", +// target!("_id"), +// column_value("math_department_id") +// .path([path_element("school_directory")]) +// .scope(2) +// .into() +// ), +// ) +// ) +// .into(), +// path_element("class_students").into(), +// path_element("student_advisor").into(), +// ], +// }, +// }]) +// // The `And` layer checks that we properly recurse into Expressions +// .predicate(and([ndc::Expression::Exists { +// in_collection: 
related!("existence_check"), +// predicate: None, +// }])), +// ) +// .into(); +// +// let expected = QueryPlan { +// collection: "schools".into(), +// arguments: Default::default(), +// variables: None, +// variable_types: Default::default(), +// unrelated_collections: Default::default(), +// query: Query { +// predicate: Some(Expression::And { +// expressions: vec![Expression::Exists { +// in_collection: ExistsInCollection::Related { +// relationship: "existence_check".into(), +// }, +// predicate: None, +// }], +// }), +// order_by: Some(OrderBy { +// elements: [plan::OrderByElement { +// order_direction: OrderDirection::Asc, +// target: plan::OrderByTarget::Column { +// name: "advisor_name".into(), +// arguments: Default::default(), +// field_path: Default::default(), +// path: [ +// "school_classes_0".into(), +// "class_students".into(), +// "student_advisor".into(), +// ] +// .into(), +// }, +// }] +// .into(), +// }), +// relationships: [ +// // We join on the school_classes relationship twice. This one is for the `order_by` +// // comparison in the top-level request query +// ( +// "school_classes_0".into(), +// Relationship { +// column_mapping: [("_id".into(), vec!["school_id".into()])].into(), +// relationship_type: RelationshipType::Array, +// target_collection: "classes".into(), +// arguments: Default::default(), +// query: Query { +// predicate: Some(Expression::Exists { +// in_collection: ExistsInCollection::Related { +// relationship: "school_directory".into(), +// }, +// predicate: Some(Box::new(plan::Expression::BinaryComparisonOperator { +// column: plan::ComparisonTarget::Column { +// name: "_id".into(), +// arguments: Default::default(), +// field_path: None, +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::Int, +// ), +// }, +// operator: plan_test_helpers::ComparisonOperator::Equal, +// value: plan::ComparisonValue::Column { +// name: "math_department_id".into(), +// arguments: Default::default(), +// field_path: None, +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::Int, +// ), +// path: vec!["school_directory".into()], +// scope: Default::default(), +// }, +// })) +// }), +// relationships: [( +// "class_department".into(), +// plan::Relationship { +// target_collection: "departments".into(), +// column_mapping: [("department_id".into(), vec!["_id".into()])].into(), +// relationship_type: RelationshipType::Object, +// arguments: Default::default(), +// query: plan::Query { +// fields: Some([ +// ("_id".into(), plan::Field::Column { column: "_id".into(), fields: None, column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int) }) +// ].into()), +// ..Default::default() +// }, +// }, +// ), ( +// "class_students".into(), +// plan::Relationship { +// target_collection: "students".into(), +// column_mapping: [("_id".into(), vec!["class_id".into()])].into(), +// relationship_type: RelationshipType::Array, +// arguments: Default::default(), +// query: plan::Query { +// relationships: [( +// "student_advisor".into(), +// plan::Relationship { +// column_mapping: [( +// "advisor_id".into(), +// vec!["_id".into()], +// )] +// .into(), +// relationship_type: RelationshipType::Object, +// target_collection: "advisors".into(), +// arguments: Default::default(), +// query: plan::Query { +// fields: Some( +// [( +// "advisor_name".into(), +// plan::Field::Column { +// column: "advisor_name".into(), +// fields: None, +// column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), +// }, +// )] +// .into(), +// ), +// 
..Default::default() +// }, +// }, +// )] +// .into(), +// ..Default::default() +// }, +// }, +// ), +// ( +// "school_directory".into(), +// Relationship { +// target_collection: "directory".into(), +// column_mapping: [("_id".into(), vec!["school_id".into()])].into(), +// relationship_type: RelationshipType::Object, +// arguments: Default::default(), +// query: Query { +// fields: Some([ +// ("math_department_id".into(), plan::Field::Column { column: "math_department_id".into(), fields: None, column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int) }) +// ].into()), +// ..Default::default() +// }, +// }, +// ), +// ] +// .into(), +// ..Default::default() +// }, +// }, +// ), +// // This is the second join on school_classes - this one provides the relationship +// // field for the top-level request query +// ( +// "school_classes".into(), +// Relationship { +// column_mapping: [("_id".into(), vec!["school_id".into()])].into(), +// relationship_type: RelationshipType::Array, +// target_collection: "classes".into(), +// arguments: Default::default(), +// query: Query { +// fields: Some( +// [( +// "student_name".into(), +// plan::Field::Relationship { +// relationship: "class_students".into(), +// aggregates: None, +// fields: None, +// }, +// )] +// .into(), +// ), +// relationships: [( +// "class_students".into(), +// plan::Relationship { +// target_collection: "students".into(), +// column_mapping: [("_id".into(), vec!["class_id".into()])].into(), +// relationship_type: RelationshipType::Array, +// arguments: Default::default(), +// query: Query { +// scope: Some(plan::Scope::Named("scope_1".into())), +// ..Default::default() +// }, +// }, +// )].into(), +// scope: Some(plan::Scope::Named("scope_0".into())), +// ..Default::default() +// }, +// }, +// ), +// ( +// "existence_check".into(), +// Relationship { +// column_mapping: [("some_id".into(), vec!["_id".into()])].into(), +// relationship_type: RelationshipType::Array, +// target_collection: "some_collection".into(), +// arguments: Default::default(), +// query: Query { +// predicate: None, +// ..Default::default() +// }, +// }, +// ), +// ] +// .into(), +// fields: Some( +// [( +// "class_name".into(), +// Field::Relationship { +// relationship: "school_classes".into(), +// aggregates: None, +// fields: Some( +// [( +// "student_name".into(), +// Field::Relationship { +// relationship: "class_students".into(), +// aggregates: None, +// fields: None, +// }, +// )] +// .into(), +// ), +// }, +// )] +// .into(), +// ), +// scope: Some(plan::Scope::Root), +// ..Default::default() +// }, +// }; +// +// let context = TestContext { +// collections: [ +// collection("schools"), +// collection("classes"), +// collection("students"), +// collection("departments"), +// collection("directory"), +// collection("advisors"), +// collection("some_collection"), +// ] +// .into(), +// object_types: [ +// ("schools".into(), object_type([("_id", named_type("Int"))])), +// ( +// "classes".into(), +// object_type([ +// ("_id", named_type("Int")), +// ("school_id", named_type("Int")), +// ("department_id", named_type("Int")), +// ]), +// ), +// ( +// "students".into(), +// object_type([ +// ("_id", named_type("Int")), +// ("class_id", named_type("Int")), +// ("advisor_id", named_type("Int")), +// ("student_name", named_type("String")), +// ]), +// ), +// ( +// "departments".into(), +// object_type([("_id", named_type("Int"))]), +// ), +// ( +// "directory".into(), +// object_type([ +// ("_id", named_type("Int")), +// ("school_id", 
named_type("Int")), +// ("math_department_id", named_type("Int")), +// ]), +// ), +// ( +// "advisors".into(), +// object_type([ +// ("_id", named_type("Int")), +// ("advisor_name", named_type("String")), +// ]), +// ), +// ( +// "some_collection".into(), +// object_type([("_id", named_type("Int")), ("some_id", named_type("Int"))]), +// ), +// ] +// .into(), +// ..Default::default() +// }; +// +// let query_plan = plan_for_query_request(&context, request)?; +// +// assert_eq!(query_plan, expected); +// Ok(()) +// } - let expected = QueryPlan { - collection: "schools".into(), - arguments: Default::default(), - variables: None, - variable_types: Default::default(), - unrelated_collections: Default::default(), - query: Query { - predicate: Some(Expression::And { - expressions: vec![Expression::Exists { - in_collection: ExistsInCollection::Related { - relationship: "existence_check".into(), - }, - predicate: None, - }], - }), - order_by: Some(OrderBy { - elements: [plan::OrderByElement { - order_direction: OrderDirection::Asc, - target: plan::OrderByTarget::Column { - name: "advisor_name".into(), - field_path: Default::default(), - path: [ - "school_classes_0".into(), - "class_students".into(), - "student_advisor".into(), - ] - .into(), - }, - }] - .into(), - }), - relationships: [ - ( - "school_classes_0".into(), - Relationship { - column_mapping: [("_id".into(), "school_id".into())].into(), - relationship_type: RelationshipType::Array, - target_collection: "classes".into(), - arguments: Default::default(), - query: Query { - predicate: Some(plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "_id".into(), - field_path: None, - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::Int, - ), - path: vec!["class_department".into()], - }, - operator: plan_test_helpers::ComparisonOperator::Equal, - value: plan::ComparisonValue::Column { - column: plan::ComparisonTarget::Column { - name: "math_department_id".into(), - field_path: None, - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::Int, - ), - path: vec!["school_directory".into()], - }, - }, - }), - relationships: [( - "class_department".into(), - plan::Relationship { - target_collection: "departments".into(), - column_mapping: [("department_id".into(), "_id".into())].into(), - relationship_type: RelationshipType::Object, - arguments: Default::default(), - query: plan::Query { - fields: Some([ - ("_id".into(), plan::Field::Column { column: "_id".into(), fields: None, column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int) }) - ].into()), - ..Default::default() - }, - }, - ), ( - "class_students".into(), - plan::Relationship { - target_collection: "students".into(), - column_mapping: [("_id".into(), "class_id".into())].into(), - relationship_type: RelationshipType::Array, - arguments: Default::default(), - query: plan::Query { - relationships: [( - "student_advisor".into(), - plan::Relationship { - column_mapping: [( - "advisor_id".into(), - "_id".into(), - )] - .into(), - relationship_type: RelationshipType::Object, - target_collection: "advisors".into(), - arguments: Default::default(), - query: plan::Query { - fields: Some( - [( - "advisor_name".into(), - plan::Field::Column { - column: "advisor_name".into(), - fields: None, - column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), - }, - )] - .into(), - ), - ..Default::default() - }, - }, - )] - .into(), - ..Default::default() - }, - }, - ), - ( - "school_directory".into(), - Relationship { - 
target_collection: "directory".into(), - column_mapping: [("_id".into(), "school_id".into())].into(), - relationship_type: RelationshipType::Object, - arguments: Default::default(), - query: Query { - fields: Some([ - ("math_department_id".into(), plan::Field::Column { column: "math_department_id".into(), fields: None, column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int) }) - ].into()), - ..Default::default() - }, - }, - ), - ] - .into(), - ..Default::default() - }, - }, - ), - ( - "school_classes".into(), - Relationship { - column_mapping: [("_id".into(), "school_id".into())].into(), - relationship_type: RelationshipType::Array, - target_collection: "classes".into(), - arguments: Default::default(), - query: Query { - fields: Some( - [( - "student_name".into(), - plan::Field::Relationship { - relationship: "class_students".into(), - aggregates: None, - fields: None, - }, - )] - .into(), - ), - relationships: [( - "class_students".into(), - plan::Relationship { - target_collection: "students".into(), - column_mapping: [("_id".into(), "class_id".into())].into(), - relationship_type: RelationshipType::Array, - arguments: Default::default(), - query: Query { - scope: Some(plan::Scope::Named("scope_1".into())), - ..Default::default() - }, - }, - )].into(), - scope: Some(plan::Scope::Named("scope_0".into())), - ..Default::default() - }, - }, - ), - ( - "existence_check".into(), - Relationship { - column_mapping: [("some_id".into(), "_id".into())].into(), - relationship_type: RelationshipType::Array, - target_collection: "some_collection".into(), - arguments: Default::default(), - query: Query { - predicate: None, - ..Default::default() - }, - }, - ), - ] - .into(), - fields: Some( - [( - "class_name".into(), - Field::Relationship { - relationship: "school_classes".into(), - aggregates: None, - fields: Some( - [( - "student_name".into(), - Field::Relationship { - relationship: "class_students".into(), - aggregates: None, - fields: None, - }, - )] - .into(), - ), - }, - )] - .into(), - ), - scope: Some(plan::Scope::Root), - ..Default::default() - }, - }; +// TODO: ENG-1487 update this test to use named scopes instead of root column reference - let context = TestContext { - collections: [ - collection("schools"), - collection("classes"), - collection("students"), - collection("departments"), - collection("directory"), - collection("advisors"), - collection("some_collection"), - ] - .into(), - object_types: [ - ("schools".into(), object_type([("_id", named_type("Int"))])), - ( - "classes".into(), - object_type([ - ("_id", named_type("Int")), - ("school_id", named_type("Int")), - ("department_id", named_type("Int")), - ]), - ), - ( - "students".into(), - object_type([ - ("_id", named_type("Int")), - ("class_id", named_type("Int")), - ("advisor_id", named_type("Int")), - ("student_name", named_type("String")), - ]), - ), - ( - "departments".into(), - object_type([("_id", named_type("Int"))]), - ), - ( - "directory".into(), - object_type([ - ("_id", named_type("Int")), - ("school_id", named_type("Int")), - ("math_department_id", named_type("Int")), - ]), - ), - ( - "advisors".into(), - object_type([ - ("_id", named_type("Int")), - ("advisor_name", named_type("String")), - ]), - ), - ( - "some_collection".into(), - object_type([("_id", named_type("Int")), ("some_id", named_type("Int"))]), - ), - ] - .into(), - ..Default::default() - }; - - let query_plan = plan_for_query_request(&context, request)?; - - assert_eq!(query_plan, expected); - Ok(()) -} - -#[test] -fn 
translates_root_column_references() -> Result<(), anyhow::Error> { - let query_context = make_flat_schema(); - let query = query_request() - .collection("authors") - .query(query().fields([field!("last_name")]).predicate(exists( - unrelated!("articles"), - and([ - binop("Equal", target!("author_id"), column_value!(root("id"))), - binop("Regex", target!("title"), value!("Functional.*")), - ]), - ))) - .into(); - let query_plan = plan_for_query_request(&query_context, query)?; - - let expected = QueryPlan { - collection: "authors".into(), - query: plan::Query { - predicate: Some(plan::Expression::Exists { - in_collection: plan::ExistsInCollection::Unrelated { - unrelated_collection: "__join_articles_0".into(), - }, - predicate: Some(Box::new(plan::Expression::And { - expressions: vec![ - plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "author_id".into(), - field_path: Default::default(), - field_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int), - path: Default::default(), - }, - operator: plan_test_helpers::ComparisonOperator::Equal, - value: plan::ComparisonValue::Column { - column: plan::ComparisonTarget::ColumnInScope { - name: "id".into(), - field_path: Default::default(), - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::Int, - ), - scope: plan::Scope::Root, - }, - }, - }, - plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "title".into(), - field_path: Default::default(), - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::String, - ), - path: Default::default(), - }, - operator: plan_test_helpers::ComparisonOperator::Regex, - value: plan::ComparisonValue::Scalar { - value: json!("Functional.*"), - value_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::String, - ), - }, - }, - ], - })), - }), - fields: Some( - [( - "last_name".into(), - plan::Field::Column { - column: "last_name".into(), - fields: None, - column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), - }, - )] - .into(), - ), - scope: Some(plan::Scope::Root), - ..Default::default() - }, - unrelated_collections: [( - "__join_articles_0".into(), - UnrelatedJoin { - target_collection: "articles".into(), - arguments: Default::default(), - query: plan::Query { - predicate: Some(plan::Expression::And { - expressions: vec![ - plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "author_id".into(), - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::Int, - ), - field_path: None, - path: vec![], - }, - operator: plan_test_helpers::ComparisonOperator::Equal, - value: plan::ComparisonValue::Column { - column: plan::ComparisonTarget::ColumnInScope { - name: "id".into(), - scope: plan::Scope::Root, - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::Int, - ), - field_path: None, - }, - }, - }, - plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "title".into(), - field_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::String, - ), - field_path: None, - path: vec![], - }, - operator: plan_test_helpers::ComparisonOperator::Regex, - value: plan::ComparisonValue::Scalar { - value: "Functional.*".into(), - value_type: plan::Type::Scalar( - plan_test_helpers::ScalarType::String, - ), - }, - }, - ], - }), - ..Default::default() - }, - }, - )] - .into(), - arguments: Default::default(), - variables: Default::default(), - variable_types: Default::default(), - }; 
- - assert_eq!(query_plan, expected); - Ok(()) -} +// #[test] +// fn translates_root_column_references() -> Result<(), anyhow::Error> { +// let query_context = make_flat_schema(); +// let query = query_request() +// .collection("authors") +// .query(query().fields([field!("last_name")]).predicate(exists( +// unrelated!("articles"), +// and([ +// binop("Equal", target!("author_id"), column_value!(root("id"))), +// binop("Regex", target!("title"), value!("Functional.*")), +// ]), +// ))) +// .into(); +// let query_plan = plan_for_query_request(&query_context, query)?; +// +// let expected = QueryPlan { +// collection: "authors".into(), +// query: plan::Query { +// predicate: Some(plan::Expression::Exists { +// in_collection: plan::ExistsInCollection::Unrelated { +// unrelated_collection: "__join_articles_0".into(), +// }, +// predicate: Some(Box::new(plan::Expression::And { +// expressions: vec![ +// plan::Expression::BinaryComparisonOperator { +// column: plan::ComparisonTarget::Column { +// name: "author_id".into(), +// field_path: Default::default(), +// field_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Int), +// path: Default::default(), +// }, +// operator: plan_test_helpers::ComparisonOperator::Equal, +// value: plan::ComparisonValue::Column { +// column: plan::ComparisonTarget::ColumnInScope { +// name: "id".into(), +// field_path: Default::default(), +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::Int, +// ), +// scope: plan::Scope::Root, +// }, +// }, +// }, +// plan::Expression::BinaryComparisonOperator { +// column: plan::ComparisonTarget::Column { +// name: "title".into(), +// field_path: Default::default(), +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::String, +// ), +// path: Default::default(), +// }, +// operator: plan_test_helpers::ComparisonOperator::Regex, +// value: plan::ComparisonValue::Scalar { +// value: json!("Functional.*"), +// value_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::String, +// ), +// }, +// }, +// ], +// })), +// }), +// fields: Some( +// [( +// "last_name".into(), +// plan::Field::Column { +// column: "last_name".into(), +// fields: None, +// column_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), +// }, +// )] +// .into(), +// ), +// scope: Some(plan::Scope::Root), +// ..Default::default() +// }, +// unrelated_collections: [( +// "__join_articles_0".into(), +// UnrelatedJoin { +// target_collection: "articles".into(), +// arguments: Default::default(), +// query: plan::Query { +// predicate: Some(plan::Expression::And { +// expressions: vec![ +// plan::Expression::BinaryComparisonOperator { +// column: plan::ComparisonTarget::Column { +// name: "author_id".into(), +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::Int, +// ), +// field_path: None, +// path: vec![], +// }, +// operator: plan_test_helpers::ComparisonOperator::Equal, +// value: plan::ComparisonValue::Column { +// column: plan::ComparisonTarget::ColumnInScope { +// name: "id".into(), +// scope: plan::Scope::Root, +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::Int, +// ), +// field_path: None, +// }, +// }, +// }, +// plan::Expression::BinaryComparisonOperator { +// column: plan::ComparisonTarget::Column { +// name: "title".into(), +// field_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::String, +// ), +// field_path: None, +// path: vec![], +// }, +// operator: plan_test_helpers::ComparisonOperator::Regex, +// value: 
plan::ComparisonValue::Scalar { +// value: "Functional.*".into(), +// value_type: plan::Type::Scalar( +// plan_test_helpers::ScalarType::String, +// ), +// }, +// }, +// ], +// }), +// ..Default::default() +// }, +// }, +// )] +// .into(), +// arguments: Default::default(), +// variables: Default::default(), +// variable_types: Default::default(), +// }; +// +// assert_eq!(query_plan, expected); +// Ok(()) +// } #[test] fn translates_aggregate_selections() -> Result<(), anyhow::Error> { @@ -511,7 +521,7 @@ fn translates_aggregate_selections() -> Result<(), anyhow::Error> { .query(query().aggregates([ star_count_aggregate!("count_star"), column_count_aggregate!("count_id" => "last_name", distinct: true), - column_aggregate!("avg_id" => "id", "Average"), + ("avg_id", column_aggregate("id", "Average").into()), ])) .into(); let query_plan = plan_for_query_request(&query_context, query)?; @@ -526,6 +536,8 @@ fn translates_aggregate_selections() -> Result<(), anyhow::Error> { "count_id".into(), plan::Aggregate::ColumnCount { column: "last_name".into(), + arguments: Default::default(), + field_path: None, distinct: true, }, ), @@ -533,8 +545,12 @@ fn translates_aggregate_selections() -> Result<(), anyhow::Error> { "avg_id".into(), plan::Aggregate::SingleColumn { column: "id".into(), + column_type: Type::scalar(plan_test_helpers::ScalarType::Int), + arguments: Default::default(), + field_path: None, function: plan_test_helpers::AggregateFunction::Average, - result_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Double), + result_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Double) + .into_nullable(), }, ), ] @@ -574,17 +590,21 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a .order_by(vec![ ndc::OrderByElement { order_direction: OrderDirection::Asc, - target: OrderByTarget::SingleColumnAggregate { - column: "year".into(), - function: "Average".into(), - path: vec![path_element("author_articles".into()).into()], - field_path: None, + target: OrderByTarget::Aggregate { + path: vec![path_element("author_articles").into()], + aggregate: ndc::Aggregate::SingleColumn { + column: "year".into(), + arguments: Default::default(), + field_path: None, + function: "Average".into(), + }, }, }, ndc::OrderByElement { order_direction: OrderDirection::Desc, target: OrderByTarget::Column { name: "id".into(), + arguments: Default::default(), field_path: None, path: vec![], }, @@ -593,7 +613,7 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a ) .relationships([( "author_articles", - relationship("articles", [("id", "author_id")]), + relationship("articles", [("id", &["author_id"])]), )]) .into(); let query_plan = plan_for_query_request(&query_context, query)?; @@ -606,12 +626,10 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a relationship: "author_articles".into(), }, predicate: Some(Box::new(plan::Expression::BinaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "title".into(), - field_path: Default::default(), - field_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), - path: Default::default(), - }, + column: plan::ComparisonTarget::column( + "title", + plan::Type::scalar(plan_test_helpers::ScalarType::String), + ), operator: plan_test_helpers::ComparisonOperator::Regex, value: plan::ComparisonValue::Scalar { value: "Functional.*".into(), @@ -623,17 +641,26 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a elements: vec![ 
plan::OrderByElement { order_direction: OrderDirection::Asc, - target: plan::OrderByTarget::SingleColumnAggregate { - column: "year".into(), - function: plan_test_helpers::AggregateFunction::Average, - result_type: plan::Type::Scalar(plan_test_helpers::ScalarType::Double), + target: plan::OrderByTarget::Aggregate { path: vec!["author_articles".into()], + aggregate: plan::Aggregate::SingleColumn { + column: "year".into(), + column_type: Type::scalar(plan_test_helpers::ScalarType::Int).into_nullable(), + arguments: Default::default(), + field_path: Default::default(), + function: plan_test_helpers::AggregateFunction::Average, + result_type: plan::Type::Scalar( + plan_test_helpers::ScalarType::Double, + ) + .into_nullable(), + }, }, }, plan::OrderByElement { order_direction: OrderDirection::Desc, target: plan::OrderByTarget::Column { name: "id".into(), + arguments: Default::default(), field_path: None, path: vec![], }, @@ -655,6 +682,7 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a plan::Field::Relationship { relationship: "author_articles".into(), aggregates: None, + groups: None, fields: Some( [ ( @@ -691,7 +719,7 @@ fn translates_relationships_in_fields_predicates_and_orderings() -> Result<(), a "author_articles".into(), plan::Relationship { target_collection: "articles".into(), - column_mapping: [("id".into(), "author_id".into())].into(), + column_mapping: [("id".into(), NonEmpty::singleton("author_id".into()))].into(), relationship_type: RelationshipType::Array, arguments: Default::default(), query: plan::Query { @@ -854,15 +882,13 @@ fn translates_predicate_referencing_field_of_related_collection() -> anyhow::Res let query_context = make_nested_schema(); let request = query_request() .collection("appearances") - .relationships([("author", relationship("authors", [("authorId", "id")]))]) + .relationships([("author", relationship("authors", [("authorId", &["id"])]))]) .query( query() .fields([relation_field!("presenter" => "author", query().fields([ field!("name"), ]))]) - .predicate(not(is_null( - target!("name", relations: [path_element("author".into())]), - ))), + .predicate(exists(in_related("author"), not(is_null(target!("name"))))), ) .into(); let query_plan = plan_for_query_request(&query_context, request)?; @@ -870,16 +896,21 @@ fn translates_predicate_referencing_field_of_related_collection() -> anyhow::Res let expected = QueryPlan { collection: "appearances".into(), query: plan::Query { - predicate: Some(plan::Expression::Not { - expression: Box::new(plan::Expression::UnaryComparisonOperator { - column: plan::ComparisonTarget::Column { - name: "name".into(), - field_path: None, - field_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), - path: vec!["author".into()], - }, - operator: ndc_models::UnaryComparisonOperator::IsNull, - }), + predicate: Some(plan::Expression::Exists { + in_collection: plan::ExistsInCollection::Related { + relationship: "author".into(), + }, + predicate: Some(Box::new(plan::Expression::Not { + expression: Box::new(plan::Expression::UnaryComparisonOperator { + column: plan::ComparisonTarget::Column { + name: "name".into(), + arguments: Default::default(), + field_path: None, + field_type: plan::Type::Scalar(plan_test_helpers::ScalarType::String), + }, + operator: ndc_models::UnaryComparisonOperator::IsNull, + }), + })), }), fields: Some( [( @@ -887,6 +918,7 @@ fn translates_predicate_referencing_field_of_related_collection() -> anyhow::Res plan::Field::Relationship { relationship: "author".into(), 
aggregates: None, + groups: None, fields: Some( [( "name".into(), @@ -907,7 +939,7 @@ fn translates_predicate_referencing_field_of_related_collection() -> anyhow::Res relationships: [( "author".into(), plan::Relationship { - column_mapping: [("authorId".into(), "id".into())].into(), + column_mapping: [("authorId".into(), NonEmpty::singleton("id".into()))].into(), relationship_type: RelationshipType::Array, target_collection: "authors".into(), arguments: Default::default(), diff --git a/crates/ndc-query-plan/src/plan_for_query_request/type_annotated_field.rs b/crates/ndc-query-plan/src/plan_for_query_request/type_annotated_field.rs index fa6de979..2fca802f 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/type_annotated_field.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/type_annotated_field.rs @@ -44,7 +44,8 @@ fn type_annotated_field_helper( fields, arguments: _, } => { - let column_type = find_object_field(collection_object_type, &column)?; + let column_field = find_object_field(collection_object_type, &column)?; + let column_type = &column_field.r#type; let fields = fields .map(|nested_field| { type_annotated_nested_field_helper( @@ -89,6 +90,7 @@ fn type_annotated_field_helper( // with fields and aggregates from other references to the same relationship. let aggregates = query_plan.aggregates.clone(); let fields = query_plan.fields.clone(); + let groups = query_plan.groups.clone(); let relationship_key = plan_state.register_relationship(relationship, arguments, query_plan)?; @@ -96,6 +98,7 @@ fn type_annotated_field_helper( relationship: relationship_key, aggregates, fields, + groups, } } }; @@ -162,6 +165,10 @@ fn type_annotated_nested_field_helper( )?), }) } + // TODO: ENG-1464 + (ndc::NestedField::Collection(_), _) => Err(QueryPlanError::NotImplemented( + "query.nested_fields.nested_collections".to_string(), + ))?, (nested, Type::Nullable(t)) => { // let path = append_to_path(path, []) type_annotated_nested_field_helper( diff --git a/crates/ndc-query-plan/src/plan_for_query_request/unify_relationship_references.rs b/crates/ndc-query-plan/src/plan_for_query_request/unify_relationship_references.rs index 1d16e70c..be2bae6c 100644 --- a/crates/ndc-query-plan/src/plan_for_query_request/unify_relationship_references.rs +++ b/crates/ndc-query-plan/src/plan_for_query_request/unify_relationship_references.rs @@ -7,8 +7,8 @@ use ndc_models as ndc; use thiserror::Error; use crate::{ - Aggregate, ConnectorTypes, Expression, Field, NestedArray, NestedField, NestedObject, Query, - Relationship, RelationshipArgument, Relationships, + Aggregate, ConnectorTypes, Expression, Field, GroupExpression, Grouping, NestedArray, + NestedField, NestedObject, Query, Relationship, RelationshipArgument, Relationships, }; #[derive(Debug, Error)] @@ -95,7 +95,6 @@ where let mismatching_fields = [ (a.limit != b.limit, "limit"), - (a.aggregates_limit != b.aggregates_limit, "aggregates_limit"), (a.offset != b.offset, "offset"), (a.order_by != b.order_by, "order_by"), (predicate_a != predicate_b, "predicate"), @@ -117,13 +116,13 @@ where })?; let query = Query { - aggregates: unify_aggregates(a.aggregates, b.aggregates)?, + aggregates: unify_options(a.aggregates, b.aggregates, unify_aggregates)?, fields: unify_fields(a.fields, b.fields)?, limit: a.limit, - aggregates_limit: a.aggregates_limit, offset: a.offset, order_by: a.order_by, predicate: predicate_a, + groups: unify_options(a.groups, b.groups, unify_groups)?, relationships: unify_nested_relationships(a.relationships, b.relationships)?, 
scope, }; @@ -131,9 +130,9 @@ where } fn unify_aggregates( - a: Option>>, - b: Option>>, -) -> Result>>> + a: IndexMap>, + b: IndexMap>, +) -> Result>> where T: ConnectorTypes, { @@ -210,11 +209,13 @@ where relationship: relationship_a, aggregates: aggregates_a, fields: fields_a, + groups: groups_a, }, Field::Relationship { relationship: relationship_b, aggregates: aggregates_b, fields: fields_b, + groups: groups_b, }, ) => { if relationship_a != relationship_b { @@ -224,8 +225,9 @@ where } else { Ok(Field::Relationship { relationship: relationship_b, - aggregates: unify_aggregates(aggregates_a, aggregates_b)?, + aggregates: unify_options(aggregates_a, aggregates_b, unify_aggregates)?, fields: unify_fields(fields_a, fields_b)?, + groups: unify_options(groups_a, groups_b, unify_groups)?, }) } } @@ -284,6 +286,39 @@ where .try_collect() } +fn unify_groups(a: Grouping, b: Grouping) -> Result> +where + T: ConnectorTypes, +{ + let predicate_a = a.predicate.and_then(GroupExpression::simplify); + let predicate_b = b.predicate.and_then(GroupExpression::simplify); + + let mismatching_fields = [ + (a.dimensions != b.dimensions, "dimensions"), + (predicate_a != predicate_b, "predicate"), + (a.order_by != b.order_by, "order_by"), + (a.limit != b.limit, "limit"), + (a.offset != b.offset, "offset"), + ] + .into_iter() + .filter_map(|(is_mismatch, field_name)| if is_mismatch { Some(field_name) } else { None }) + .collect_vec(); + + if !mismatching_fields.is_empty() { + return Err(RelationshipUnificationError::Mismatch(mismatching_fields)); + } + + let unified = Grouping { + dimensions: a.dimensions, + aggregates: unify_aggregates(a.aggregates, b.aggregates)?, + predicate: predicate_a, + order_by: a.order_by, + limit: a.limit, + offset: a.offset, + }; + Ok(unified) +} + /// In some cases we receive the predicate expression `Some(Expression::And [])` which does not /// filter out anything, but fails equality checks with `None`. Simplifying that expression to /// `None` allows us to unify relationship references that we wouldn't otherwise be able to. 
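
The new `unify_groups` follows the same pattern that the existing query unification already uses: simplify both predicates first so that an empty `And` compares equal to an absent predicate, collect the names of every field that differs between the two sides, and refuse to unify unless that list is empty. Below is a minimal, self-contained sketch of that mismatch-check pattern; `SimpleGrouping` and `UnificationError` are illustrative stand-ins rather than the crate's `Grouping<T>` and `RelationshipUnificationError` types.

// Sketch of the field-by-field mismatch check used when unifying two references
// to the same relationship: record the names of fields that differ and report
// them all at once so the resulting error is actionable.
// `SimpleGrouping` and `UnificationError` are stand-ins, not the crate's types.
#[derive(Clone, Debug, PartialEq)]
struct SimpleGrouping {
    dimensions: Vec<String>,
    limit: Option<u32>,
    offset: Option<u32>,
}

#[derive(Debug)]
struct UnificationError {
    mismatching_fields: Vec<&'static str>,
}

fn unify(a: SimpleGrouping, b: SimpleGrouping) -> Result<SimpleGrouping, UnificationError> {
    let mismatching_fields: Vec<&'static str> = [
        (a.dimensions != b.dimensions, "dimensions"),
        (a.limit != b.limit, "limit"),
        (a.offset != b.offset, "offset"),
    ]
    .into_iter()
    .filter_map(|(is_mismatch, name)| is_mismatch.then_some(name))
    .collect();

    if !mismatching_fields.is_empty() {
        return Err(UnificationError { mismatching_fields });
    }
    // All compared fields are pairwise equal, so either side can be returned unchanged.
    Ok(a)
}

Unifying two groupings that agree on dimensions but disagree on `limit`, for example, fails with `mismatching_fields == ["limit"]`, which keeps the error specific about what prevented the merge.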
@@ -341,9 +376,9 @@ mod tests { use crate::{ field, object, plan_for_query_request::plan_test_helpers::{ - date, double, int, object_type, relationship, string, TestContext, + date, double, int, relationship, string, TestContext, }, - Relationship, + Relationship, Type, }; use super::unify_relationship_references; @@ -395,10 +430,10 @@ mod tests { #[test] fn unifies_nested_field_selections() -> anyhow::Result<()> { - let tomatoes_type = object_type([ + let tomatoes_type = Type::object([ ( "viewer", - object_type([("numReviews", int()), ("rating", double())]), + Type::object([("numReviews", int()), ("rating", double())]), ), ("lastUpdated", date()), ]); diff --git a/crates/ndc-query-plan/src/query_plan.rs b/crates/ndc-query-plan/src/query_plan.rs deleted file mode 100644 index 378e8e09..00000000 --- a/crates/ndc-query-plan/src/query_plan.rs +++ /dev/null @@ -1,458 +0,0 @@ -use std::{collections::BTreeMap, fmt::Debug, iter}; - -use derivative::Derivative; -use indexmap::IndexMap; -use itertools::Either; -use ndc_models::{self as ndc, OrderDirection, RelationshipType, UnaryComparisonOperator}; - -use crate::{vec_set::VecSet, Type}; - -pub trait ConnectorTypes { - type ScalarType: Clone + Debug + PartialEq + Eq; - type AggregateFunction: Clone + Debug + PartialEq; - type ComparisonOperator: Clone + Debug + PartialEq; -} - -#[derive(Derivative)] -#[derivative( - Clone(bound = ""), - Debug(bound = ""), - PartialEq(bound = "T::ScalarType: PartialEq") -)] -pub struct QueryPlan { - pub collection: ndc::CollectionName, - pub query: Query, - pub arguments: BTreeMap>, - pub variables: Option>, - - /// Types for values from the `variables` map as inferred by usages in the query request. It is - /// possible for the same variable to be used in multiple contexts with different types. This - /// map provides sets of all observed types. - /// - /// The observed type may be `None` if the type of a variable use could not be inferred. - pub variable_types: VariableTypes, - - // TODO: type for unrelated collection - pub unrelated_collections: BTreeMap>, -} - -impl QueryPlan { - pub fn has_variables(&self) -> bool { - self.variables.is_some() - } -} - -pub type Arguments = BTreeMap>; -pub type Relationships = BTreeMap>; -pub type VariableSet = BTreeMap; -pub type VariableTypes = BTreeMap>>; - -#[derive(Derivative)] -#[derivative( - Clone(bound = ""), - Debug(bound = ""), - Default(bound = ""), - PartialEq(bound = "") -)] -pub struct Query { - pub aggregates: Option>>, - pub fields: Option>>, - pub limit: Option, - pub aggregates_limit: Option, - pub offset: Option, - pub order_by: Option>, - pub predicate: Option>, - - /// Relationships referenced by fields and expressions in this query or sub-query. Does not - /// include relationships in sub-queries nested under this one. - pub relationships: Relationships, - - /// Some relationship references may introduce a named "scope" so that other parts of the query - /// request can reference fields of documents in the related collection. The connector must - /// introduce a variable, or something similar, for such references. 
- pub scope: Option, -} - -impl Query { - pub fn has_aggregates(&self) -> bool { - if let Some(aggregates) = &self.aggregates { - !aggregates.is_empty() - } else { - false - } - } - - pub fn has_fields(&self) -> bool { - if let Some(fields) = &self.fields { - !fields.is_empty() - } else { - false - } - } -} - -#[derive(Derivative)] -#[derivative( - Clone(bound = ""), - Debug(bound = ""), - PartialEq(bound = "T::ScalarType: PartialEq") -)] -pub enum Argument { - /// The argument is provided by reference to a variable - Variable { - name: ndc::VariableName, - argument_type: Type, - }, - /// The argument is provided as a literal value - Literal { - value: serde_json::Value, - argument_type: Type, - }, - /// The argument was a literal value that has been parsed as an [Expression] - Predicate { expression: Expression }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct Relationship { - pub column_mapping: BTreeMap, - pub relationship_type: RelationshipType, - pub target_collection: ndc::CollectionName, - pub arguments: BTreeMap>, - pub query: Query, -} - -#[derive(Derivative)] -#[derivative( - Clone(bound = ""), - Debug(bound = ""), - PartialEq(bound = "T::ScalarType: PartialEq") -)] -pub enum RelationshipArgument { - /// The argument is provided by reference to a variable - Variable { - name: ndc::VariableName, - argument_type: Type, - }, - /// The argument is provided as a literal value - Literal { - value: serde_json::Value, - argument_type: Type, - }, - // The argument is provided based on a column of the source collection - Column { - name: ndc::FieldName, - argument_type: Type, - }, - /// The argument was a literal value that has been parsed as an [Expression] - Predicate { expression: Expression }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct UnrelatedJoin { - pub target_collection: ndc::CollectionName, - pub arguments: BTreeMap>, - pub query: Query, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Scope { - Root, - Named(String), -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum Aggregate { - ColumnCount { - /// The column to apply the count aggregate function to - column: ndc::FieldName, - /// Whether or not only distinct items should be counted - distinct: bool, - }, - SingleColumn { - /// The column to apply the aggregation function to - column: ndc::FieldName, - /// Single column aggregate function name. - function: T::AggregateFunction, - result_type: Type, - }, - StarCount, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct NestedObject { - pub fields: IndexMap>, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct NestedArray { - pub fields: Box>, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum NestedField { - Object(NestedObject), - Array(NestedArray), -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum Field { - Column { - column: ndc::FieldName, - - /// When the type of the column is a (possibly-nullable) array or object, - /// the caller can request a subset of the complete column data, - /// by specifying fields to fetch here. - /// If omitted, the column data will be fetched in full. 
- fields: Option>, - - column_type: Type, - }, - Relationship { - /// The name of the relationship to follow for the subquery - this is the key in the - /// [Query] relationships map in this module, it is **not** the key in the - /// [ndc::QueryRequest] collection_relationships map. - relationship: ndc::RelationshipName, - aggregates: Option>>, - fields: Option>>, - }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum Expression { - And { - expressions: Vec>, - }, - Or { - expressions: Vec>, - }, - Not { - expression: Box>, - }, - UnaryComparisonOperator { - column: ComparisonTarget, - operator: UnaryComparisonOperator, - }, - BinaryComparisonOperator { - column: ComparisonTarget, - operator: T::ComparisonOperator, - value: ComparisonValue, - }, - Exists { - in_collection: ExistsInCollection, - predicate: Option>>, - }, -} - -impl Expression { - /// Get an iterator of columns referenced by the expression, not including columns of related - /// collections - pub fn query_local_comparison_targets<'a>( - &'a self, - ) -> Box> + 'a> { - match self { - Expression::And { expressions } => Box::new( - expressions - .iter() - .flat_map(|e| e.query_local_comparison_targets()), - ), - Expression::Or { expressions } => Box::new( - expressions - .iter() - .flat_map(|e| e.query_local_comparison_targets()), - ), - Expression::Not { expression } => expression.query_local_comparison_targets(), - Expression::UnaryComparisonOperator { column, .. } => { - Box::new(Self::local_columns_from_comparison_target(column)) - } - Expression::BinaryComparisonOperator { column, value, .. } => { - let value_targets = match value { - ComparisonValue::Column { column } => { - Either::Left(Self::local_columns_from_comparison_target(column)) - } - _ => Either::Right(iter::empty()), - }; - Box::new(Self::local_columns_from_comparison_target(column).chain(value_targets)) - } - Expression::Exists { .. } => Box::new(iter::empty()), - } - } - - fn local_columns_from_comparison_target( - target: &ComparisonTarget, - ) -> impl Iterator> { - match target { - t @ ComparisonTarget::Column { path, .. } => { - if path.is_empty() { - Either::Left(iter::once(t)) - } else { - Either::Right(iter::empty()) - } - } - t @ ComparisonTarget::ColumnInScope { .. } => Either::Left(iter::once(t)), - } - } -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct OrderBy { - /// The elements to order by, in priority order - pub elements: Vec>, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct OrderByElement { - pub order_direction: OrderDirection, - pub target: OrderByTarget, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum OrderByTarget { - Column { - /// The name of the column - name: ndc::FieldName, - - /// Path to a nested field within an object column - field_path: Option>, - - /// Any relationships to traverse to reach this column. These are translated from - /// [ndc::OrderByElement] values in the [ndc::QueryRequest] to names of relation - /// fields for the [QueryPlan]. - path: Vec, - }, - SingleColumnAggregate { - /// The column to apply the aggregation function to - column: ndc::FieldName, - /// Single column aggregate function name. - function: T::AggregateFunction, - - result_type: Type, - - /// Any relationships to traverse to reach this aggregate. 
These are translated from - /// [ndc::OrderByElement] values in the [ndc::QueryRequest] to names of relation - /// fields for the [QueryPlan]. - path: Vec, - }, - StarCountAggregate { - /// Any relationships to traverse to reach this aggregate. These are translated from - /// [ndc::OrderByElement] values in the [ndc::QueryRequest] to names of relation - /// fields for the [QueryPlan]. - path: Vec, - }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum ComparisonTarget { - Column { - /// The name of the column - name: ndc::FieldName, - - /// Path to a nested field within an object column - field_path: Option>, - - field_type: Type, - - /// Any relationships to traverse to reach this column. These are translated from - /// [ndc::PathElement] values in the [ndc::QueryRequest] to names of relation - /// fields for the [QueryPlan]. - path: Vec, - }, - ColumnInScope { - /// The name of the column - name: ndc::FieldName, - - /// The named scope that identifies the collection to reference. This corresponds to the - /// `scope` field of the [Query] type. - scope: Scope, - - /// Path to a nested field within an object column - field_path: Option>, - - field_type: Type, - }, -} - -impl ComparisonTarget { - pub fn column_name(&self) -> &ndc::FieldName { - match self { - ComparisonTarget::Column { name, .. } => name, - ComparisonTarget::ColumnInScope { name, .. } => name, - } - } - - pub fn relationship_path(&self) -> &[ndc::RelationshipName] { - match self { - ComparisonTarget::Column { path, .. } => path, - ComparisonTarget::ColumnInScope { .. } => &[], - } - } -} - -impl ComparisonTarget { - pub fn get_field_type(&self) -> &Type { - match self { - ComparisonTarget::Column { field_type, .. } => field_type, - ComparisonTarget::ColumnInScope { field_type, .. } => field_type, - } - } -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum ComparisonValue { - Column { - column: ComparisonTarget, - }, - Scalar { - value: serde_json::Value, - value_type: Type, - }, - Variable { - name: ndc::VariableName, - variable_type: Type, - }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub struct AggregateFunctionDefinition { - /// The scalar or object type of the result of this function - pub result_type: Type, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum ComparisonOperatorDefinition { - Equal, - In, - Custom { - /// The type of the argument to this operator - argument_type: Type, - }, -} - -#[derive(Derivative)] -#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] -pub enum ExistsInCollection { - Related { - /// Key of the relation in the [Query] joins map. Relationships are scoped to the sub-query - /// that defines the relation source. - relationship: ndc::RelationshipName, - }, - Unrelated { - /// Key of the relation in the [QueryPlan] joins map. Unrelated collections are not scoped - /// to a sub-query, instead they are given in the root [QueryPlan]. 
- unrelated_collection: String, - }, -} diff --git a/crates/ndc-query-plan/src/query_plan/aggregation.rs b/crates/ndc-query-plan/src/query_plan/aggregation.rs new file mode 100644 index 00000000..b6778318 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/aggregation.rs @@ -0,0 +1,213 @@ +use std::{borrow::Cow, collections::BTreeMap}; + +use derivative::Derivative; +use indexmap::IndexMap; +use ndc_models::{self as ndc, ArgumentName, FieldName}; + +use crate::Type; + +use super::{Argument, ConnectorTypes}; + +pub type Arguments = BTreeMap>; + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum Aggregate { + ColumnCount { + /// The column to apply the count aggregate function to + column: ndc::FieldName, + /// Arguments to satisfy the column specified by 'column' + arguments: BTreeMap>, + /// Path to a nested field within an object column + field_path: Option>, + /// Whether or not only distinct items should be counted + distinct: bool, + }, + SingleColumn { + /// The column to apply the aggregation function to + column: ndc::FieldName, + column_type: Type, + /// Arguments to satisfy the column specified by 'column' + arguments: BTreeMap>, + /// Path to a nested field within an object column + field_path: Option>, + /// Single column aggregate function name. + function: T::AggregateFunction, + result_type: Type, + }, + StarCount, +} + +impl Aggregate { + pub fn result_type(&self) -> Cow> { + match self { + Aggregate::ColumnCount { .. } => Cow::Owned(T::count_aggregate_type()), + Aggregate::SingleColumn { result_type, .. } => Cow::Borrowed(result_type), + Aggregate::StarCount => Cow::Owned(T::count_aggregate_type()), + } + } + + pub fn is_count(&self) -> bool { + match self { + Aggregate::ColumnCount { .. } => true, + Aggregate::SingleColumn { .. } => false, + Aggregate::StarCount => true, + } + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct Grouping { + /// Dimensions along which to partition the data + pub dimensions: Vec>, + /// Aggregates to compute in each group + pub aggregates: IndexMap>, + /// Optionally specify a predicate to apply after grouping rows. + /// Only used if the 'query.aggregates.group_by.filter' capability is supported. + pub predicate: Option>, + /// Optionally specify how groups should be ordered + /// Only used if the 'query.aggregates.group_by.order' capability is supported. + pub order_by: Option>, + /// Optionally limit to N groups + /// Only used if the 'query.aggregates.group_by.paginate' capability is supported. + pub limit: Option, + /// Optionally offset from the Nth group + /// Only used if the 'query.aggregates.group_by.paginate' capability is supported. + pub offset: Option, +} + +/// [GroupExpression] is like [Expression] but without [Expression::ArrayComparison] or +/// [Expression::Exists] variants. 
+#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum GroupExpression { + And { + expressions: Vec>, + }, + Or { + expressions: Vec>, + }, + Not { + expression: Box>, + }, + UnaryComparisonOperator { + target: GroupComparisonTarget, + operator: ndc::UnaryComparisonOperator, + }, + BinaryComparisonOperator { + target: GroupComparisonTarget, + operator: T::ComparisonOperator, + value: GroupComparisonValue, + }, +} + +impl GroupExpression { + /// In some cases we receive the predicate expression `Some(Expression::And [])` which does not + /// filter out anything, but fails equality checks with `None`. Simplifying that expression to + /// `None` allows us to unify relationship references that we wouldn't otherwise be able to. + pub fn simplify(self) -> Option { + match self { + GroupExpression::And { expressions } if expressions.is_empty() => None, + e => Some(e), + } + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum GroupComparisonTarget { + Aggregate { aggregate: Aggregate }, +} + +impl GroupComparisonTarget { + pub fn result_type(&self) -> Cow> { + match self { + GroupComparisonTarget::Aggregate { aggregate } => aggregate.result_type(), + } + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum GroupComparisonValue { + /// A scalar value to compare against + Scalar { + value: serde_json::Value, + value_type: Type, + }, + /// A value to compare against that is to be drawn from the query's variables. + /// Only used if the 'query.variables' capability is supported. + Variable { + name: ndc::VariableName, + variable_type: Type, + }, +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum Dimension { + Column { + /// Any (object) relationships to traverse to reach this column. + /// Only non-empty if the 'relationships' capability is supported. + /// + /// These are translated from [ndc::PathElement] values in the to names of relation fields + /// for the [crate::QueryPlan]. + path: Vec, + /// The name of the column + column_name: FieldName, + /// Arguments to satisfy the column specified by 'column_name' + arguments: BTreeMap>, + /// Path to a nested field within an object column + field_path: Option>, + /// Type of the field that you get **after** follwing `field_path` to a possibly-nested + /// field. + /// + /// If this column references a field in a related collection then this type will be an + /// array type whose element type is the type of the related field. The array type wrapper + /// applies regardless of whether the relationship is an array or an object relationship. + field_type: Type, + }, +} + +impl Dimension { + pub fn value_type(&self) -> &Type { + match self { + Dimension::Column { field_type, .. 
} => field_type, + } + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct GroupOrderBy { + /// The elements to order by, in priority order + pub elements: Vec>, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct GroupOrderByElement { + pub order_direction: ndc::OrderDirection, + pub target: GroupOrderByTarget, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum GroupOrderByTarget { + Dimension { + /// The index of the dimension to order by, selected from the + /// dimensions provided in the `Grouping` request. + index: usize, + }, + Aggregate { + /// Aggregation method to apply + aggregate: Aggregate, + }, +} diff --git a/crates/ndc-query-plan/src/query_plan/connector_types.rs b/crates/ndc-query-plan/src/query_plan/connector_types.rs new file mode 100644 index 00000000..94b65b4e --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/connector_types.rs @@ -0,0 +1,15 @@ +use std::fmt::Debug; +use std::hash::Hash; + +use crate::Type; + +pub trait ConnectorTypes { + type ScalarType: Clone + Debug + Hash + PartialEq + Eq; + type AggregateFunction: Clone + Debug + Hash + PartialEq + Eq; + type ComparisonOperator: Clone + Debug + Hash + PartialEq + Eq; + + /// Result type for count aggregations + fn count_aggregate_type() -> Type; + + fn string_type() -> Type; +} diff --git a/crates/ndc-query-plan/src/query_plan/expression.rs b/crates/ndc-query-plan/src/query_plan/expression.rs new file mode 100644 index 00000000..5f854259 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/expression.rs @@ -0,0 +1,299 @@ +use std::{borrow::Cow, collections::BTreeMap, iter}; + +use derivative::Derivative; +use itertools::Either; +use ndc_models::{self as ndc, ArgumentName, FieldName}; + +use crate::Type; + +use super::{Argument, ConnectorTypes}; + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum Expression { + And { + expressions: Vec>, + }, + Or { + expressions: Vec>, + }, + Not { + expression: Box>, + }, + UnaryComparisonOperator { + column: ComparisonTarget, + operator: ndc::UnaryComparisonOperator, + }, + BinaryComparisonOperator { + column: ComparisonTarget, + operator: T::ComparisonOperator, + value: ComparisonValue, + }, + /// A comparison against a nested array column. + /// Only used if the 'query.nested_fields.filter_by.nested_arrays' capability is supported. + ArrayComparison { + column: ComparisonTarget, + comparison: ArrayComparison, + }, + Exists { + in_collection: ExistsInCollection, + predicate: Option>>, + }, +} + +impl Expression { + /// In some cases we receive the predicate expression `Some(Expression::And [])` which does not + /// filter out anything, but fails equality checks with `None`. Simplifying that expression to + /// `None` allows us to unify relationship references that we wouldn't otherwise be able to. + pub fn simplify(self) -> Option { + match self { + Expression::And { expressions } if expressions.is_empty() => None, + e => Some(e), + } + } + + /// Get an iterator of columns referenced by the expression, not including columns of related + /// collections. This is used to build a plan for joining the referenced collection - we need + /// to include fields in the join that the expression needs to access. 
+ // + // TODO: ENG-1457 When we implement query.aggregates.filter_by we'll need to collect aggregates + // references. That's why this function returns [ComparisonTarget] instead of [Field]. + pub fn query_local_comparison_targets<'a>( + &'a self, + ) -> Box>> + 'a> { + match self { + Expression::And { expressions } => Box::new( + expressions + .iter() + .flat_map(|e| e.query_local_comparison_targets()), + ), + Expression::Or { expressions } => Box::new( + expressions + .iter() + .flat_map(|e| e.query_local_comparison_targets()), + ), + Expression::Not { expression } => expression.query_local_comparison_targets(), + Expression::UnaryComparisonOperator { column, .. } => { + Box::new(std::iter::once(Cow::Borrowed(column))) + } + Expression::BinaryComparisonOperator { column, value, .. } => Box::new( + std::iter::once(Cow::Borrowed(column)) + .chain(Self::local_targets_from_comparison_value(value).map(Cow::Owned)), + ), + Expression::ArrayComparison { column, comparison } => { + let value_targets = match comparison { + ArrayComparison::Contains { value } => Either::Left( + Self::local_targets_from_comparison_value(value).map(Cow::Owned), + ), + ArrayComparison::IsEmpty => Either::Right(std::iter::empty()), + }; + Box::new(std::iter::once(Cow::Borrowed(column)).chain(value_targets)) + } + Expression::Exists { .. } => Box::new(iter::empty()), + } + } + + fn local_targets_from_comparison_value( + value: &ComparisonValue, + ) -> impl Iterator> { + match value { + ComparisonValue::Column { + path, + name, + arguments, + field_path, + field_type, + .. + } => { + if path.is_empty() { + Either::Left(iter::once(ComparisonTarget::Column { + name: name.clone(), + arguments: arguments.clone(), + field_path: field_path.clone(), + field_type: field_type.clone(), + })) + } else { + Either::Right(iter::empty()) + } + } + _ => Either::Right(std::iter::empty()), + } + } +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum ArrayComparison { + /// Check if the array contains the specified value. + /// Only used if the 'query.nested_fields.filter_by.nested_arrays.contains' capability is supported. + Contains { value: ComparisonValue }, + /// Check is the array is empty. + /// Only used if the 'query.nested_fields.filter_by.nested_arrays.is_empty' capability is supported. + IsEmpty, +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum ComparisonTarget { + /// The comparison targets a column. + Column { + /// The name of the column + name: ndc::FieldName, + + /// Arguments to satisfy the column specified by 'name' + arguments: BTreeMap>, + + /// Path to a nested field within an object column + field_path: Option>, + + /// Type of the field that you get *after* follwing `field_path` to a possibly-nested + /// field. + field_type: Type, + }, + // TODO: ENG-1457 Add this variant to support query.aggregates.filter_by + // /// The comparison targets the result of aggregation. + // /// Only used if the 'query.aggregates.filter_by' capability is supported. 
+ // Aggregate { + // /// Non-empty collection of relationships to traverse + // path: Vec, + // /// The aggregation method to use + // aggregate: Aggregate, + // }, +} + +impl ComparisonTarget { + pub fn column(name: impl Into, field_type: Type) -> Self { + Self::Column { + name: name.into(), + arguments: Default::default(), + field_path: Default::default(), + field_type, + } + } + + pub fn target_type(&self) -> &Type { + match self { + ComparisonTarget::Column { field_type, .. } => field_type, + // TODO: ENG-1457 + // ComparisonTarget::Aggregate { aggregate, .. } => aggregate.result_type, + } + } +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum ComparisonValue { + Column { + /// Any relationships to traverse to reach this column. + /// Only non-empty if the 'relationships.relation_comparisons' is supported. + path: Vec, + /// The name of the column + name: ndc::FieldName, + /// Arguments to satisfy the column specified by 'name' + arguments: BTreeMap>, + /// Path to a nested field within an object column. + /// Only non-empty if the 'query.nested_fields.filter_by' capability is supported. + field_path: Option>, + /// Type of the field that you get *after* follwing `field_path` to a possibly-nested + /// field. + field_type: Type, + /// The scope in which this column exists, identified + /// by an top-down index into the stack of scopes. + /// The stack grows inside each `Expression::Exists`, + /// so scope 0 (the default) refers to the current collection, + /// and each subsequent index refers to the collection outside + /// its predecessor's immediately enclosing `Expression::Exists` + /// expression. + /// Only used if the 'query.exists.named_scopes' capability is supported. + scope: Option, + }, + Scalar { + value: serde_json::Value, + value_type: Type, + }, + Variable { + name: ndc::VariableName, + variable_type: Type, + }, +} + +impl ComparisonValue { + pub fn column(name: impl Into, field_type: Type) -> Self { + Self::Column { + path: Default::default(), + name: name.into(), + arguments: Default::default(), + field_path: Default::default(), + field_type, + scope: Default::default(), + } + } +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum ExistsInCollection { + /// The rows to evaluate the exists predicate against come from a related collection. + /// Only used if the 'relationships' capability is supported. + Related { + /// Key of the relation in the [Query] joins map. Relationships are scoped to the sub-query + /// that defines the relation source. + relationship: ndc::RelationshipName, + }, + /// The rows to evaluate the exists predicate against come from an unrelated collection + /// Only used if the 'query.exists.unrelated' capability is supported. + Unrelated { + /// Key of the relation in the [QueryPlan] joins map. Unrelated collections are not scoped + /// to a sub-query, instead they are given in the root [QueryPlan]. + unrelated_collection: String, + }, + /// The rows to evaluate the exists predicate against come from a nested array field. + /// Only used if the 'query.exists.nested_collections' capability is supported. + NestedCollection { + column_name: ndc::FieldName, + arguments: BTreeMap>, + /// Path to a nested collection via object columns + field_path: Vec, + }, + /// Specifies a column that contains a nested array of scalars. 
The + /// array will be brought into scope of the nested expression where + /// each element becomes an object with one '__value' column that + /// contains the element value. + /// Only used if the 'query.exists.nested_scalar_collections' capability is supported. + NestedScalarCollection { + column_name: FieldName, + arguments: BTreeMap>, + /// Path to a nested collection via object columns + field_path: Vec, + }, +} diff --git a/crates/ndc-query-plan/src/query_plan/fields.rs b/crates/ndc-query-plan/src/query_plan/fields.rs new file mode 100644 index 00000000..c2f88957 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/fields.rs @@ -0,0 +1,54 @@ +use derivative::Derivative; +use indexmap::IndexMap; +use ndc_models as ndc; + +use crate::Type; + +use super::{Aggregate, ConnectorTypes, Grouping}; + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum Field { + Column { + column: ndc::FieldName, + + /// When the type of the column is a (possibly-nullable) array or object, + /// the caller can request a subset of the complete column data, + /// by specifying fields to fetch here. + /// If omitted, the column data will be fetched in full. + fields: Option>, + + column_type: Type, + }, + Relationship { + /// The name of the relationship to follow for the subquery - this is the key in the + /// [Query] relationships map in this module, it is **not** the key in the + /// [ndc::QueryRequest] collection_relationships map. + relationship: ndc::RelationshipName, + aggregates: Option>>, + fields: Option>>, + groups: Option>, + }, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct NestedObject { + pub fields: IndexMap>, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct NestedArray { + pub fields: Box>, +} + +// TODO: ENG-1464 define NestedCollection struct + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum NestedField { + Object(NestedObject), + Array(NestedArray), + // TODO: ENG-1464 add `Collection(NestedCollection)` variant +} diff --git a/crates/ndc-query-plan/src/query_plan/mod.rs b/crates/ndc-query-plan/src/query_plan/mod.rs new file mode 100644 index 00000000..1ba7757c --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/mod.rs @@ -0,0 +1,14 @@ +mod aggregation; +pub use aggregation::*; +mod connector_types; +pub use connector_types::*; +mod expression; +pub use expression::*; +mod fields; +pub use fields::*; +mod ordering; +pub use ordering::*; +mod requests; +pub use requests::*; +mod schema; +pub use schema::*; diff --git a/crates/ndc-query-plan/src/query_plan/ordering.rs b/crates/ndc-query-plan/src/query_plan/ordering.rs new file mode 100644 index 00000000..2e2cb0b7 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/ordering.rs @@ -0,0 +1,46 @@ +use std::collections::BTreeMap; + +use derivative::Derivative; +use ndc_models::{self as ndc, ArgumentName, OrderDirection}; + +use super::{Aggregate, Argument, ConnectorTypes}; + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct OrderBy { + /// The elements to order by, in priority order + pub elements: Vec>, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct OrderByElement { + pub order_direction: OrderDirection, + pub target: OrderByTarget, +} + 
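
`OrderByTarget`, defined next, mirrors the corresponding ndc-models change: the old `SingleColumnAggregate` and `StarCountAggregate` variants are folded into a single `Aggregate` variant that carries a relationship `path` plus a full `Aggregate` value, as the updated ordering test earlier in this diff shows. The sketch below illustrates the two-way match a consumer of the plan ends up writing over that shape; `Target` and the dotted sort-key encoding are illustrative stand-ins, not the crate's `OrderByTarget<T>` or the MongoDB connector's real translation.

// Illustrative stand-ins for `OrderByTarget<T>`: just enough structure to show
// the column-vs-aggregate split that replaces the old SingleColumnAggregate and
// StarCountAggregate variants. Not the connector's actual translation.
enum Target {
    Column { path: Vec<String>, name: String },
    Aggregate { path: Vec<String>, aggregate_name: String },
}

// Render a target as a dotted key, the way a consumer might name a computed
// field to sort on after joining the relationships in `path`.
fn sort_key(target: &Target) -> String {
    match target {
        Target::Column { path, name } => path
            .iter()
            .map(String::as_str)
            .chain(std::iter::once(name.as_str()))
            .collect::<Vec<_>>()
            .join("."),
        Target::Aggregate { path, aggregate_name } => path
            .iter()
            .map(String::as_str)
            .chain(std::iter::once(aggregate_name.as_str()))
            .collect::<Vec<_>>()
            .join("."),
    }
}

With this stand-in encoding, ordering by the average `year` across `author_articles` would yield a key along the lines of `author_articles.year_avg` once the aggregate has been materialized by the join; the real connector builds pipeline stages rather than strings, so treat this purely as an illustration of the enum shape.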
+#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum OrderByTarget { + Column { + /// Any relationships to traverse to reach this column. These are translated from + /// [ndc::OrderByElement] values in the [ndc::QueryRequest] to names of relation + /// fields for the [crate::QueryPlan]. + path: Vec, + + /// The name of the column + name: ndc::FieldName, + + /// Arguments to satisfy the column specified by 'name' + arguments: BTreeMap>, + + /// Path to a nested field within an object column + field_path: Option>, + }, + Aggregate { + /// Non-empty collection of relationships to traverse + path: Vec, + /// The aggregation method to use + aggregate: Aggregate, + }, +} diff --git a/crates/ndc-query-plan/src/query_plan/requests.rs b/crates/ndc-query-plan/src/query_plan/requests.rs new file mode 100644 index 00000000..a5dc7ed6 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/requests.rs @@ -0,0 +1,171 @@ +use std::collections::BTreeMap; + +use derivative::Derivative; +use indexmap::IndexMap; +use ndc_models::{self as ndc, RelationshipType}; +use nonempty::NonEmpty; + +use crate::{vec_set::VecSet, Type}; + +use super::{Aggregate, ConnectorTypes, Expression, Field, Grouping, OrderBy}; + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = "T::ScalarType: PartialEq") +)] +pub struct QueryPlan { + pub collection: ndc::CollectionName, + pub query: Query, + pub arguments: BTreeMap>, + pub variables: Option>, + + /// Types for values from the `variables` map as inferred by usages in the query request. It is + /// possible for the same variable to be used in multiple contexts with different types. This + /// map provides sets of all observed types. + /// + /// The observed type may be `None` if the type of a variable use could not be inferred. + pub variable_types: VariableTypes, + + // TODO: type for unrelated collection + pub unrelated_collections: BTreeMap>, +} + +impl QueryPlan { + pub fn has_variables(&self) -> bool { + self.variables.is_some() + } +} + +pub type Relationships = BTreeMap>; +pub type VariableSet = BTreeMap; +pub type VariableTypes = BTreeMap>>; + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Default(bound = ""), + PartialEq(bound = "") +)] +pub struct Query { + pub aggregates: Option>>, + pub fields: Option>>, + pub limit: Option, + pub offset: Option, + pub order_by: Option>, + pub predicate: Option>, + pub groups: Option>, + + /// Relationships referenced by fields and expressions in this query or sub-query. Does not + /// include relationships in sub-queries nested under this one. + pub relationships: Relationships, + + /// Some relationship references may introduce a named "scope" so that other parts of the query + /// request can reference fields of documents in the related collection. The connector must + /// introduce a variable, or something similar, for such references. 
+ pub scope: Option, +} + +impl Query { + pub fn has_aggregates(&self) -> bool { + if let Some(aggregates) = &self.aggregates { + !aggregates.is_empty() + } else { + false + } + } + + pub fn has_fields(&self) -> bool { + if let Some(fields) = &self.fields { + !fields.is_empty() + } else { + false + } + } + + pub fn has_groups(&self) -> bool { + self.groups.is_some() + } +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + Hash(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub enum Argument { + /// The argument is provided by reference to a variable + Variable { + name: ndc::VariableName, + argument_type: Type, + }, + /// The argument is provided as a literal value + Literal { + value: serde_json::Value, + argument_type: Type, + }, + /// The argument was a literal value that has been parsed as an [Expression] + Predicate { expression: Expression }, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct Relationship { + /// A mapping between columns on the source row to columns on the target collection. + /// The column on the target collection is specified via a field path (ie. an array of field + /// names that descend through nested object fields). The field path will only contain a single item, + /// meaning a column on the target collection's type, unless the 'relationships.nested' + /// capability is supported, in which case multiple items denotes a nested object field. + pub column_mapping: BTreeMap>, + pub relationship_type: RelationshipType, + /// The name of a collection + pub target_collection: ndc::CollectionName, + /// Values to be provided to any collection arguments + pub arguments: BTreeMap>, + pub query: Query, +} + +#[derive(Derivative)] +#[derivative( + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = "T::ScalarType: PartialEq") +)] +pub enum RelationshipArgument { + /// The argument is provided by reference to a variable + Variable { + name: ndc::VariableName, + argument_type: Type, + }, + /// The argument is provided as a literal value + Literal { + value: serde_json::Value, + argument_type: Type, + }, + // The argument is provided based on a column of the source collection + Column { + name: ndc::FieldName, + argument_type: Type, + }, + /// The argument was a literal value that has been parsed as an [Expression] + Predicate { expression: Expression }, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct UnrelatedJoin { + pub target_collection: ndc::CollectionName, + pub arguments: BTreeMap>, + pub query: Query, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Scope { + Root, + Named(String), +} diff --git a/crates/ndc-query-plan/src/query_plan/schema.rs b/crates/ndc-query-plan/src/query_plan/schema.rs new file mode 100644 index 00000000..36ee6dc2 --- /dev/null +++ b/crates/ndc-query-plan/src/query_plan/schema.rs @@ -0,0 +1,80 @@ +use derivative::Derivative; +use ndc_models as ndc; + +use crate::Type; + +use super::ConnectorTypes; + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub enum ComparisonOperatorDefinition { + Equal, + In, + LessThan, + LessThanOrEqual, + GreaterThan, + GreaterThanOrEqual, + Contains, + ContainsInsensitive, + StartsWith, + StartsWithInsensitive, + EndsWith, + EndsWithInsensitive, + Custom { + /// The type of the argument to this operator + argument_type: Type, + }, +} + +impl ComparisonOperatorDefinition { 
+ pub fn argument_type(self, left_operand_type: &Type) -> Type { + use ComparisonOperatorDefinition as C; + match self { + C::In => Type::ArrayOf(Box::new(left_operand_type.clone())), + C::Equal + | C::LessThan + | C::LessThanOrEqual + | C::GreaterThan + | C::GreaterThanOrEqual => left_operand_type.clone(), + C::Contains + | C::ContainsInsensitive + | C::StartsWith + | C::StartsWithInsensitive + | C::EndsWith + | C::EndsWithInsensitive => T::string_type(), + C::Custom { argument_type } => argument_type, + } + } + + pub fn from_ndc_definition( + ndc_definition: &ndc::ComparisonOperatorDefinition, + map_type: impl FnOnce(&ndc::Type) -> Result, E>, + ) -> Result { + use ndc::ComparisonOperatorDefinition as NDC; + let definition = match ndc_definition { + NDC::Equal => Self::Equal, + NDC::In => Self::In, + NDC::LessThan => Self::LessThan, + NDC::LessThanOrEqual => Self::LessThanOrEqual, + NDC::GreaterThan => Self::GreaterThan, + NDC::GreaterThanOrEqual => Self::GreaterThanOrEqual, + NDC::Contains => Self::Contains, + NDC::ContainsInsensitive => Self::ContainsInsensitive, + NDC::StartsWith => Self::StartsWith, + NDC::StartsWithInsensitive => Self::StartsWithInsensitive, + NDC::EndsWith => Self::EndsWith, + NDC::EndsWithInsensitive => Self::EndsWithInsensitive, + NDC::Custom { argument_type } => Self::Custom { + argument_type: map_type(argument_type)?, + }, + }; + Ok(definition) + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""), PartialEq(bound = ""))] +pub struct AggregateFunctionDefinition { + /// The scalar or object type of the result of this function + pub result_type: Type, +} diff --git a/crates/ndc-query-plan/src/type_system.rs b/crates/ndc-query-plan/src/type_system.rs index 5d67904e..dce58f1d 100644 --- a/crates/ndc-query-plan/src/type_system.rs +++ b/crates/ndc-query-plan/src/type_system.rs @@ -1,13 +1,15 @@ use ref_cast::RefCast; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, fmt::Display}; use itertools::Itertools as _; -use ndc_models as ndc; +use ndc_models::{self as ndc, ArgumentName, ObjectTypeName}; use crate::{self as plan, QueryPlanError}; +type Result = std::result::Result; + /// The type of values that a column, field, or argument may take. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq)] pub enum Type { Scalar(ScalarType), /// The name of an object type declared in `objectTypes` @@ -15,28 +17,207 @@ pub enum Type { ArrayOf(Box>), /// A nullable form of any of the other types Nullable(Box>), + /// Used internally + Tuple(Vec>), } impl Type { + pub fn array_of(t: Self) -> Self { + Self::ArrayOf(Box::new(t)) + } + + pub fn named_object( + name: impl Into, + fields: impl IntoIterator, impl Into>)>, + ) -> Self { + Self::Object(ObjectType::new(fields).named(name)) + } + + pub fn nullable(t: Self) -> Self { + t.into_nullable() + } + + pub fn object( + fields: impl IntoIterator, impl Into>)>, + ) -> Self { + Self::Object(ObjectType::new(fields)) + } + + pub fn scalar(scalar_type: impl Into) -> Self { + Self::Scalar(scalar_type.into()) + } + pub fn into_nullable(self) -> Self { match self { t @ Type::Nullable(_) => t, t => Type::Nullable(Box::new(t)), } } + + pub fn is_array(&self) -> bool { + match self { + Type::ArrayOf(_) => true, + Type::Nullable(t) => t.is_array(), + _ => false, + } + } + + pub fn into_array_element_type(self) -> Result + where + S: Clone + std::fmt::Debug, + { + match self { + Type::ArrayOf(t) => Ok(*t), + Type::Nullable(t) => t.into_array_element_type(), + t => Err(QueryPlanError::TypeMismatch(format!( + "expected an array, but got type {t:?}" + ))), + } + } + + pub fn into_object_type(self) -> Result> + where + S: std::fmt::Debug, + { + match self { + Type::Object(object_type) => Ok(object_type), + Type::Nullable(t) => t.into_object_type(), + t => Err(QueryPlanError::TypeMismatch(format!( + "expected object type, but got {t:?}" + ))), + } + } } -#[derive(Debug, Clone, PartialEq, Eq)] +impl Display for Type { + /// Display types using GraphQL-style syntax + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn helper(t: &Type, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result + where + S: Display, + { + match t { + Type::Scalar(s) => write!(f, "{}", s), + Type::Object(ot) => write!(f, "{ot}"), + Type::ArrayOf(t) => write!(f, "[{t}]"), + Type::Nullable(t) => write!(f, "{t}"), + Type::Tuple(ts) => { + write!(f, "(")?; + for (index, t) in ts.iter().enumerate() { + write!(f, "{t}")?; + if index < ts.len() - 1 { + write!(f, ", ")?; + } + } + write!(f, ")") + } + } + } + match self { + Type::Nullable(t) => helper(t, f), + t => { + helper(t, f)?; + write!(f, "!") + } + } + } +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] pub struct ObjectType { /// A type name may be tracked for error reporting. The name does not affect how query plans /// are generated. 
pub name: Option, - pub fields: BTreeMap>, + pub fields: BTreeMap>, } impl ObjectType { + pub fn new( + fields: impl IntoIterator, impl Into>)>, + ) -> Self { + ObjectType { + name: None, + fields: fields + .into_iter() + .map(|(name, field)| (name.into(), field.into())) + .collect(), + } + } + + pub fn named(mut self, name: impl Into) -> Self { + self.name = Some(name.into()); + self + } + pub fn named_fields(&self) -> impl Iterator)> { - self.fields.iter() + self.fields + .iter() + .map(|(name, field)| (name, &field.r#type)) + } + + pub fn get(&self, field_name: &ndc::FieldName) -> Result<&ObjectField> { + self.fields + .get(field_name) + .ok_or_else(|| QueryPlanError::UnknownObjectTypeField { + object_type: None, + field_name: field_name.clone(), + path: Default::default(), + }) + } +} + +impl Display for ObjectType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{{ ")?; + for (index, (name, field)) in self.fields.iter().enumerate() { + write!(f, "{name}: {}", field.r#type)?; + if index < self.fields.len() - 1 { + write!(f, ", ")?; + } + } + write!(f, " }}")?; + Ok(()) + } +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct ObjectField { + pub r#type: Type, + /// The arguments available to the field - Matches implementation from CollectionInfo + pub parameters: BTreeMap>, +} + +impl ObjectField { + pub fn new(r#type: Type) -> Self { + Self { + r#type, + parameters: Default::default(), + } + } + + pub fn into_nullable(self) -> Self { + let new_field_type = match self.r#type { + t @ Type::Nullable(_) => t, + t => Type::Nullable(Box::new(t)), + }; + Self { + r#type: new_field_type, + parameters: self.parameters, + } + } + + pub fn with_parameters(mut self, parameters: BTreeMap>) -> Self { + self.parameters = parameters; + self + } +} + +impl From> for ObjectField { + fn from(value: Type) -> Self { + ObjectField { + r#type: value, + parameters: Default::default(), + } } } @@ -48,7 +229,7 @@ pub fn inline_object_types( object_types: &BTreeMap, t: &ndc::Type, lookup_scalar_type: fn(&ndc::ScalarTypeName) -> Option, -) -> Result, QueryPlanError> { +) -> Result> { let plan_type = match t { ndc::Type::Named { name } => lookup_type(object_types, name, lookup_scalar_type)?, @@ -69,7 +250,7 @@ fn lookup_type( object_types: &BTreeMap, name: &ndc::TypeName, lookup_scalar_type: fn(&ndc::ScalarTypeName) -> Option, -) -> Result, QueryPlanError> { +) -> Result> { if let Some(scalar_type) = lookup_scalar_type(ndc::ScalarTypeName::ref_cast(name)) { return Ok(Type::Scalar(scalar_type)); } @@ -85,7 +266,7 @@ fn lookup_object_type_helper( object_types: &BTreeMap, name: &ndc::ObjectTypeName, lookup_scalar_type: fn(&ndc::ScalarTypeName) -> Option, -) -> Result, QueryPlanError> { +) -> Result> { let object_type = object_types .get(name) .ok_or_else(|| QueryPlanError::UnknownObjectType(name.to_string()))?; @@ -96,12 +277,18 @@ fn lookup_object_type_helper( .fields .iter() .map(|(name, field)| { + let field_type = + inline_object_types(object_types, &field.r#type, lookup_scalar_type)?; Ok(( name.to_owned(), - inline_object_types(object_types, &field.r#type, lookup_scalar_type)?, - )) as Result<_, QueryPlanError> + plan::ObjectField { + r#type: field_type, + parameters: Default::default(), // TODO: connect ndc arguments to plan + // parameters + }, + )) }) - .try_collect()?, + .try_collect::<_, _, QueryPlanError>()?, }; Ok(plan_object_type) } @@ -110,6 +297,6 @@ pub fn lookup_object_type( object_types: &BTreeMap, name: &ndc::ObjectTypeName, lookup_scalar_type: 
fn(&ndc::ScalarTypeName) -> Option, -) -> Result, QueryPlanError> { +) -> Result> { lookup_object_type_helper(object_types, name, lookup_scalar_type) } diff --git a/crates/ndc-test-helpers/src/aggregates.rs b/crates/ndc-test-helpers/src/aggregates.rs index 212222c1..16c1eb75 100644 --- a/crates/ndc-test-helpers/src/aggregates.rs +++ b/crates/ndc-test-helpers/src/aggregates.rs @@ -1,15 +1,48 @@ -#[macro_export()] -macro_rules! column_aggregate { - ($name:literal => $column:literal, $function:literal) => { - ( - $name, - $crate::ndc_models::Aggregate::SingleColumn { - column: $column.into(), - function: $function.into(), - field_path: None, - }, - ) - }; +use std::collections::BTreeMap; + +use ndc_models::{Aggregate, AggregateFunctionName, Argument, ArgumentName, FieldName}; + +use crate::column::Column; + +pub struct AggregateColumnBuilder { + column: FieldName, + arguments: BTreeMap, + field_path: Option>, + function: AggregateFunctionName, +} + +pub fn column_aggregate( + column: impl Into, + function: impl Into, +) -> AggregateColumnBuilder { + let column = column.into(); + AggregateColumnBuilder { + column: column.column, + function: function.into(), + arguments: column.arguments, + field_path: column.field_path, + } +} + +impl AggregateColumnBuilder { + pub fn field_path( + mut self, + field_path: impl IntoIterator>, + ) -> Self { + self.field_path = Some(field_path.into_iter().map(Into::into).collect()); + self + } +} + +impl From for Aggregate { + fn from(builder: AggregateColumnBuilder) -> Self { + Aggregate::SingleColumn { + column: builder.column, + arguments: builder.arguments, + function: builder.function, + field_path: builder.field_path, + } + } } #[macro_export()] @@ -26,6 +59,7 @@ macro_rules! column_count_aggregate { $name, $crate::ndc_models::Aggregate::ColumnCount { column: $column.into(), + arguments: Default::default(), distinct: $distinct.to_owned(), field_path: None, }, diff --git a/crates/ndc-test-helpers/src/collection_info.rs b/crates/ndc-test-helpers/src/collection_info.rs index 3e042711..0862f85a 100644 --- a/crates/ndc-test-helpers/src/collection_info.rs +++ b/crates/ndc-test-helpers/src/collection_info.rs @@ -9,7 +9,7 @@ pub fn collection(name: impl Display + Clone) -> (ndc_models::CollectionName, Co arguments: Default::default(), collection_type: name.to_string().into(), uniqueness_constraints: make_primary_key_uniqueness_constraint(name.clone()), - foreign_keys: Default::default(), + relational_mutations: None, }; (name.to_string().into(), coll) } diff --git a/crates/ndc-test-helpers/src/column.rs b/crates/ndc-test-helpers/src/column.rs new file mode 100644 index 00000000..ce492ab6 --- /dev/null +++ b/crates/ndc-test-helpers/src/column.rs @@ -0,0 +1,63 @@ +use std::collections::BTreeMap; + +use itertools::Itertools as _; +use ndc_models::{Argument, ArgumentName, FieldName, PathElement, RelationshipName}; + +use crate::path_element; + +/// An intermediate struct that can be used to populate ComparisonTarget::Column, +/// Dimension::Column, etc. 
+pub struct Column { + pub path: Vec, + pub column: FieldName, + pub arguments: BTreeMap, + pub field_path: Option>, +} + +impl Column { + pub fn path(mut self, elements: impl IntoIterator>) -> Self { + self.path = elements.into_iter().map(Into::into).collect(); + self + } + + pub fn from_relationship(mut self, name: impl Into) -> Self { + self.path = vec![path_element(name).into()]; + self + } +} + +pub fn column(name: impl Into) -> Column { + Column { + path: Default::default(), + column: name.into(), + arguments: Default::default(), + field_path: Default::default(), + } +} + +impl From<&str> for Column { + fn from(input: &str) -> Self { + let mut parts = input.split("."); + let column = parts + .next() + .expect("a column reference must not be an empty string") + .into(); + let field_path = parts.map(Into::into).collect_vec(); + Column { + path: Default::default(), + column, + arguments: Default::default(), + field_path: if field_path.is_empty() { + None + } else { + Some(field_path) + }, + } + } +} + +impl From for Column { + fn from(name: FieldName) -> Self { + column(name) + } +} diff --git a/crates/ndc-test-helpers/src/comparison_target.rs b/crates/ndc-test-helpers/src/comparison_target.rs index 41463113..2bad170c 100644 --- a/crates/ndc-test-helpers/src/comparison_target.rs +++ b/crates/ndc-test-helpers/src/comparison_target.rs @@ -3,42 +3,18 @@ macro_rules! target { ($column:literal) => { $crate::ndc_models::ComparisonTarget::Column { name: $column.into(), + arguments: Default::default(), field_path: None, - path: vec![], } }; ($column:literal, field_path:$field_path:expr $(,)?) => { $crate::ndc_models::ComparisonTarget::Column { name: $column.into(), + arguments: Default::default(), field_path: $field_path.into_iter().map(|x| x.into()).collect(), - path: vec![], - } - }; - ($column:literal, relations:$path:expr $(,)?) => { - $crate::ndc_models::ComparisonTarget::Column { - name: $column.into(), - field_path: None, - path: $path.into_iter().map(|x| x.into()).collect(), - } - }; - ($column:literal, field_path:$field_path:expr, relations:$path:expr $(,)?) => { - $crate::ndc_models::ComparisonTarget::Column { - name: $column.into(), - // field_path: $field_path.into_iter().map(|x| x.into()).collect(), - path: $path.into_iter().map(|x| x.into()).collect(), } }; ($target:expr) => { $target }; } - -pub fn root(name: S) -> ndc_models::ComparisonTarget -where - S: ToString, -{ - ndc_models::ComparisonTarget::RootCollectionColumn { - name: name.to_string().into(), - field_path: None, - } -} diff --git a/crates/ndc-test-helpers/src/comparison_value.rs b/crates/ndc-test-helpers/src/comparison_value.rs index 350378e1..cfbeca92 100644 --- a/crates/ndc-test-helpers/src/comparison_value.rs +++ b/crates/ndc-test-helpers/src/comparison_value.rs @@ -1,11 +1,6 @@ -#[macro_export] -macro_rules! column_value { - ($($column:tt)+) => { - $crate::ndc_models::ComparisonValue::Column { - column: $crate::target!($($column)+), - } - }; -} +use std::collections::BTreeMap; + +use ndc_models::{Argument, ArgumentName, ComparisonValue, FieldName, PathElement}; #[macro_export] macro_rules! value { @@ -27,3 +22,65 @@ macro_rules! 
variable { $crate::ndc_models::ComparisonValue::Variable { name: $expr } }; } + +#[derive(Debug)] +pub struct ColumnValueBuilder { + path: Vec, + name: FieldName, + arguments: BTreeMap, + field_path: Option>, + scope: Option, +} + +pub fn column_value(name: impl Into) -> ColumnValueBuilder { + ColumnValueBuilder { + path: Default::default(), + name: name.into(), + arguments: Default::default(), + field_path: Default::default(), + scope: Default::default(), + } +} + +impl ColumnValueBuilder { + pub fn path(mut self, path: impl IntoIterator>) -> Self { + self.path = path.into_iter().map(Into::into).collect(); + self + } + + pub fn arguments( + mut self, + arguments: impl IntoIterator, impl Into)>, + ) -> Self { + self.arguments = arguments + .into_iter() + .map(|(name, arg)| (name.into(), arg.into())) + .collect(); + self + } + + pub fn field_path( + mut self, + field_path: impl IntoIterator>, + ) -> Self { + self.field_path = Some(field_path.into_iter().map(Into::into).collect()); + self + } + + pub fn scope(mut self, scope: usize) -> Self { + self.scope = Some(scope); + self + } +} + +impl From for ComparisonValue { + fn from(builder: ColumnValueBuilder) -> Self { + ComparisonValue::Column { + path: builder.path, + name: builder.name, + arguments: builder.arguments, + field_path: builder.field_path, + scope: builder.scope, + } + } +} diff --git a/crates/ndc-test-helpers/src/exists_in_collection.rs b/crates/ndc-test-helpers/src/exists_in_collection.rs index e13826c6..e7a581c0 100644 --- a/crates/ndc-test-helpers/src/exists_in_collection.rs +++ b/crates/ndc-test-helpers/src/exists_in_collection.rs @@ -1,13 +1,19 @@ +use std::collections::BTreeMap; + +use ndc_models::{Argument, ArgumentName, ExistsInCollection, FieldName}; + #[macro_export] macro_rules! related { ($rel:literal) => { $crate::ndc_models::ExistsInCollection::Related { + field_path: Default::default(), relationship: $rel.into(), arguments: Default::default(), } }; ($rel:literal, $args:expr $(,)?) => { $crate::ndc_models::ExistsInCollection::Related { + field_path: Default::default(), relationship: $rel.into(), arguments: $args.into_iter().map(|x| x.into()).collect(), } @@ -29,3 +35,49 @@ macro_rules! 
unrelated { } }; } + +#[derive(Debug)] +pub struct ExistsInNestedCollectionBuilder { + column_name: FieldName, + arguments: BTreeMap, + field_path: Vec, +} + +pub fn exists_in_nested(column_name: impl Into) -> ExistsInNestedCollectionBuilder { + ExistsInNestedCollectionBuilder { + column_name: column_name.into(), + arguments: Default::default(), + field_path: Default::default(), + } +} + +impl ExistsInNestedCollectionBuilder { + pub fn arguments( + mut self, + arguments: impl IntoIterator, impl Into)>, + ) -> Self { + self.arguments = arguments + .into_iter() + .map(|(k, v)| (k.into(), v.into())) + .collect(); + self + } + + pub fn field_path( + mut self, + field_path: impl IntoIterator>, + ) -> Self { + self.field_path = field_path.into_iter().map(Into::into).collect(); + self + } +} + +impl From for ExistsInCollection { + fn from(builder: ExistsInNestedCollectionBuilder) -> Self { + ExistsInCollection::NestedCollection { + column_name: builder.column_name, + arguments: builder.arguments, + field_path: builder.field_path, + } + } +} diff --git a/crates/ndc-test-helpers/src/expressions.rs b/crates/ndc-test-helpers/src/expressions.rs index 6b35ae2a..16aa63fc 100644 --- a/crates/ndc-test-helpers/src/expressions.rs +++ b/crates/ndc-test-helpers/src/expressions.rs @@ -1,5 +1,6 @@ use ndc_models::{ - ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, UnaryComparisonOperator, + ArrayComparison, ComparisonTarget, ComparisonValue, ExistsInCollection, Expression, + RelationshipName, UnaryComparisonOperator, }; pub fn and(operands: I) -> Expression @@ -57,9 +58,39 @@ where } } -pub fn exists(in_collection: ExistsInCollection, predicate: Expression) -> Expression { +pub fn exists( + in_collection: impl Into, + predicate: impl Into, +) -> Expression { Expression::Exists { - in_collection, - predicate: Some(Box::new(predicate)), + in_collection: in_collection.into(), + predicate: Some(Box::new(predicate.into())), + } +} + +pub fn in_related(relationship: impl Into) -> ExistsInCollection { + ExistsInCollection::Related { + field_path: Default::default(), + relationship: relationship.into(), + arguments: Default::default(), + } +} + +pub fn array_contains( + column: impl Into, + value: impl Into, +) -> Expression { + Expression::ArrayComparison { + column: column.into(), + comparison: ArrayComparison::Contains { + value: value.into(), + }, + } +} + +pub fn is_empty(column: impl Into) -> Expression { + Expression::ArrayComparison { + column: column.into(), + comparison: ArrayComparison::IsEmpty, } } diff --git a/crates/ndc-test-helpers/src/groups.rs b/crates/ndc-test-helpers/src/groups.rs new file mode 100644 index 00000000..d0eeff32 --- /dev/null +++ b/crates/ndc-test-helpers/src/groups.rs @@ -0,0 +1,145 @@ +use std::collections::BTreeMap; + +use indexmap::IndexMap; +use ndc_models::{ + Aggregate, Argument, ArgumentName, Dimension, FieldName, GroupExpression, GroupOrderBy, + GroupOrderByElement, Grouping, OrderBy, OrderDirection, PathElement, +}; + +use crate::column::Column; + +#[derive(Clone, Debug, Default)] +pub struct GroupingBuilder { + dimensions: Vec, + aggregates: IndexMap, + predicate: Option, + order_by: Option, + limit: Option, + offset: Option, +} + +pub fn grouping() -> GroupingBuilder { + Default::default() +} + +impl GroupingBuilder { + pub fn dimensions( + mut self, + dimensions: impl IntoIterator>, + ) -> Self { + self.dimensions = dimensions.into_iter().map(Into::into).collect(); + self + } + + pub fn aggregates( + mut self, + aggregates: impl IntoIterator, impl Into)>, 
+ ) -> Self { + self.aggregates = aggregates + .into_iter() + .map(|(name, aggregate)| (name.into(), aggregate.into())) + .collect(); + self + } + + pub fn predicate(mut self, predicate: impl Into) -> Self { + self.predicate = Some(predicate.into()); + self + } + + pub fn order_by(mut self, order_by: impl Into) -> Self { + self.order_by = Some(order_by.into()); + self + } + + pub fn limit(mut self, limit: u32) -> Self { + self.limit = Some(limit); + self + } + + pub fn offset(mut self, offset: u32) -> Self { + self.offset = Some(offset); + self + } +} + +impl From for Grouping { + fn from(value: GroupingBuilder) -> Self { + Grouping { + dimensions: value.dimensions, + aggregates: value.aggregates, + predicate: value.predicate, + order_by: value.order_by, + limit: value.limit, + offset: value.offset, + } + } +} + +#[derive(Clone, Debug)] +pub struct DimensionColumnBuilder { + path: Vec, + column_name: FieldName, + arguments: BTreeMap, + field_path: Option>, +} + +pub fn dimension_column(column: impl Into) -> DimensionColumnBuilder { + let column = column.into(); + DimensionColumnBuilder { + path: column.path, + column_name: column.column, + arguments: column.arguments, + field_path: column.field_path, + } +} + +impl DimensionColumnBuilder { + pub fn path(mut self, path: impl IntoIterator>) -> Self { + self.path = path.into_iter().map(Into::into).collect(); + self + } + + pub fn arguments( + mut self, + arguments: impl IntoIterator, impl Into)>, + ) -> Self { + self.arguments = arguments + .into_iter() + .map(|(name, argument)| (name.into(), argument.into())) + .collect(); + self + } + + pub fn field_path( + mut self, + field_path: impl IntoIterator>, + ) -> Self { + self.field_path = Some(field_path.into_iter().map(Into::into).collect()); + self + } +} + +impl From for Dimension { + fn from(value: DimensionColumnBuilder) -> Self { + Dimension::Column { + path: value.path, + column_name: value.column_name, + arguments: value.arguments, + field_path: value.field_path, + extraction: None, + } + } +} + +/// Produces a consistent ordering for up to 10 dimensions +pub fn ordered_dimensions() -> GroupOrderBy { + GroupOrderBy { + elements: (0..10) + .map(|index| GroupOrderByElement { + order_direction: OrderDirection::Asc, + target: ndc_models::GroupOrderByTarget::Dimension { index }, + }) + .collect(), + } +} diff --git a/crates/ndc-test-helpers/src/lib.rs b/crates/ndc-test-helpers/src/lib.rs index 706cefd6..8843b3c5 100644 --- a/crates/ndc-test-helpers/src/lib.rs +++ b/crates/ndc-test-helpers/src/lib.rs @@ -2,12 +2,16 @@ #![allow(unused_imports)] mod aggregates; +pub use aggregates::*; mod collection_info; +mod column; +pub use column::*; mod comparison_target; mod comparison_value; mod exists_in_collection; mod expressions; mod field; +mod groups; mod object_type; mod order_by; mod path_element; @@ -19,7 +23,7 @@ use std::collections::BTreeMap; use indexmap::IndexMap; use ndc_models::{ - Aggregate, Argument, Expression, Field, OrderBy, OrderByElement, PathElement, Query, + Aggregate, Argument, Expression, Field, FieldName, OrderBy, OrderByElement, PathElement, Query, QueryRequest, Relationship, RelationshipArgument, RelationshipType, }; @@ -33,6 +37,7 @@ pub use comparison_value::*; pub use exists_in_collection::*; pub use expressions::*; pub use field::*; +pub use groups::*; pub use object_type::*; pub use order_by::*; pub use path_element::*; @@ -130,6 +135,7 @@ impl From for QueryRequest { arguments: value.arguments.unwrap_or_default(), collection_relationships: 
value.collection_relationships.unwrap_or_default(), variables: value.variables, + request_arguments: None, } } } @@ -142,6 +148,7 @@ pub struct QueryBuilder { offset: Option, order_by: Option, predicate: Option, + groups: Option, } pub fn query() -> QueryBuilder { @@ -157,6 +164,7 @@ impl QueryBuilder { offset: None, order_by: None, predicate: None, + groups: None, } } @@ -170,11 +178,14 @@ impl QueryBuilder { self } - pub fn aggregates(mut self, aggregates: [(&str, Aggregate); S]) -> Self { + pub fn aggregates( + mut self, + aggregates: impl IntoIterator, impl Into)>, + ) -> Self { self.aggregates = Some( aggregates .into_iter() - .map(|(name, aggregate)| (name.to_owned().into(), aggregate)) + .map(|(name, aggregate)| (name.into(), aggregate.into())) .collect(), ); self @@ -199,6 +210,11 @@ impl QueryBuilder { self.predicate = Some(expression); self } + + pub fn groups(mut self, groups: impl Into) -> Self { + self.groups = Some(groups.into()); + self + } } impl From for Query { @@ -210,6 +226,7 @@ impl From for Query { offset: value.offset, order_by: value.order_by, predicate: value.predicate, + groups: value.groups, } } } diff --git a/crates/ndc-test-helpers/src/object_type.rs b/crates/ndc-test-helpers/src/object_type.rs index 01feb919..f4978ce5 100644 --- a/crates/ndc-test-helpers/src/object_type.rs +++ b/crates/ndc-test-helpers/src/object_type.rs @@ -20,5 +20,6 @@ pub fn object_type( ) }) .collect(), + foreign_keys: Default::default(), } } diff --git a/crates/ndc-test-helpers/src/order_by.rs b/crates/ndc-test-helpers/src/order_by.rs index 9ea8c778..22e9bce3 100644 --- a/crates/ndc-test-helpers/src/order_by.rs +++ b/crates/ndc-test-helpers/src/order_by.rs @@ -5,6 +5,7 @@ macro_rules! asc { order_direction: $crate::ndc_models::OrderDirection::Asc, target: $crate::ndc_models::OrderByTarget::Column { name: $crate::ndc_models::FieldName::new($crate::smol_str::SmolStr::new($name)), + arguments: Default::default(), field_path: None, path: vec![], }, @@ -19,6 +20,7 @@ macro_rules! 
desc { order_direction: $crate::ndc_models::OrderDirection::Desc, target: $crate::ndc_models::OrderByTarget::Column { name: $crate::ndc_models::FieldName::new($crate::smol_str::SmolStr::new($name)), + arguments: Default::default(), field_path: None, path: vec![], }, diff --git a/crates/ndc-test-helpers/src/path_element.rs b/crates/ndc-test-helpers/src/path_element.rs index b0c89d5b..25cc4d5d 100644 --- a/crates/ndc-test-helpers/src/path_element.rs +++ b/crates/ndc-test-helpers/src/path_element.rs @@ -1,16 +1,17 @@ use std::collections::BTreeMap; -use ndc_models::{Expression, PathElement, RelationshipArgument}; +use ndc_models::{Expression, FieldName, PathElement, RelationshipArgument}; #[derive(Clone, Debug)] pub struct PathElementBuilder { relationship: ndc_models::RelationshipName, arguments: Option>, + field_path: Option>, predicate: Option>, } -pub fn path_element(relationship: ndc_models::RelationshipName) -> PathElementBuilder { - PathElementBuilder::new(relationship) +pub fn path_element(relationship: impl Into) -> PathElementBuilder { + PathElementBuilder::new(relationship.into()) } impl PathElementBuilder { @@ -18,6 +19,7 @@ impl PathElementBuilder { PathElementBuilder { relationship, arguments: None, + field_path: None, predicate: None, } } @@ -26,6 +28,14 @@ impl PathElementBuilder { self.predicate = Some(Box::new(expression)); self } + + pub fn field_path( + mut self, + field_path: impl IntoIterator>, + ) -> Self { + self.field_path = Some(field_path.into_iter().map(Into::into).collect()); + self + } } impl From for PathElement { @@ -33,6 +43,7 @@ impl From for PathElement { PathElement { relationship: value.relationship, arguments: value.arguments.unwrap_or_default(), + field_path: value.field_path, predicate: value.predicate, } } diff --git a/crates/ndc-test-helpers/src/query_response.rs b/crates/ndc-test-helpers/src/query_response.rs index 72970bb2..b956a771 100644 --- a/crates/ndc-test-helpers/src/query_response.rs +++ b/crates/ndc-test-helpers/src/query_response.rs @@ -1,5 +1,5 @@ use indexmap::IndexMap; -use ndc_models::{QueryResponse, RowFieldValue, RowSet}; +use ndc_models::{FieldName, Group, QueryResponse, RowFieldValue, RowSet}; #[derive(Clone, Debug, Default)] pub struct QueryResponseBuilder { @@ -30,6 +30,7 @@ impl QueryResponseBuilder { self.row_sets.push(RowSet { aggregates: None, rows: Some(vec![]), + groups: Default::default(), }); self } @@ -45,6 +46,7 @@ impl From for QueryResponse { pub struct RowSetBuilder { aggregates: IndexMap, rows: Vec>, + groups: Option>, } impl RowSetBuilder { @@ -54,13 +56,10 @@ impl RowSetBuilder { pub fn aggregates( mut self, - aggregates: impl IntoIterator)>, + aggregates: impl IntoIterator, impl Into)>, ) -> Self { - self.aggregates.extend( - aggregates - .into_iter() - .map(|(k, v)| (k.to_string().into(), v.into())), - ); + self.aggregates + .extend(aggregates.into_iter().map(|(k, v)| (k.into(), v.into()))); self } @@ -89,10 +88,24 @@ impl RowSetBuilder { ); self } + + pub fn groups( + mut self, + groups: impl IntoIterator>, + ) -> Self { + self.groups = Some(groups.into_iter().map(Into::into).collect()); + self + } } impl From for RowSet { - fn from(RowSetBuilder { aggregates, rows }: RowSetBuilder) -> Self { + fn from( + RowSetBuilder { + aggregates, + rows, + groups, + }: RowSetBuilder, + ) -> Self { RowSet { aggregates: if aggregates.is_empty() { None @@ -100,6 +113,7 @@ impl From for RowSet { Some(aggregates) }, rows: if rows.is_empty() { None } else { Some(rows) }, + groups, } } } @@ -117,3 +131,16 @@ pub fn 
query_response() -> QueryResponseBuilder { pub fn row_set() -> RowSetBuilder { Default::default() } + +pub fn group( + dimensions: impl IntoIterator>, + aggregates: impl IntoIterator, impl Into)>, +) -> Group { + Group { + dimensions: dimensions.into_iter().map(Into::into).collect(), + aggregates: aggregates + .into_iter() + .map(|(name, value)| (name.into(), value.into())) + .collect(), + } +} diff --git a/crates/ndc-test-helpers/src/relationships.rs b/crates/ndc-test-helpers/src/relationships.rs index 6166e809..053bb7c7 100644 --- a/crates/ndc-test-helpers/src/relationships.rs +++ b/crates/ndc-test-helpers/src/relationships.rs @@ -4,7 +4,7 @@ use ndc_models::{Relationship, RelationshipArgument, RelationshipType}; #[derive(Clone, Debug)] pub struct RelationshipBuilder { - column_mapping: BTreeMap, + column_mapping: BTreeMap>, relationship_type: RelationshipType, target_collection: ndc_models::CollectionName, arguments: BTreeMap, @@ -12,17 +12,22 @@ pub struct RelationshipBuilder { pub fn relationship( target: &str, - column_mapping: [(&str, &str); S], + column_mapping: [(&str, &[&str]); S], ) -> RelationshipBuilder { RelationshipBuilder::new(target, column_mapping) } impl RelationshipBuilder { - pub fn new(target: &str, column_mapping: [(&str, &str); S]) -> Self { + pub fn new(target: &str, column_mapping: [(&str, &[&str]); S]) -> Self { RelationshipBuilder { column_mapping: column_mapping .into_iter() - .map(|(source, target)| (source.to_owned().into(), target.to_owned().into())) + .map(|(source, target)| { + ( + source.to_owned().into(), + target.iter().map(|s| s.to_owned().into()).collect(), + ) + }) .collect(), relationship_type: RelationshipType::Array, target_collection: target.to_owned().into(), diff --git a/crates/test-helpers/src/arb_bson.rs b/crates/test-helpers/src/arb_bson.rs index 295e91c6..066d4027 100644 --- a/crates/test-helpers/src/arb_bson.rs +++ b/crates/test-helpers/src/arb_bson.rs @@ -1,7 +1,7 @@ use std::time::SystemTime; -use mongodb::bson::{self, oid::ObjectId, Bson}; -use proptest::{collection, prelude::*, sample::SizeRange}; +use mongodb::bson::{self, oid::ObjectId, spec::BinarySubtype, Binary, Bson}; +use proptest::{array, collection, prelude::*, sample::SizeRange}; pub fn arb_bson() -> impl Strategy { arb_bson_with_options(Default::default()) @@ -56,6 +56,7 @@ pub fn arb_bson_with_options(options: ArbBsonOptions) -> impl Strategy(), any::()) .prop_map(|(time, increment)| Bson::Timestamp(bson::Timestamp { time, increment })), arb_binary().prop_map(Bson::Binary), + arb_uuid().prop_map(Bson::Binary), (".*", "i?l?m?s?u?x?").prop_map(|(pattern, options)| Bson::RegularExpression( bson::Regex { pattern, options } )), @@ -120,8 +121,21 @@ fn arb_bson_document_recursive( fn arb_binary() -> impl Strategy { let binary_subtype = any::().prop_map(Into::into); - let bytes = collection::vec(any::(), 1..256); - (binary_subtype, bytes).prop_map(|(subtype, bytes)| bson::Binary { subtype, bytes }) + binary_subtype.prop_flat_map(|subtype| { + let bytes = match subtype { + BinarySubtype::Uuid => array::uniform16(any::()).prop_map_into().boxed(), + _ => collection::vec(any::(), 1..256).boxed(), + }; + bytes.prop_map(move |bytes| Binary { subtype, bytes }) + }) +} + +fn arb_uuid() -> impl Strategy { + let bytes = array::uniform16(any::()); + bytes.prop_map(|bytes| { + let uuid = bson::Uuid::from_bytes(bytes); + bson::Binary::from_uuid(uuid) + }) } pub fn arb_datetime() -> impl Strategy { diff --git a/crates/test-helpers/src/arb_plan_type.rs b/crates/test-helpers/src/arb_plan_type.rs 
index 0ffe5ac1..4dfdff84 100644 --- a/crates/test-helpers/src/arb_plan_type.rs +++ b/crates/test-helpers/src/arb_plan_type.rs @@ -1,5 +1,5 @@ use configuration::MongoScalarType; -use ndc_query_plan::{ObjectType, Type}; +use ndc_query_plan::{ObjectField, ObjectType, Type}; use proptest::{collection::btree_map, prelude::*}; use crate::arb_type::arb_bson_scalar_type; @@ -14,9 +14,18 @@ pub fn arb_plan_type() -> impl Strategy> { any::>(), btree_map(any::().prop_map_into(), inner, 1..=10) ) - .prop_map(|(name, fields)| Type::Object(ObjectType { + .prop_map(|(name, field_types)| Type::Object(ObjectType { name: name.map(|n| n.into()), - fields + fields: field_types + .into_iter() + .map(|(name, t)| ( + name, + ObjectField { + r#type: t, + parameters: Default::default() + } + )) + .collect(), })) ] }) diff --git a/crates/test-helpers/src/arb_type.rs b/crates/test-helpers/src/arb_type.rs index 00c2f6e8..4b7a5b90 100644 --- a/crates/test-helpers/src/arb_type.rs +++ b/crates/test-helpers/src/arb_type.rs @@ -1,7 +1,7 @@ use configuration::schema::Type; use enum_iterator::Sequence as _; use mongodb_support::BsonScalarType; -use proptest::prelude::*; +use proptest::{prelude::*, string::string_regex}; pub fn arb_bson_scalar_type() -> impl Strategy { (0..BsonScalarType::CARDINALITY) @@ -11,7 +11,10 @@ pub fn arb_bson_scalar_type() -> impl Strategy { pub fn arb_type() -> impl Strategy { let leaf = prop_oneof![ arb_bson_scalar_type().prop_map(Type::Scalar), - any::().prop_map(Type::Object) + arb_object_type_name().prop_map(Type::Object), + arb_object_type_name().prop_map(|name| Type::Predicate { + object_type_name: name.into() + }) ]; leaf.prop_recursive(3, 10, 10, |inner| { prop_oneof![ @@ -20,3 +23,12 @@ pub fn arb_type() -> impl Strategy { ] }) } + +fn arb_object_type_name() -> impl Strategy { + string_regex(r#"[a-zA-Z_][a-zA-Z0-9_]*"#) + .unwrap() + .prop_filter( + "object type names must not collide with scalar type names", + |name| !enum_iterator::all::().any(|t| t.bson_name() == name), + ) +} diff --git a/crates/test-helpers/src/configuration.rs b/crates/test-helpers/src/configuration.rs new file mode 100644 index 00000000..42ce4c76 --- /dev/null +++ b/crates/test-helpers/src/configuration.rs @@ -0,0 +1,71 @@ +use configuration::Configuration; +use ndc_test_helpers::{array_of, collection, named_type, object_type}; + +/// Configuration for a MongoDB database that resembles MongoDB's sample_mflix test data set. 
+pub fn mflix_config() -> Configuration { + Configuration { + collections: [collection("comments"), collection("movies")].into(), + object_types: [ + ( + "comments".into(), + object_type([ + ("_id", named_type("ObjectId")), + ("movie_id", named_type("ObjectId")), + ("name", named_type("String")), + ]), + ), + ( + "credits".into(), + object_type([("director", named_type("String"))]), + ), + ( + "movies".into(), + object_type([ + ("_id", named_type("ObjectId")), + ("credits", named_type("credits")), + ("genres", array_of(named_type("String"))), + ("imdb", named_type("Imdb")), + ("lastUpdated", named_type("String")), + ("num_mflix_comments", named_type("Int")), + ("rated", named_type("String")), + ("released", named_type("Date")), + ("runtime", named_type("Int")), + ("title", named_type("String")), + ("writers", array_of(named_type("String"))), + ("year", named_type("Int")), + ("tomatoes", named_type("Tomatoes")), + ]), + ), + ( + "Imdb".into(), + object_type([ + ("rating", named_type("Double")), + ("votes", named_type("Int")), + ("id", named_type("Int")), + ]), + ), + ( + "Tomatoes".into(), + object_type([ + ("critic", named_type("TomatoesCriticViewer")), + ("viewer", named_type("TomatoesCriticViewer")), + ("lastUpdated", named_type("Date")), + ]), + ), + ( + "TomatoesCriticViewer".into(), + object_type([ + ("rating", named_type("Double")), + ("numReviews", named_type("Int")), + ("meter", named_type("Int")), + ]), + ), + ] + .into(), + functions: Default::default(), + procedures: Default::default(), + native_mutations: Default::default(), + native_queries: Default::default(), + options: Default::default(), + } +} diff --git a/crates/test-helpers/src/lib.rs b/crates/test-helpers/src/lib.rs index e9ac03ea..d77f5c81 100644 --- a/crates/test-helpers/src/lib.rs +++ b/crates/test-helpers/src/lib.rs @@ -1,6 +1,7 @@ pub mod arb_bson; mod arb_plan_type; pub mod arb_type; +pub mod configuration; use enum_iterator::Sequence as _; use mongodb_support::ExtendedJsonMode; diff --git a/docs/building.md b/docs/building.md new file mode 100644 index 00000000..ea820668 --- /dev/null +++ b/docs/building.md @@ -0,0 +1,58 @@ +# Building the MongoDB Data Connector + +## Prerequisites + +- [Nix][Determinate Systems Nix Installer] +- [Docker](https://docs.docker.com/engine/install/) +- [skopeo](https://github.com/containers/skopeo) (optional) + +The easiest way to set up build and development dependencies for this project is +to use Nix. If you don't already have Nix we recommend the [Determinate Systems +Nix Installer][] which automatically applies settings required by this project. + +[Determinate Systems Nix Installer]: https://github.com/DeterminateSystems/nix-installer/blob/main/README.md + +For more on project setup, and resources provided by the development shell see +[development](./development.md). + +## Building + +To build the MongoDB connector run, + +```sh +$ nix build --print-build-logs && cp result/bin/mongodb-connector +``` + +To cross-compile statically-linked binaries for x86_64 or ARM for Linux run, + +```sh +$ nix build .#mongo-connector-x86_64-linux --print-build-logs && cp result/bin/mongodb-connector +$ nix build .#mongo-connector-aarch64-linux --print-build-logs && cp result/bin/mongodb-connector +``` + +The Nix configuration outputs Docker images in `.tar.gz` files. You can use +`docker load -i` to install these to the local machine's docker daemon. But it +may be more helpful to use `skopeo` for this purpose so that you can apply +a chosen tag, or override the image name. 
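+
+As a sketch of the `docker load` route (here `result` is the default out-link that
+`nix build` creates in the current directory):
+
+```sh
+$ nix build .#docker --print-build-logs
+$ docker load -i result
+```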
+
+To build and install a Docker image locally (you can change
+`mongodb-connector:1.2.3` to whatever image name and tag you prefer),
+
+```sh
+$ nix build .#docker --print-build-logs \
+ && skopeo --insecure-policy copy docker-archive:result docker-daemon:mongodb-connector:1.2.3
+```
+
+To build a Docker image with a cross-compiled ARM binary,
+
+```sh
+$ nix build .#docker-aarch64-linux --print-build-logs \
+ && skopeo --insecure-policy copy docker-archive:result docker-daemon:mongodb-connector:1.2.3
+```
+
+If you don't want to install `skopeo`, you can run it through Nix: `nix run
+nixpkgs#skopeo -- --insecure-policy copy docker-archive:result docker-daemon:mongodb-connector:1.2.3`
+
+## Pre-built Docker Images
+
+See [docker-images](./docker-images.md)
diff --git a/docs/code-of-conduct.md b/docs/code-of-conduct.md
new file mode 100644
index 00000000..03c982fd
--- /dev/null
+++ b/docs/code-of-conduct.md
@@ -0,0 +1,60 @@
+# Hasura GraphQL Engine Community Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make
+participation in our project and our community a harassment-free experience for everyone, regardless of age, body size,
+disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education,
+socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming, inclusive and gender-neutral language (example: instead of "Hey guys", you could use "Hey folks" or
+  "Hey all")
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take
+appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits,
+issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the
+project or its community. Examples of representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed representative at an online or offline
+event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at
+community@hasura.io. All complaints will be reviewed and investigated and will result in a response that is deemed
+necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to
+the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent
+repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
\ No newline at end of file
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 100644
index 00000000..bd5036b8
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1,33 @@
+# Contributing
+
+_First_: if you feel insecure about how to start contributing, feel free to ask us on our
+[Discord server](https://discordapp.com/invite/hasura) in the #contrib channel. You can also just go ahead with your contribution and we'll give you feedback. Don't worry - the worst that can happen is that you'll be politely asked to change something. We appreciate any contributions, and we don't want a wall of rules to stand in the way of that.
+
+However, for those individuals who want a bit more guidance on the best way to contribute to the project, read on. This document will cover what we're looking for. By addressing the points below, the chances that we can quickly merge or address your contributions will increase.
+
+## 1. Code of conduct
+
+Please follow our [Code of conduct](./code-of-conduct.md) in the context of any contributions made to Hasura.
+
+## 2. CLA
+
+For all contributions, a CLA (Contributor License Agreement) needs to be signed
+[here](https://cla-assistant.io/hasura/ndc-mongodb) before (or after) the pull request has been submitted. A bot will prompt contributors to sign the CLA via a pull request comment, if necessary.
+
+## 3. Ways of contributing
+
+### Reporting an Issue
+
+- Make sure you test against the latest released cloud version. It is possible that we may have already fixed the bug you're experiencing.
+- Provide steps to reproduce the issue, including database (e.g. MongoDB) version and Hasura DDN version.
+- Please include logs, if relevant.
+- Create an [issue](https://github.com/hasura/ndc-mongodb/issues/new/choose).
+
+### Working on an issue
+
+- We use the [fork-and-branch git workflow](https://blog.scottlowe.org/2015/01/27/using-fork-branch-git-workflow/).
+- Please make sure there is an issue associated with the work that you're doing.
+- If you're working on an issue, please comment that you are doing so to prevent duplicate work by others.
+- See [`development.md`](./development.md) for instructions on how to build, run, and test the connector.
+- If possible, format code with `rustfmt`. If your editor has a code formatting feature it probably does the right thing.
+- If you're up to it, we welcome updates to `CHANGELOG.md`. Notes on the change in your PR should go in the "Unreleased" section (see the example below).
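+
+As a rough sketch (the section names follow the changelog's existing headings, and
+the PR number is a placeholder), an "Unreleased" entry might look like this:
+
+```markdown
+## [Unreleased]
+
+### Fixed
+
+- Brief, user-facing description of the change (#NNN)
+```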
diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000..037bc6cb --- /dev/null +++ b/docs/development.md @@ -0,0 +1,353 @@ +# MongoDB Data Connector Development + +These are instructions for building and running the MongoDB Data Connector - and +supporting services - locally for purposes of working on the connector itself. + +This repo is set up to run all necessary services for interactive and +integration testing in docker containers with pre-populated MongoDB databases +with just one command, `just up`, if you have the prerequisites installed. +Repeating that command restarts services as necessary to apply code or +configuration changes. + +## Prerequisites + +- [Nix][Determinate Systems Nix Installer] +- [Docker](https://docs.docker.com/engine/install/) +- [Just](https://just.systems/man/en/) (optional) + +The easiest way to set up build and development dependencies for this project is +to use Nix. If you don't already have Nix we recommend the [Determinate Systems +Nix Installer][] which automatically applies settings required by this project. + +[Determinate Systems Nix Installer]: https://github.com/DeterminateSystems/nix-installer/blob/main/README.md + +You may optionally install `just`. If you are using a Nix develop shell it +provides `just` automatically. (See "The development shell" below). + +If you prefer to manage dependencies yourself you will need, + +* Rust via Rustup +* MongoDB `>= 6` +* OpenSSL development files + +## Quickstart + +To run everything you need run this command to start services in Docker +containers: + +```sh +$ just up +``` + +Next access the GraphQL interface at http://localhost:7100/ + +Run the above command again to restart any services that are affected by code +changes or configuration changes. + +## The development shell + +This project uses a development shell configured in `flake.nix` that automatically +loads specific version of Rust along with all other project dependencies. The +development shell provides: + +- a Rust toolchain: `cargo`, `cargo-clippy`, `rustc`, `rustfmt`, etc. +- `cargo-insta` for reviewing test snapshots +- `just` +- `mongosh` +- `arion` which is a Nix frontend for docker-compose +- The DDN CLI +- The MongoDB connector plugin for the DDN CLI which is automatically rebuilt after code changes in this repo (can be run directly with `mongodb-cli-plugin`) + +Development shell features are specified in the `devShells` definition in +`flake.nix`. You can add dependencies by [looking up the Nix package +name](https://search.nixos.org/), and adding the package name to the +`nativeBuildInputs` list. + +The simplest way to start a development shell is with this command: + +```sh +$ nix develop +``` + +If you are going to be doing a lot of work on this project it can be more +convenient to set up [direnv][] which automatically links project dependencies +in your shell when you cd to the project directory, and automatically reverses +all shell modifications when you navigate to another directory. You can also set +up direnv integration in your editor to get your editor LSP to use the same +version of Rust that the project uses. + +[direnv]: https://direnv.net/ + +## Running and Testing + +There is a `justfile` for getting started quickly. You can use its recipes to +run relevant services locally including the MongoDB connector itself, a MongoDB +database server, and the Hasura GraphQL Engine. 
Use these commands: + +```sh +just up # start services; run this again to restart after making code changes +just down # stop services +just down-volumes # stop services, and remove MongoDB database volume +just logs # see service logs +just test # run unit and integration tests +just # list available recipes +``` + +Integration tests run in an independent set of ephemeral docker containers. + +The `just` command is provided automatically if you are using the development +shell. Or you can install it yourself. + +The typical workflow for interactive testing (testing by hand) is to interact +with the system through the Hasura GraphQL Engine's GraphQL UI at +http://localhost:7100/. If you can get insight into what the connector is doing +by reading the logs which you can access by running `just logs`, or via the +Jaeger UI at http://localhost:16686/. + +### Running with a different MongoDB version + +Override the MongoDB version by assigning a Docker image name to the environment +variable `MONGODB_IMAGE`. For example, + + $ just down-volumes # delete potentially-incompatible MongoDB data + $ MONGODB_IMAGE=mongo:6 arion up -d + +Or run integration tests against a specific MongoDB version, + + $ MONGODB_IMAGE=mongo:6 just test-integration + +There is a predefined just recipe that runs integration tests using MongoDB +versions 5, 6, and 7. There is some functionality that does not work in MongoDB +v5 so some tests are skipped when running that MongoDB version. + +### Where to find the tests + +Unit tests are found in conditionally-compiled test modules in the same Rust +source code files with the code that the tests test. + +Integration tests are found in `crates/integration-tests/src/tests/` + +### Writing Integration Tests + +Integration tests are run with `just test-integration`. Typically integration +tests run a GraphQL query, and compare the response to a saved snapshot. Here is +an example: + +```rust +#[tokio::test] +async fn filters_by_date() -> anyhow::Result<()> { + assert_yaml_snapshot!( + graphql_query( + r#" + query ($dateInput: Date) { + movies( + order_by: {id: Asc}, + where: {released: {_gt: $dateInput}} + ) { + title + released + } + } + "# + ) + .variables(json!({ "dateInput": "2016-03-01T00:00Z" })) + .run() + .await? + ); + Ok(()) +} +``` + +On the first test run after a test is created or changed the test runner will +create a new snapshot file with the GraphQL response. To make the test pass it +is necessary to approve the snapshot (if the response is correct). To do that +run, + +```sh +$ cargo insta review +``` + +Approved snapshot files must be checked into version control. + +Please be aware that MongoDB query results do not have consistent ordering. It +is important to have `order_by` clauses in every test that produces more than +one result to explicitly order everything. Otherwise tests will fail when the +order of a response does not match the exact order of data in an approved +snapshot. + +## Building + +For instructions on building binaries or Docker images see [building.md](./building.md). + +## Working with Test Data + +### Predefined MongoDB databases + +This repo includes fixture data and configuration to provide a fully-configured +data graph for testing. + +There are three provided MongoDB databases. Development services run three +connector instances to provide access to each of those. 
Listing these by Docker +Compose service names: + +- `connector` serves the [sample_mflix][] database +- `connector-chinook` serves a version of the [chinook][] sample database that has been adapted for MongoDB +- `connector-test-cases` serves the test_cases database - if you want to set up data for integration tests put it in this database + +[sample_mflix]: https://www.mongodb.com/docs/atlas/sample-data/sample-mflix/ +[chinook]: https://github.com/lerocha/chinook-database + +Those databases are populated by scripts in `fixtures/mongodb/`. There is +a subdirectory with fixture data for each database. + +Integration tests use an ephemeral MongoDB container so a fresh database will be +populated with those fixtures on every test run. + +Interactive services (the ones you get with `just up`) use a persistent volume +for MongoDB databases. To get updated data after changing fixtures, or any time +you want to get a fresh database, you will have to delete the volume and +recreate the MongoDB container. To do that run, + +```sh +$ just down-volumes +$ just up +``` + +### Connector Configuration + +If you followed the Quickstart in [README.md](../README.md) then you got +connector configuration in your data graph project in +`app/connector//`. This repo provides predefined connector +configurations so you don't have to create your own during development. + +As mentioned in the previous section development test services run three MongoDB +connector instances. There is a separate configuration directory for each +instance. Those are in, + +- `fixtures/hasura/sample_mflix/connector/` +- `fixtures/hasura/chinook/connector/` +- `fixtures/hasura/test_cases/connector/` + +Connector instances are automatically restarted with updated configuration when +you run `just up`. + +If you make changes to MongoDB databases you may want to run connector +introspection to automatically update configurations. See the specific +instructions in the [fixtures readme](../fixtures/hasura/README.md). + +### DDN Metadata + +The Hasura GraphQL Engine must be configured with DDN metadata which is +configured in `.hml` files. Once again this repo provides configuration in +`fixtures/hasura/`. + +If you have made changes to MongoDB fixture data or to connector configurations +you may want to update metadata using the DDN CLI by querying connectors. +Connectors must be restarted with updated configurations before you do this. For +specific instructions see the [fixtures readme](../fixtures/hasura/README.md). + +The Engine will automatically restart with updated configuration after any +changes to `.hml` files when you run `just up`. + +## Docker Compose Configuration + +The [`justfile`](../justfile) recipes delegate to arion which is a frontend for +docker-compose that adds a layer of convenience where it can easily load +connector code changes. If you are using the development shell you can run +`arion` commands directly. 
They mostly work just like `docker-compose` commands: + +To start all services run: + + $ arion up -d + +To recompile and restart the connector after code changes run: + + $ arion up -d connector + +The arion configuration runs these services: + +- connector: the MongoDB data connector agent defined in this repo serving the sample_mflix database (port 7130) +- two more instances of the connector - one connected to the chinook sample database, the other to a database of ad-hoc data that is queried by integration tests (ports 7131 & 7132) +- mongodb (port 27017) +- Hasura GraphQL Engine (HGE) (port 7100) +- a stubbed authentication server +- jaeger to collect logs (see UI at http://localhost:16686/) + +Connect to the HGE GraphiQL UI at http://localhost:7100/ + +Instead of a `docker-compose.yaml` configuration is found in +`arion-compose.nix`. That file imports from modular configurations in the +`arion-compose/` directory. Here is a quick breakdown of those files: + +``` +arion-compose.nix -- entrypoint for interactive services configuration +arion-pkgs.nix -- defines the `pkgs` variable that is passed as an argument to other arion files +arion-compose +├── default.nix -- arion-compose.nix delegates to the function exported from this file +├── integration-tests.nix -- entrypoint for integration test configuration +├── integration-test-services.nix -- high-level service configurations used by interactive services, and by integration tests +├── fixtures +│ └── mongodb.nix -- provides a dictionary of MongoDB fixture data directories +└── services -- each file here exports a function that configures a specific service + ├── connector.nix -- configures the MongoDB connector with overridable settings + ├── dev-auth-webhook.nix -- stubbed authentication server + ├── engine.nix -- Hasura GraphQL Engine + ├── integration-tests.nix -- integration test runner + ├── jaeger.nix -- OpenTelemetry trace collector + └── mongodb.nix -- MongoDB database server +``` + +## Project Maintenance Notes + +### Updating GraphQL Engine for integration tests + +It's important to keep the GraphQL Engine version updated to make sure that the +connector is working with the latest engine version. To update run, + +```sh +$ nix flake update graphql-engine-source +``` + +Then commit the changes to `flake.lock` to version control. + +A specific engine version can be specified by editing `flake.lock` instead of +running the above command like this: + +```diff + graphql-engine-source = { +- url = "github:hasura/graphql-engine"; ++ url = "github:hasura/graphql-engine/"; + flake = false; + }; +``` + +### Updating Rust version + +Updating the Rust version used in the Nix build system requires two steps (in +any order): + +- update `rust-overlay` which provides Rust toolchains +- edit `rust-toolchain.toml` to specify the desired toolchain version + +To update `rust-overlay` run, + +```sh +$ nix flake update rust-overlay +``` + +If you are using direnv to automatically apply the nix dev environment note that +edits to `rust-toolchain.toml` will not automatically update your environment. +You can make a temporary edit to `flake.nix` (like adding a space somewhere) +which will trigger an update, and then you can revert the change. 
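+
+For reference, the pinned toolchain is declared in `rust-toolchain.toml`. A minimal
+sketch of that file (the channel value shown here is illustrative, not the project's
+actual pin):
+
+```toml
+[toolchain]
+channel = "1.80.0"
+```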
+ +### Updating other project dependencies + +You can update all dependencies declared in `flake.nix` at once by running, + +```sh +$ nix flake update +``` + +This will update `graphql-engine-source` and `rust-overlay` as described above, +and will also update `advisory-db` to get updated security notices for cargo +dependencies, `nixpkgs` to get updates to openssl. diff --git a/docs/docker-images.md b/docs/docker-images.md new file mode 100644 index 00000000..3a4acdce --- /dev/null +++ b/docs/docker-images.md @@ -0,0 +1,13 @@ +# MongoDB Data Connector Docker Images + +The DDN CLI can automatically create a Docker configuration for you. But if you +want to access connector Docker images directly they are available from as +`ghcr.io/hasura/ndc-mongodb`. For example, + +```sh +$ docker run ghcr.io/hasura/ndc-mongodb:v1.1.0 +``` + +The Docker images are multi-arch, supporting amd64 and arm64 Linux. + +A listing of available image versions can be seen [here](https://github.com/hasura/ndc-mongodb/pkgs/container/ndc-mongodb). diff --git a/docs/limitations.md b/docs/limitations.md new file mode 100644 index 00000000..c2349888 --- /dev/null +++ b/docs/limitations.md @@ -0,0 +1,5 @@ +# Limitations of the MongoDB Data Connector + +- Filtering and sorting by scalar values in arrays is not yet possible. APIPG-294 +- Fields with names that begin with a dollar sign ($) or that contain dots (.) currently cannot be selected. NDC-432 +- Referencing relations in mutation requests does not work. NDC-157 diff --git a/docs/pull_request_template.md b/docs/pull_request_template.md deleted file mode 100644 index 22eeddf0..00000000 --- a/docs/pull_request_template.md +++ /dev/null @@ -1,34 +0,0 @@ -## Describe your changes - -## Issue ticket number and link - -_(if you have one)_ - -## Changelog - -- Add a changelog entry (in the "Changelog entry" section below) if the changes in this PR have any user-facing impact. -- If no changelog is required ignore/remove this section and add a `no-changelog-required` label to the PR. - -### Type -_(Select only one. In case of multiple, choose the most appropriate)_ -- [ ] highlight -- [ ] enhancement -- [ ] bugfix -- [ ] behaviour-change -- [ ] performance-enhancement -- [ ] security-fix - - -### Changelog entry - - -_Replace with changelog entry_ - - - - diff --git a/docs/release-checklist.md b/docs/release-checklist.md new file mode 100644 index 00000000..ab6208d8 --- /dev/null +++ b/docs/release-checklist.md @@ -0,0 +1,170 @@ +# Release Checklist + +## 1. Version bump PR + +Create a PR in the MongoDB connector repository with these changes: + +- update the `version` property in `Cargo.toml` (in the workspace root only). For example, `version = "1.5.0"` +- update `CHANGELOG.md`, add a heading under `## [Unreleased]` with the new version number and date. For example, `## [1.5.0] - 2024-12-05` + - If any of the "Added", "Fixed", "Changed" sections are empty then delete the heading. +- update `Cargo.lock` by running `cargo check` + +## 2. Tag + +After the above PR is merged to `main` tag that commit. For example, + +```sh +$ git tag v1.5.0 +$ git push --tags +``` + +## 3. Publish release on Github + +Pushing the tag should trigger a Github action that automatically creates +a draft release in the Github project with a changelog and binaries. (Released +docker images are pushed directly to the ghcr.io registry) + +Edit the draft release, and click "Publish release" + +## 4. 
CLI Plugins Index PR + +Create a PR on https://github.com/hasura/cli-plugins-index with a title like +"Release MongoDB version 1.5.0" + +This PR requires URLs and hashes for the CLI plugin for each supported platform. +Hashes are listed in the `sha256sum` asset on the Github release. + +Create a new file called `plugins/ndc-mongodb//manifest.yaml`. The +plugin version number is the same as the connector version. For example, +`plugins/ndc-mongodb/v1.5.0/manifest.yaml`. Include URLs to binaries from the +Github release with matching hashes. + +Here is an example of what the new file should look like, + +```yaml +name: ndc-mongodb +version: "v1.5.0" +shortDescription: "CLI plugin for Hasura ndc-mongodb" +homepage: https://hasura.io/connectors/mongodb +platforms: + - selector: darwin-arm64 + uri: "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/mongodb-cli-plugin-aarch64-apple-darwin" + sha256: "449c75337cd5030074a2adc4fd4e85a677454867dd462827d894a907e6fe2031" + bin: "hasura-ndc-mongodb" + files: + - from: "./mongodb-cli-plugin-aarch64-apple-darwin" + to: "hasura-ndc-mongodb" + - selector: linux-arm64 + uri: "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/mongodb-cli-plugin-aarch64-unknown-linux-musl" + sha256: "719f8c26237f7af7e7827d8f58a7142b79aa00a96d7be5d9e178898a20cbcb7c" + bin: "hasura-ndc-mongodb" + files: + - from: "./mongodb-cli-plugin-aarch64-unknown-linux-musl" + to: "hasura-ndc-mongodb" + - selector: darwin-amd64 + uri: "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/mongodb-cli-plugin-x86_64-apple-darwin" + sha256: "4cea92e4dee32c604baa7f9829152b755edcdb8160f39cf699f3cb5a62d3dc50" + bin: "hasura-ndc-mongodb" + files: + - from: "./mongodb-cli-plugin-x86_64-apple-darwin" + to: "hasura-ndc-mongodb" + - selector: windows-amd64 + uri: "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/mongodb-cli-plugin-x86_64-pc-windows-msvc.exe" + sha256: "a7d1117cdd6e792673946e342292e525d50a18cc833c3150190afeedd06e9538" + bin: "hasura-ndc-mongodb.exe" + files: + - from: "./mongodb-cli-plugin-x86_64-pc-windows-msvc.exe" + to: "hasura-ndc-mongodb.exe" + - selector: linux-amd64 + uri: "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/mongodb-cli-plugin-x86_64-unknown-linux-musl" + sha256: "c1019d5c3dc4c4f1e39f683b590dbee3ec34929e99c97b303c6d312285a316c1" + bin: "hasura-ndc-mongodb" + files: + - from: "./mongodb-cli-plugin-x86_64-unknown-linux-musl" + to: "hasura-ndc-mongodb" +``` + +Values that should change for each release are, + +- `.version` +- `.platforms.[].uri` +- `.platforms.[].sha256` + +## 5. NDC Hub PR + +Create a PR on https://github.com/hasura/ndc-hub with a title like "Release +MongoDB version 1.5.0" + +### Update registry metadata + +Edit `registry/hasura/mongodb/metadata.json` + +- change `.overview.latest_version` to the new version, for example `v1.5.0` +- prepend an entry to the list in `.source_code.version` with a value like this: + +```json +{ + "tag": "", + "hash": "", + "is_verified": true +}, +``` + +For example, + +```json +{ + "tag": "v1.5.0", + "hash": "b95da1815a9b686e517aa78f677752e36e0bfda0", + "is_verified": true +}, +``` + +### Add connector packaging info + +Create a new file with a name of the form, +`registry/hasura/mongodb/releases//connector-packaging.json`. 
For +example, `registry/hasura/mongodb/releases/v1.5.0/connector-packaging.json` + +The content should have this format, + +```json +{ + "version": "", + "uri": "https://github.com/hasura/ndc-mongodb/releases/download//connector-definition.tgz", + "checksum": { + "type": "sha256", + "value": "" + }, + "source": { + "hash": "" + }, + "test": { + "test_config_path": "../../tests/test-config.json" + } +} +``` + +The content hash for `connector-definition.tgz` is found in the `sha256sum` file +on the Github release. + +The commit hash is the same as in the previous step. + +For example, + +```json +{ + "version": "v1.5.0", + "uri": "https://github.com/hasura/ndc-mongodb/releases/download/v1.5.0/connector-definition.tgz", + "checksum": { + "type": "sha256", + "value": "7821513fcdc1a2689a546f20a18cdc2cce9fe218dc8506adc86eb6a2a3b256a9" + }, + "source": { + "hash": "b95da1815a9b686e517aa78f677752e36e0bfda0" + }, + "test": { + "test_config_path": "../../tests/test-config.json" + } +} +``` diff --git a/docs/security.md b/docs/security.md new file mode 100644 index 00000000..495d8f2d --- /dev/null +++ b/docs/security.md @@ -0,0 +1,33 @@ +# Security + +## Reporting Vulnerabilities + +We’re extremely grateful for security researchers and users that report vulnerabilities to the Hasura Community. All reports are thoroughly investigated by a set of community volunteers and the Hasura team. + +To report a security issue, please email us at [security@hasura.io](mailto:security@hasura.io) with all the details, attaching all necessary information. + +### When Should I Report a Vulnerability? + +- You think you have discovered a potential security vulnerability in the Hasura GraphQL Engine or related components. +- You are unsure how a vulnerability affects the Hasura GraphQL Engine. +- You think you discovered a vulnerability in another project that Hasura GraphQL Engine depends on (e.g. Heroku, Docker, etc). +- You want to report any other security risk that could potentially harm Hasura GraphQL Engine users. + +### When Should I NOT Report a Vulnerability? + +- You need help tuning Hasura GraphQL Engine components for security. +- You need help applying security related updates. +- Your issue is not security related. + +## Security Vulnerability Response + +Each report is acknowledged and analyzed by the project's maintainers and the security team within 3 working days. + +The reporter will be kept updated at every stage of the issue's analysis and resolution (triage -> fix -> release). + +## Public Disclosure Timing + +A public disclosure date is negotiated by the Hasura product security team and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. We expect the time-frame between a report to a public disclosure to typically be in the order of 7 days. The Hasura GraphQL Engine maintainers and the security team will take the final call on setting a disclosure date. + +(Some sections have been inspired and adapted from +[https://github.com/kubernetes/website/blob/master/content/en/docs/reference/issues-security/security.md](https://github.com/kubernetes/website/blob/master/content/en/docs/reference/issues-security/security.md). 
\ No newline at end of file diff --git a/docs/support.md b/docs/support.md new file mode 100644 index 00000000..c6e0c20c --- /dev/null +++ b/docs/support.md @@ -0,0 +1,140 @@ +# Support & Troubleshooting + +The documentation and community will help you troubleshoot most issues. If you have encountered a bug or need to get in touch with us, you can contact us using one of the following channels: +* Support & feedback: [Discord](https://discord.gg/hasura) +* Issue & bug tracking: [GitHub issues](https://github.com/hasura/ndc-mongodb/issues) +* Follow product updates: [@HasuraHQ](https://twitter.com/hasurahq) +* Talk to us on our [website chat](https://hasura.io) + +We are committed to fostering an open and welcoming environment in the community. Please see the [Code of Conduct](code-of-conduct.md). + +If you want to report a security issue, please [read this](security.md). + +## Frequently Asked Questions + +If your question is not answered here, please also check +[limitations](./limitations.md). + +### Why am I getting strings instead of numbers? + +MongoDB stores data in [BSON][] format which has several numeric types: + +- `double`, 64-bit floating point +- `decimal`, 128-bit floating point +- `int`, 32-bit integer +- `long`, 64-bit integer + +[BSON]: https://bsonspec.org/ + +But GraphQL uses JSON, so data must be converted from BSON to JSON in GraphQL +responses. Some JSON parsers cannot precisely decode the `decimal` and `long` +types. Specifically, in JavaScript running `JSON.parse(data)` will silently +convert `decimal` and `long` values to 64-bit floats, which causes loss of +precision. + +If you get a `long` value that is larger than `Number.MAX_SAFE_INTEGER` +(9,007,199,254,740,991) but that is less than `Number.MAX_VALUE` (1.8e308), then +you will get a number, but it might be silently changed to a different number +than the one you should have gotten. + +Some databases use `long` values as IDs - if one of these values loses precision +then instead of a calculation that is a little off you might end up +with access to the wrong records. + +There is a similar problem when converting a 128-bit float to a 64-bit float. +You'll get a number, but not exactly the right one. + +Serializing `decimal` and `long` as strings prevents bugs that might be +difficult to detect in environments like JavaScript. + +### Why am I getting data in this weird format? + +You might encounter a case where you expect a simple value in GraphQL responses, +like a number or a date, but you get a weird object wrapper. For example you +might expect, + +```json +{ "total": 3.0 } +``` + +But actually get: + +```json +{ "total": { "$numberDouble": "3.0" } } +``` + +That weird format is [Extended JSON][]. MongoDB stores data in [BSON][] format +which includes data types that don't exist in JSON. But GraphQL responses use +JSON. Extended JSON is a means of encoding BSON data with inline type +annotations. That provides a semi-standardized way to express, for example, date +values in JSON. + +[Extended JSON]: https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ + +In cases where the specific type of a document field is known in your data graph, +the MongoDB connector serializes values for that field using "simple" JSON, which +is probably what you expect. In these cases the type of each field is known +out-of-band, so inline type annotations that you would get from Extended JSON are +not necessary. 
But in cases where the data graph does not have a specific type +for a field (which we represent using the ExtendedJSON type in the data graph) +we serialize using Extended JSON instead to provide type information, which might +be important for you. + +What often happens is that when the `ddn connector introspect` command samples +your database to infer types for each collection document, it encounters +different types of data under the same field name in different documents. DDN +does not support union types, so we can't configure a specific type for these +cases. Instead, the data schema that gets written uses the ExtendedJSON type for +those fields. + +You have two options: + +#### configure a precise type for the field + +Edit your connector configuration: in +`schema/.json` change the type of a field from +`{ "type": "extendedJSON" }` to something specific like +`{ "type": { "scalar": "double" } }`. + +#### change Extended JSON serialization settings + +In your connector configuration, edit `configuration.json` and change the setting +`serializationOptions.extendedJsonMode` from `canonical` to `relaxed`. Extended JSON has two +serialization flavors: "relaxed" mode outputs JSON-native types like numbers as +plain values without inline type annotations. You will still see type +annotations on non-JSON-native types like dates. + +## How Do I ...? + +### select an entire object without listing its fields + +GraphQL requires that you explicitly list all of the object fields to include in +a response. If you want to fetch entire objects, the MongoDB connector provides +a workaround. The connector defines an ExtendedJSON type that represents +arbitrary BSON values. In GraphQL terms ExtendedJSON is a "scalar" type, so when +you select a field of that type you do not list nested fields; you get the +entire structure, whether it's an object, an array, or anything else. + +Edit the schema in your data connector configuration. (There is a schema +configuration file for each collection in the `schema/` directory.) Change the +object field you want to fetch from an object type like this one: + +```json +{ "type": { "object": "" } } +``` + +Change the type to `extendedJSON`: + +```json +{ "type": "extendedJSON" } +``` + +After restarting the connector, you will also need to update metadata to +propagate the type change by running the appropriate `ddn connector-link` +command. + +This is an all-or-nothing change: if a field type is ExtendedJSON you cannot +select a subset of fields. You will always get the entire structure. Also note +that fields of type ExtendedJSON are serialized according to the [Extended +JSON][] spec. 
(See the section above, "Why am I getting data in this weird +format?") diff --git a/fixtures/hasura/.devcontainer/devcontainer.json b/fixtures/hasura/.devcontainer/devcontainer.json index ea38082b..7ad51800 100644 --- a/fixtures/hasura/.devcontainer/devcontainer.json +++ b/fixtures/hasura/.devcontainer/devcontainer.json @@ -13,5 +13,5 @@ } }, "name": "Hasura DDN Codespace", - "postCreateCommand": "curl -L https://graphql-engine-cdn.hasura.io/ddn/cli/v2/get.sh | bash" + "postCreateCommand": "curl -L https://graphql-engine-cdn.hasura.io/ddn/cli/v4/get.sh | bash" } diff --git a/fixtures/hasura/.env b/fixtures/hasura/.env new file mode 100644 index 00000000..05da391c --- /dev/null +++ b/fixtures/hasura/.env @@ -0,0 +1,15 @@ +APP_SAMPLE_MFLIX_MONGODB_DATABASE_URI="mongodb://local.hasura.dev/sample_mflix" +APP_SAMPLE_MFLIX_OTEL_EXPORTER_OTLP_ENDPOINT="http://local.hasura.dev:4317" +APP_SAMPLE_MFLIX_OTEL_SERVICE_NAME="app_sample_mflix" +APP_SAMPLE_MFLIX_READ_URL="http://local.hasura.dev:7130" +APP_SAMPLE_MFLIX_WRITE_URL="http://local.hasura.dev:7130" +APP_CHINOOK_MONGODB_DATABASE_URI="mongodb://local.hasura.dev/chinook" +APP_CHINOOK_OTEL_EXPORTER_OTLP_ENDPOINT="http://local.hasura.dev:4317" +APP_CHINOOK_OTEL_SERVICE_NAME="app_chinook" +APP_CHINOOK_READ_URL="http://local.hasura.dev:7131" +APP_CHINOOK_WRITE_URL="http://local.hasura.dev:7131" +APP_TEST_CASES_MONGODB_DATABASE_URI="mongodb://local.hasura.dev/test_cases" +APP_TEST_CASES_OTEL_EXPORTER_OTLP_ENDPOINT="http://local.hasura.dev:4317" +APP_TEST_CASES_OTEL_SERVICE_NAME="app_test_cases" +APP_TEST_CASES_READ_URL="http://local.hasura.dev:7132" +APP_TEST_CASES_WRITE_URL="http://local.hasura.dev:7132" diff --git a/fixtures/hasura/.gitattributes b/fixtures/hasura/.gitattributes new file mode 100644 index 00000000..8ddc99f4 --- /dev/null +++ b/fixtures/hasura/.gitattributes @@ -0,0 +1 @@ +*.hml linguist-language=yaml \ No newline at end of file diff --git a/fixtures/hasura/.gitignore b/fixtures/hasura/.gitignore new file mode 100644 index 00000000..d168928d --- /dev/null +++ b/fixtures/hasura/.gitignore @@ -0,0 +1,2 @@ +engine/build +/.env.* diff --git a/fixtures/hasura/.hasura/context.yaml b/fixtures/hasura/.hasura/context.yaml index b23b1ec5..3822ed0e 100644 --- a/fixtures/hasura/.hasura/context.yaml +++ b/fixtures/hasura/.hasura/context.yaml @@ -1,2 +1,14 @@ -context: - supergraph: ../supergraph.yaml +kind: Context +version: v3 +definition: + current: default + contexts: + default: + supergraph: ../supergraph.yaml + subgraph: ../app/subgraph.yaml + localEnvFile: ../.env + scripts: + docker-start: + bash: HASURA_DDN_PAT=$(ddn auth print-pat) PROMPTQL_SECRET_KEY=$(ddn auth print-promptql-secret-key) docker compose -f compose.yaml --env-file .env up --build --pull always + powershell: $Env:HASURA_DDN_PAT = ddn auth print-pat; $Env:PROMPTQL_SECRET_KEY = ddn auth print-promptql-secret-key; docker compose -f compose.yaml --env-file .env up --build --pull always + promptQL: false diff --git a/fixtures/hasura/README.md b/fixtures/hasura/README.md index 4b95bb9b..814f1d9b 100644 --- a/fixtures/hasura/README.md +++ b/fixtures/hasura/README.md @@ -13,15 +13,30 @@ arion up -d ## Cheat Sheet -We have two subgraphs, and two connector configurations. So a lot of these -commands are repeated for each subgraph + connector combination. +We have three connector configurations. So a lot of these commands are repeated +for each connector. -Run introspection to update connector configuration: +Run introspection to update connector configuration. 
To do that through the ddn +CLI run these commands in the same directory as this README file: ```sh -$ ddn connector introspect --connector sample_mflix/connector/sample_mflix/connector.yaml +$ ddn connector introspect sample_mflix -$ ddn connector introspect --connector chinook/connector/chinook/connector.yaml +$ ddn connector introspect chinook + +$ ddn connector introspect test_cases +``` + +Alternatively run `mongodb-cli-plugin` directly to use the CLI plugin version in +this repo. The plugin binary is provided by the Nix dev shell. Use these +commands: + +```sh +$ nix run .#mongodb-cli-plugin -- --connection-uri mongodb://localhost/sample_mflix --context-path app/connector/sample_mflix/ update + +$ nix run .#mongodb-cli-plugin -- --connection-uri mongodb://localhost/chinook --context-path app/connector/chinook/ update + +$ nix run .#mongodb-cli-plugin -- --connection-uri mongodb://localhost/test_cases --context-path app/connector/test_cases/ update ``` Update Hasura metadata based on connector configuration @@ -29,7 +44,9 @@ Update Hasura metadata based on connector configuration introspection): ```sh -$ ddn connector-link update sample_mflix --subgraph sample_mflix/subgraph.yaml --env-file sample_mflix/.env.sample_mflix --add-all-resources +$ ddn connector-link update sample_mflix --add-all-resources + +$ ddn connector-link update chinook --add-all-resources -$ ddn connector-link update chinook --subgraph chinook/subgraph.yaml --env-file chinook/.env.chinook --add-all-resources +$ ddn connector-link update test_cases --add-all-resources ``` diff --git a/fixtures/hasura/chinook/connector/chinook/.configuration_metadata b/fixtures/hasura/app/connector/chinook/.configuration_metadata similarity index 100% rename from fixtures/hasura/chinook/connector/chinook/.configuration_metadata rename to fixtures/hasura/app/connector/chinook/.configuration_metadata diff --git a/fixtures/hasura/app/connector/chinook/.ddnignore b/fixtures/hasura/app/connector/chinook/.ddnignore new file mode 100644 index 00000000..ed72dd19 --- /dev/null +++ b/fixtures/hasura/app/connector/chinook/.ddnignore @@ -0,0 +1,2 @@ +.env* +compose.yaml diff --git a/fixtures/hasura/app/connector/chinook/.hasura-connector/Dockerfile.chinook b/fixtures/hasura/app/connector/chinook/.hasura-connector/Dockerfile.chinook new file mode 100644 index 00000000..1f2c958f --- /dev/null +++ b/fixtures/hasura/app/connector/chinook/.hasura-connector/Dockerfile.chinook @@ -0,0 +1,2 @@ +FROM ghcr.io/hasura/ndc-mongodb:v1.4.0 +COPY ./ /etc/connector \ No newline at end of file diff --git a/fixtures/hasura/app/connector/chinook/.hasura-connector/connector-metadata.yaml b/fixtures/hasura/app/connector/chinook/.hasura-connector/connector-metadata.yaml new file mode 100644 index 00000000..bc84f63a --- /dev/null +++ b/fixtures/hasura/app/connector/chinook/.hasura-connector/connector-metadata.yaml @@ -0,0 +1,16 @@ +packagingDefinition: + type: PrebuiltDockerImage + dockerImage: ghcr.io/hasura/ndc-mongodb:v1.5.0 +supportedEnvironmentVariables: + - name: MONGODB_DATABASE_URI + description: The URI for the MongoDB database +commands: + update: hasura-ndc-mongodb update +cliPlugin: + name: ndc-mongodb + version: v1.5.0 +dockerComposeWatch: + - path: ./ + target: /etc/connector + action: sync+restart +documentationPage: "https://hasura.info/mongodb-getting-started" diff --git a/fixtures/hasura/app/connector/chinook/compose.yaml b/fixtures/hasura/app/connector/chinook/compose.yaml new file mode 100644 index 00000000..5c4d6bf4 --- /dev/null +++ 
b/fixtures/hasura/app/connector/chinook/compose.yaml @@ -0,0 +1,13 @@ +services: + app_chinook: + build: + context: . + dockerfile: .hasura-connector/Dockerfile.chinook + environment: + MONGODB_DATABASE_URI: $APP_CHINOOK_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: $APP_CHINOOK_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: $APP_CHINOOK_OTEL_SERVICE_NAME + extra_hosts: + - local.hasura.dev:host-gateway + ports: + - 7131:8080 diff --git a/fixtures/hasura/chinook/connector/chinook/configuration.json b/fixtures/hasura/app/connector/chinook/configuration.json similarity index 51% rename from fixtures/hasura/chinook/connector/chinook/configuration.json rename to fixtures/hasura/app/connector/chinook/configuration.json index e2c0aaab..5d72bb4e 100644 --- a/fixtures/hasura/chinook/connector/chinook/configuration.json +++ b/fixtures/hasura/app/connector/chinook/configuration.json @@ -1,7 +1,10 @@ { "introspectionOptions": { - "sampleSize": 100, + "sampleSize": 1000, "noValidatorSchema": false, "allSchemaNullable": false + }, + "serializationOptions": { + "extendedJsonMode": "canonical" } } diff --git a/fixtures/hasura/app/connector/chinook/connector.yaml b/fixtures/hasura/app/connector/chinook/connector.yaml new file mode 100644 index 00000000..e3541826 --- /dev/null +++ b/fixtures/hasura/app/connector/chinook/connector.yaml @@ -0,0 +1,14 @@ +kind: Connector +version: v2 +definition: + name: chinook + subgraph: app + source: hasura/mongodb:v1.5.0 + context: . + envMapping: + MONGODB_DATABASE_URI: + fromEnv: APP_CHINOOK_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: + fromEnv: APP_CHINOOK_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: + fromEnv: APP_CHINOOK_OTEL_SERVICE_NAME diff --git a/fixtures/hasura/chinook/connector/chinook/native_mutations/insert_artist.json b/fixtures/hasura/app/connector/chinook/native_mutations/insert_artist.json similarity index 100% rename from fixtures/hasura/chinook/connector/chinook/native_mutations/insert_artist.json rename to fixtures/hasura/app/connector/chinook/native_mutations/insert_artist.json diff --git a/fixtures/hasura/chinook/connector/chinook/native_mutations/update_track_prices.json b/fixtures/hasura/app/connector/chinook/native_mutations/update_track_prices.json similarity index 100% rename from fixtures/hasura/chinook/connector/chinook/native_mutations/update_track_prices.json rename to fixtures/hasura/app/connector/chinook/native_mutations/update_track_prices.json diff --git a/fixtures/hasura/chinook/connector/chinook/native_queries/artists_with_albums_and_tracks.json b/fixtures/hasura/app/connector/chinook/native_queries/artists_with_albums_and_tracks.json similarity index 100% rename from fixtures/hasura/chinook/connector/chinook/native_queries/artists_with_albums_and_tracks.json rename to fixtures/hasura/app/connector/chinook/native_queries/artists_with_albums_and_tracks.json diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Album.json b/fixtures/hasura/app/connector/chinook/schema/Album.json similarity index 88% rename from fixtures/hasura/chinook/connector/chinook/schema/Album.json rename to fixtures/hasura/app/connector/chinook/schema/Album.json index a8e61389..f361c03e 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Album.json +++ b/fixtures/hasura/app/connector/chinook/schema/Album.json @@ -28,8 +28,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Album" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Artist.json 
b/fixtures/hasura/app/connector/chinook/schema/Artist.json similarity index 73% rename from fixtures/hasura/chinook/connector/chinook/schema/Artist.json rename to fixtures/hasura/app/connector/chinook/schema/Artist.json index d60bb483..d4104e76 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Artist.json +++ b/fixtures/hasura/app/connector/chinook/schema/Artist.json @@ -15,9 +15,7 @@ }, "Name": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "_id": { @@ -25,8 +23,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Artist" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Customer.json b/fixtures/hasura/app/connector/chinook/schema/Customer.json similarity index 79% rename from fixtures/hasura/chinook/connector/chinook/schema/Customer.json rename to fixtures/hasura/app/connector/chinook/schema/Customer.json index 50dbf947..22736ae9 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Customer.json +++ b/fixtures/hasura/app/connector/chinook/schema/Customer.json @@ -10,16 +10,12 @@ "fields": { "Address": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "City": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "Company": { @@ -31,9 +27,7 @@ }, "Country": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "CustomerId": { @@ -86,18 +80,15 @@ }, "SupportRepId": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "int" } }, "_id": { "type": { - "scalar": "objectId" + "scalar": "objectId" } } - }, - "description": "Object type for collection Customer" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Employee.json b/fixtures/hasura/app/connector/chinook/schema/Employee.json similarity index 59% rename from fixtures/hasura/chinook/connector/chinook/schema/Employee.json rename to fixtures/hasura/app/connector/chinook/schema/Employee.json index d6a0524e..ffbeeaf5 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Employee.json +++ b/fixtures/hasura/app/connector/chinook/schema/Employee.json @@ -10,37 +10,27 @@ "fields": { "Address": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "BirthDate": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "City": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "Country": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "Email": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "EmployeeId": { @@ -50,9 +40,7 @@ }, "Fax": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "FirstName": { @@ -62,9 +50,7 @@ }, "HireDate": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "LastName": { @@ -74,16 +60,12 @@ }, "Phone": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "PostalCode": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "ReportsTo": { @@ -95,25 +77,20 @@ }, "State": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "Title": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "_id": { "type": { - "scalar": "objectId" + "scalar": "objectId" } } - }, - "description": "Object type for collection Employee" + } } } -} +} \ No newline at end of file diff --git 
a/fixtures/hasura/chinook/connector/chinook/schema/Genre.json b/fixtures/hasura/app/connector/chinook/schema/Genre.json similarity index 73% rename from fixtures/hasura/chinook/connector/chinook/schema/Genre.json rename to fixtures/hasura/app/connector/chinook/schema/Genre.json index 99cdb709..394be604 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Genre.json +++ b/fixtures/hasura/app/connector/chinook/schema/Genre.json @@ -15,9 +15,7 @@ }, "Name": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "_id": { @@ -25,8 +23,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Genre" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Invoice.json b/fixtures/hasura/app/connector/chinook/schema/Invoice.json similarity index 79% rename from fixtures/hasura/chinook/connector/chinook/schema/Invoice.json rename to fixtures/hasura/app/connector/chinook/schema/Invoice.json index aa9a3c91..1b585bbb 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Invoice.json +++ b/fixtures/hasura/app/connector/chinook/schema/Invoice.json @@ -10,23 +10,17 @@ "fields": { "BillingAddress": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "BillingCity": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "BillingCountry": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "BillingPostalCode": { @@ -68,8 +62,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Invoice" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/InvoiceLine.json b/fixtures/hasura/app/connector/chinook/schema/InvoiceLine.json similarity index 91% rename from fixtures/hasura/chinook/connector/chinook/schema/InvoiceLine.json rename to fixtures/hasura/app/connector/chinook/schema/InvoiceLine.json index 438d023b..ef1b116d 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/InvoiceLine.json +++ b/fixtures/hasura/app/connector/chinook/schema/InvoiceLine.json @@ -38,8 +38,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection InvoiceLine" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/MediaType.json b/fixtures/hasura/app/connector/chinook/schema/MediaType.json similarity index 74% rename from fixtures/hasura/chinook/connector/chinook/schema/MediaType.json rename to fixtures/hasura/app/connector/chinook/schema/MediaType.json index 79912879..57ea272b 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/MediaType.json +++ b/fixtures/hasura/app/connector/chinook/schema/MediaType.json @@ -15,9 +15,7 @@ }, "Name": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" } }, "_id": { @@ -25,8 +23,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection MediaType" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Playlist.json b/fixtures/hasura/app/connector/chinook/schema/Playlist.json similarity index 74% rename from fixtures/hasura/chinook/connector/chinook/schema/Playlist.json rename to fixtures/hasura/app/connector/chinook/schema/Playlist.json index 74dee27f..414e4078 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Playlist.json +++ b/fixtures/hasura/app/connector/chinook/schema/Playlist.json @@ -10,9 +10,7 @@ "fields": { "Name": { "type": { - "nullable": { - 
"scalar": "string" - } + "scalar": "string" } }, "PlaylistId": { @@ -25,8 +23,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Playlist" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/PlaylistTrack.json b/fixtures/hasura/app/connector/chinook/schema/PlaylistTrack.json similarity index 86% rename from fixtures/hasura/chinook/connector/chinook/schema/PlaylistTrack.json rename to fixtures/hasura/app/connector/chinook/schema/PlaylistTrack.json index e4382592..a89c10eb 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/PlaylistTrack.json +++ b/fixtures/hasura/app/connector/chinook/schema/PlaylistTrack.json @@ -23,8 +23,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection PlaylistTrack" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/chinook/connector/chinook/schema/Track.json b/fixtures/hasura/app/connector/chinook/schema/Track.json similarity index 79% rename from fixtures/hasura/chinook/connector/chinook/schema/Track.json rename to fixtures/hasura/app/connector/chinook/schema/Track.json index a0d11820..43d8886a 100644 --- a/fixtures/hasura/chinook/connector/chinook/schema/Track.json +++ b/fixtures/hasura/app/connector/chinook/schema/Track.json @@ -10,16 +10,12 @@ "fields": { "AlbumId": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "int" } }, "Bytes": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "int" } }, "Composer": { @@ -31,9 +27,7 @@ }, "GenreId": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "int" } }, "MediaTypeId": { @@ -66,8 +60,7 @@ "scalar": "objectId" } } - }, - "description": "Object type for collection Track" + } } } -} +} \ No newline at end of file diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/.configuration_metadata b/fixtures/hasura/app/connector/sample_mflix/.configuration_metadata similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/.configuration_metadata rename to fixtures/hasura/app/connector/sample_mflix/.configuration_metadata diff --git a/fixtures/hasura/app/connector/sample_mflix/.ddnignore b/fixtures/hasura/app/connector/sample_mflix/.ddnignore new file mode 100644 index 00000000..ed72dd19 --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/.ddnignore @@ -0,0 +1,2 @@ +.env* +compose.yaml diff --git a/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/Dockerfile.sample_mflix b/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/Dockerfile.sample_mflix new file mode 100644 index 00000000..1f2c958f --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/Dockerfile.sample_mflix @@ -0,0 +1,2 @@ +FROM ghcr.io/hasura/ndc-mongodb:v1.4.0 +COPY ./ /etc/connector \ No newline at end of file diff --git a/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/connector-metadata.yaml b/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/connector-metadata.yaml new file mode 100644 index 00000000..bc84f63a --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/.hasura-connector/connector-metadata.yaml @@ -0,0 +1,16 @@ +packagingDefinition: + type: PrebuiltDockerImage + dockerImage: ghcr.io/hasura/ndc-mongodb:v1.5.0 +supportedEnvironmentVariables: + - name: MONGODB_DATABASE_URI + description: The URI for the MongoDB database +commands: + update: hasura-ndc-mongodb update +cliPlugin: + name: ndc-mongodb + version: v1.5.0 +dockerComposeWatch: + - path: ./ + 
target: /etc/connector + action: sync+restart +documentationPage: "https://hasura.info/mongodb-getting-started" diff --git a/fixtures/hasura/app/connector/sample_mflix/compose.yaml b/fixtures/hasura/app/connector/sample_mflix/compose.yaml new file mode 100644 index 00000000..ea8f422a --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/compose.yaml @@ -0,0 +1,13 @@ +services: + app_sample_mflix: + build: + context: . + dockerfile: .hasura-connector/Dockerfile.sample_mflix + environment: + MONGODB_DATABASE_URI: $APP_SAMPLE_MFLIX_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: $APP_SAMPLE_MFLIX_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: $APP_SAMPLE_MFLIX_OTEL_SERVICE_NAME + extra_hosts: + - local.hasura.dev:host-gateway + ports: + - 7130:8080 diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/configuration.json b/fixtures/hasura/app/connector/sample_mflix/configuration.json similarity index 51% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/configuration.json rename to fixtures/hasura/app/connector/sample_mflix/configuration.json index e2c0aaab..5d72bb4e 100644 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/configuration.json +++ b/fixtures/hasura/app/connector/sample_mflix/configuration.json @@ -1,7 +1,10 @@ { "introspectionOptions": { - "sampleSize": 100, + "sampleSize": 1000, "noValidatorSchema": false, "allSchemaNullable": false + }, + "serializationOptions": { + "extendedJsonMode": "canonical" } } diff --git a/fixtures/hasura/app/connector/sample_mflix/connector.yaml b/fixtures/hasura/app/connector/sample_mflix/connector.yaml new file mode 100644 index 00000000..d2b24069 --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/connector.yaml @@ -0,0 +1,14 @@ +kind: Connector +version: v2 +definition: + name: sample_mflix + subgraph: app + source: hasura/mongodb:v1.5.0 + context: . 
+ envMapping: + MONGODB_DATABASE_URI: + fromEnv: APP_SAMPLE_MFLIX_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: + fromEnv: APP_SAMPLE_MFLIX_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: + fromEnv: APP_SAMPLE_MFLIX_OTEL_SERVICE_NAME diff --git a/fixtures/hasura/app/connector/sample_mflix/native_queries/eq_title.json b/fixtures/hasura/app/connector/sample_mflix/native_queries/eq_title.json new file mode 100644 index 00000000..b1ded9d4 --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/native_queries/eq_title.json @@ -0,0 +1,125 @@ +{ + "name": "eq_title", + "representation": "collection", + "inputCollection": "movies", + "arguments": { + "title": { + "type": { + "scalar": "string" + } + }, + "year": { + "type": { + "scalar": "int" + } + } + }, + "resultDocumentType": "eq_title_project", + "objectTypes": { + "eq_title_project": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "bar": { + "type": { + "object": "eq_title_project_bar" + } + }, + "foo": { + "type": { + "object": "eq_title_project_foo" + } + }, + "title": { + "type": { + "scalar": "string" + } + }, + "tomatoes": { + "type": { + "nullable": { + "object": "movies_tomatoes" + } + } + }, + "what": { + "type": { + "object": "eq_title_project_what" + } + } + } + }, + "eq_title_project_bar": { + "fields": { + "foo": { + "type": { + "object": "movies_imdb" + } + } + } + }, + "eq_title_project_foo": { + "fields": { + "bar": { + "type": { + "nullable": { + "object": "movies_tomatoes_critic" + } + } + } + } + }, + "eq_title_project_what": { + "fields": { + "the": { + "type": { + "object": "eq_title_project_what_the" + } + } + } + }, + "eq_title_project_what_the": { + "fields": { + "heck": { + "type": { + "scalar": "string" + } + } + } + } + }, + "pipeline": [ + { + "$match": { + "title": "{{ title | string }}", + "year": { + "$gt": "{{ year }}" + } + } + }, + { + "$project": { + "title": 1, + "tomatoes": 1, + "foo.bar": "$tomatoes.critic", + "bar.foo": "$imdb", + "what.the.heck": "hello", + "genres": 1, + "cast": 1 + } + }, + { + "$project": { + "genres": false + } + }, + { + "$project": { + "cast": false + } + } + ] +} diff --git a/fixtures/hasura/app/connector/sample_mflix/native_queries/extended_json_test_data.json b/fixtures/hasura/app/connector/sample_mflix/native_queries/extended_json_test_data.json new file mode 100644 index 00000000..fd43809c --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/native_queries/extended_json_test_data.json @@ -0,0 +1,98 @@ +{ + "name": "extended_json_test_data", + "representation": "collection", + "description": "various values that all have the ExtendedJSON type", + "resultDocumentType": "DocWithExtendedJsonValue", + "objectTypes": { + "DocWithExtendedJsonValue": { + "fields": { + "type": { + "type": { + "scalar": "string" + } + }, + "value": { + "type": "extendedJSON" + } + } + } + }, + "pipeline": [ + { + "$documents": [ + { + "type": "decimal", + "value": { + "$numberDecimal": "1" + } + }, + { + "type": "decimal", + "value": { + "$numberDecimal": "2" + } + }, + { + "type": "double", + "value": { + "$numberDouble": "3" + } + }, + { + "type": "double", + "value": { + "$numberDouble": "4" + } + }, + { + "type": "int", + "value": { + "$numberInt": "5" + } + }, + { + "type": "int", + "value": { + "$numberInt": "6" + } + }, + { + "type": "long", + "value": { + "$numberLong": "7" + } + }, + { + "type": "long", + "value": { + "$numberLong": "8" + } + }, + { + "type": "string", + "value": "foo" + }, + { + "type": "string", + "value": "hello, world!" 
+ }, + { + "type": "date", + "value": { + "$date": "2024-08-20T14:38:00Z" + } + }, + { + "type": "date", + "value": { + "$date": "2021-11-22T09:00:00Z" + } + }, + { + "type": "null", + "value": null + } + ] + } + ] +} diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/native_queries/hello.json b/fixtures/hasura/app/connector/sample_mflix/native_queries/hello.json similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/native_queries/hello.json rename to fixtures/hasura/app/connector/sample_mflix/native_queries/hello.json diff --git a/fixtures/hasura/app/connector/sample_mflix/native_queries/native_query.json b/fixtures/hasura/app/connector/sample_mflix/native_queries/native_query.json new file mode 100644 index 00000000..41dc6b65 --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/native_queries/native_query.json @@ -0,0 +1,120 @@ +{ + "name": "native_query", + "representation": "collection", + "inputCollection": "movies", + "arguments": { + "title": { + "type": { + "scalar": "string" + } + } + }, + "resultDocumentType": "native_query_project", + "objectTypes": { + "native_query_project": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "bar": { + "type": { + "object": "native_query_project_bar" + } + }, + "foo": { + "type": { + "object": "native_query_project_foo" + } + }, + "title": { + "type": { + "scalar": "string" + } + }, + "tomatoes": { + "type": { + "nullable": { + "object": "movies_tomatoes" + } + } + }, + "what": { + "type": { + "object": "native_query_project_what" + } + } + } + }, + "native_query_project_bar": { + "fields": { + "foo": { + "type": { + "object": "movies_imdb" + } + } + } + }, + "native_query_project_foo": { + "fields": { + "bar": { + "type": { + "nullable": { + "object": "movies_tomatoes_critic" + } + } + } + } + }, + "native_query_project_what": { + "fields": { + "the": { + "type": { + "object": "native_query_project_what_the" + } + } + } + }, + "native_query_project_what_the": { + "fields": { + "heck": { + "type": { + "scalar": "string" + } + } + } + } + }, + "pipeline": [ + { + "$match": { + "title": "{{ title }}", + "year": { + "$gt": "$$ROOT" + } + } + }, + { + "$project": { + "title": 1, + "tomatoes": 1, + "foo.bar": "$tomatoes.critic", + "bar.foo": "$imdb", + "what.the.heck": "hello", + "genres": 1, + "cast": 1 + } + }, + { + "$project": { + "genres": false + } + }, + { + "$project": { + "cast": false + } + } + ] +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/sample_mflix/native_queries/title_word_frequency.json b/fixtures/hasura/app/connector/sample_mflix/native_queries/title_word_frequency.json new file mode 100644 index 00000000..9d6fc8ac --- /dev/null +++ b/fixtures/hasura/app/connector/sample_mflix/native_queries/title_word_frequency.json @@ -0,0 +1,48 @@ +{ + "name": "title_word_frequency", + "representation": "collection", + "inputCollection": "movies", + "arguments": {}, + "resultDocumentType": "title_word_frequency_group", + "objectTypes": { + "title_word_frequency_group": { + "fields": { + "_id": { + "type": { + "scalar": "string" + } + }, + "count": { + "type": { + "scalar": "int" + } + } + } + } + }, + "pipeline": [ + { + "$replaceWith": { + "title_words": { + "$split": [ + "$title", + " " + ] + } + } + }, + { + "$unwind": { + "path": "$title_words" + } + }, + { + "$group": { + "_id": "$title_words", + "count": { + "$count": {} + } + } + } + ] +} diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/comments.json 
b/fixtures/hasura/app/connector/sample_mflix/schema/comments.json similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/schema/comments.json rename to fixtures/hasura/app/connector/sample_mflix/schema/comments.json diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/movies.json b/fixtures/hasura/app/connector/sample_mflix/schema/movies.json similarity index 92% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/schema/movies.json rename to fixtures/hasura/app/connector/sample_mflix/schema/movies.json index b7dc4ca5..a56df100 100644 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/movies.json +++ b/fixtures/hasura/app/connector/sample_mflix/schema/movies.json @@ -36,8 +36,10 @@ }, "directors": { "type": { - "arrayOf": { - "scalar": "string" + "nullable": { + "arrayOf": { + "scalar": "string" + } } } }, @@ -50,8 +52,10 @@ }, "genres": { "type": { - "arrayOf": { - "scalar": "string" + "nullable": { + "arrayOf": { + "scalar": "string" + } } } }, @@ -273,12 +277,16 @@ }, "numReviews": { "type": { - "scalar": "int" + "nullable": { + "scalar": "int" + } } }, "rating": { "type": { - "scalar": "double" + "nullable": { + "scalar": "double" + } } } } @@ -299,7 +307,9 @@ }, "rating": { "type": { - "scalar": "double" + "nullable": { + "scalar": "double" + } } } } diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/sessions.json b/fixtures/hasura/app/connector/sample_mflix/schema/sessions.json similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/schema/sessions.json rename to fixtures/hasura/app/connector/sample_mflix/schema/sessions.json diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/theaters.json b/fixtures/hasura/app/connector/sample_mflix/schema/theaters.json similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/schema/theaters.json rename to fixtures/hasura/app/connector/sample_mflix/schema/theaters.json diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/schema/users.json b/fixtures/hasura/app/connector/sample_mflix/schema/users.json similarity index 100% rename from fixtures/hasura/sample_mflix/connector/sample_mflix/schema/users.json rename to fixtures/hasura/app/connector/sample_mflix/schema/users.json diff --git a/fixtures/hasura/chinook/metadata/commands/.gitkeep b/fixtures/hasura/app/connector/test_cases/.configuration_metadata similarity index 100% rename from fixtures/hasura/chinook/metadata/commands/.gitkeep rename to fixtures/hasura/app/connector/test_cases/.configuration_metadata diff --git a/fixtures/hasura/app/connector/test_cases/.ddnignore b/fixtures/hasura/app/connector/test_cases/.ddnignore new file mode 100644 index 00000000..ed72dd19 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/.ddnignore @@ -0,0 +1,2 @@ +.env* +compose.yaml diff --git a/fixtures/hasura/app/connector/test_cases/.hasura-connector/Dockerfile.test_cases b/fixtures/hasura/app/connector/test_cases/.hasura-connector/Dockerfile.test_cases new file mode 100644 index 00000000..1f2c958f --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/.hasura-connector/Dockerfile.test_cases @@ -0,0 +1,2 @@ +FROM ghcr.io/hasura/ndc-mongodb:v1.4.0 +COPY ./ /etc/connector \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/.hasura-connector/connector-metadata.yaml b/fixtures/hasura/app/connector/test_cases/.hasura-connector/connector-metadata.yaml new file mode 100644 index 
00000000..bc84f63a --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/.hasura-connector/connector-metadata.yaml @@ -0,0 +1,16 @@ +packagingDefinition: + type: PrebuiltDockerImage + dockerImage: ghcr.io/hasura/ndc-mongodb:v1.5.0 +supportedEnvironmentVariables: + - name: MONGODB_DATABASE_URI + description: The URI for the MongoDB database +commands: + update: hasura-ndc-mongodb update +cliPlugin: + name: ndc-mongodb + version: v1.5.0 +dockerComposeWatch: + - path: ./ + target: /etc/connector + action: sync+restart +documentationPage: "https://hasura.info/mongodb-getting-started" diff --git a/fixtures/hasura/app/connector/test_cases/compose.yaml b/fixtures/hasura/app/connector/test_cases/compose.yaml new file mode 100644 index 00000000..2c2d8feb --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/compose.yaml @@ -0,0 +1,13 @@ +services: + app_test_cases: + build: + context: . + dockerfile: .hasura-connector/Dockerfile.test_cases + environment: + MONGODB_DATABASE_URI: $APP_TEST_CASES_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: $APP_TEST_CASES_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: $APP_TEST_CASES_OTEL_SERVICE_NAME + extra_hosts: + - local.hasura.dev:host-gateway + ports: + - 7132:8080 diff --git a/fixtures/hasura/app/connector/test_cases/configuration.json b/fixtures/hasura/app/connector/test_cases/configuration.json new file mode 100644 index 00000000..5d72bb4e --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/configuration.json @@ -0,0 +1,10 @@ +{ + "introspectionOptions": { + "sampleSize": 1000, + "noValidatorSchema": false, + "allSchemaNullable": false + }, + "serializationOptions": { + "extendedJsonMode": "canonical" + } +} diff --git a/fixtures/hasura/app/connector/test_cases/connector.yaml b/fixtures/hasura/app/connector/test_cases/connector.yaml new file mode 100644 index 00000000..c156e640 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/connector.yaml @@ -0,0 +1,14 @@ +kind: Connector +version: v2 +definition: + name: test_cases + subgraph: app + source: hasura/mongodb:v1.5.0 + context: . 
+ envMapping: + MONGODB_DATABASE_URI: + fromEnv: APP_TEST_CASES_MONGODB_DATABASE_URI + OTEL_EXPORTER_OTLP_ENDPOINT: + fromEnv: APP_TEST_CASES_OTEL_EXPORTER_OTLP_ENDPOINT + OTEL_SERVICE_NAME: + fromEnv: APP_TEST_CASES_OTEL_SERVICE_NAME diff --git a/fixtures/hasura/app/connector/test_cases/schema/departments.json b/fixtures/hasura/app/connector/test_cases/schema/departments.json new file mode 100644 index 00000000..5f8996b4 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/departments.json @@ -0,0 +1,24 @@ +{ + "name": "departments", + "collections": { + "departments": { + "type": "departments" + } + }, + "objectTypes": { + "departments": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "description": { + "type": { + "scalar": "string" + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/schema/nested_collection.json b/fixtures/hasura/app/connector/test_cases/schema/nested_collection.json new file mode 100644 index 00000000..df749f60 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/nested_collection.json @@ -0,0 +1,40 @@ +{ + "name": "nested_collection", + "collections": { + "nested_collection": { + "type": "nested_collection" + } + }, + "objectTypes": { + "nested_collection": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "institution": { + "type": { + "scalar": "string" + } + }, + "staff": { + "type": { + "arrayOf": { + "object": "nested_collection_staff" + } + } + } + } + }, + "nested_collection_staff": { + "fields": { + "name": { + "type": { + "scalar": "string" + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/schema/nested_field_with_dollar.json b/fixtures/hasura/app/connector/test_cases/schema/nested_field_with_dollar.json new file mode 100644 index 00000000..df634f41 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/nested_field_with_dollar.json @@ -0,0 +1,35 @@ +{ + "name": "nested_field_with_dollar", + "collections": { + "nested_field_with_dollar": { + "type": "nested_field_with_dollar" + } + }, + "objectTypes": { + "nested_field_with_dollar": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "configuration": { + "type": { + "object": "nested_field_with_dollar_configuration" + } + } + } + }, + "nested_field_with_dollar_configuration": { + "fields": { + "$schema": { + "type": { + "nullable": { + "scalar": "string" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/schema/schools.json b/fixtures/hasura/app/connector/test_cases/schema/schools.json new file mode 100644 index 00000000..0ebed63e --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/schools.json @@ -0,0 +1,43 @@ +{ + "name": "schools", + "collections": { + "schools": { + "type": "schools" + } + }, + "objectTypes": { + "schools": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "departments": { + "type": { + "object": "schools_departments" + } + }, + "name": { + "type": { + "scalar": "string" + } + } + } + }, + "schools_departments": { + "fields": { + "english_department_id": { + "type": { + "scalar": "objectId" + } + }, + "math_department_id": { + "type": { + "scalar": "objectId" + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/schema/uuids.json b/fixtures/hasura/app/connector/test_cases/schema/uuids.json new file mode 100644 index 
00000000..42a0dd4d --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/uuids.json @@ -0,0 +1,34 @@ +{ + "name": "uuids", + "collections": { + "uuids": { + "type": "uuids" + } + }, + "objectTypes": { + "uuids": { + "fields": { + "_id": { + "type": { + "scalar": "objectId" + } + }, + "name": { + "type": { + "scalar": "string" + } + }, + "uuid": { + "type": { + "scalar": "uuid" + } + }, + "uuid_as_string": { + "type": { + "scalar": "string" + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/app/connector/test_cases/schema/weird_field_names.json b/fixtures/hasura/app/connector/test_cases/schema/weird_field_names.json new file mode 100644 index 00000000..42344e40 --- /dev/null +++ b/fixtures/hasura/app/connector/test_cases/schema/weird_field_names.json @@ -0,0 +1,68 @@ +{ + "name": "weird_field_names", + "collections": { + "weird_field_names": { + "type": "weird_field_names" + } + }, + "objectTypes": { + "weird_field_names": { + "fields": { + "$invalid.array": { + "type": { + "arrayOf": { + "object": "weird_field_names_$invalid.array" + } + } + }, + "$invalid.name": { + "type": { + "scalar": "int" + } + }, + "$invalid.object.name": { + "type": { + "object": "weird_field_names_$invalid.object.name" + } + }, + "_id": { + "type": { + "scalar": "objectId" + } + }, + "valid_object_name": { + "type": { + "object": "weird_field_names_valid_object_name" + } + } + } + }, + "weird_field_names_$invalid.array": { + "fields": { + "$invalid.element": { + "type": { + "scalar": "int" + } + } + } + }, + "weird_field_names_$invalid.object.name": { + "fields": { + "valid_name": { + "type": { + "scalar": "int" + } + } + } + }, + "weird_field_names_valid_object_name": { + "fields": { + "$invalid.nested.name": { + "type": { + "scalar": "int" + } + } + } + } + } +} \ No newline at end of file diff --git a/fixtures/hasura/globals/.env.globals.cloud b/fixtures/hasura/app/metadata/.keep similarity index 100% rename from fixtures/hasura/globals/.env.globals.cloud rename to fixtures/hasura/app/metadata/.keep diff --git a/fixtures/hasura/chinook/metadata/models/Album.hml b/fixtures/hasura/app/metadata/Album.hml similarity index 65% rename from fixtures/hasura/chinook/metadata/models/Album.hml rename to fixtures/hasura/app/metadata/Album.hml index 79d9651d..d18208be 100644 --- a/fixtures/hasura/chinook/metadata/models/Album.hml +++ b/fixtures/hasura/app/metadata/Album.hml @@ -31,7 +31,6 @@ definition: title: column: name: Title - description: Object type for collection Album --- kind: TypePermissions @@ -51,30 +50,50 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: AlbumComparisonExp + name: AlbumBoolExp operand: object: type: Album comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: albumId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: artistId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: title - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: artist - booleanExpressionType: ArtistComparisonExp - relationshipName: tracks - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: AlbumComparisonExp + typeName: AlbumBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: AlbumAggExp + operand: + object: + aggregatedType: Album + 
aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: albumId + aggregateExpression: IntAggExp + - fieldName: artistId + aggregateExpression: IntAggExp + - fieldName: title + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: AlbumAggExp --- kind: Model @@ -85,7 +104,8 @@ definition: source: dataConnectorName: chinook collection: Album - filterExpressionType: AlbumComparisonExp + filterExpressionType: AlbumBoolExp + aggregateExpression: AlbumAggExp orderableFields: - fieldName: id orderByDirections: @@ -102,11 +122,20 @@ definition: graphql: selectMany: queryRootField: album + subscription: + rootField: album selectUniques: - queryRootField: albumById uniqueIdentifier: - id + subscription: + rootField: albumById orderByExpressionType: AlbumOrderBy + filterInputTypeName: AlbumFilterInput + aggregate: + queryRootField: albumAggregate + subscription: + rootField: albumAggregate --- kind: ModelPermissions @@ -117,4 +146,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/models/Artist.hml b/fixtures/hasura/app/metadata/Artist.hml similarity index 63% rename from fixtures/hasura/chinook/metadata/models/Artist.hml rename to fixtures/hasura/app/metadata/Artist.hml index bcb4ff50..2ba6e1ac 100644 --- a/fixtures/hasura/chinook/metadata/models/Artist.hml +++ b/fixtures/hasura/app/metadata/Artist.hml @@ -9,7 +9,7 @@ definition: - name: artistId type: Int! - name: name - type: String + type: String! graphql: typeName: Artist inputTypeName: ArtistInput @@ -26,7 +26,6 @@ definition: name: column: name: Name - description: Object type for collection Artist --- kind: TypePermissions @@ -45,26 +44,45 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: ArtistComparisonExp + name: ArtistBoolExp operand: object: type: Artist comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: artistId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: albums - booleanExpressionType: AlbumComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: ArtistComparisonExp + typeName: ArtistBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: ArtistAggExp + operand: + object: + aggregatedType: Artist + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: artistId + aggregateExpression: IntAggExp + - fieldName: name + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: ArtistAggExp --- kind: Model @@ -75,7 +93,8 @@ definition: source: dataConnectorName: chinook collection: Artist - filterExpressionType: ArtistComparisonExp + filterExpressionType: ArtistBoolExp + aggregateExpression: ArtistAggExp orderableFields: - fieldName: id orderByDirections: @@ -89,11 +108,20 @@ definition: graphql: selectMany: queryRootField: artist + subscription: + rootField: artist selectUniques: - queryRootField: artistById uniqueIdentifier: - id + subscription: + rootField: artistById orderByExpressionType: ArtistOrderBy + filterInputTypeName: ArtistFilterInput + aggregate: + queryRootField: artistAggregate + subscription: + rootField: artistAggregate --- kind: ModelPermissions @@ -104,4 +132,5 @@ definition: - role: 
admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/ArtistsWithAlbumsAndTracks.hml b/fixtures/hasura/app/metadata/ArtistsWithAlbumsAndTracks.hml similarity index 69% rename from fixtures/hasura/chinook/metadata/ArtistsWithAlbumsAndTracks.hml rename to fixtures/hasura/app/metadata/ArtistsWithAlbumsAndTracks.hml index 9070d45b..11217659 100644 --- a/fixtures/hasura/chinook/metadata/ArtistsWithAlbumsAndTracks.hml +++ b/fixtures/hasura/app/metadata/ArtistsWithAlbumsAndTracks.hml @@ -40,27 +40,6 @@ definition: - title - tracks ---- -kind: BooleanExpressionType -version: v1 -definition: - name: AlbumWithTracksComparisonExp - operand: - object: - type: AlbumWithTracks - comparableFields: - - fieldName: id - booleanExpressionType: ObjectIdComparisonExp - - fieldName: title - booleanExpressionType: StringComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: AlbumWithTracksComparisonExp - --- kind: ObjectType version: v1 @@ -107,22 +86,63 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: ArtistWithAlbumsAndTracksComparisonExp + name: AlbumWithTracksBoolExp + operand: + object: + type: AlbumWithTracks + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: title + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: AlbumWithTracksBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: ArtistWithAlbumsAndTracksBoolExp operand: object: type: ArtistWithAlbumsAndTracks comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp + - fieldName: albums + booleanExpressionType: AlbumWithTracksBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: [] logicalOperators: enable: true isNull: enable: true graphql: - typeName: ArtistWithAlbumsAndTracksComparisonExp + typeName: ArtistWithAlbumsAndTracksBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: ArtistWithAlbumsAndTracksAggExp + operand: + object: + aggregatedType: ArtistWithAlbumsAndTracks + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: name + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: ArtistWithAlbumsAndTracksAggExp --- kind: Model @@ -133,7 +153,8 @@ definition: source: dataConnectorName: chinook collection: artists_with_albums_and_tracks - filterExpressionType: ArtistWithAlbumsAndTracksComparisonExp + filterExpressionType: ArtistWithAlbumsAndTracksBoolExp + aggregateExpression: ArtistWithAlbumsAndTracksAggExp orderableFields: - fieldName: id orderByDirections: @@ -147,11 +168,20 @@ definition: graphql: selectMany: queryRootField: artistsWithAlbumsAndTracks + subscription: + rootField: artistsWithAlbumsAndTracks selectUniques: - queryRootField: artistsWithAlbumsAndTracksById uniqueIdentifier: - id + subscription: + rootField: artistsWithAlbumsAndTracksById orderByExpressionType: ArtistsWithAlbumsAndTracksOrderBy + filterInputTypeName: ArtistsWithAlbumsAndTracksFilterInput + aggregate: + queryRootField: artistsWithAlbumsAndTracksAggregate + subscription: + rootField: artistsWithAlbumsAndTracksAggregate description: combines artist, albums, and tracks into a single document per artist --- @@ -163,4 +193,5 @@ 
definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/sample_mflix/metadata/models/Comments.hml b/fixtures/hasura/app/metadata/Comments.hml similarity index 74% rename from fixtures/hasura/sample_mflix/metadata/models/Comments.hml rename to fixtures/hasura/app/metadata/Comments.hml index f6bb1d91..ca8c80ca 100644 --- a/fixtures/hasura/sample_mflix/metadata/models/Comments.hml +++ b/fixtures/hasura/app/metadata/Comments.hml @@ -71,49 +71,58 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: CommentsComparisonExp + name: CommentsBoolExp operand: object: type: Comments comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: date - booleanExpressionType: DateComparisonExp + booleanExpressionType: DateBoolExp - fieldName: email - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: movieId - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: text - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: movie - booleanExpressionType: MoviesComparisonExp - relationshipName: user - booleanExpressionType: UsersComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: CommentsComparisonExp + typeName: CommentsBoolExp --- kind: AggregateExpression version: v1 definition: - name: CommentsAggregateExp + name: CommentsAggExp operand: object: aggregatedType: Comments aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp - fieldName: date - aggregateExpression: DateAggregateExp - count: { enable: true } + aggregateExpression: DateAggExp + - fieldName: email + aggregateExpression: StringAggExp + - fieldName: movieId + aggregateExpression: ObjectIdAggExp + - fieldName: name + aggregateExpression: StringAggExp + - fieldName: text + aggregateExpression: StringAggExp + count: + enable: true graphql: - selectTypeName: CommentsAggregateExp + selectTypeName: CommentsAggExp --- kind: Model @@ -124,8 +133,8 @@ definition: source: dataConnectorName: sample_mflix collection: comments - aggregateExpression: CommentsAggregateExp - filterExpressionType: CommentsComparisonExp + filterExpressionType: CommentsBoolExp + aggregateExpression: CommentsAggExp orderableFields: - fieldName: id orderByDirections: @@ -146,16 +155,22 @@ definition: orderByDirections: enableAll: true graphql: - aggregate: - queryRootField: commentsAggregate - filterInputTypeName: CommentsFilterInput selectMany: queryRootField: comments + subscription: + rootField: comments selectUniques: - queryRootField: commentsById uniqueIdentifier: - id + subscription: + rootField: commentsById orderByExpressionType: CommentsOrderBy + filterInputTypeName: CommentsFilterInput + aggregate: + queryRootField: commentsAggregate + subscription: + rootField: commentsAggregate --- kind: ModelPermissions @@ -166,6 +181,7 @@ definition: - role: admin select: filter: null + allowSubscriptions: true - role: user select: filter: diff --git a/fixtures/hasura/chinook/metadata/models/Customer.hml b/fixtures/hasura/app/metadata/Customer.hml similarity index 63% rename from fixtures/hasura/chinook/metadata/models/Customer.hml rename to fixtures/hasura/app/metadata/Customer.hml index 3a707bcb..b853b340 100644 --- 
a/fixtures/hasura/chinook/metadata/models/Customer.hml +++ b/fixtures/hasura/app/metadata/Customer.hml @@ -7,13 +7,13 @@ definition: - name: id type: ObjectId! - name: address - type: String + type: String! - name: city - type: String + type: String! - name: company type: String - name: country - type: String + type: String! - name: customerId type: Int! - name: email @@ -31,7 +31,7 @@ definition: - name: state type: String - name: supportRepId - type: Int + type: Int! graphql: typeName: Customer inputTypeName: CustomerInput @@ -81,7 +81,6 @@ definition: supportRepId: column: name: SupportRepId - description: Object type for collection Customer --- kind: TypePermissions @@ -111,50 +110,90 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: CustomerComparisonExp + name: CustomerBoolExp operand: object: type: Customer comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: address - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: city - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: company - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: country - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: customerId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: email - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: fax - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: firstName - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: lastName - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: phone - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: postalCode - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: state - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: supportRepId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp comparableRelationships: - relationshipName: invoices - booleanExpressionType: InvoiceComparisonExp - relationshipName: supportRep - booleanExpressionType: EmployeeComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: CustomerComparisonExp + typeName: CustomerBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: CustomerAggExp + operand: + object: + aggregatedType: Customer + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: address + aggregateExpression: StringAggExp + - fieldName: city + aggregateExpression: StringAggExp + - fieldName: company + aggregateExpression: StringAggExp + - fieldName: country + aggregateExpression: StringAggExp + - fieldName: customerId + aggregateExpression: IntAggExp + - fieldName: email + aggregateExpression: StringAggExp + - fieldName: fax + aggregateExpression: StringAggExp + - fieldName: firstName + aggregateExpression: StringAggExp + - fieldName: lastName + aggregateExpression: StringAggExp + - fieldName: phone + aggregateExpression: StringAggExp + - fieldName: postalCode + aggregateExpression: StringAggExp + - fieldName: state + aggregateExpression: StringAggExp + - fieldName: 
supportRepId + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: CustomerAggExp --- kind: Model @@ -165,7 +204,8 @@ definition: source: dataConnectorName: chinook collection: Customer - filterExpressionType: CustomerComparisonExp + filterExpressionType: CustomerBoolExp + aggregateExpression: CustomerAggExp orderableFields: - fieldName: id orderByDirections: @@ -212,11 +252,20 @@ definition: graphql: selectMany: queryRootField: customer + subscription: + rootField: customer selectUniques: - queryRootField: customerById uniqueIdentifier: - id + subscription: + rootField: customerById orderByExpressionType: CustomerOrderBy + filterInputTypeName: CustomerFilterInput + aggregate: + queryRootField: customerAggregate + subscription: + rootField: customerAggregate --- kind: ModelPermissions @@ -227,4 +276,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/app/metadata/Departments.hml b/fixtures/hasura/app/metadata/Departments.hml new file mode 100644 index 00000000..92fa76ce --- /dev/null +++ b/fixtures/hasura/app/metadata/Departments.hml @@ -0,0 +1,122 @@ +--- +kind: ObjectType +version: v1 +definition: + name: Departments + fields: + - name: id + type: ObjectId! + - name: description + type: String! + graphql: + typeName: Departments + inputTypeName: DepartmentsInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: departments + fieldMapping: + id: + column: + name: _id + description: + column: + name: description + +--- +kind: TypePermissions +version: v1 +definition: + typeName: Departments + permissions: + - role: admin + output: + allowedFields: + - id + - description + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DepartmentsBoolExp + operand: + object: + type: Departments + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: description + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DepartmentsBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: DepartmentsAggExp + operand: + object: + aggregatedType: Departments + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: description + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: DepartmentsAggExp + +--- +kind: Model +version: v1 +definition: + name: Departments + objectType: Departments + source: + dataConnectorName: test_cases + collection: departments + filterExpressionType: DepartmentsBoolExp + aggregateExpression: DepartmentsAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: description + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: departments + subscription: + rootField: departments + selectUniques: + - queryRootField: departmentsById + uniqueIdentifier: + - id + subscription: + rootField: departmentsById + orderByExpressionType: DepartmentsOrderBy + filterInputTypeName: DepartmentsFilterInput + aggregate: + queryRootField: departmentsAggregate + subscription: + rootField: departmentsAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: Departments + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/chinook/metadata/models/Employee.hml 
b/fixtures/hasura/app/metadata/Employee.hml similarity index 62% rename from fixtures/hasura/chinook/metadata/models/Employee.hml rename to fixtures/hasura/app/metadata/Employee.hml index be33d8b0..151b55c0 100644 --- a/fixtures/hasura/chinook/metadata/models/Employee.hml +++ b/fixtures/hasura/app/metadata/Employee.hml @@ -7,35 +7,35 @@ definition: - name: id type: ObjectId! - name: address - type: String + type: String! - name: birthDate - type: String + type: String! - name: city - type: String + type: String! - name: country - type: String + type: String! - name: email - type: String + type: String! - name: employeeId type: Int! - name: fax - type: String + type: String! - name: firstName type: String! - name: hireDate - type: String + type: String! - name: lastName type: String! - name: phone - type: String + type: String! - name: postalCode - type: String + type: String! - name: reportsTo type: Int - name: state - type: String + type: String! - name: title - type: String + type: String! graphql: typeName: Employee inputTypeName: EmployeeInput @@ -91,7 +91,6 @@ definition: title: column: name: Title - description: Object type for collection Employee --- kind: TypePermissions @@ -123,56 +122,99 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: EmployeeComparisonExp + name: EmployeeBoolExp operand: object: type: Employee comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: address - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: birthDate - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: city - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: country - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: email - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: employeeId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: fax - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: firstName - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: hireDate - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: lastName - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: phone - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: postalCode - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: reportsTo - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: state - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: title - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: directReports - booleanExpressionType: EmployeeComparisonExp - relationshipName: manager - booleanExpressionType: EmployeeComparisonExp - relationshipName: supportRepCustomers - booleanExpressionType: CustomerComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: EmployeeComparisonExp + typeName: EmployeeBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: EmployeeAggExp + operand: + object: + 
aggregatedType: Employee + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: address + aggregateExpression: StringAggExp + - fieldName: birthDate + aggregateExpression: StringAggExp + - fieldName: city + aggregateExpression: StringAggExp + - fieldName: country + aggregateExpression: StringAggExp + - fieldName: email + aggregateExpression: StringAggExp + - fieldName: employeeId + aggregateExpression: IntAggExp + - fieldName: fax + aggregateExpression: StringAggExp + - fieldName: firstName + aggregateExpression: StringAggExp + - fieldName: hireDate + aggregateExpression: StringAggExp + - fieldName: lastName + aggregateExpression: StringAggExp + - fieldName: phone + aggregateExpression: StringAggExp + - fieldName: postalCode + aggregateExpression: StringAggExp + - fieldName: reportsTo + aggregateExpression: IntAggExp + - fieldName: state + aggregateExpression: StringAggExp + - fieldName: title + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: EmployeeAggExp --- kind: Model @@ -183,7 +225,8 @@ definition: source: dataConnectorName: chinook collection: Employee - filterExpressionType: EmployeeComparisonExp + filterExpressionType: EmployeeBoolExp + aggregateExpression: EmployeeAggExp orderableFields: - fieldName: id orderByDirections: @@ -236,11 +279,20 @@ definition: graphql: selectMany: queryRootField: employee + subscription: + rootField: employee selectUniques: - queryRootField: employeeById uniqueIdentifier: - id + subscription: + rootField: employeeById orderByExpressionType: EmployeeOrderBy + filterInputTypeName: EmployeeFilterInput + aggregate: + queryRootField: employeeAggregate + subscription: + rootField: employeeAggregate --- kind: ModelPermissions @@ -251,4 +303,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/app/metadata/EqTitle.hml b/fixtures/hasura/app/metadata/EqTitle.hml new file mode 100644 index 00000000..587a2dbb --- /dev/null +++ b/fixtures/hasura/app/metadata/EqTitle.hml @@ -0,0 +1,352 @@ +--- +kind: ObjectType +version: v1 +definition: + name: EqTitleProjectBar + fields: + - name: foo + type: MoviesImdb! + graphql: + typeName: EqTitleProjectBar + inputTypeName: EqTitleProjectBarInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: eq_title_project_bar + +--- +kind: TypePermissions +version: v1 +definition: + typeName: EqTitleProjectBar + permissions: + - role: admin + output: + allowedFields: + - foo + +--- +kind: ObjectType +version: v1 +definition: + name: EqTitleProjectFoo + fields: + - name: bar + type: MoviesTomatoesCritic + graphql: + typeName: EqTitleProjectFoo + inputTypeName: EqTitleProjectFooInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: eq_title_project_foo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: EqTitleProjectFoo + permissions: + - role: admin + output: + allowedFields: + - bar + +--- +kind: ObjectType +version: v1 +definition: + name: EqTitleProjectWhatThe + fields: + - name: heck + type: String! 
+ graphql: + typeName: EqTitleProjectWhatThe + inputTypeName: EqTitleProjectWhatTheInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: eq_title_project_what_the + +--- +kind: TypePermissions +version: v1 +definition: + typeName: EqTitleProjectWhatThe + permissions: + - role: admin + output: + allowedFields: + - heck + +--- +kind: ObjectType +version: v1 +definition: + name: EqTitleProjectWhat + fields: + - name: the + type: EqTitleProjectWhatThe! + graphql: + typeName: EqTitleProjectWhat + inputTypeName: EqTitleProjectWhatInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: eq_title_project_what + +--- +kind: TypePermissions +version: v1 +definition: + typeName: EqTitleProjectWhat + permissions: + - role: admin + output: + allowedFields: + - the + +--- +kind: ObjectType +version: v1 +definition: + name: EqTitleProject + fields: + - name: id + type: ObjectId! + - name: bar + type: EqTitleProjectBar! + - name: foo + type: EqTitleProjectFoo! + - name: title + type: String! + - name: tomatoes + type: MoviesTomatoes + - name: what + type: EqTitleProjectWhat! + graphql: + typeName: EqTitleProject + inputTypeName: EqTitleProjectInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: eq_title_project + fieldMapping: + id: + column: + name: _id + bar: + column: + name: bar + foo: + column: + name: foo + title: + column: + name: title + tomatoes: + column: + name: tomatoes + what: + column: + name: what + +--- +kind: TypePermissions +version: v1 +definition: + typeName: EqTitleProject + permissions: + - role: admin + output: + allowedFields: + - id + - bar + - foo + - title + - tomatoes + - what + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: EqTitleProjectBarBoolExp + operand: + object: + type: EqTitleProjectBar + comparableFields: + - fieldName: foo + booleanExpressionType: MoviesImdbBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: EqTitleProjectBarBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: EqTitleProjectFooBoolExp + operand: + object: + type: EqTitleProjectFoo + comparableFields: + - fieldName: bar + booleanExpressionType: MoviesTomatoesCriticBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: EqTitleProjectFooBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: EqTitleProjectWhatTheBoolExp + operand: + object: + type: EqTitleProjectWhatThe + comparableFields: + - fieldName: heck + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: EqTitleProjectWhatTheBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: EqTitleProjectWhatBoolExp + operand: + object: + type: EqTitleProjectWhat + comparableFields: + - fieldName: the + booleanExpressionType: EqTitleProjectWhatTheBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: EqTitleProjectWhatBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: EqTitleProjectBoolExp + operand: + object: + type: EqTitleProject + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: bar + booleanExpressionType: EqTitleProjectBarBoolExp + - fieldName: foo + booleanExpressionType: 
EqTitleProjectFooBoolExp + - fieldName: title + booleanExpressionType: StringBoolExp + - fieldName: tomatoes + booleanExpressionType: MoviesTomatoesBoolExp + - fieldName: what + booleanExpressionType: EqTitleProjectWhatBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: EqTitleProjectBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: EqTitleProjectAggExp + operand: + object: + aggregatedType: EqTitleProject + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: title + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: EqTitleProjectAggExp + +--- +kind: Model +version: v1 +definition: + name: EqTitle + objectType: EqTitleProject + arguments: + - name: title + type: String! + - name: year + type: Int! + source: + dataConnectorName: sample_mflix + collection: eq_title + filterExpressionType: EqTitleProjectBoolExp + aggregateExpression: EqTitleProjectAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: bar + orderByDirections: + enableAll: true + - fieldName: foo + orderByDirections: + enableAll: true + - fieldName: title + orderByDirections: + enableAll: true + - fieldName: tomatoes + orderByDirections: + enableAll: true + - fieldName: what + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: eqTitle + subscription: + rootField: eqTitle + selectUniques: + - queryRootField: eqTitleById + uniqueIdentifier: + - id + subscription: + rootField: eqTitleById + argumentsInputType: EqTitleArguments + orderByExpressionType: EqTitleOrderBy + filterInputTypeName: EqTitleFilterInput + aggregate: + queryRootField: eqTitleAggregate + subscription: + rootField: eqTitleAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: EqTitle + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/app/metadata/ExtendedJsonTestData.hml b/fixtures/hasura/app/metadata/ExtendedJsonTestData.hml new file mode 100644 index 00000000..2e8ccba3 --- /dev/null +++ b/fixtures/hasura/app/metadata/ExtendedJsonTestData.hml @@ -0,0 +1,111 @@ +--- +kind: ObjectType +version: v1 +definition: + name: DocWithExtendedJsonValue + fields: + - name: type + type: String! 
+ - name: value + type: ExtendedJson + graphql: + typeName: DocWithExtendedJsonValue + inputTypeName: DocWithExtendedJsonValueInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: DocWithExtendedJsonValue + +--- +kind: TypePermissions +version: v1 +definition: + typeName: DocWithExtendedJsonValue + permissions: + - role: admin + output: + allowedFields: + - type + - value + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DocWithExtendedJsonValueBoolExp + operand: + object: + type: DocWithExtendedJsonValue + comparableFields: + - fieldName: type + booleanExpressionType: StringBoolExp + - fieldName: value + booleanExpressionType: ExtendedJsonBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DocWithExtendedJsonValueBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: DocWithExtendedJsonValueAggExp + operand: + object: + aggregatedType: DocWithExtendedJsonValue + aggregatableFields: + - fieldName: type + aggregateExpression: StringAggExp + - fieldName: value + aggregateExpression: ExtendedJsonAggExp + count: + enable: true + graphql: + selectTypeName: DocWithExtendedJsonValueAggExp + +--- +kind: Model +version: v1 +definition: + name: ExtendedJsonTestData + objectType: DocWithExtendedJsonValue + source: + dataConnectorName: sample_mflix + collection: extended_json_test_data + filterExpressionType: DocWithExtendedJsonValueBoolExp + aggregateExpression: DocWithExtendedJsonValueAggExp + orderableFields: + - fieldName: type + orderByDirections: + enableAll: true + - fieldName: value + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: extendedJsonTestData + subscription: + rootField: extendedJsonTestData + selectUniques: [] + orderByExpressionType: ExtendedJsonTestDataOrderBy + filterInputTypeName: ExtendedJsonTestDataFilterInput + aggregate: + queryRootField: extendedJsonTestDataAggregate + subscription: + rootField: extendedJsonTestDataAggregate + description: various values that all have the ExtendedJSON type + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: ExtendedJsonTestData + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/chinook/metadata/models/Genre.hml b/fixtures/hasura/app/metadata/Genre.hml similarity index 63% rename from fixtures/hasura/chinook/metadata/models/Genre.hml rename to fixtures/hasura/app/metadata/Genre.hml index 02f85577..a64a1ad1 100644 --- a/fixtures/hasura/chinook/metadata/models/Genre.hml +++ b/fixtures/hasura/app/metadata/Genre.hml @@ -9,7 +9,7 @@ definition: - name: genreId type: Int! - name: name - type: String + type: String! 
graphql: typeName: Genre inputTypeName: GenreInput @@ -26,7 +26,6 @@ definition: name: column: name: Name - description: Object type for collection Genre --- kind: TypePermissions @@ -45,26 +44,45 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: GenreComparisonExp + name: GenreBoolExp operand: object: type: Genre comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: genreId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: tracks - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: GenreComparisonExp + typeName: GenreBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: GenreAggExp + operand: + object: + aggregatedType: Genre + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: genreId + aggregateExpression: IntAggExp + - fieldName: name + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: GenreAggExp --- kind: Model @@ -75,7 +93,8 @@ definition: source: dataConnectorName: chinook collection: Genre - filterExpressionType: GenreComparisonExp + filterExpressionType: GenreBoolExp + aggregateExpression: GenreAggExp orderableFields: - fieldName: id orderByDirections: @@ -89,11 +108,20 @@ definition: graphql: selectMany: queryRootField: genre + subscription: + rootField: genre selectUniques: - queryRootField: genreById uniqueIdentifier: - id + subscription: + rootField: genreById orderByExpressionType: GenreOrderBy + filterInputTypeName: GenreFilterInput + aggregate: + queryRootField: genreAggregate + subscription: + rootField: genreAggregate --- kind: ModelPermissions @@ -104,4 +132,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/sample_mflix/metadata/commands/Hello.hml b/fixtures/hasura/app/metadata/Hello.hml similarity index 85% rename from fixtures/hasura/sample_mflix/metadata/commands/Hello.hml rename to fixtures/hasura/app/metadata/Hello.hml index b0c1cc4b..f5bc7a55 100644 --- a/fixtures/hasura/sample_mflix/metadata/commands/Hello.hml +++ b/fixtures/hasura/app/metadata/Hello.hml @@ -2,8 +2,7 @@ kind: Command version: v1 definition: - name: hello - description: Basic test of native queries + name: Hello outputType: String! arguments: - name: name @@ -12,17 +11,16 @@ definition: dataConnectorName: sample_mflix dataConnectorCommand: function: hello - argumentMapping: - name: name graphql: rootFieldName: hello rootFieldKind: Query + description: Basic test of native queries --- kind: CommandPermissions version: v1 definition: - commandName: hello + commandName: Hello permissions: - role: admin allowExecution: true diff --git a/fixtures/hasura/chinook/metadata/commands/InsertArtist.hml b/fixtures/hasura/app/metadata/InsertArtist.hml similarity index 80% rename from fixtures/hasura/chinook/metadata/commands/InsertArtist.hml rename to fixtures/hasura/app/metadata/InsertArtist.hml index 5988d7f3..22881d62 100644 --- a/fixtures/hasura/chinook/metadata/commands/InsertArtist.hml +++ b/fixtures/hasura/app/metadata/InsertArtist.hml @@ -1,9 +1,37 @@ +--- +kind: ObjectType +version: v1 +definition: + name: InsertArtist + fields: + - name: n + type: Int! + - name: ok + type: Double! 
+ graphql: + typeName: InsertArtist + inputTypeName: InsertArtistInput + dataConnectorTypeMapping: + - dataConnectorName: chinook + dataConnectorObjectType: InsertArtist + +--- +kind: TypePermissions +version: v1 +definition: + typeName: InsertArtist + permissions: + - role: admin + output: + allowedFields: + - n + - ok + --- kind: Command version: v1 definition: - name: insertArtist - description: Example of a database update using a native mutation + name: InsertArtist outputType: InsertArtist! arguments: - name: id @@ -14,55 +42,17 @@ definition: dataConnectorName: chinook dataConnectorCommand: procedure: insertArtist - argumentMapping: - id: id - name: name graphql: rootFieldName: insertArtist rootFieldKind: Mutation + description: Example of a database update using a native mutation --- kind: CommandPermissions version: v1 definition: - commandName: insertArtist + commandName: InsertArtist permissions: - role: admin allowExecution: true ---- -kind: ObjectType -version: v1 -definition: - name: InsertArtist - graphql: - typeName: InsertArtist - inputTypeName: InsertArtistInput - fields: - - name: ok - type: Float! - - name: n - type: Int! - dataConnectorTypeMapping: - - dataConnectorName: chinook - dataConnectorObjectType: InsertArtist - fieldMapping: - ok: - column: - name: ok - n: - column: - name: n - ---- -kind: TypePermissions -version: v1 -definition: - typeName: InsertArtist - permissions: - - role: admin - output: - allowedFields: - - ok - - n - diff --git a/fixtures/hasura/chinook/metadata/models/Invoice.hml b/fixtures/hasura/app/metadata/Invoice.hml similarity index 69% rename from fixtures/hasura/chinook/metadata/models/Invoice.hml rename to fixtures/hasura/app/metadata/Invoice.hml index f48cdd1c..9d12ec8f 100644 --- a/fixtures/hasura/chinook/metadata/models/Invoice.hml +++ b/fixtures/hasura/app/metadata/Invoice.hml @@ -7,11 +7,11 @@ definition: - name: id type: ObjectId! - name: billingAddress - type: String + type: String! - name: billingCity - type: String + type: String! - name: billingCountry - type: String + type: String! 
- name: billingPostalCode type: String - name: billingState @@ -61,7 +61,6 @@ definition: total: column: name: Total - description: Object type for collection Invoice --- kind: TypePermissions @@ -87,57 +86,74 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: InvoiceComparisonExp + name: InvoiceBoolExp operand: object: type: Invoice comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: billingAddress - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: billingCity - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: billingCountry - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: billingPostalCode - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: billingState - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: customerId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: invoiceDate - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: invoiceId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: total - booleanExpressionType: DecimalComparisonExp + booleanExpressionType: DecimalBoolExp comparableRelationships: - relationshipName: customer - booleanExpressionType: CustomerComparisonExp - relationshipName: lines - booleanExpressionType: InvoiceLineComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: InvoiceComparisonExp + typeName: InvoiceBoolExp --- kind: AggregateExpression version: v1 definition: - name: InvoiceAggregateExp + name: InvoiceAggExp operand: object: aggregatedType: Invoice aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: billingAddress + aggregateExpression: StringAggExp + - fieldName: billingCity + aggregateExpression: StringAggExp + - fieldName: billingCountry + aggregateExpression: StringAggExp + - fieldName: billingPostalCode + aggregateExpression: StringAggExp + - fieldName: billingState + aggregateExpression: StringAggExp + - fieldName: customerId + aggregateExpression: IntAggExp + - fieldName: invoiceDate + aggregateExpression: StringAggExp + - fieldName: invoiceId + aggregateExpression: IntAggExp - fieldName: total - aggregateExpression: DecimalAggregateExp - count: { enable: true } + aggregateExpression: DecimalAggExp + count: + enable: true graphql: - selectTypeName: InvoiceAggregateExp + selectTypeName: InvoiceAggExp --- kind: Model @@ -148,8 +164,8 @@ definition: source: dataConnectorName: chinook collection: Invoice - aggregateExpression: InvoiceAggregateExp - filterExpressionType: InvoiceComparisonExp + filterExpressionType: InvoiceBoolExp + aggregateExpression: InvoiceAggExp orderableFields: - fieldName: id orderByDirections: @@ -182,17 +198,22 @@ definition: orderByDirections: enableAll: true graphql: - aggregate: - queryRootField: - invoiceAggregate - filterInputTypeName: InvoiceFilterInput selectMany: queryRootField: invoice + subscription: + rootField: invoice selectUniques: - queryRootField: invoiceById uniqueIdentifier: - id + subscription: + rootField: invoiceById orderByExpressionType: InvoiceOrderBy + filterInputTypeName: InvoiceFilterInput + aggregate: + queryRootField: invoiceAggregate + subscription: + rootField: 
invoiceAggregate --- kind: ModelPermissions @@ -203,4 +224,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/models/InvoiceLine.hml b/fixtures/hasura/app/metadata/InvoiceLine.hml similarity index 71% rename from fixtures/hasura/chinook/metadata/models/InvoiceLine.hml rename to fixtures/hasura/app/metadata/InvoiceLine.hml index 223b5902..9456c12b 100644 --- a/fixtures/hasura/chinook/metadata/models/InvoiceLine.hml +++ b/fixtures/hasura/app/metadata/InvoiceLine.hml @@ -41,7 +41,6 @@ definition: unitPrice: column: name: UnitPrice - description: Object type for collection InvoiceLine --- kind: TypePermissions @@ -63,51 +62,58 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: InvoiceLineComparisonExp + name: InvoiceLineBoolExp operand: object: type: InvoiceLine comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: invoiceId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: invoiceLineId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: quantity - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: trackId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: unitPrice - booleanExpressionType: DecimalComparisonExp + booleanExpressionType: DecimalBoolExp comparableRelationships: - relationshipName: invoice - booleanExpressionType: InvoiceComparisonExp - relationshipName: track - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: InvoiceLineComparisonExp + typeName: InvoiceLineBoolExp --- kind: AggregateExpression version: v1 definition: - name: InvoiceLineAggregateExp + name: InvoiceLineAggExp operand: object: aggregatedType: InvoiceLine aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: invoiceId + aggregateExpression: IntAggExp + - fieldName: invoiceLineId + aggregateExpression: IntAggExp - fieldName: quantity - aggregateExpression: IntAggregateExp + aggregateExpression: IntAggExp + - fieldName: trackId + aggregateExpression: IntAggExp - fieldName: unitPrice - aggregateExpression: DecimalAggregateExp - count: { enable: true } + aggregateExpression: DecimalAggExp + count: + enable: true graphql: - selectTypeName: InvoiceLineAggregateExp + selectTypeName: InvoiceLineAggExp --- kind: Model @@ -118,8 +124,8 @@ definition: source: dataConnectorName: chinook collection: InvoiceLine - aggregateExpression: InvoiceLineAggregateExp - filterExpressionType: InvoiceLineComparisonExp + filterExpressionType: InvoiceLineBoolExp + aggregateExpression: InvoiceLineAggExp orderableFields: - fieldName: id orderByDirections: @@ -140,17 +146,22 @@ definition: orderByDirections: enableAll: true graphql: - aggregate: - queryRootField: - invoiceLineAggregate - filterInputTypeName: InvoiceLineFilterInput selectMany: queryRootField: invoiceLine + subscription: + rootField: invoiceLine selectUniques: - queryRootField: invoiceLineById uniqueIdentifier: - id + subscription: + rootField: invoiceLineById orderByExpressionType: InvoiceLineOrderBy + filterInputTypeName: InvoiceLineFilterInput + aggregate: + queryRootField: invoiceLineAggregate + subscription: + rootField: invoiceLineAggregate --- kind: ModelPermissions @@ -161,4 +172,5 @@ definition: - role: admin select: filter: null + 
allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/models/MediaType.hml b/fixtures/hasura/app/metadata/MediaType.hml similarity index 63% rename from fixtures/hasura/chinook/metadata/models/MediaType.hml rename to fixtures/hasura/app/metadata/MediaType.hml index 31d1153f..7c2f3c4e 100644 --- a/fixtures/hasura/chinook/metadata/models/MediaType.hml +++ b/fixtures/hasura/app/metadata/MediaType.hml @@ -9,7 +9,7 @@ definition: - name: mediaTypeId type: Int! - name: name - type: String + type: String! graphql: typeName: MediaType inputTypeName: MediaTypeInput @@ -26,7 +26,6 @@ definition: name: column: name: Name - description: Object type for collection MediaType --- kind: TypePermissions @@ -45,26 +44,45 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: MediaTypeComparisonExp + name: MediaTypeBoolExp operand: object: type: MediaType comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: mediaTypeId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: - relationshipName: tracks - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: MediaTypeComparisonExp + typeName: MediaTypeBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: MediaTypeAggExp + operand: + object: + aggregatedType: MediaType + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: mediaTypeId + aggregateExpression: IntAggExp + - fieldName: name + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: MediaTypeAggExp --- kind: Model @@ -75,7 +93,8 @@ definition: source: dataConnectorName: chinook collection: MediaType - filterExpressionType: MediaTypeComparisonExp + filterExpressionType: MediaTypeBoolExp + aggregateExpression: MediaTypeAggExp orderableFields: - fieldName: id orderByDirections: @@ -89,11 +108,20 @@ definition: graphql: selectMany: queryRootField: mediaType + subscription: + rootField: mediaType selectUniques: - queryRootField: mediaTypeById uniqueIdentifier: - id + subscription: + rootField: mediaTypeById orderByExpressionType: MediaTypeOrderBy + filterInputTypeName: MediaTypeFilterInput + aggregate: + queryRootField: mediaTypeAggregate + subscription: + rootField: mediaTypeAggregate --- kind: ModelPermissions @@ -104,4 +132,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/sample_mflix/metadata/models/Movies.hml b/fixtures/hasura/app/metadata/Movies.hml similarity index 70% rename from fixtures/hasura/sample_mflix/metadata/models/Movies.hml rename to fixtures/hasura/app/metadata/Movies.hml index 87479299..6ec310cb 100644 --- a/fixtures/hasura/sample_mflix/metadata/models/Movies.hml +++ b/fixtures/hasura/app/metadata/Movies.hml @@ -30,46 +30,6 @@ definition: - text - wins ---- -kind: BooleanExpressionType -version: v1 -definition: - name: MoviesAwardsComparisonExp - operand: - object: - type: MoviesAwards - comparableFields: - - fieldName: nominations - booleanExpressionType: IntComparisonExp - - fieldName: text - booleanExpressionType: StringComparisonExp - - fieldName: wins - booleanExpressionType: IntComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: 
MoviesAwardsComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: MoviesAwardsAggregateExp - operand: - object: - aggregatedType: MoviesAwards - aggregatableFields: - - fieldName: nominations - aggregateExpression: IntAggregateExp - - fieldName: wins - aggregateExpression: IntAggregateExp - count: { enable: true } - graphql: - selectTypeName: MoviesAwardsAggregateExp - --- kind: ObjectType version: v1 @@ -79,7 +39,7 @@ definition: - name: id type: Int! - name: rating - type: Float! + type: Double! - name: votes type: Int! graphql: @@ -102,46 +62,6 @@ definition: - rating - votes ---- -kind: BooleanExpressionType -version: v1 -definition: - name: MoviesImdbComparisonExp - operand: - object: - type: MoviesImdb - comparableFields: - - fieldName: id - booleanExpressionType: IntComparisonExp - - fieldName: rating - booleanExpressionType: FloatComparisonExp - - fieldName: votes - booleanExpressionType: IntComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: MoviesImdbComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: MoviesImdbAggregateExp - operand: - object: - aggregatedType: MoviesImdb - aggregatableFields: - - fieldName: rating - aggregateExpression: FloatAggregateExp - - fieldName: votes - aggregateExpression: IntAggregateExp - count: { enable: true } - graphql: - selectTypeName: MoviesImdbAggregateExp - --- kind: ObjectType version: v1 @@ -151,9 +71,9 @@ definition: - name: meter type: Int! - name: numReviews - type: Int! + type: Int - name: rating - type: Float! + type: Double graphql: typeName: MoviesTomatoesCritic inputTypeName: MoviesTomatoesCriticInput @@ -174,48 +94,6 @@ definition: - numReviews - rating ---- -kind: BooleanExpressionType -version: v1 -definition: - name: MoviesTomatoesCriticComparisonExp - operand: - object: - type: MoviesTomatoesCritic - comparableFields: - - fieldName: meter - booleanExpressionType: IntComparisonExp - - fieldName: numReviews - booleanExpressionType: IntComparisonExp - - fieldName: rating - booleanExpressionType: FloatComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: MoviesTomatoesCriticComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: MoviesTomatoesCriticAggregateExp - operand: - object: - aggregatedType: MoviesTomatoesCritic - aggregatableFields: - - fieldName: meter - aggregateExpression: IntAggregateExp - - fieldName: numReviews - aggregateExpression: IntAggregateExp - - fieldName: rating - aggregateExpression: FloatAggregateExp - count: { enable: true } - graphql: - selectTypeName: MoviesTomatoesCriticAggregateExp - --- kind: ObjectType version: v1 @@ -227,7 +105,7 @@ definition: - name: numReviews type: Int! - name: rating - type: Float! 
+ type: Double graphql: typeName: MoviesTomatoesViewer inputTypeName: MoviesTomatoesViewerInput @@ -248,48 +126,6 @@ definition: - numReviews - rating ---- -kind: BooleanExpressionType -version: v1 -definition: - name: MoviesTomatoesViewerComparisonExp - operand: - object: - type: MoviesTomatoesViewer - comparableFields: - - fieldName: meter - booleanExpressionType: IntComparisonExp - - fieldName: numReviews - booleanExpressionType: IntComparisonExp - - fieldName: rating - booleanExpressionType: FloatComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: MoviesTomatoesViewerComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: MoviesTomatoesViewerAggregateExp - operand: - object: - aggregatedType: MoviesTomatoesViewer - aggregatableFields: - - fieldName: meter - aggregateExpression: IntAggregateExp - - fieldName: numReviews - aggregateExpression: IntAggregateExp - - fieldName: rating - aggregateExpression: FloatAggregateExp - count: { enable: true } - graphql: - selectTypeName: MoviesTomatoesViewerAggregateExp - --- kind: ObjectType version: v1 @@ -343,68 +179,6 @@ definition: - viewer - website ---- -kind: BooleanExpressionType -version: v1 -definition: - name: MoviesTomatoesComparisonExp - operand: - object: - type: MoviesTomatoes - comparableFields: - - fieldName: boxOffice - booleanExpressionType: StringComparisonExp - - fieldName: consensus - booleanExpressionType: StringComparisonExp - - fieldName: critic - booleanExpressionType: MoviesTomatoesCriticComparisonExp - - fieldName: dvd - booleanExpressionType: DateComparisonExp - - fieldName: fresh - booleanExpressionType: IntComparisonExp - - fieldName: lastUpdated - booleanExpressionType: DateComparisonExp - - fieldName: production - booleanExpressionType: StringComparisonExp - - fieldName: rotten - booleanExpressionType: IntComparisonExp - - fieldName: viewer - booleanExpressionType: MoviesTomatoesViewerComparisonExp - - fieldName: website - booleanExpressionType: StringComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: MoviesTomatoesComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: MoviesTomatoesAggregateExp - operand: - object: - aggregatedType: MoviesTomatoes - aggregatableFields: - - fieldName: critic - aggregateExpression: MoviesTomatoesCriticAggregateExp - - fieldName: dvd - aggregateExpression: DateAggregateExp - - fieldName: fresh - aggregateExpression: IntAggregateExp - - fieldName: lastUpdated - aggregateExpression: DateAggregateExp - - fieldName: rotten - aggregateExpression: IntAggregateExp - - fieldName: viewer - aggregateExpression: MoviesTomatoesViewerAggregateExp - count: { enable: true } - graphql: - selectTypeName: MoviesTomatoesAggregateExp - --- kind: ObjectType version: v1 @@ -420,11 +194,11 @@ definition: - name: countries type: "[String!]!" - name: directors - type: "[String!]!" + type: "[String!]" - name: fullplot type: String - name: genres - type: "[String!]!" + type: "[String!]" - name: imdb type: MoviesImdb! 
- name: languages @@ -565,83 +339,340 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: MoviesComparisonExp + name: MoviesAwardsBoolExp + operand: + object: + type: MoviesAwards + comparableFields: + - fieldName: nominations + booleanExpressionType: IntBoolExp + - fieldName: text + booleanExpressionType: StringBoolExp + - fieldName: wins + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: MoviesAwardsBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: MoviesImdbBoolExp + operand: + object: + type: MoviesImdb + comparableFields: + - fieldName: id + booleanExpressionType: IntBoolExp + - fieldName: rating + booleanExpressionType: DoubleBoolExp + - fieldName: votes + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: MoviesImdbBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: MoviesTomatoesCriticBoolExp + operand: + object: + type: MoviesTomatoesCritic + comparableFields: + - fieldName: meter + booleanExpressionType: IntBoolExp + - fieldName: numReviews + booleanExpressionType: IntBoolExp + - fieldName: rating + booleanExpressionType: DoubleBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: MoviesTomatoesCriticBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: MoviesTomatoesViewerBoolExp + operand: + object: + type: MoviesTomatoesViewer + comparableFields: + - fieldName: meter + booleanExpressionType: IntBoolExp + - fieldName: numReviews + booleanExpressionType: IntBoolExp + - fieldName: rating + booleanExpressionType: DoubleBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: MoviesTomatoesViewerBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: MoviesTomatoesBoolExp + operand: + object: + type: MoviesTomatoes + comparableFields: + - fieldName: boxOffice + booleanExpressionType: StringBoolExp + - fieldName: consensus + booleanExpressionType: StringBoolExp + - fieldName: critic + booleanExpressionType: MoviesTomatoesCriticBoolExp + - fieldName: dvd + booleanExpressionType: DateBoolExp + - fieldName: fresh + booleanExpressionType: IntBoolExp + - fieldName: lastUpdated + booleanExpressionType: DateBoolExp + - fieldName: production + booleanExpressionType: StringBoolExp + - fieldName: rotten + booleanExpressionType: IntBoolExp + - fieldName: viewer + booleanExpressionType: MoviesTomatoesViewerBoolExp + - fieldName: website + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: MoviesTomatoesBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: MoviesBoolExp operand: object: type: Movies comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: awards - booleanExpressionType: MoviesAwardsComparisonExp + booleanExpressionType: MoviesAwardsBoolExp - fieldName: fullplot - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: imdb - booleanExpressionType: MoviesImdbComparisonExp + booleanExpressionType: MoviesImdbBoolExp - fieldName: lastupdated - booleanExpressionType: StringComparisonExp + 
booleanExpressionType: StringBoolExp - fieldName: metacritic - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: numMflixComments - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: plot - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: poster - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: rated - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: released - booleanExpressionType: DateComparisonExp + booleanExpressionType: DateBoolExp - fieldName: runtime - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: title - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: tomatoes - booleanExpressionType: MoviesTomatoesComparisonExp + booleanExpressionType: MoviesTomatoesBoolExp - fieldName: type - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: year - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp comparableRelationships: - relationshipName: comments - booleanExpressionType: CommentsComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: MoviesComparisonExp + typeName: MoviesBoolExp --- kind: AggregateExpression version: v1 definition: - name: MoviesAggregateExp + name: MoviesAwardsAggExp + operand: + object: + aggregatedType: MoviesAwards + aggregatableFields: + - fieldName: nominations + aggregateExpression: IntAggExp + - fieldName: text + aggregateExpression: StringAggExp + - fieldName: wins + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: MoviesAwardsAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: MoviesImdbAggExp + operand: + object: + aggregatedType: MoviesImdb + aggregatableFields: + - fieldName: id + aggregateExpression: IntAggExp + - fieldName: rating + aggregateExpression: DoubleAggExp + - fieldName: votes + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: MoviesImdbAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: MoviesTomatoesAggExp + operand: + object: + aggregatedType: MoviesTomatoes + aggregatableFields: + - fieldName: boxOffice + aggregateExpression: StringAggExp + - fieldName: consensus + aggregateExpression: StringAggExp + - fieldName: critic + aggregateExpression: MoviesTomatoesCriticAggExp + - fieldName: dvd + aggregateExpression: DateAggExp + - fieldName: fresh + aggregateExpression: IntAggExp + - fieldName: lastUpdated + aggregateExpression: DateAggExp + - fieldName: production + aggregateExpression: StringAggExp + - fieldName: rotten + aggregateExpression: IntAggExp + - fieldName: viewer + aggregateExpression: MoviesTomatoesViewerAggExp + - fieldName: website + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: MoviesTomatoesAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: MoviesTomatoesCriticAggExp + operand: + object: + aggregatedType: MoviesTomatoesCritic + aggregatableFields: + - fieldName: meter + aggregateExpression: IntAggExp + - fieldName: numReviews + aggregateExpression: IntAggExp + - fieldName: rating + aggregateExpression: DoubleAggExp + count: + enable: true + graphql: + selectTypeName: MoviesTomatoesCriticAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + 
name: MoviesTomatoesViewerAggExp + operand: + object: + aggregatedType: MoviesTomatoesViewer + aggregatableFields: + - fieldName: meter + aggregateExpression: IntAggExp + - fieldName: numReviews + aggregateExpression: IntAggExp + - fieldName: rating + aggregateExpression: DoubleAggExp + count: + enable: true + graphql: + selectTypeName: MoviesTomatoesViewerAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: MoviesAggExp operand: object: aggregatedType: Movies aggregatableFields: - # TODO: This requires updating the connector to support nested field - # aggregates - # - fieldName: awards - # aggregateExpression: MoviesAwardsAggregateExp - # - fieldName: imdb - # aggregateExpression: MoviesImdbAggregateExp + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: fullplot + aggregateExpression: StringAggExp + - fieldName: lastupdated + aggregateExpression: StringAggExp - fieldName: metacritic - aggregateExpression: IntAggregateExp + aggregateExpression: IntAggExp - fieldName: numMflixComments - aggregateExpression: IntAggregateExp + aggregateExpression: IntAggExp + - fieldName: plot + aggregateExpression: StringAggExp + - fieldName: poster + aggregateExpression: StringAggExp + - fieldName: rated + aggregateExpression: StringAggExp - fieldName: released - aggregateExpression: DateAggregateExp + aggregateExpression: DateAggExp - fieldName: runtime - aggregateExpression: IntAggregateExp - # - fieldName: tomatoes - # aggregateExpression: MoviesTomatoesAggregateExp + aggregateExpression: IntAggExp + - fieldName: title + aggregateExpression: StringAggExp + - fieldName: type + aggregateExpression: StringAggExp - fieldName: year - aggregateExpression: IntAggregateExp - count: { enable: true } + aggregateExpression: IntAggExp + - fieldName: awards + aggregateExpression: MoviesAwardsAggExp + - fieldName: imdb + aggregateExpression: MoviesImdbAggExp + - fieldName: tomatoes + aggregateExpression: MoviesTomatoesAggExp + count: + enable: true graphql: - selectTypeName: MoviesAggregateExp + selectTypeName: MoviesAggExp --- kind: Model @@ -652,8 +683,8 @@ definition: source: dataConnectorName: sample_mflix collection: movies - aggregateExpression: MoviesAggregateExp - filterExpressionType: MoviesComparisonExp + filterExpressionType: MoviesBoolExp + aggregateExpression: MoviesAggExp orderableFields: - fieldName: id orderByDirections: @@ -722,16 +753,22 @@ definition: orderByDirections: enableAll: true graphql: - aggregate: - queryRootField: moviesAggregate - filterInputTypeName: MoviesFilterInput selectMany: queryRootField: movies + subscription: + rootField: movies selectUniques: - queryRootField: moviesById uniqueIdentifier: - id + subscription: + rootField: moviesById orderByExpressionType: MoviesOrderBy + filterInputTypeName: MoviesFilterInput + aggregate: + queryRootField: moviesAggregate + subscription: + rootField: moviesAggregate --- kind: ModelPermissions @@ -742,3 +779,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/app/metadata/NativeQuery.hml b/fixtures/hasura/app/metadata/NativeQuery.hml new file mode 100644 index 00000000..c25807b4 --- /dev/null +++ b/fixtures/hasura/app/metadata/NativeQuery.hml @@ -0,0 +1,350 @@ +--- +kind: ObjectType +version: v1 +definition: + name: NativeQueryProjectBar + fields: + - name: foo + type: MoviesImdb! 
+ graphql: + typeName: NativeQueryProjectBar + inputTypeName: NativeQueryProjectBarInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: native_query_project_bar + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NativeQueryProjectBar + permissions: + - role: admin + output: + allowedFields: + - foo + +--- +kind: ObjectType +version: v1 +definition: + name: NativeQueryProjectFoo + fields: + - name: bar + type: MoviesTomatoesCritic + graphql: + typeName: NativeQueryProjectFoo + inputTypeName: NativeQueryProjectFooInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: native_query_project_foo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NativeQueryProjectFoo + permissions: + - role: admin + output: + allowedFields: + - bar + +--- +kind: ObjectType +version: v1 +definition: + name: NativeQueryProjectWhatThe + fields: + - name: heck + type: String! + graphql: + typeName: NativeQueryProjectWhatThe + inputTypeName: NativeQueryProjectWhatTheInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: native_query_project_what_the + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NativeQueryProjectWhatThe + permissions: + - role: admin + output: + allowedFields: + - heck + +--- +kind: ObjectType +version: v1 +definition: + name: NativeQueryProjectWhat + fields: + - name: the + type: NativeQueryProjectWhatThe! + graphql: + typeName: NativeQueryProjectWhat + inputTypeName: NativeQueryProjectWhatInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: native_query_project_what + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NativeQueryProjectWhat + permissions: + - role: admin + output: + allowedFields: + - the + +--- +kind: ObjectType +version: v1 +definition: + name: NativeQueryProject + fields: + - name: id + type: ObjectId! + - name: bar + type: NativeQueryProjectBar! + - name: foo + type: NativeQueryProjectFoo! + - name: title + type: String! + - name: tomatoes + type: MoviesTomatoes + - name: what + type: NativeQueryProjectWhat! 
+ graphql: + typeName: NativeQueryProject + inputTypeName: NativeQueryProjectInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: native_query_project + fieldMapping: + id: + column: + name: _id + bar: + column: + name: bar + foo: + column: + name: foo + title: + column: + name: title + tomatoes: + column: + name: tomatoes + what: + column: + name: what + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NativeQueryProject + permissions: + - role: admin + output: + allowedFields: + - id + - bar + - foo + - title + - tomatoes + - what + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NativeQueryProjectBarBoolExp + operand: + object: + type: NativeQueryProjectBar + comparableFields: + - fieldName: foo + booleanExpressionType: MoviesImdbBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NativeQueryProjectBarBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NativeQueryProjectFooBoolExp + operand: + object: + type: NativeQueryProjectFoo + comparableFields: + - fieldName: bar + booleanExpressionType: MoviesTomatoesCriticBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NativeQueryProjectFooBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NativeQueryProjectWhatTheBoolExp + operand: + object: + type: NativeQueryProjectWhatThe + comparableFields: + - fieldName: heck + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NativeQueryProjectWhatTheBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NativeQueryProjectWhatBoolExp + operand: + object: + type: NativeQueryProjectWhat + comparableFields: + - fieldName: the + booleanExpressionType: NativeQueryProjectWhatTheBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NativeQueryProjectWhatBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NativeQueryProjectBoolExp + operand: + object: + type: NativeQueryProject + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: bar + booleanExpressionType: NativeQueryProjectBarBoolExp + - fieldName: foo + booleanExpressionType: NativeQueryProjectFooBoolExp + - fieldName: title + booleanExpressionType: StringBoolExp + - fieldName: tomatoes + booleanExpressionType: MoviesTomatoesBoolExp + - fieldName: what + booleanExpressionType: NativeQueryProjectWhatBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NativeQueryProjectBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: NativeQueryProjectAggExp + operand: + object: + aggregatedType: NativeQueryProject + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: title + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: NativeQueryProjectAggExp + +--- +kind: Model +version: v1 +definition: + name: NativeQuery + objectType: NativeQueryProject + arguments: + - name: title + type: String! 
+ source: + dataConnectorName: sample_mflix + collection: native_query + filterExpressionType: NativeQueryProjectBoolExp + aggregateExpression: NativeQueryProjectAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: bar + orderByDirections: + enableAll: true + - fieldName: foo + orderByDirections: + enableAll: true + - fieldName: title + orderByDirections: + enableAll: true + - fieldName: tomatoes + orderByDirections: + enableAll: true + - fieldName: what + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: nativeQuery + subscription: + rootField: nativeQuery + selectUniques: + - queryRootField: nativeQueryById + uniqueIdentifier: + - id + subscription: + rootField: nativeQueryById + argumentsInputType: NativeQueryArguments + orderByExpressionType: NativeQueryOrderBy + filterInputTypeName: NativeQueryFilterInput + aggregate: + queryRootField: nativeQueryAggregate + subscription: + rootField: nativeQueryAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: NativeQuery + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/app/metadata/NestedCollection.hml b/fixtures/hasura/app/metadata/NestedCollection.hml new file mode 100644 index 00000000..880803e3 --- /dev/null +++ b/fixtures/hasura/app/metadata/NestedCollection.hml @@ -0,0 +1,178 @@ +--- +kind: ObjectType +version: v1 +definition: + name: NestedCollectionStaff + fields: + - name: name + type: String! + graphql: + typeName: NestedCollectionStaff + inputTypeName: NestedCollectionStaffInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: nested_collection_staff + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NestedCollectionStaff + permissions: + - role: admin + output: + allowedFields: + - name + +--- +kind: ObjectType +version: v1 +definition: + name: NestedCollection + fields: + - name: id + type: ObjectId! + - name: institution + type: String! + - name: staff + type: "[NestedCollectionStaff!]!" 
+ graphql: + typeName: NestedCollection + inputTypeName: NestedCollectionInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: nested_collection + fieldMapping: + id: + column: + name: _id + institution: + column: + name: institution + staff: + column: + name: staff + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NestedCollection + permissions: + - role: admin + output: + allowedFields: + - id + - institution + - staff + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NestedCollectionStaffBoolExp + operand: + object: + type: NestedCollectionStaff + comparableFields: + - fieldName: name + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NestedCollectionStaffBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NestedCollectionBoolExp + operand: + object: + type: NestedCollection + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: institution + booleanExpressionType: StringBoolExp + - fieldName: staff + booleanExpressionType: NestedCollectionStaffBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NestedCollectionBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: NestedCollectionAggExp + operand: + object: + aggregatedType: NestedCollection + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: institution + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: NestedCollectionAggExp + +--- +kind: Model +version: v1 +definition: + name: NestedCollection + objectType: NestedCollection + source: + dataConnectorName: test_cases + collection: nested_collection + filterExpressionType: NestedCollectionBoolExp + aggregateExpression: NestedCollectionAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: institution + orderByDirections: + enableAll: true + - fieldName: staff + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: nestedCollection + subscription: + rootField: nestedCollection + selectUniques: + - queryRootField: nestedCollectionById + uniqueIdentifier: + - id + subscription: + rootField: nestedCollectionById + orderByExpressionType: NestedCollectionOrderBy + filterInputTypeName: NestedCollectionFilterInput + aggregate: + queryRootField: nestedCollectionAggregate + subscription: + rootField: nestedCollectionAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: NestedCollection + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/app/metadata/NestedFieldWithDollar.hml b/fixtures/hasura/app/metadata/NestedFieldWithDollar.hml new file mode 100644 index 00000000..b02d7b9e --- /dev/null +++ b/fixtures/hasura/app/metadata/NestedFieldWithDollar.hml @@ -0,0 +1,169 @@ +--- +kind: ObjectType +version: v1 +definition: + name: NestedFieldWithDollarConfiguration + fields: + - name: schema + type: String + graphql: + typeName: NestedFieldWithDollarConfiguration + inputTypeName: NestedFieldWithDollarConfigurationInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: nested_field_with_dollar_configuration + fieldMapping: + schema: + column: + name: $schema + +--- +kind: TypePermissions +version: v1 
+definition: + typeName: NestedFieldWithDollarConfiguration + permissions: + - role: admin + output: + allowedFields: + - schema + +--- +kind: ObjectType +version: v1 +definition: + name: NestedFieldWithDollar + fields: + - name: id + type: ObjectId! + - name: configuration + type: NestedFieldWithDollarConfiguration! + graphql: + typeName: NestedFieldWithDollar + inputTypeName: NestedFieldWithDollarInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: nested_field_with_dollar + fieldMapping: + id: + column: + name: _id + configuration: + column: + name: configuration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NestedFieldWithDollar + permissions: + - role: admin + output: + allowedFields: + - id + - configuration + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NestedFieldWithDollarConfigurationBoolExp + operand: + object: + type: NestedFieldWithDollarConfiguration + comparableFields: + - fieldName: schema + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NestedFieldWithDollarConfigurationBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: NestedFieldWithDollarBoolExp + operand: + object: + type: NestedFieldWithDollar + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: configuration + booleanExpressionType: NestedFieldWithDollarConfigurationBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: NestedFieldWithDollarBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: NestedFieldWithDollarAggExp + operand: + object: + aggregatedType: NestedFieldWithDollar + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + count: + enable: true + graphql: + selectTypeName: NestedFieldWithDollarAggExp + +--- +kind: Model +version: v1 +definition: + name: NestedFieldWithDollar + objectType: NestedFieldWithDollar + source: + dataConnectorName: test_cases + collection: nested_field_with_dollar + filterExpressionType: NestedFieldWithDollarBoolExp + aggregateExpression: NestedFieldWithDollarAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: configuration + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: nestedFieldWithDollar + subscription: + rootField: nestedFieldWithDollar + selectUniques: + - queryRootField: nestedFieldWithDollarById + uniqueIdentifier: + - id + subscription: + rootField: nestedFieldWithDollarById + orderByExpressionType: NestedFieldWithDollarOrderBy + filterInputTypeName: NestedFieldWithDollarFilterInput + aggregate: + queryRootField: nestedFieldWithDollarAggregate + subscription: + rootField: nestedFieldWithDollarAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: NestedFieldWithDollar + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/chinook/metadata/models/Playlist.hml b/fixtures/hasura/app/metadata/Playlist.hml similarity index 63% rename from fixtures/hasura/chinook/metadata/models/Playlist.hml rename to fixtures/hasura/app/metadata/Playlist.hml index b385a502..dd966838 100644 --- a/fixtures/hasura/chinook/metadata/models/Playlist.hml +++ b/fixtures/hasura/app/metadata/Playlist.hml @@ -7,7 +7,7 @@ definition: - name: id type: ObjectId! 
- name: name - type: String + type: String! - name: playlistId type: Int! graphql: @@ -26,7 +26,6 @@ definition: playlistId: column: name: PlaylistId - description: Object type for collection Playlist --- kind: TypePermissions @@ -45,26 +44,45 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: PlaylistComparisonExp + name: PlaylistBoolExp operand: object: type: Playlist comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: playlistId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp comparableRelationships: - relationshipName: playlistTracks - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: PlaylistComparisonExp + typeName: PlaylistBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: PlaylistAggExp + operand: + object: + aggregatedType: Playlist + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: name + aggregateExpression: StringAggExp + - fieldName: playlistId + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: PlaylistAggExp --- kind: Model @@ -75,7 +93,8 @@ definition: source: dataConnectorName: chinook collection: Playlist - filterExpressionType: PlaylistComparisonExp + filterExpressionType: PlaylistBoolExp + aggregateExpression: PlaylistAggExp orderableFields: - fieldName: id orderByDirections: @@ -89,11 +108,20 @@ definition: graphql: selectMany: queryRootField: playlist + subscription: + rootField: playlist selectUniques: - queryRootField: playlistById uniqueIdentifier: - id + subscription: + rootField: playlistById orderByExpressionType: PlaylistOrderBy + filterInputTypeName: PlaylistFilterInput + aggregate: + queryRootField: playlistAggregate + subscription: + rootField: playlistAggregate --- kind: ModelPermissions @@ -104,4 +132,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/models/PlaylistTrack.hml b/fixtures/hasura/app/metadata/PlaylistTrack.hml similarity index 64% rename from fixtures/hasura/chinook/metadata/models/PlaylistTrack.hml rename to fixtures/hasura/app/metadata/PlaylistTrack.hml index 6d4107c0..973388d8 100644 --- a/fixtures/hasura/chinook/metadata/models/PlaylistTrack.hml +++ b/fixtures/hasura/app/metadata/PlaylistTrack.hml @@ -26,7 +26,6 @@ definition: trackId: column: name: TrackId - description: Object type for collection PlaylistTrack --- kind: TypePermissions @@ -45,28 +44,46 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: PlaylistTrackComparisonExp + name: PlaylistTrackBoolExp operand: object: type: PlaylistTrack comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: playlistId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: trackId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp comparableRelationships: - relationshipName: playlist - booleanExpressionType: PlaylistComparisonExp - relationshipName: track - booleanExpressionType: TrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: PlaylistTrackComparisonExp + typeName: PlaylistTrackBoolExp + +--- +kind: AggregateExpression 
+version: v1 +definition: + name: PlaylistTrackAggExp + operand: + object: + aggregatedType: PlaylistTrack + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: playlistId + aggregateExpression: IntAggExp + - fieldName: trackId + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: PlaylistTrackAggExp --- kind: Model @@ -77,7 +94,8 @@ definition: source: dataConnectorName: chinook collection: PlaylistTrack - filterExpressionType: PlaylistTrackComparisonExp + filterExpressionType: PlaylistTrackBoolExp + aggregateExpression: PlaylistTrackAggExp orderableFields: - fieldName: id orderByDirections: @@ -91,11 +109,20 @@ definition: graphql: selectMany: queryRootField: playlistTrack + subscription: + rootField: playlistTrack selectUniques: - queryRootField: playlistTrackById uniqueIdentifier: - id + subscription: + rootField: playlistTrackById orderByExpressionType: PlaylistTrackOrderBy + filterInputTypeName: PlaylistTrackFilterInput + aggregate: + queryRootField: playlistTrackAggregate + subscription: + rootField: playlistTrackAggregate --- kind: ModelPermissions @@ -106,4 +133,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/app/metadata/Schools.hml b/fixtures/hasura/app/metadata/Schools.hml new file mode 100644 index 00000000..8f5e624a --- /dev/null +++ b/fixtures/hasura/app/metadata/Schools.hml @@ -0,0 +1,210 @@ +--- +kind: ObjectType +version: v1 +definition: + name: SchoolsDepartments + fields: + - name: englishDepartmentId + type: ObjectId! + - name: mathDepartmentId + type: ObjectId! + graphql: + typeName: SchoolsDepartments + inputTypeName: SchoolsDepartmentsInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: schools_departments + fieldMapping: + englishDepartmentId: + column: + name: english_department_id + mathDepartmentId: + column: + name: math_department_id + +--- +kind: TypePermissions +version: v1 +definition: + typeName: SchoolsDepartments + permissions: + - role: admin + output: + allowedFields: + - englishDepartmentId + - mathDepartmentId + +--- +kind: ObjectType +version: v1 +definition: + name: Schools + fields: + - name: id + type: ObjectId! + - name: departments + type: SchoolsDepartments! + - name: name + type: String! 
+ graphql: + typeName: Schools + inputTypeName: SchoolsInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: schools + fieldMapping: + id: + column: + name: _id + departments: + column: + name: departments + name: + column: + name: name + +--- +kind: TypePermissions +version: v1 +definition: + typeName: Schools + permissions: + - role: admin + output: + allowedFields: + - id + - departments + - name + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: SchoolsDepartmentsBoolExp + operand: + object: + type: SchoolsDepartments + comparableFields: + - fieldName: englishDepartmentId + booleanExpressionType: ObjectIdBoolExp + - fieldName: mathDepartmentId + booleanExpressionType: ObjectIdBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: SchoolsDepartmentsBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: SchoolsBoolExp + operand: + object: + type: Schools + comparableFields: + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: departments + booleanExpressionType: SchoolsDepartmentsBoolExp + - fieldName: name + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: SchoolsBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: SchoolsDepartmentsAggExp + operand: + object: + aggregatedType: SchoolsDepartments + aggregatableFields: + - fieldName: englishDepartmentId + aggregateExpression: ObjectIdAggExp + - fieldName: mathDepartmentId + aggregateExpression: ObjectIdAggExp + count: + enable: true + graphql: + selectTypeName: SchoolsDepartmentsAggExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: SchoolsAggExp + operand: + object: + aggregatedType: Schools + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: departments + aggregateExpression: SchoolsDepartmentsAggExp + - fieldName: name + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: SchoolsAggExp + +--- +kind: Model +version: v1 +definition: + name: Schools + objectType: Schools + source: + dataConnectorName: test_cases + collection: schools + filterExpressionType: SchoolsBoolExp + aggregateExpression: SchoolsAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: departments + orderByDirections: + enableAll: true + - fieldName: name + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: schools + subscription: + rootField: schools + selectUniques: + - queryRootField: schoolsById + uniqueIdentifier: + - id + subscription: + rootField: schoolsById + orderByExpressionType: SchoolsOrderBy + filterInputTypeName: SchoolsFilterInput + aggregate: + queryRootField: schoolsAggregate + subscription: + rootField: schoolsAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: Schools + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/sample_mflix/metadata/models/Sessions.hml b/fixtures/hasura/app/metadata/Sessions.hml similarity index 63% rename from fixtures/hasura/sample_mflix/metadata/models/Sessions.hml rename to fixtures/hasura/app/metadata/Sessions.hml index 8f03b1b4..80fca216 100644 --- a/fixtures/hasura/sample_mflix/metadata/models/Sessions.hml +++ b/fixtures/hasura/app/metadata/Sessions.hml @@ -44,24 +44,44 @@ 
definition: kind: BooleanExpressionType version: v1 definition: - name: SessionsComparisonExp + name: SessionsBoolExp operand: object: type: Sessions comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: jwt - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: userId - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp comparableRelationships: [] logicalOperators: enable: true isNull: enable: true graphql: - typeName: SessionsComparisonExp + typeName: SessionsBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: SessionsAggExp + operand: + object: + aggregatedType: Sessions + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: jwt + aggregateExpression: StringAggExp + - fieldName: userId + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: SessionsAggExp --- kind: Model @@ -72,7 +92,8 @@ definition: source: dataConnectorName: sample_mflix collection: sessions - filterExpressionType: SessionsComparisonExp + filterExpressionType: SessionsBoolExp + aggregateExpression: SessionsAggExp orderableFields: - fieldName: id orderByDirections: @@ -86,11 +107,20 @@ definition: graphql: selectMany: queryRootField: sessions + subscription: + rootField: sessions selectUniques: - queryRootField: sessionsById uniqueIdentifier: - id + subscription: + rootField: sessionsById orderByExpressionType: SessionsOrderBy + filterInputTypeName: SessionsFilterInput + aggregate: + queryRootField: sessionsAggregate + subscription: + rootField: sessionsAggregate --- kind: ModelPermissions @@ -101,4 +131,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/sample_mflix/metadata/models/Theaters.hml b/fixtures/hasura/app/metadata/Theaters.hml similarity index 76% rename from fixtures/hasura/sample_mflix/metadata/models/Theaters.hml rename to fixtures/hasura/app/metadata/Theaters.hml index 2fb849f3..475594c0 100644 --- a/fixtures/hasura/sample_mflix/metadata/models/Theaters.hml +++ b/fixtures/hasura/app/metadata/Theaters.hml @@ -21,33 +21,6 @@ definition: - dataConnectorName: sample_mflix dataConnectorObjectType: theaters_location_address ---- -kind: BooleanExpressionType -version: v1 -definition: - name: TheatersLocationAddressComparisonExp - operand: - object: - type: TheatersLocationAddress - comparableFields: - - fieldName: city - booleanExpressionType: StringComparisonExp - - fieldName: state - booleanExpressionType: StringComparisonExp - - fieldName: street1 - booleanExpressionType: StringComparisonExp - - fieldName: street2 - booleanExpressionType: StringComparisonExp - - fieldName: zipcode - booleanExpressionType: StringComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: TheatersLocationAddressComparisonExp - --- kind: TypePermissions version: v1 @@ -70,7 +43,7 @@ definition: name: TheatersLocationGeo fields: - name: coordinates - type: "[Float!]!" + type: "[Double!]!" - name: type type: String! 
graphql: @@ -92,25 +65,6 @@ definition: - coordinates - type ---- -kind: BooleanExpressionType -version: v1 -definition: - name: TheatersLocationGeoComparisonExp - operand: - object: - type: TheatersLocationGeo - comparableFields: - - fieldName: type - booleanExpressionType: StringComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: TheatersLocationGeoComparisonExp - --- kind: ObjectType version: v1 @@ -140,27 +94,6 @@ definition: - address - geo ---- -kind: BooleanExpressionType -version: v1 -definition: - name: TheatersLocationComparisonExp - operand: - object: - type: TheatersLocation - comparableFields: - - fieldName: address - booleanExpressionType: TheatersLocationAddressComparisonExp - - fieldName: geo - booleanExpressionType: TheatersLocationGeoComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: TheatersLocationComparisonExp - --- kind: ObjectType version: v1 @@ -190,64 +123,126 @@ definition: column: name: theaterId +--- +kind: TypePermissions +version: v1 +definition: + typeName: Theaters + permissions: + - role: admin + output: + allowedFields: + - id + - location + - theaterId + --- kind: BooleanExpressionType version: v1 definition: - name: TheatersComparisonExp + name: TheatersLocationAddressBoolExp operand: object: - type: Theaters + type: TheatersLocationAddress comparableFields: - - fieldName: id - booleanExpressionType: ObjectIdComparisonExp - - fieldName: location - booleanExpressionType: TheatersLocationComparisonExp - - fieldName: theaterId - booleanExpressionType: IntComparisonExp + - fieldName: city + booleanExpressionType: StringBoolExp + - fieldName: state + booleanExpressionType: StringBoolExp + - fieldName: street1 + booleanExpressionType: StringBoolExp + - fieldName: street2 + booleanExpressionType: StringBoolExp + - fieldName: zipcode + booleanExpressionType: StringBoolExp comparableRelationships: [] logicalOperators: enable: true isNull: enable: true graphql: - typeName: TheatersComparisonExp + typeName: TheatersLocationAddressBoolExp --- -kind: TypePermissions +kind: BooleanExpressionType version: v1 definition: - typeName: Theaters - permissions: - - role: admin - output: - allowedFields: - - id - - location - - theaterId + name: TheatersLocationGeoBoolExp + operand: + object: + type: TheatersLocationGeo + comparableFields: + - fieldName: type + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: TheatersLocationGeoBoolExp --- kind: BooleanExpressionType version: v1 definition: - name: TheatersComparisonExp + name: TheatersLocationBoolExp + operand: + object: + type: TheatersLocation + comparableFields: + - fieldName: address + booleanExpressionType: TheatersLocationAddressBoolExp + - fieldName: geo + booleanExpressionType: TheatersLocationGeoBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: TheatersLocationBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: TheatersBoolExp operand: object: type: Theaters comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: location - booleanExpressionType: TheatersLocationComparisonExp + booleanExpressionType: TheatersLocationBoolExp - fieldName: theaterId - booleanExpressionType: IntComparisonExp + 
booleanExpressionType: IntBoolExp comparableRelationships: [] logicalOperators: enable: true isNull: enable: true graphql: - typeName: TheatersComparisonExp + typeName: TheatersBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: TheatersAggExp + operand: + object: + aggregatedType: Theaters + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: theaterId + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: TheatersAggExp --- kind: Model @@ -258,7 +253,8 @@ definition: source: dataConnectorName: sample_mflix collection: theaters - filterExpressionType: TheatersComparisonExp + filterExpressionType: TheatersBoolExp + aggregateExpression: TheatersAggExp orderableFields: - fieldName: id orderByDirections: @@ -272,11 +268,20 @@ definition: graphql: selectMany: queryRootField: theaters + subscription: + rootField: theaters selectUniques: - queryRootField: theatersById uniqueIdentifier: - id + subscription: + rootField: theatersById orderByExpressionType: TheatersOrderBy + filterInputTypeName: TheatersFilterInput + aggregate: + queryRootField: theatersAggregate + subscription: + rootField: theatersAggregate --- kind: ModelPermissions @@ -287,4 +292,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/app/metadata/TitleWordFrequency.hml b/fixtures/hasura/app/metadata/TitleWordFrequency.hml new file mode 100644 index 00000000..6f0379c2 --- /dev/null +++ b/fixtures/hasura/app/metadata/TitleWordFrequency.hml @@ -0,0 +1,122 @@ +--- +kind: ObjectType +version: v1 +definition: + name: TitleWordFrequencyGroup + fields: + - name: id + type: String! + - name: count + type: Int! + graphql: + typeName: TitleWordFrequencyGroup + inputTypeName: TitleWordFrequencyGroupInput + dataConnectorTypeMapping: + - dataConnectorName: sample_mflix + dataConnectorObjectType: title_word_frequency_group + fieldMapping: + id: + column: + name: _id + count: + column: + name: count + +--- +kind: TypePermissions +version: v1 +definition: + typeName: TitleWordFrequencyGroup + permissions: + - role: admin + output: + allowedFields: + - id + - count + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: TitleWordFrequencyGroupBoolExp + operand: + object: + type: TitleWordFrequencyGroup + comparableFields: + - fieldName: id + booleanExpressionType: StringBoolExp + - fieldName: count + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: TitleWordFrequencyGroupBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: TitleWordFrequencyGroupAggExp + operand: + object: + aggregatedType: TitleWordFrequencyGroup + aggregatableFields: + - fieldName: id + aggregateExpression: StringAggExp + - fieldName: count + aggregateExpression: IntAggExp + count: + enable: true + graphql: + selectTypeName: TitleWordFrequencyGroupAggExp + +--- +kind: Model +version: v1 +definition: + name: TitleWordFrequency + objectType: TitleWordFrequencyGroup + source: + dataConnectorName: sample_mflix + collection: title_word_frequency + filterExpressionType: TitleWordFrequencyGroupBoolExp + aggregateExpression: TitleWordFrequencyGroupAggExp + orderableFields: + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: count + orderByDirections: + enableAll: true + graphql: + selectMany: + queryRootField: titleWordFrequency + subscription: + rootField: titleWordFrequency + 
selectUniques: + - queryRootField: titleWordFrequencyById + uniqueIdentifier: + - id + subscription: + rootField: titleWordFrequencyById + orderByExpressionType: TitleWordFrequencyOrderBy + filterInputTypeName: TitleWordFrequencyFilterInput + aggregate: + queryRootField: titleWordFrequencyAggregate + subscription: + rootField: titleWordFrequencyAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: TitleWordFrequency + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/chinook/metadata/models/Track.hml b/fixtures/hasura/app/metadata/Track.hml similarity index 70% rename from fixtures/hasura/chinook/metadata/models/Track.hml rename to fixtures/hasura/app/metadata/Track.hml index 4755352d..f3a84064 100644 --- a/fixtures/hasura/chinook/metadata/models/Track.hml +++ b/fixtures/hasura/app/metadata/Track.hml @@ -7,13 +7,13 @@ definition: - name: id type: ObjectId! - name: albumId - type: Int + type: Int! - name: bytes - type: Int + type: Int! - name: composer type: String - name: genreId - type: Int + type: Int! - name: mediaTypeId type: Int! - name: milliseconds @@ -61,7 +61,6 @@ definition: unitPrice: column: name: UnitPrice - description: Object type for collection Track --- kind: TypePermissions @@ -87,67 +86,77 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: TrackComparisonExp + name: TrackBoolExp operand: object: type: Track comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: albumId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: bytes - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: composer - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: genreId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: mediaTypeId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: milliseconds - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: trackId - booleanExpressionType: IntComparisonExp + booleanExpressionType: IntBoolExp - fieldName: unitPrice - booleanExpressionType: DecimalComparisonExp + booleanExpressionType: DecimalBoolExp comparableRelationships: - relationshipName: album - booleanExpressionType: AlbumComparisonExp - relationshipName: genre - booleanExpressionType: GenreComparisonExp - relationshipName: invoiceLines - booleanExpressionType: InvoiceLineComparisonExp - relationshipName: mediaType - booleanExpressionType: MediaTypeComparisonExp - relationshipName: playlistTracks - booleanExpressionType: PlaylistTrackComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: TrackComparisonExp + typeName: TrackBoolExp --- kind: AggregateExpression version: v1 definition: - name: TrackAggregateExp + name: TrackAggExp operand: object: aggregatedType: Track aggregatableFields: - - fieldName: unitPrice - aggregateExpression: DecimalAggregateExp + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: albumId + aggregateExpression: IntAggExp - fieldName: bytes - aggregateExpression: IntAggregateExp + aggregateExpression: IntAggExp + - fieldName: composer + aggregateExpression: StringAggExp + - fieldName: genreId + 
aggregateExpression: IntAggExp + - fieldName: mediaTypeId + aggregateExpression: IntAggExp - fieldName: milliseconds - aggregateExpression: IntAggregateExp - count: { enable: true } + aggregateExpression: IntAggExp + - fieldName: name + aggregateExpression: StringAggExp + - fieldName: trackId + aggregateExpression: IntAggExp + - fieldName: unitPrice + aggregateExpression: DecimalAggExp + count: + enable: true graphql: - selectTypeName: TrackAggregateExp + selectTypeName: TrackAggExp --- kind: Model @@ -158,8 +167,8 @@ definition: source: dataConnectorName: chinook collection: Track - aggregateExpression: TrackAggregateExp - filterExpressionType: TrackComparisonExp + filterExpressionType: TrackBoolExp + aggregateExpression: TrackAggExp orderableFields: - fieldName: id orderByDirections: @@ -192,17 +201,22 @@ definition: orderByDirections: enableAll: true graphql: - aggregate: - queryRootField: - trackAggregate - filterInputTypeName: TrackFilterInput selectMany: queryRootField: track + subscription: + rootField: track selectUniques: - queryRootField: trackById uniqueIdentifier: - id + subscription: + rootField: trackById orderByExpressionType: TrackOrderBy + filterInputTypeName: TrackFilterInput + aggregate: + queryRootField: trackAggregate + subscription: + rootField: trackAggregate --- kind: ModelPermissions @@ -213,4 +227,5 @@ definition: - role: admin select: filter: null + allowSubscriptions: true diff --git a/fixtures/hasura/chinook/metadata/commands/UpdateTrackPrices.hml b/fixtures/hasura/app/metadata/UpdateTrackPrices.hml similarity index 87% rename from fixtures/hasura/chinook/metadata/commands/UpdateTrackPrices.hml rename to fixtures/hasura/app/metadata/UpdateTrackPrices.hml index 6e8f985a..51669ee5 100644 --- a/fixtures/hasura/chinook/metadata/commands/UpdateTrackPrices.hml +++ b/fixtures/hasura/app/metadata/UpdateTrackPrices.hml @@ -8,13 +8,13 @@ definition: - name: newPrice type: Decimal! - name: where - type: TrackComparisonExp! + type: TrackBoolExp! 
source: dataConnectorName: chinook dataConnectorCommand: procedure: updateTrackPrices graphql: - rootFieldName: chinook_updateTrackPrices + rootFieldName: updateTrackPrices rootFieldKind: Mutation description: Update unit price of every track that matches predicate diff --git a/fixtures/hasura/sample_mflix/metadata/models/Users.hml b/fixtures/hasura/app/metadata/Users.hml similarity index 64% rename from fixtures/hasura/sample_mflix/metadata/models/Users.hml rename to fixtures/hasura/app/metadata/Users.hml index 322daedb..e74616d8 100644 --- a/fixtures/hasura/sample_mflix/metadata/models/Users.hml +++ b/fixtures/hasura/app/metadata/Users.hml @@ -62,28 +62,51 @@ definition: kind: BooleanExpressionType version: v1 definition: - name: UsersComparisonExp + name: UsersBoolExp operand: object: type: Users comparableFields: - fieldName: id - booleanExpressionType: ObjectIdComparisonExp + booleanExpressionType: ObjectIdBoolExp - fieldName: email - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: name - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp - fieldName: password - booleanExpressionType: StringComparisonExp + booleanExpressionType: StringBoolExp + - fieldName: preferences + booleanExpressionType: UsersPreferencesBoolExp comparableRelationships: - relationshipName: comments - booleanExpressionType: CommentsComparisonExp logicalOperators: enable: true isNull: enable: true graphql: - typeName: UsersComparisonExp + typeName: UsersBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: UsersAggExp + operand: + object: + aggregatedType: Users + aggregatableFields: + - fieldName: id + aggregateExpression: ObjectIdAggExp + - fieldName: email + aggregateExpression: StringAggExp + - fieldName: name + aggregateExpression: StringAggExp + - fieldName: password + aggregateExpression: StringAggExp + count: + enable: true + graphql: + selectTypeName: UsersAggExp --- kind: Model @@ -94,7 +117,8 @@ definition: source: dataConnectorName: sample_mflix collection: users - filterExpressionType: UsersComparisonExp + filterExpressionType: UsersBoolExp + aggregateExpression: UsersAggExp orderableFields: - fieldName: id orderByDirections: @@ -114,11 +138,20 @@ definition: graphql: selectMany: queryRootField: users + subscription: + rootField: users selectUniques: - queryRootField: usersById uniqueIdentifier: - id + subscription: + rootField: usersById orderByExpressionType: UsersOrderBy + filterInputTypeName: UsersFilterInput + aggregate: + queryRootField: usersAggregate + subscription: + rootField: usersAggregate --- kind: ModelPermissions @@ -129,6 +162,7 @@ definition: - role: admin select: filter: null + allowSubscriptions: true - role: user select: filter: @@ -145,8 +179,8 @@ definition: name: UsersPreferences fields: [] graphql: - typeName: SampleMflix_UsersPreferences - inputTypeName: SampleMflix_UsersPreferencesInput + typeName: UsersPreferences + inputTypeName: UsersPreferencesInput dataConnectorTypeMapping: - dataConnectorName: sample_mflix dataConnectorObjectType: users_preferences @@ -161,3 +195,20 @@ definition: output: allowedFields: [] +--- +kind: BooleanExpressionType +version: v1 +definition: + name: UsersPreferencesBoolExp + operand: + object: + type: UsersPreferences + comparableFields: [] + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: UsersPreferencesBoolExp + diff --git a/fixtures/hasura/app/metadata/WeirdFieldNames.hml 
b/fixtures/hasura/app/metadata/WeirdFieldNames.hml new file mode 100644 index 00000000..784959b7 --- /dev/null +++ b/fixtures/hasura/app/metadata/WeirdFieldNames.hml @@ -0,0 +1,302 @@ +--- +kind: ObjectType +version: v1 +definition: + name: WeirdFieldNamesInvalidArray + fields: + - name: invalidElement + type: Int! + graphql: + typeName: WeirdFieldNamesInvalidArray + inputTypeName: WeirdFieldNamesInvalidArrayInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: weird_field_names_$invalid.array + fieldMapping: + invalidElement: + column: + name: $invalid.element + +--- +kind: TypePermissions +version: v1 +definition: + typeName: WeirdFieldNamesInvalidArray + permissions: + - role: admin + output: + allowedFields: + - invalidElement + +--- +kind: ObjectType +version: v1 +definition: + name: WeirdFieldNamesInvalidObjectName + fields: + - name: validName + type: Int! + graphql: + typeName: WeirdFieldNamesInvalidObjectName + inputTypeName: WeirdFieldNamesInvalidObjectNameInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: weird_field_names_$invalid.object.name + fieldMapping: + validName: + column: + name: valid_name + +--- +kind: TypePermissions +version: v1 +definition: + typeName: WeirdFieldNamesInvalidObjectName + permissions: + - role: admin + output: + allowedFields: + - validName + +--- +kind: ObjectType +version: v1 +definition: + name: WeirdFieldNamesValidObjectName + fields: + - name: invalidNestedName + type: Int! + graphql: + typeName: WeirdFieldNamesValidObjectName + inputTypeName: WeirdFieldNamesValidObjectNameInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: weird_field_names_valid_object_name + fieldMapping: + invalidNestedName: + column: + name: $invalid.nested.name + +--- +kind: TypePermissions +version: v1 +definition: + typeName: WeirdFieldNamesValidObjectName + permissions: + - role: admin + output: + allowedFields: + - invalidNestedName + +--- +kind: ObjectType +version: v1 +definition: + name: WeirdFieldNames + fields: + - name: invalidArray + type: "[WeirdFieldNamesInvalidArray!]!" + - name: invalidName + type: Int! + - name: invalidObjectName + type: WeirdFieldNamesInvalidObjectName! + - name: id + type: ObjectId! + - name: validObjectName + type: WeirdFieldNamesValidObjectName! 
+ graphql: + typeName: WeirdFieldNames + inputTypeName: WeirdFieldNamesInput + dataConnectorTypeMapping: + - dataConnectorName: test_cases + dataConnectorObjectType: weird_field_names + fieldMapping: + invalidArray: + column: + name: $invalid.array + invalidName: + column: + name: $invalid.name + invalidObjectName: + column: + name: $invalid.object.name + id: + column: + name: _id + validObjectName: + column: + name: valid_object_name + +--- +kind: TypePermissions +version: v1 +definition: + typeName: WeirdFieldNames + permissions: + - role: admin + output: + allowedFields: + - invalidArray + - invalidName + - invalidObjectName + - id + - validObjectName + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: WeirdFieldNamesInvalidArrayBoolExp + operand: + object: + type: WeirdFieldNamesInvalidArray + comparableFields: + - fieldName: invalidElement + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: WeirdFieldNamesInvalidArrayBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: WeirdFieldNamesInvalidObjectNameBoolExp + operand: + object: + type: WeirdFieldNamesInvalidObjectName + comparableFields: + - fieldName: validName + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: WeirdFieldNamesInvalidObjectNameBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: WeirdFieldNamesValidObjectNameBoolExp + operand: + object: + type: WeirdFieldNamesValidObjectName + comparableFields: + - fieldName: invalidNestedName + booleanExpressionType: IntBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: WeirdFieldNamesValidObjectNameBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: WeirdFieldNamesBoolExp + operand: + object: + type: WeirdFieldNames + comparableFields: + - fieldName: invalidArray + booleanExpressionType: WeirdFieldNamesInvalidArrayBoolExp + - fieldName: invalidName + booleanExpressionType: IntBoolExp + - fieldName: invalidObjectName + booleanExpressionType: WeirdFieldNamesInvalidObjectNameBoolExp + - fieldName: id + booleanExpressionType: ObjectIdBoolExp + - fieldName: validObjectName + booleanExpressionType: WeirdFieldNamesValidObjectNameBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: WeirdFieldNamesBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: WeirdFieldNamesAggExp + operand: + object: + aggregatedType: WeirdFieldNames + aggregatableFields: + - fieldName: invalidName + aggregateExpression: IntAggExp + - fieldName: id + aggregateExpression: ObjectIdAggExp + count: + enable: true + graphql: + selectTypeName: WeirdFieldNamesAggExp + +--- +kind: Model +version: v1 +definition: + name: WeirdFieldNames + objectType: WeirdFieldNames + source: + dataConnectorName: test_cases + collection: weird_field_names + filterExpressionType: WeirdFieldNamesBoolExp + aggregateExpression: WeirdFieldNamesAggExp + orderableFields: + - fieldName: invalidArray + orderByDirections: + enableAll: true + - fieldName: invalidName + orderByDirections: + enableAll: true + - fieldName: invalidObjectName + orderByDirections: + enableAll: true + - fieldName: id + orderByDirections: + enableAll: true + - fieldName: validObjectName + orderByDirections: + enableAll: true + graphql: 
+ selectMany: + queryRootField: weirdFieldNames + subscription: + rootField: weirdFieldNames + selectUniques: + - queryRootField: weirdFieldNamesById + uniqueIdentifier: + - id + subscription: + rootField: weirdFieldNamesById + orderByExpressionType: WeirdFieldNamesOrderBy + filterInputTypeName: WeirdFieldNamesFilterInput + aggregate: + queryRootField: weirdFieldNamesAggregate + subscription: + rootField: weirdFieldNamesAggregate + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: WeirdFieldNames + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/fixtures/hasura/chinook/metadata/chinook.hml b/fixtures/hasura/app/metadata/chinook.hml similarity index 66% rename from fixtures/hasura/chinook/metadata/chinook.hml rename to fixtures/hasura/app/metadata/chinook.hml index 04f844b0..1175ffaf 100644 --- a/fixtures/hasura/chinook/metadata/chinook.hml +++ b/fixtures/hasura/app/metadata/chinook.hml @@ -5,272 +5,356 @@ definition: url: readWriteUrls: read: - valueFromEnv: CHINOOK_CONNECTOR_URL + valueFromEnv: APP_CHINOOK_READ_URL write: - valueFromEnv: CHINOOK_CONNECTOR_URL + valueFromEnv: APP_CHINOOK_WRITE_URL schema: - version: v0.1 + version: v0.2 + capabilities: + version: 0.2.0 + capabilities: + query: + aggregates: {} + variables: {} + explain: {} + nested_fields: + filter_by: + nested_arrays: + contains: {} + is_empty: {} + order_by: {} + aggregates: {} + nested_collections: {} + exists: + unrelated: {} + nested_collections: {} + mutation: {} + relationships: + relation_comparisons: {} schema: scalar_types: BinData: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: BinData + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: BinData Bool: representation: type: boolean aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Bool + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Bool Date: representation: type: timestamp aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Date + type: max min: - result_type: - type: named - name: Date + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Date + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Date + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Date + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Date - _neq: + _nin: type: custom argument_type: - type: named - name: Date + type: array + element_type: + type: named + name: Date DbPointer: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: DbPointer + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: DbPointer Decimal: representation: type: bigdecimal aggregate_functions: avg: - result_type: - type: named - name: Decimal + type: average + result_type: Double count: + type: custom result_type: 
type: named name: Int max: - result_type: - type: named - name: Decimal + type: max min: - result_type: - type: named - name: Decimal + type: min sum: - result_type: - type: named - name: Decimal + type: sum + result_type: Double comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Decimal + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Decimal + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Decimal + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Decimal - _neq: + _nin: type: custom argument_type: - type: named - name: Decimal + type: array + element_type: + type: named + name: Decimal Double: representation: type: float64 aggregate_functions: avg: - result_type: - type: named - name: Double + type: average + result_type: Double count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Double + type: max min: - result_type: - type: named - name: Double + type: min sum: - result_type: - type: named - name: Double + type: sum + result_type: Double comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Double + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Double + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Double + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Double - _neq: + _nin: type: custom argument_type: - type: named - name: Double + type: array + element_type: + type: named + name: Double ExtendedJSON: representation: type: json - aggregate_functions: {} - comparison_operators: {} - Int: - representation: - type: int32 aggregate_functions: avg: + type: custom result_type: type: named - name: Int + name: ExtendedJSON count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Int + type: max min: - result_type: - type: named - name: Int + type: min sum: + type: custom result_type: type: named - name: Int + name: ExtendedJSON comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Int + type: greater_than _gte: + type: greater_than_or_equal + _in: + type: in + _iregex: type: custom argument_type: type: named - name: Int + name: Regex _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named - name: Int - _lte: + name: ExtendedJSON + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ExtendedJSON + _regex: type: custom argument_type: + type: named + name: Regex + Int: + representation: + type: int32 + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: type: named name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Long + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal _neq: type: custom argument_type: type: named name: Int + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Int Javascript: + representation: + type: string aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: {} 
JavascriptWithScope: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int @@ -280,114 +364,155 @@ definition: type: int64 aggregate_functions: avg: - result_type: - type: named - name: Long + type: average + result_type: Double count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Long + type: max min: - result_type: - type: named - name: Long + type: min sum: - result_type: - type: named - name: Long + type: sum + result_type: Long comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Long + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Long + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Long + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Long - _neq: + _nin: type: custom argument_type: - type: named - name: Long + type: array + element_type: + type: named + name: Long MaxKey: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: MaxKey + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MaxKey MinKey: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: MinKey - "Null": + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MinKey + 'Null': + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named - name: "Null" + name: 'Null' + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: 'Null' ObjectId: representation: type: string aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: ObjectId + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ObjectId Regex: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int @@ -397,133 +522,142 @@ definition: type: string aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: String + type: max min: - result_type: - type: named - name: String + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: String + type: greater_than _gte: - type: custom - argument_type: - type: named - name: String + type: greater_than_or_equal + _in: + type: in _iregex: type: custom argument_type: type: named - name: String + name: Regex _lt: - type: custom - argument_type: - type: named - name: String + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: String - _neq: + _nin: type: custom argument_type: - type: named - name: String + type: array + element_type: + type: named + name: String _regex: type: custom argument_type: type: named - name: String + name: Regex Symbol: + representation: + type: string 
aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Symbol + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Symbol Timestamp: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Timestamp + type: max min: - result_type: - type: named - name: Timestamp + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Timestamp + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Timestamp + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Timestamp + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Timestamp - _neq: + _nin: type: custom argument_type: - type: named - name: Timestamp + type: array + element_type: + type: named + name: Timestamp Undefined: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Undefined + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Undefined object_types: Album: - description: Object type for collection Album fields: - _id: - type: - type: named - name: ObjectId AlbumId: type: type: named @@ -536,12 +670,13 @@ definition: type: type: named name: String - AlbumWithTracks: - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + AlbumWithTracks: + fields: Title: type: type: named @@ -552,29 +687,28 @@ definition: element_type: type: named name: Track - Artist: - description: Object type for collection Artist - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + Artist: + fields: ArtistId: type: type: named name: Int Name: type: - type: nullable - underlying_type: - type: named - name: String - ArtistWithAlbumsAndTracks: - fields: + type: named + name: String _id: type: type: named name: ObjectId + foreign_keys: {} + ArtistWithAlbumsAndTracks: + fields: Albums: type: type: array @@ -585,25 +719,21 @@ definition: type: type: named name: String - Customer: - description: Object type for collection Customer - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + Customer: + fields: Address: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String City: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String Company: type: type: nullable @@ -612,10 +742,8 @@ definition: name: String Country: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String CustomerId: type: type: named @@ -658,83 +786,63 @@ definition: name: String SupportRepId: type: - type: nullable - underlying_type: - type: named - name: Int - Employee: - description: Object type for collection Employee - fields: + type: named + name: Int _id: type: type: named name: ObjectId + foreign_keys: {} + Employee: + fields: Address: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String BirthDate: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String City: type: - type: nullable - underlying_type: - 
type: named - name: String + type: named + name: String Country: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String Email: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String EmployeeId: type: type: named name: Int Fax: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String FirstName: type: type: named name: String HireDate: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String LastName: type: type: named name: String Phone: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String PostalCode: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String ReportsTo: type: type: nullable @@ -743,36 +851,35 @@ definition: name: Int State: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String Title: type: - type: nullable - underlying_type: - type: named - name: String - Genre: - description: Object type for collection Genre - fields: + type: named + name: String _id: type: type: named name: ObjectId + foreign_keys: {} + Genre: + fields: GenreId: type: type: named name: Int Name: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String + _id: + type: + type: named + name: ObjectId + foreign_keys: {} InsertArtist: fields: - "n": + n: type: type: named name: Int @@ -780,31 +887,21 @@ definition: type: type: named name: Double + foreign_keys: {} Invoice: - description: Object type for collection Invoice fields: - _id: - type: - type: named - name: ObjectId BillingAddress: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String BillingCity: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String BillingCountry: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String BillingPostalCode: type: type: nullable @@ -833,13 +930,13 @@ definition: type: type: named name: Decimal - InvoiceLine: - description: Object type for collection InvoiceLine - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + InvoiceLine: + fields: InvoiceId: type: type: named @@ -860,47 +957,43 @@ definition: type: type: named name: Decimal - MediaType: - description: Object type for collection MediaType - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + MediaType: + fields: MediaTypeId: type: type: named name: Int Name: type: - type: nullable - underlying_type: - type: named - name: String - Playlist: - description: Object type for collection Playlist - fields: + type: named + name: String _id: type: type: named name: ObjectId + foreign_keys: {} + Playlist: + fields: Name: type: - type: nullable - underlying_type: - type: named - name: String + type: named + name: String PlaylistId: type: type: named name: Int - PlaylistTrack: - description: Object type for collection PlaylistTrack - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + PlaylistTrack: + fields: PlaylistId: type: type: named @@ -909,25 +1002,21 @@ definition: type: type: named name: Int - Track: - description: Object type for collection Track - fields: _id: type: type: named name: ObjectId + foreign_keys: {} + Track: + fields: AlbumId: type: - type: nullable - underlying_type: - type: named - name: Int + type: named + name: Int Bytes: type: 
- type: nullable - underlying_type: - type: named - name: Int + type: named + name: Int Composer: type: type: nullable @@ -936,10 +1025,8 @@ definition: name: String GenreId: type: - type: nullable - underlying_type: - type: named - name: Int + type: named + name: Int MediaTypeId: type: type: named @@ -960,144 +1047,128 @@ definition: type: type: named name: Decimal - collections: - - name: Album - arguments: {} - type: Album - uniqueness_constraints: - Album_id: - unique_columns: - - _id - foreign_keys: {} - - name: Artist - arguments: {} - type: Artist - uniqueness_constraints: - Artist_id: - unique_columns: - - _id - foreign_keys: {} - - name: Customer - arguments: {} - type: Customer - uniqueness_constraints: - Customer_id: - unique_columns: - - _id - foreign_keys: {} - - name: Employee - arguments: {} - type: Employee - uniqueness_constraints: - Employee_id: - unique_columns: - - _id - foreign_keys: {} - - name: Genre - arguments: {} - type: Genre - uniqueness_constraints: - Genre_id: - unique_columns: - - _id - foreign_keys: {} - - name: Invoice - arguments: {} - type: Invoice - uniqueness_constraints: - Invoice_id: - unique_columns: - - _id - foreign_keys: {} - - name: InvoiceLine - arguments: {} - type: InvoiceLine - uniqueness_constraints: - InvoiceLine_id: - unique_columns: - - _id - foreign_keys: {} - - name: MediaType - arguments: {} - type: MediaType - uniqueness_constraints: - MediaType_id: - unique_columns: - - _id - foreign_keys: {} - - name: Playlist - arguments: {} - type: Playlist - uniqueness_constraints: - Playlist_id: - unique_columns: - - _id - foreign_keys: {} - - name: PlaylistTrack - arguments: {} - type: PlaylistTrack - uniqueness_constraints: - PlaylistTrack_id: - unique_columns: - - _id - foreign_keys: {} - - name: Track - arguments: {} - type: Track - uniqueness_constraints: - Track_id: - unique_columns: - - _id - foreign_keys: {} - - name: artists_with_albums_and_tracks - description: combines artist, albums, and tracks into a single document per artist - arguments: {} - type: ArtistWithAlbumsAndTracks - uniqueness_constraints: - artists_with_albums_and_tracks_id: - unique_columns: - - _id + _id: + type: + type: named + name: ObjectId foreign_keys: {} + collections: + - name: Album + arguments: {} + type: Album + uniqueness_constraints: + Album_id: + unique_columns: + - _id + - name: Artist + arguments: {} + type: Artist + uniqueness_constraints: + Artist_id: + unique_columns: + - _id + - name: Customer + arguments: {} + type: Customer + uniqueness_constraints: + Customer_id: + unique_columns: + - _id + - name: Employee + arguments: {} + type: Employee + uniqueness_constraints: + Employee_id: + unique_columns: + - _id + - name: Genre + arguments: {} + type: Genre + uniqueness_constraints: + Genre_id: + unique_columns: + - _id + - name: Invoice + arguments: {} + type: Invoice + uniqueness_constraints: + Invoice_id: + unique_columns: + - _id + - name: InvoiceLine + arguments: {} + type: InvoiceLine + uniqueness_constraints: + InvoiceLine_id: + unique_columns: + - _id + - name: MediaType + arguments: {} + type: MediaType + uniqueness_constraints: + MediaType_id: + unique_columns: + - _id + - name: Playlist + arguments: {} + type: Playlist + uniqueness_constraints: + Playlist_id: + unique_columns: + - _id + - name: PlaylistTrack + arguments: {} + type: PlaylistTrack + uniqueness_constraints: + PlaylistTrack_id: + unique_columns: + - _id + - name: Track + arguments: {} + type: Track + uniqueness_constraints: + Track_id: + unique_columns: + - _id + - name: 
artists_with_albums_and_tracks + description: combines artist, albums, and tracks into a single document per artist + arguments: {} + type: ArtistWithAlbumsAndTracks + uniqueness_constraints: + artists_with_albums_and_tracks_id: + unique_columns: + - _id functions: [] procedures: - - name: insertArtist - description: Example of a database update using a native mutation - arguments: - id: - type: - type: named - name: Int - name: - type: - type: named - name: String - result_type: - type: named - name: InsertArtist - - name: updateTrackPrices - description: Update unit price of every track that matches predicate - arguments: - newPrice: - type: - type: named - name: Decimal - where: - type: - type: predicate - object_type_name: Track - result_type: - type: named - name: InsertArtist - capabilities: - version: 0.1.5 + - name: insertArtist + description: Example of a database update using a native mutation + arguments: + id: + type: + type: named + name: Int + name: + type: + type: named + name: String + result_type: + type: named + name: InsertArtist + - name: updateTrackPrices + description: Update unit price of every track that matches predicate + arguments: + newPrice: + type: + type: named + name: Decimal + where: + type: + type: predicate + object_type_name: Track + result_type: + type: named + name: InsertArtist capabilities: query: - aggregates: {} - variables: {} - explain: {} - nested_fields: - filter_by: {} - order_by: {} - mutation: {} - relationships: - relation_comparisons: {} + aggregates: + count_scalar_type: Int diff --git a/fixtures/hasura/common/metadata/relationships/album_movie.hml b/fixtures/hasura/app/metadata/relationships/album_movie.hml similarity index 100% rename from fixtures/hasura/common/metadata/relationships/album_movie.hml rename to fixtures/hasura/app/metadata/relationships/album_movie.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/album_tracks.hml b/fixtures/hasura/app/metadata/relationships/album_tracks.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/album_tracks.hml rename to fixtures/hasura/app/metadata/relationships/album_tracks.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/artist_albums.hml b/fixtures/hasura/app/metadata/relationships/artist_albums.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/artist_albums.hml rename to fixtures/hasura/app/metadata/relationships/artist_albums.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/customer_invoices.hml b/fixtures/hasura/app/metadata/relationships/customer_invoices.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/customer_invoices.hml rename to fixtures/hasura/app/metadata/relationships/customer_invoices.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/employee_customers.hml b/fixtures/hasura/app/metadata/relationships/employee_customers.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/employee_customers.hml rename to fixtures/hasura/app/metadata/relationships/employee_customers.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/employee_employees.hml b/fixtures/hasura/app/metadata/relationships/employee_employees.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/employee_employees.hml rename to fixtures/hasura/app/metadata/relationships/employee_employees.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/genre_tracks.hml 
b/fixtures/hasura/app/metadata/relationships/genre_tracks.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/genre_tracks.hml rename to fixtures/hasura/app/metadata/relationships/genre_tracks.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/invoice_lines.hml b/fixtures/hasura/app/metadata/relationships/invoice_lines.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/invoice_lines.hml rename to fixtures/hasura/app/metadata/relationships/invoice_lines.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/media_type_tracks.hml b/fixtures/hasura/app/metadata/relationships/media_type_tracks.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/media_type_tracks.hml rename to fixtures/hasura/app/metadata/relationships/media_type_tracks.hml diff --git a/fixtures/hasura/sample_mflix/metadata/relationships/movie_comments.hml b/fixtures/hasura/app/metadata/relationships/movie_comments.hml similarity index 100% rename from fixtures/hasura/sample_mflix/metadata/relationships/movie_comments.hml rename to fixtures/hasura/app/metadata/relationships/movie_comments.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/playlist_tracks.hml b/fixtures/hasura/app/metadata/relationships/playlist_tracks.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/playlist_tracks.hml rename to fixtures/hasura/app/metadata/relationships/playlist_tracks.hml diff --git a/fixtures/hasura/chinook/metadata/relationships/track_invoice_lines.hml b/fixtures/hasura/app/metadata/relationships/track_invoice_lines.hml similarity index 100% rename from fixtures/hasura/chinook/metadata/relationships/track_invoice_lines.hml rename to fixtures/hasura/app/metadata/relationships/track_invoice_lines.hml diff --git a/fixtures/hasura/sample_mflix/metadata/relationships/user_comments.hml b/fixtures/hasura/app/metadata/relationships/user_comments.hml similarity index 100% rename from fixtures/hasura/sample_mflix/metadata/relationships/user_comments.hml rename to fixtures/hasura/app/metadata/relationships/user_comments.hml diff --git a/fixtures/hasura/sample_mflix/metadata/sample_mflix.hml b/fixtures/hasura/app/metadata/sample_mflix.hml similarity index 61% rename from fixtures/hasura/sample_mflix/metadata/sample_mflix.hml rename to fixtures/hasura/app/metadata/sample_mflix.hml index e552ce2f..b49a9f0f 100644 --- a/fixtures/hasura/sample_mflix/metadata/sample_mflix.hml +++ b/fixtures/hasura/app/metadata/sample_mflix.hml @@ -5,272 +5,356 @@ definition: url: readWriteUrls: read: - valueFromEnv: SAMPLE_MFLIX_CONNECTOR_URL + valueFromEnv: APP_SAMPLE_MFLIX_READ_URL write: - valueFromEnv: SAMPLE_MFLIX_CONNECTOR_URL + valueFromEnv: APP_SAMPLE_MFLIX_WRITE_URL schema: - version: v0.1 + version: v0.2 + capabilities: + version: 0.2.0 + capabilities: + query: + aggregates: {} + variables: {} + explain: {} + nested_fields: + filter_by: + nested_arrays: + contains: {} + is_empty: {} + order_by: {} + aggregates: {} + nested_collections: {} + exists: + unrelated: {} + nested_collections: {} + mutation: {} + relationships: + relation_comparisons: {} schema: scalar_types: BinData: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: BinData + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: BinData 
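The chinook schema above now declares two native-mutation procedures, insertArtist and updateTrackPrices. Procedures are exposed to GraphQL through Command metadata; a minimal sketch of what wiring up insertArtist could look like follows (illustrative HML that is not part of this changeset; the root field name and output type nullability are assumptions):

---
kind: Command
version: v1
definition:
  name: insertArtist
  outputType: InsertArtist!
  arguments:
    - name: id
      type: Int!
    - name: name
      type: String!
  source:
    dataConnectorName: chinook
    dataConnectorCommand:
      procedure: insertArtist
  graphql:
    rootFieldName: insertArtist
    rootFieldKind: Mutation

A matching CommandPermissions object (for example, allowing execution for the admin role) is also required before the mutation can be called.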
Bool: representation: type: boolean aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Bool + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Bool Date: representation: type: timestamp aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Date + type: max min: - result_type: - type: named - name: Date + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Date + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Date + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Date + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Date - _neq: + _nin: type: custom argument_type: - type: named - name: Date + type: array + element_type: + type: named + name: Date DbPointer: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: DbPointer + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: DbPointer Decimal: representation: type: bigdecimal aggregate_functions: avg: - result_type: - type: named - name: Decimal + type: average + result_type: Double count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Decimal + type: max min: - result_type: - type: named - name: Decimal + type: min sum: - result_type: - type: named - name: Decimal + type: sum + result_type: Double comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Decimal + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Decimal + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Decimal + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Decimal - _neq: + _nin: type: custom argument_type: - type: named - name: Decimal + type: array + element_type: + type: named + name: Decimal Double: representation: type: float64 aggregate_functions: avg: - result_type: - type: named - name: Double + type: average + result_type: Double count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Double + type: max min: - result_type: - type: named - name: Double + type: min sum: - result_type: - type: named - name: Double + type: sum + result_type: Double comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Double + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Double + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Double + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Double - _neq: + _nin: type: custom argument_type: - type: named - name: Double + type: array + element_type: + type: named + name: Double ExtendedJSON: representation: type: json - aggregate_functions: {} - comparison_operators: {} - Int: - representation: - type: int32 aggregate_functions: avg: + type: custom 
result_type: type: named - name: Int + name: ExtendedJSON count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Int + type: max min: - result_type: - type: named - name: Int + type: min sum: + type: custom result_type: type: named - name: Int + name: ExtendedJSON comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Int + type: greater_than _gte: + type: greater_than_or_equal + _in: + type: in + _iregex: type: custom argument_type: type: named - name: Int + name: Regex _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named - name: Int - _lte: + name: ExtendedJSON + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ExtendedJSON + _regex: type: custom argument_type: + type: named + name: Regex + Int: + representation: + type: int32 + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: type: named name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Long + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal _neq: type: custom argument_type: type: named name: Int + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Int Javascript: + representation: + type: string aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: {} JavascriptWithScope: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int @@ -280,114 +364,155 @@ definition: type: int64 aggregate_functions: avg: - result_type: - type: named - name: Long + type: average + result_type: Double count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Long + type: max min: - result_type: - type: named - name: Long + type: min sum: - result_type: - type: named - name: Long + type: sum + result_type: Long comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Long + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Long + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Long + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Long - _neq: + _nin: type: custom argument_type: - type: named - name: Long + type: array + element_type: + type: named + name: Long MaxKey: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: MaxKey + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MaxKey MinKey: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: MinKey - "Null": + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MinKey + 'Null': + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: 
custom argument_type: type: named - name: "Null" + name: 'Null' + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: 'Null' ObjectId: representation: type: string aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: ObjectId + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ObjectId Regex: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int @@ -397,142 +522,160 @@ definition: type: string aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: String + type: max min: - result_type: - type: named - name: String + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: String + type: greater_than _gte: - type: custom - argument_type: - type: named - name: String + type: greater_than_or_equal + _in: + type: in _iregex: type: custom argument_type: type: named - name: String + name: Regex _lt: - type: custom - argument_type: - type: named - name: String + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: String - _neq: + _nin: type: custom argument_type: - type: named - name: String + type: array + element_type: + type: named + name: String _regex: type: custom argument_type: type: named - name: String + name: Regex Symbol: + representation: + type: string aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Symbol + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Symbol Timestamp: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int max: - result_type: - type: named - name: Timestamp + type: max min: - result_type: - type: named - name: Timestamp + type: min comparison_operators: _eq: type: equal _gt: - type: custom - argument_type: - type: named - name: Timestamp + type: greater_than _gte: - type: custom - argument_type: - type: named - name: Timestamp + type: greater_than_or_equal + _in: + type: in _lt: - type: custom - argument_type: - type: named - name: Timestamp + type: less_than _lte: + type: less_than_or_equal + _neq: type: custom argument_type: type: named name: Timestamp - _neq: + _nin: type: custom argument_type: - type: named - name: Timestamp + type: array + element_type: + type: named + name: Timestamp Undefined: + representation: + type: json aggregate_functions: count: + type: custom result_type: type: named name: Int comparison_operators: _eq: type: equal + _in: + type: in _neq: type: custom argument_type: type: named name: Undefined + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Undefined object_types: - Hello: + DocWithExtendedJsonValue: fields: - __value: + type: type: type: named name: String - TitleWordFrequency: + value: + type: + type: nullable + underlying_type: + type: named + name: ExtendedJSON + foreign_keys: {} + Hello: fields: - _id: + __value: type: type: named name: String - count: - type: - type: named - name: Int + foreign_keys: {} comments: fields: _id: @@ -559,6 +702,66 @@ definition: type: type: named name: String + foreign_keys: {} + eq_title_project: 
+ fields: + _id: + type: + type: named + name: ObjectId + bar: + type: + type: named + name: eq_title_project_bar + foo: + type: + type: named + name: eq_title_project_foo + title: + type: + type: named + name: String + tomatoes: + type: + type: nullable + underlying_type: + type: named + name: movies_tomatoes + what: + type: + type: named + name: eq_title_project_what + foreign_keys: {} + eq_title_project_bar: + fields: + foo: + type: + type: named + name: movies_imdb + foreign_keys: {} + eq_title_project_foo: + fields: + bar: + type: + type: nullable + underlying_type: + type: named + name: movies_tomatoes_critic + foreign_keys: {} + eq_title_project_what: + fields: + the: + type: + type: named + name: eq_title_project_what_the + foreign_keys: {} + eq_title_project_what_the: + fields: + heck: + type: + type: named + name: String + foreign_keys: {} movies: fields: _id: @@ -585,10 +788,12 @@ definition: name: String directors: type: - type: array - element_type: - type: named - name: String + type: nullable + underlying_type: + type: array + element_type: + type: named + name: String fullplot: type: type: nullable @@ -597,10 +802,12 @@ definition: name: String genres: type: - type: array - element_type: - type: named - name: String + type: nullable + underlying_type: + type: array + element_type: + type: named + name: String imdb: type: type: named @@ -685,6 +892,7 @@ definition: type: type: named name: Int + foreign_keys: {} movies_awards: fields: nominations: @@ -699,6 +907,7 @@ definition: type: type: named name: Int + foreign_keys: {} movies_imdb: fields: id: @@ -713,6 +922,7 @@ definition: type: type: named name: Int + foreign_keys: {} movies_tomatoes: fields: boxOffice: @@ -771,6 +981,7 @@ definition: underlying_type: type: named name: String + foreign_keys: {} movies_tomatoes_critic: fields: meter: @@ -779,12 +990,17 @@ definition: name: Int numReviews: type: - type: named - name: Int + type: nullable + underlying_type: + type: named + name: Int rating: type: - type: named - name: Double + type: nullable + underlying_type: + type: named + name: Double + foreign_keys: {} movies_tomatoes_viewer: fields: meter: @@ -798,9 +1014,71 @@ definition: type: named name: Int rating: + type: + type: nullable + underlying_type: + type: named + name: Double + foreign_keys: {} + native_query_project: + fields: + _id: type: type: named - name: Double + name: ObjectId + bar: + type: + type: named + name: native_query_project_bar + foo: + type: + type: named + name: native_query_project_foo + title: + type: + type: named + name: String + tomatoes: + type: + type: nullable + underlying_type: + type: named + name: movies_tomatoes + what: + type: + type: named + name: native_query_project_what + foreign_keys: {} + native_query_project_bar: + fields: + foo: + type: + type: named + name: movies_imdb + foreign_keys: {} + native_query_project_foo: + fields: + bar: + type: + type: nullable + underlying_type: + type: named + name: movies_tomatoes_critic + foreign_keys: {} + native_query_project_what: + fields: + the: + type: + type: named + name: native_query_project_what_the + foreign_keys: {} + native_query_project_what_the: + fields: + heck: + type: + type: named + name: String + foreign_keys: {} sessions: fields: _id: @@ -815,6 +1093,7 @@ definition: type: type: named name: String + foreign_keys: {} theaters: fields: _id: @@ -829,6 +1108,7 @@ definition: type: type: named name: Int + foreign_keys: {} theaters_location: fields: address: @@ -839,6 +1119,7 @@ definition: type: type: named name: 
theaters_location_geo + foreign_keys: {} theaters_location_address: fields: city: @@ -863,6 +1144,7 @@ definition: type: type: named name: String + foreign_keys: {} theaters_location_geo: fields: coordinates: @@ -875,6 +1157,18 @@ definition: type: type: named name: String + foreign_keys: {} + title_word_frequency_group: + fields: + _id: + type: + type: named + name: String + count: + type: + type: named + name: Int + foreign_keys: {} users: fields: _id: @@ -899,80 +1193,97 @@ definition: underlying_type: type: named name: users_preferences + foreign_keys: {} users_preferences: fields: {} - collections: - - name: comments - arguments: {} - type: comments - uniqueness_constraints: - comments_id: - unique_columns: - - _id - foreign_keys: {} - - name: movies - arguments: {} - type: movies - uniqueness_constraints: - movies_id: - unique_columns: - - _id - foreign_keys: {} - - name: sessions - arguments: {} - type: sessions - uniqueness_constraints: - sessions_id: - unique_columns: - - _id - foreign_keys: {} - - name: theaters - arguments: {} - type: theaters - uniqueness_constraints: - theaters_id: - unique_columns: - - _id - foreign_keys: {} - - name: title_word_frequency - description: words appearing in movie titles with counts - arguments: {} - type: TitleWordFrequency - uniqueness_constraints: - title_word_frequency_id: - unique_columns: - - _id - foreign_keys: {} - - name: users - arguments: {} - type: users - uniqueness_constraints: - users_id: - unique_columns: - - _id foreign_keys: {} + collections: + - name: comments + arguments: {} + type: comments + uniqueness_constraints: + comments_id: + unique_columns: + - _id + - name: eq_title + arguments: + title: + type: + type: named + name: String + year: + type: + type: named + name: Int + type: eq_title_project + uniqueness_constraints: + eq_title_id: + unique_columns: + - _id + - name: extended_json_test_data + description: various values that all have the ExtendedJSON type + arguments: {} + type: DocWithExtendedJsonValue + uniqueness_constraints: {} + - name: movies + arguments: {} + type: movies + uniqueness_constraints: + movies_id: + unique_columns: + - _id + - name: native_query + arguments: + title: + type: + type: named + name: String + type: native_query_project + uniqueness_constraints: + native_query_id: + unique_columns: + - _id + - name: sessions + arguments: {} + type: sessions + uniqueness_constraints: + sessions_id: + unique_columns: + - _id + - name: theaters + arguments: {} + type: theaters + uniqueness_constraints: + theaters_id: + unique_columns: + - _id + - name: title_word_frequency + arguments: {} + type: title_word_frequency_group + uniqueness_constraints: + title_word_frequency_id: + unique_columns: + - _id + - name: users + arguments: {} + type: users + uniqueness_constraints: + users_id: + unique_columns: + - _id functions: - - name: hello - description: Basic test of native queries - arguments: - name: - type: - type: named - name: String - result_type: - type: named - name: String + - name: hello + description: Basic test of native queries + arguments: + name: + type: + type: named + name: String + result_type: + type: named + name: String procedures: [] - capabilities: - version: 0.1.5 capabilities: query: - aggregates: {} - variables: {} - explain: {} - nested_fields: - filter_by: {} - order_by: {} - mutation: {} - relationships: - relation_comparisons: {} + aggregates: + count_scalar_type: Int diff --git a/fixtures/hasura/app/metadata/test_cases.hml b/fixtures/hasura/app/metadata/test_cases.hml new file 
mode 100644 index 00000000..eaf77cf0 --- /dev/null +++ b/fixtures/hasura/app/metadata/test_cases.hml @@ -0,0 +1,833 @@ +kind: DataConnectorLink +version: v1 +definition: + name: test_cases + url: + readWriteUrls: + read: + valueFromEnv: APP_TEST_CASES_READ_URL + write: + valueFromEnv: APP_TEST_CASES_WRITE_URL + schema: + version: v0.2 + capabilities: + version: 0.2.0 + capabilities: + query: + aggregates: {} + variables: {} + explain: {} + nested_fields: + filter_by: + nested_arrays: + contains: {} + is_empty: {} + order_by: {} + aggregates: {} + nested_collections: {} + exists: + unrelated: {} + nested_collections: {} + mutation: {} + relationships: + relation_comparisons: {} + schema: + scalar_types: + BinData: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: BinData + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: BinData + Bool: + representation: + type: boolean + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: Bool + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Bool + Date: + representation: + type: timestamp + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Date + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Date + DbPointer: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: DbPointer + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: DbPointer + Decimal: + representation: + type: bigdecimal + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Double + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Decimal + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Decimal + Double: + representation: + type: float64 + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Double + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Double + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: 
Double + ExtendedJSON: + representation: + type: json + aggregate_functions: + avg: + type: custom + result_type: + type: named + name: ExtendedJSON + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + sum: + type: custom + result_type: + type: named + name: ExtendedJSON + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _iregex: + type: custom + argument_type: + type: named + name: Regex + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: ExtendedJSON + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ExtendedJSON + _regex: + type: custom + argument_type: + type: named + name: Regex + Int: + representation: + type: int32 + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Long + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Int + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Int + Javascript: + representation: + type: string + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: {} + JavascriptWithScope: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: {} + Long: + representation: + type: int64 + aggregate_functions: + avg: + type: average + result_type: Double + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + sum: + type: sum + result_type: Long + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Long + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Long + MaxKey: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: MaxKey + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MaxKey + MinKey: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: MinKey + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: MinKey + 'Null': + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: 'Null' + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: 'Null' + ObjectId: + representation: + type: string + aggregate_functions: + count: + type: custom + result_type: 
+ type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: ObjectId + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: ObjectId + Regex: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: {} + String: + representation: + type: string + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _iregex: + type: custom + argument_type: + type: named + name: Regex + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: String + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: String + _regex: + type: custom + argument_type: + type: named + name: Regex + Symbol: + representation: + type: string + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: Symbol + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Symbol + Timestamp: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + max: + type: max + min: + type: min + comparison_operators: + _eq: + type: equal + _gt: + type: greater_than + _gte: + type: greater_than_or_equal + _in: + type: in + _lt: + type: less_than + _lte: + type: less_than_or_equal + _neq: + type: custom + argument_type: + type: named + name: Timestamp + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Timestamp + Undefined: + representation: + type: json + aggregate_functions: + count: + type: custom + result_type: + type: named + name: Int + comparison_operators: + _eq: + type: equal + _in: + type: in + _neq: + type: custom + argument_type: + type: named + name: Undefined + _nin: + type: custom + argument_type: + type: array + element_type: + type: named + name: Undefined + object_types: + departments: + fields: + _id: + type: + type: named + name: ObjectId + description: + type: + type: named + name: String + foreign_keys: {} + schools: + fields: + _id: + type: + type: named + name: ObjectId + departments: + type: + type: named + name: schools_departments + name: + type: + type: named + name: String + foreign_keys: {} + schools_departments: + fields: + english_department_id: + type: + type: named + name: ObjectId + math_department_id: + type: + type: named + name: ObjectId + description: + type: + type: nullable + underlying_type: + type: named + name: String + foreign_keys: {} + nested_collection: + fields: + _id: + type: + type: named + name: ObjectId + institution: + type: + type: named + name: String + staff: + type: + type: array + element_type: + type: named + name: nested_collection_staff + foreign_keys: {} + nested_collection_staff: + fields: + name: + type: + type: named + name: String + foreign_keys: {} + nested_field_with_dollar: + fields: + _id: + type: + type: named + name: ObjectId + configuration: + type: + type: named + name: nested_field_with_dollar_configuration + foreign_keys: {} + nested_field_with_dollar_configuration: + fields: + 
$schema: + type: + type: nullable + underlying_type: + type: named + name: String + foreign_keys: {} + weird_field_names: + fields: + $invalid.array: + type: + type: array + element_type: + type: named + name: weird_field_names_$invalid.array + $invalid.name: + type: + type: named + name: Int + $invalid.object.name: + type: + type: named + name: weird_field_names_$invalid.object.name + _id: + type: + type: named + name: ObjectId + valid_object_name: + type: + type: named + name: weird_field_names_valid_object_name + foreign_keys: {} + weird_field_names_$invalid.array: + fields: + $invalid.element: + type: + type: named + name: Int + foreign_keys: {} + weird_field_names_$invalid.object.name: + fields: + valid_name: + type: + type: named + name: Int + foreign_keys: {} + weird_field_names_valid_object_name: + fields: + $invalid.nested.name: + type: + type: named + name: Int + foreign_keys: {} + collections: + - name: departments + arguments: {} + type: departments + uniqueness_constraints: + nested_field_with_dollar_id: + unique_columns: + - _id + - name: schools + arguments: {} + type: schools + uniqueness_constraints: + nested_field_with_dollar_id: + unique_columns: + - _id + - name: nested_collection + arguments: {} + type: nested_collection + uniqueness_constraints: + nested_collection_id: + unique_columns: + - _id + - name: nested_field_with_dollar + arguments: {} + type: nested_field_with_dollar + uniqueness_constraints: + nested_field_with_dollar_id: + unique_columns: + - _id + - name: weird_field_names + arguments: {} + type: weird_field_names + uniqueness_constraints: + weird_field_names_id: + unique_columns: + - _id + functions: [] + procedures: [] + capabilities: + query: + aggregates: + count_scalar_type: Int diff --git a/fixtures/hasura/app/metadata/types/date.hml b/fixtures/hasura/app/metadata/types/date.hml new file mode 100644 index 00000000..fc3cdceb --- /dev/null +++ b/fixtures/hasura/app/metadata/types/date.hml @@ -0,0 +1,85 @@ +--- +kind: ScalarType +version: v1 +definition: + name: Date + graphql: + typeName: Date + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DateBoolExp + operand: + scalar: + type: Date + comparisonOperators: + - name: _eq + argumentType: Date! + - name: _gt + argumentType: Date! + - name: _gte + argumentType: Date! + - name: _in + argumentType: "[Date!]!" + - name: _lt + argumentType: Date! + - name: _lte + argumentType: Date! + - name: _neq + argumentType: Date! + - name: _nin + argumentType: "[Date!]!" + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Date + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DateBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: Date + representation: Date + graphql: + comparisonExpressionTypeName: DateComparisonExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: DateAggExp + operand: + scalar: + aggregatedType: Date + aggregationFunctions: + - name: count + returnType: Int! 
+ - name: max + returnType: Date + - name: min + returnType: Date + dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Date + functionMapping: + count: + name: count + max: + name: max + min: + name: min + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: DateAggExp diff --git a/fixtures/hasura/app/metadata/types/decimal.hml b/fixtures/hasura/app/metadata/types/decimal.hml new file mode 100644 index 00000000..4a30e020 --- /dev/null +++ b/fixtures/hasura/app/metadata/types/decimal.hml @@ -0,0 +1,139 @@ +--- +kind: ScalarType +version: v1 +definition: + name: Decimal + graphql: + typeName: Decimal + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: Decimal + representation: Decimal + graphql: + comparisonExpressionTypeName: DecimalComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: Decimal + representation: Decimal + graphql: + comparisonExpressionTypeName: DecimalComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: Decimal + representation: Decimal + graphql: + comparisonExpressionTypeName: DecimalComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DecimalBoolExp + operand: + scalar: + type: Decimal + comparisonOperators: + - name: _eq + argumentType: Decimal! + - name: _gt + argumentType: Decimal! + - name: _gte + argumentType: Decimal! + - name: _in + argumentType: "[Decimal!]!" + - name: _lt + argumentType: Decimal! + - name: _lte + argumentType: Decimal! + - name: _neq + argumentType: Decimal! + - name: _nin + argumentType: "[Decimal!]!" + dataConnectorOperatorMapping: + - dataConnectorName: chinook + dataConnectorScalarType: Decimal + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DecimalBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: DecimalAggExp + operand: + scalar: + aggregatedType: Decimal + aggregationFunctions: + - name: avg + returnType: Double + - name: count + returnType: Int! 
+ - name: max + returnType: Decimal + - name: min + returnType: Decimal + - name: sum + returnType: Double + dataConnectorAggregationFunctionMapping: + - dataConnectorName: chinook + dataConnectorScalarType: Decimal + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: sample_mflix + dataConnectorScalarType: Decimal + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: test_cases + dataConnectorScalarType: Decimal + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: DecimalAggExp diff --git a/fixtures/hasura/app/metadata/types/double.hml b/fixtures/hasura/app/metadata/types/double.hml new file mode 100644 index 00000000..8d9ca0bc --- /dev/null +++ b/fixtures/hasura/app/metadata/types/double.hml @@ -0,0 +1,142 @@ +--- +kind: ScalarType +version: v1 +definition: + name: Double + graphql: + typeName: Double + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: Double + representation: Double + graphql: + comparisonExpressionTypeName: DoubleComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: Double + representation: Double + graphql: + comparisonExpressionTypeName: DoubleComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: Double + representation: Double + graphql: + comparisonExpressionTypeName: DoubleComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DoubleBoolExp + operand: + scalar: + type: Double + comparisonOperators: + - name: _eq + argumentType: Double! + - name: _gt + argumentType: Double! + - name: _gte + argumentType: Double! + - name: _in + argumentType: "[Double!]!" + - name: _lt + argumentType: Double! + - name: _lte + argumentType: Double! + - name: _neq + argumentType: Double! + - name: _nin + argumentType: "[Double!]!" + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Double + operatorMapping: {} + - dataConnectorName: chinook + dataConnectorScalarType: Double + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DoubleBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: DoubleAggExp + operand: + scalar: + aggregatedType: Double + aggregationFunctions: + - name: avg + returnType: Double + - name: count + returnType: Int! 
+ - name: max + returnType: Double + - name: min + returnType: Double + - name: sum + returnType: Double + dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Double + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: chinook + dataConnectorScalarType: Double + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: test_cases + dataConnectorScalarType: Double + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: DoubleAggExp diff --git a/fixtures/hasura/app/metadata/types/extendedJSON.hml b/fixtures/hasura/app/metadata/types/extendedJSON.hml new file mode 100644 index 00000000..fad40c22 --- /dev/null +++ b/fixtures/hasura/app/metadata/types/extendedJSON.hml @@ -0,0 +1,97 @@ +--- +kind: ScalarType +version: v1 +definition: + name: ExtendedJson + graphql: + typeName: ExtendedJson + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: ExtendedJsonBoolExp + operand: + scalar: + type: ExtendedJson + comparisonOperators: + - name: _eq + argumentType: ExtendedJson! + - name: _gt + argumentType: ExtendedJson! + - name: _gte + argumentType: ExtendedJson! + - name: _in + argumentType: ExtendedJson! + - name: _iregex + argumentType: String! + - name: _lt + argumentType: ExtendedJson! + - name: _lte + argumentType: ExtendedJson! + - name: _neq + argumentType: ExtendedJson! + - name: _nin + argumentType: ExtendedJson! + - name: _regex + argumentType: String! + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: ExtendedJSON + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: ExtendedJsonBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: ExtendedJSON + representation: ExtendedJson + graphql: + comparisonExpressionTypeName: ExtendedJsonComparisonExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: ExtendedJsonAggExp + operand: + scalar: + aggregatedType: ExtendedJson + aggregationFunctions: + - name: avg + returnType: ExtendedJson! + - name: count + returnType: Int! + - name: max + returnType: ExtendedJson! + - name: min + returnType: ExtendedJson! + - name: sum + returnType: ExtendedJson! 
+ dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: ExtendedJSON + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: ExtendedJsonAggExp diff --git a/fixtures/hasura/app/metadata/types/int.hml b/fixtures/hasura/app/metadata/types/int.hml new file mode 100644 index 00000000..88d6333b --- /dev/null +++ b/fixtures/hasura/app/metadata/types/int.hml @@ -0,0 +1,137 @@ +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: Int + representation: Int + graphql: + comparisonExpressionTypeName: IntComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: Int + representation: Int + graphql: + comparisonExpressionTypeName: IntComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: Int + representation: Int + graphql: + comparisonExpressionTypeName: IntComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: IntBoolExp + operand: + scalar: + type: Int + comparisonOperators: + - name: _eq + argumentType: Int! + - name: _gt + argumentType: Int! + - name: _gte + argumentType: Int! + - name: _in + argumentType: "[Int!]!" + - name: _lt + argumentType: Int! + - name: _lte + argumentType: Int! + - name: _neq + argumentType: Int! + - name: _nin + argumentType: "[Int!]!" + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Int + operatorMapping: {} + - dataConnectorName: chinook + dataConnectorScalarType: Int + operatorMapping: {} + - dataConnectorName: test_cases + dataConnectorScalarType: Int + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: IntBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: IntAggExp + operand: + scalar: + aggregatedType: Int + aggregationFunctions: + - name: avg + returnType: Double + - name: count + returnType: Int! 
+ - name: max + returnType: Int + - name: min + returnType: Int + - name: sum + returnType: Long + dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Int + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: chinook + dataConnectorScalarType: Int + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: test_cases + dataConnectorScalarType: Int + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: IntAggExp diff --git a/fixtures/hasura/app/metadata/types/long.hml b/fixtures/hasura/app/metadata/types/long.hml new file mode 100644 index 00000000..68f08e76 --- /dev/null +++ b/fixtures/hasura/app/metadata/types/long.hml @@ -0,0 +1,145 @@ +--- +kind: ScalarType +version: v1 +definition: + name: Long + graphql: + typeName: Long + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: Long + representation: Long + graphql: + comparisonExpressionTypeName: LongComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: Long + representation: Long + graphql: + comparisonExpressionTypeName: LongComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: Long + representation: Long + graphql: + comparisonExpressionTypeName: LongComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: LongBoolExp + operand: + scalar: + type: Long + comparisonOperators: + - name: _eq + argumentType: Long! + - name: _gt + argumentType: Long! + - name: _gte + argumentType: Long! + - name: _in + argumentType: "[Long!]!" + - name: _lt + argumentType: Long! + - name: _lte + argumentType: Long! + - name: _neq + argumentType: Long! + - name: _nin + argumentType: "[Long!]!" + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Long + operatorMapping: {} + - dataConnectorName: chinook + dataConnectorScalarType: Long + operatorMapping: {} + - dataConnectorName: test_cases + dataConnectorScalarType: Long + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: LongBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: LongAggExp + operand: + scalar: + aggregatedType: Long + aggregationFunctions: + - name: avg + returnType: Double + - name: count + returnType: Int! 
+ - name: max + returnType: Long + - name: min + returnType: Long + - name: sum + returnType: Long + dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: Long + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: chinook + dataConnectorScalarType: Long + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + - dataConnectorName: test_cases + dataConnectorScalarType: Long + functionMapping: + avg: + name: avg + count: + name: count + max: + name: max + min: + name: min + sum: + name: sum + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: LongAggExp diff --git a/fixtures/hasura/app/metadata/types/objectId.hml b/fixtures/hasura/app/metadata/types/objectId.hml new file mode 100644 index 00000000..80647c95 --- /dev/null +++ b/fixtures/hasura/app/metadata/types/objectId.hml @@ -0,0 +1,105 @@ +--- +kind: ScalarType +version: v1 +definition: + name: ObjectId + graphql: + typeName: ObjectId + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: ObjectId + representation: ObjectId + graphql: + comparisonExpressionTypeName: ObjectIdComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: ObjectId + representation: ObjectId + graphql: + comparisonExpressionTypeName: ObjectIdComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: ObjectId + representation: ObjectId + graphql: + comparisonExpressionTypeName: ObjectIdComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: ObjectIdBoolExp + operand: + scalar: + type: ObjectId + comparisonOperators: + - name: _eq + argumentType: ObjectId! + - name: _in + argumentType: "[ObjectId!]!" + - name: _neq + argumentType: ObjectId! + - name: _nin + argumentType: "[ObjectId!]!" + dataConnectorOperatorMapping: + - dataConnectorName: chinook + dataConnectorScalarType: ObjectId + operatorMapping: {} + - dataConnectorName: sample_mflix + dataConnectorScalarType: ObjectId + operatorMapping: {} + - dataConnectorName: test_cases + dataConnectorScalarType: ObjectId + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: ObjectIdBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: ObjectIdAggExp + operand: + scalar: + aggregatedType: ObjectId + aggregationFunctions: + - name: count + returnType: Int!
+ dataConnectorAggregationFunctionMapping: + - dataConnectorName: chinook + dataConnectorScalarType: ObjectId + functionMapping: + count: + name: count + - dataConnectorName: sample_mflix + dataConnectorScalarType: ObjectId + functionMapping: + count: + name: count + - dataConnectorName: test_cases + dataConnectorScalarType: ObjectId + functionMapping: + count: + name: count + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: ObjectIdAggExp diff --git a/fixtures/hasura/app/metadata/types/string.hml b/fixtures/hasura/app/metadata/types/string.hml new file mode 100644 index 00000000..54d1047e --- /dev/null +++ b/fixtures/hasura/app/metadata/types/string.hml @@ -0,0 +1,125 @@ +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: chinook + dataConnectorScalarType: String + representation: String + graphql: + comparisonExpressionTypeName: StringComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: sample_mflix + dataConnectorScalarType: String + representation: String + graphql: + comparisonExpressionTypeName: StringComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: test_cases + dataConnectorScalarType: String + representation: String + graphql: + comparisonExpressionTypeName: StringComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StringBoolExp + operand: + scalar: + type: String + comparisonOperators: + - name: _eq + argumentType: String! + - name: _gt + argumentType: String! + - name: _gte + argumentType: String! + - name: _in + argumentType: "[String!]!" + - name: _iregex + argumentType: String! + - name: _lt + argumentType: String! + - name: _lte + argumentType: String! + - name: _neq + argumentType: String! + - name: _nin + argumentType: "[String!]!" + - name: _regex + argumentType: String! + dataConnectorOperatorMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: String + operatorMapping: {} + - dataConnectorName: chinook + dataConnectorScalarType: String + operatorMapping: {} + - dataConnectorName: test_cases + dataConnectorScalarType: String + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StringBoolExp + +--- +kind: AggregateExpression +version: v1 +definition: + name: StringAggExp + operand: + scalar: + aggregatedType: String + aggregationFunctions: + - name: count + returnType: Int! + - name: max + returnType: String + - name: min + returnType: String + dataConnectorAggregationFunctionMapping: + - dataConnectorName: sample_mflix + dataConnectorScalarType: String + functionMapping: + count: + name: count + max: + name: max + min: + name: min + - dataConnectorName: chinook + dataConnectorScalarType: String + functionMapping: + count: + name: count + max: + name: max + min: + name: min + - dataConnectorName: test_cases + dataConnectorScalarType: String + functionMapping: + count: + name: count + max: + name: max + min: + name: min + count: + enable: true + countDistinct: + enable: true + graphql: + selectTypeName: StringAggExp diff --git a/fixtures/hasura/app/subgraph.yaml b/fixtures/hasura/app/subgraph.yaml new file mode 100644 index 00000000..a194ab54 --- /dev/null +++ b/fixtures/hasura/app/subgraph.yaml @@ -0,0 +1,29 @@ +kind: Subgraph +version: v2 +definition: + name: app + generator: + rootPath: . 
+ namingConvention: graphql + includePaths: + - metadata + envMapping: + APP_CHINOOK_READ_URL: + fromEnv: APP_CHINOOK_READ_URL + APP_CHINOOK_WRITE_URL: + fromEnv: APP_CHINOOK_WRITE_URL + APP_SAMPLE_MFLIX_READ_URL: + fromEnv: APP_SAMPLE_MFLIX_READ_URL + APP_SAMPLE_MFLIX_WRITE_URL: + fromEnv: APP_SAMPLE_MFLIX_WRITE_URL + APP_TEST_CASES_READ_URL: + fromEnv: APP_TEST_CASES_READ_URL + APP_TEST_CASES_WRITE_URL: + fromEnv: APP_TEST_CASES_WRITE_URL + connectors: + - path: connector/sample_mflix/connector.yaml + connectorLinkName: sample_mflix + - path: connector/chinook/connector.yaml + connectorLinkName: chinook + - path: connector/test_cases/connector.yaml + connectorLinkName: test_cases diff --git a/fixtures/hasura/chinook/.env.chinook b/fixtures/hasura/chinook/.env.chinook deleted file mode 100644 index b52c724f..00000000 --- a/fixtures/hasura/chinook/.env.chinook +++ /dev/null @@ -1 +0,0 @@ -CHINOOK_CONNECTOR_URL='http://localhost:7131' diff --git a/fixtures/hasura/chinook/connector/chinook/.ddnignore b/fixtures/hasura/chinook/connector/chinook/.ddnignore deleted file mode 100644 index 4c49bd78..00000000 --- a/fixtures/hasura/chinook/connector/chinook/.ddnignore +++ /dev/null @@ -1 +0,0 @@ -.env diff --git a/fixtures/hasura/chinook/connector/chinook/.env b/fixtures/hasura/chinook/connector/chinook/.env deleted file mode 100644 index ee57a147..00000000 --- a/fixtures/hasura/chinook/connector/chinook/.env +++ /dev/null @@ -1 +0,0 @@ -MONGODB_DATABASE_URI="mongodb://localhost/chinook" diff --git a/fixtures/hasura/chinook/connector/chinook/connector.yaml b/fixtures/hasura/chinook/connector/chinook/connector.yaml deleted file mode 100644 index 078bf6e8..00000000 --- a/fixtures/hasura/chinook/connector/chinook/connector.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Connector -version: v1 -definition: - name: chinook - subgraph: chinook - source: hasura/mongodb:v0.1.0 - context: . 
- envFile: .env diff --git a/fixtures/hasura/common/metadata/scalar-types/Date.hml b/fixtures/hasura/common/metadata/scalar-types/Date.hml deleted file mode 100644 index 62085c8c..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/Date.hml +++ /dev/null @@ -1,100 +0,0 @@ ---- -kind: ScalarType -version: v1 -definition: - name: Date - graphql: - typeName: Date - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: Date - representation: Date - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: Date - representation: Date - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: DateComparisonExp - operand: - scalar: - type: Date - comparisonOperators: - - name: _eq - argumentType: Date - - name: _neq - argumentType: Date - - name: _gt - argumentType: Date - - name: _gte - argumentType: Date - - name: _lt - argumentType: Date - - name: _lte - argumentType: Date - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Date - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - - dataConnectorName: sample_mflix - dataConnectorScalarType: Date - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: DateComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: DateAggregateExp - operand: - scalar: - aggregatedType: Date - aggregationFunctions: - - name: _max - returnType: Date - - name: _min - returnType: Date - dataConnectorAggregationFunctionMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Date - functionMapping: - _max: { name: max } - _min: { name: min } - - dataConnectorName: sample_mflix - dataConnectorScalarType: Date - functionMapping: - _max: { name: max } - _min: { name: min } - count: { enable: true } - countDistinct: { enable: true } - graphql: - selectTypeName: DateAggregateExp - diff --git a/fixtures/hasura/common/metadata/scalar-types/Decimal.hml b/fixtures/hasura/common/metadata/scalar-types/Decimal.hml deleted file mode 100644 index 1b1eb061..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/Decimal.hml +++ /dev/null @@ -1,107 +0,0 @@ ---- -kind: ScalarType -version: v1 -definition: - name: Decimal - graphql: - typeName: Decimal - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: Decimal - representation: Decimal - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: Decimal - representation: Decimal - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: DecimalComparisonExp - operand: - scalar: - type: Decimal - comparisonOperators: - - name: _eq - argumentType: Decimal - - name: _neq - argumentType: Decimal - - name: _gt - argumentType: Decimal - - name: _gte - argumentType: Decimal - - name: _lt - argumentType: Decimal - - name: _lte - argumentType: Decimal - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Decimal - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - - dataConnectorName: sample_mflix - dataConnectorScalarType: Decimal - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - 
_lte: _lte - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: DecimalComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: DecimalAggregateExp - operand: - scalar: - aggregatedType: Decimal - aggregationFunctions: - - name: _avg - returnType: Decimal - - name: _max - returnType: Decimal - - name: _min - returnType: Decimal - - name: _sum - returnType: Decimal - dataConnectorAggregationFunctionMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Decimal - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - - dataConnectorName: sample_mflix - dataConnectorScalarType: Decimal - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - count: { enable: true } - countDistinct: { enable: true } - graphql: - selectTypeName: DecimalAggregateExp diff --git a/fixtures/hasura/common/metadata/scalar-types/Double.hml b/fixtures/hasura/common/metadata/scalar-types/Double.hml deleted file mode 100644 index 7d4af850..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/Double.hml +++ /dev/null @@ -1,99 +0,0 @@ ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: Double - representation: Float - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: Double - representation: Float - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: FloatComparisonExp - operand: - scalar: - type: Float - comparisonOperators: - - name: _eq - argumentType: Float - - name: _neq - argumentType: Float - - name: _gt - argumentType: Float - - name: _gte - argumentType: Float - - name: _lt - argumentType: Float - - name: _lte - argumentType: Float - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Double - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - - dataConnectorName: sample_mflix - dataConnectorScalarType: Double - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: DoubleComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: FloatAggregateExp - operand: - scalar: - aggregatedType: Float - aggregationFunctions: - - name: _avg - returnType: Float - - name: _max - returnType: Float - - name: _min - returnType: Float - - name: _sum - returnType: Float - dataConnectorAggregationFunctionMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Double - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - - dataConnectorName: sample_mflix - dataConnectorScalarType: Double - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - count: { enable: true } - countDistinct: { enable: true } - graphql: - selectTypeName: FloatAggregateExp diff --git a/fixtures/hasura/common/metadata/scalar-types/ExtendedJSON.hml b/fixtures/hasura/common/metadata/scalar-types/ExtendedJSON.hml deleted file mode 100644 index 37ced137..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/ExtendedJSON.hml +++ /dev/null @@ -1,23 +0,0 @@ ---- -kind: ScalarType -version: v1 -definition: - name: ExtendedJSON - graphql: - typeName: ExtendedJSON - ---- -kind: 
DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: ExtendedJSON - representation: ExtendedJSON - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: ExtendedJSON - representation: ExtendedJSON diff --git a/fixtures/hasura/common/metadata/scalar-types/Int.hml b/fixtures/hasura/common/metadata/scalar-types/Int.hml deleted file mode 100644 index d5d7b0bd..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/Int.hml +++ /dev/null @@ -1,99 +0,0 @@ ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: Int - representation: Int - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: Int - representation: Int - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: IntComparisonExp - operand: - scalar: - type: Int - comparisonOperators: - - name: _eq - argumentType: Int - - name: _neq - argumentType: Int - - name: _gt - argumentType: Int - - name: _gte - argumentType: Int - - name: _lt - argumentType: Int - - name: _lte - argumentType: Int - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Int - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - - dataConnectorName: sample_mflix - dataConnectorScalarType: Int - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: IntComparisonExp - ---- -kind: AggregateExpression -version: v1 -definition: - name: IntAggregateExp - operand: - scalar: - aggregatedType: Int - aggregationFunctions: - - name: _avg - returnType: Int - - name: _max - returnType: Int - - name: _min - returnType: Int - - name: _sum - returnType: Int - dataConnectorAggregationFunctionMapping: - - dataConnectorName: chinook - dataConnectorScalarType: Int - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - - dataConnectorName: sample_mflix - dataConnectorScalarType: Int - functionMapping: - _avg: { name: avg } - _max: { name: max } - _min: { name: min } - _sum: { name: sum } - count: { enable: true } - countDistinct: { enable: true } - graphql: - selectTypeName: IntAggregateExp diff --git a/fixtures/hasura/common/metadata/scalar-types/ObjectId.hml b/fixtures/hasura/common/metadata/scalar-types/ObjectId.hml deleted file mode 100644 index d89d0ca8..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/ObjectId.hml +++ /dev/null @@ -1,54 +0,0 @@ ---- -kind: ScalarType -version: v1 -definition: - name: ObjectId - graphql: - typeName: ObjectId - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: ObjectId - representation: ObjectId - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: ObjectId - representation: ObjectId - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: ObjectIdComparisonExp - operand: - scalar: - type: ObjectId - comparisonOperators: - - name: _eq - argumentType: ObjectId - - name: _neq - argumentType: ObjectId - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: ObjectId - operatorMapping: - _eq: _eq 
- _neq: _neq - - dataConnectorName: sample_mflix - dataConnectorScalarType: ObjectId - operatorMapping: - _eq: _eq - _neq: _neq - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: ObjectIdComparisonExp diff --git a/fixtures/hasura/common/metadata/scalar-types/String.hml b/fixtures/hasura/common/metadata/scalar-types/String.hml deleted file mode 100644 index fb03feb4..00000000 --- a/fixtures/hasura/common/metadata/scalar-types/String.hml +++ /dev/null @@ -1,70 +0,0 @@ ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: chinook - dataConnectorScalarType: String - representation: String - ---- -kind: DataConnectorScalarRepresentation -version: v1 -definition: - dataConnectorName: sample_mflix - dataConnectorScalarType: String - representation: String - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: StringComparisonExp - operand: - scalar: - type: String - comparisonOperators: - - name: _eq - argumentType: String - - name: _neq - argumentType: String - - name: _gt - argumentType: String - - name: _gte - argumentType: String - - name: _lt - argumentType: String - - name: _lte - argumentType: String - - name: _regex - argumentType: String - - name: _iregex - argumentType: String - dataConnectorOperatorMapping: - - dataConnectorName: chinook - dataConnectorScalarType: String - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - _regex: _regex - _iregex: _iregex - - dataConnectorName: sample_mflix - dataConnectorScalarType: String - operatorMapping: - _eq: _eq - _neq: _neq - _gt: _gt - _gte: _gte - _lt: _lt - _lte: _lte - _regex: _regex - _iregex: _iregex - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: StringComparisonExp diff --git a/fixtures/hasura/compose.yaml b/fixtures/hasura/compose.yaml new file mode 100644 index 00000000..443d0742 --- /dev/null +++ b/fixtures/hasura/compose.yaml @@ -0,0 +1,41 @@ +include: + - path: app/connector/sample_mflix/compose.yaml + - path: app/connector/chinook/compose.yaml + - path: app/connector/test_cases/compose.yaml +services: + engine: + build: + context: engine + dockerfile: Dockerfile.engine + pull: true + environment: + AUTHN_CONFIG_PATH: /md/auth_config.json + ENABLE_CORS: "true" + ENABLE_SQL_INTERFACE: "true" + INTROSPECTION_METADATA_FILE: /md/metadata.json + METADATA_PATH: /md/open_dd.json + OTLP_ENDPOINT: http://local.hasura.dev:4317 + extra_hosts: + - local.hasura.dev:host-gateway + labels: + io.hasura.ddn.service-name: engine + ports: + - 3280:3000 + mongodb: + container_name: mongodb + image: mongo:latest + ports: + - 27017:27017 + volumes: + - ../mongodb:/docker-entrypoint-initdb.d:ro + otel-collector: + command: + - --config=/etc/otel-collector-config.yaml + environment: + HASURA_DDN_PAT: ${HASURA_DDN_PAT} + image: otel/opentelemetry-collector:0.104.0 + ports: + - 4317:4317 + - 4318:4318 + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml diff --git a/fixtures/hasura/engine/.env.engine b/fixtures/hasura/engine/.env.engine deleted file mode 100644 index 14d6bfc3..00000000 --- a/fixtures/hasura/engine/.env.engine +++ /dev/null @@ -1,5 +0,0 @@ -METADATA_PATH=/md/open_dd.json -AUTHN_CONFIG_PATH=/md/auth_config.json -INTROSPECTION_METADATA_FILE=/md/metadata.json -OTLP_ENDPOINT=http://local.hasura.dev:4317 -ENABLE_CORS=true diff --git a/fixtures/hasura/engine/Dockerfile.engine b/fixtures/hasura/engine/Dockerfile.engine new file mode 100644 index 00000000..3613f0ec 
--- /dev/null +++ b/fixtures/hasura/engine/Dockerfile.engine @@ -0,0 +1,2 @@ +FROM ghcr.io/hasura/v3-engine +COPY ./build /md/ \ No newline at end of file diff --git a/fixtures/hasura/engine/auth_config.json b/fixtures/hasura/engine/auth_config.json deleted file mode 100644 index 8a73e5b4..00000000 --- a/fixtures/hasura/engine/auth_config.json +++ /dev/null @@ -1 +0,0 @@ -{"version":"v1","definition":{"allowRoleEmulationBy":"admin","mode":{"webhook":{"url":"http://auth_hook:3050/validate-request","method":"Post"}}}} \ No newline at end of file diff --git a/fixtures/hasura/engine/metadata.json b/fixtures/hasura/engine/metadata.json deleted file mode 100644 index 84b41230..00000000 --- a/fixtures/hasura/engine/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"subgraphs":[{"name":"globals","objects":[{"definition":{"apolloFederation":null,"mutation":{"rootOperationTypeName":"Mutation"},"query":{"aggregate":null,"argumentsInput":{"fieldName":"args"},"filterInput":{"fieldName":"where","operatorNames":{"and":"_and","isNull":"_is_null","not":"_not","or":"_or"}},"limitInput":{"fieldName":"limit"},"offsetInput":{"fieldName":"offset"},"orderByInput":{"enumDirectionValues":{"asc":"Asc","desc":"Desc"},"enumTypeNames":[{"directions":["Asc","Desc"],"typeName":"OrderBy"}],"fieldName":"order_by"},"rootOperationTypeName":"Query"}},"kind":"GraphqlConfig","version":"v1"},{"definition":{"allowRoleEmulationBy":"admin","mode":{"webhook":{"method":"Post","url":"http://auth_hook:3050/validate-request"}}},"kind":"AuthConfig","version":"v1"},{"date":"2024-07-09","kind":"CompatibilityConfig"}]}],"version":"v2"} \ No newline at end of file diff --git a/fixtures/hasura/engine/open_dd.json b/fixtures/hasura/engine/open_dd.json deleted file mode 100644 index 508184df..00000000 --- a/fixtures/hasura/engine/open_dd.json +++ /dev/null @@ -1 +0,0 @@ -{"version":"v3","subgraphs":[{"name":"globals","objects":[{"kind":"GraphqlConfig","version":"v1","definition":{"query":{"rootOperationTypeName":"Query","argumentsInput":{"fieldName":"args"},"limitInput":{"fieldName":"limit"},"offsetInput":{"fieldName":"offset"},"filterInput":{"fieldName":"where","operatorNames":{"and":"_and","or":"_or","not":"_not","isNull":"_is_null"}},"orderByInput":{"fieldName":"order_by","enumDirectionValues":{"asc":"Asc","desc":"Desc"},"enumTypeNames":[{"directions":["Asc","Desc"],"typeName":"OrderBy"}]},"aggregate":null},"mutation":{"rootOperationTypeName":"Mutation"},"apolloFederation":null}}]}],"flags":{"require_graphql_config":true}} \ No newline at end of file diff --git a/fixtures/hasura/globals/.env.globals.local b/fixtures/hasura/globals/.env.globals.local deleted file mode 100644 index e69de29b..00000000 diff --git a/fixtures/hasura/globals/auth-config.cloud.hml b/fixtures/hasura/globals/auth-config.cloud.hml deleted file mode 100644 index 1080ecc3..00000000 --- a/fixtures/hasura/globals/auth-config.cloud.hml +++ /dev/null @@ -1,8 +0,0 @@ -kind: AuthConfig -version: v1 -definition: - allowRoleEmulationBy: admin - mode: - webhook: - url: http://auth-hook.default:8080/webhook/ddn?role=admin - method: Post diff --git a/fixtures/hasura/globals/auth-config.local.hml b/fixtures/hasura/globals/auth-config.local.hml deleted file mode 100644 index 367e5064..00000000 --- a/fixtures/hasura/globals/auth-config.local.hml +++ /dev/null @@ -1,8 +0,0 @@ -kind: AuthConfig -version: v1 -definition: - allowRoleEmulationBy: admin - mode: - webhook: - url: http://auth_hook:3050/validate-request - method: Post diff --git a/fixtures/hasura/globals/metadata/auth-config.hml 
b/fixtures/hasura/globals/metadata/auth-config.hml new file mode 100644 index 00000000..54c0b84b --- /dev/null +++ b/fixtures/hasura/globals/metadata/auth-config.hml @@ -0,0 +1,7 @@ +kind: AuthConfig +version: v2 +definition: + mode: + noAuth: + role: admin + sessionVariables: {} diff --git a/fixtures/hasura/globals/compatibility-config.hml b/fixtures/hasura/globals/metadata/compatibility-config.hml similarity index 57% rename from fixtures/hasura/globals/compatibility-config.hml rename to fixtures/hasura/globals/metadata/compatibility-config.hml index 80856ac1..ca10adf3 100644 --- a/fixtures/hasura/globals/compatibility-config.hml +++ b/fixtures/hasura/globals/metadata/compatibility-config.hml @@ -1,2 +1,2 @@ kind: CompatibilityConfig -date: "2024-07-09" +date: "2024-11-26" diff --git a/fixtures/hasura/globals/graphql-config.hml b/fixtures/hasura/globals/metadata/graphql-config.hml similarity index 76% rename from fixtures/hasura/globals/graphql-config.hml rename to fixtures/hasura/globals/metadata/graphql-config.hml index d5b9d9f6..f54210cf 100644 --- a/fixtures/hasura/globals/graphql-config.hml +++ b/fixtures/hasura/globals/metadata/graphql-config.hml @@ -26,5 +26,11 @@ definition: - Asc - Desc typeName: OrderBy + aggregate: + filterInputFieldName: filter_input + countFieldName: _count + countDistinctFieldName: _count_distinct mutation: rootOperationTypeName: Mutation + subscription: + rootOperationTypeName: Subscription diff --git a/fixtures/hasura/globals/subgraph.cloud.yaml b/fixtures/hasura/globals/subgraph.cloud.yaml deleted file mode 100644 index dea2c3d4..00000000 --- a/fixtures/hasura/globals/subgraph.cloud.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Subgraph -version: v1 -definition: - generator: - rootPath: . - envFile: .env.globals.cloud - includePaths: - - auth-config.cloud.hml - - compatibility-config.hml - - graphql-config.hml - name: globals diff --git a/fixtures/hasura/globals/subgraph.local.yaml b/fixtures/hasura/globals/subgraph.local.yaml deleted file mode 100644 index d5e4d000..00000000 --- a/fixtures/hasura/globals/subgraph.local.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Subgraph -version: v1 -definition: - generator: - rootPath: . - envFile: .env.globals.local - includePaths: - - auth-config.local.hml - - compatibility-config.hml - - graphql-config.hml - name: globals diff --git a/fixtures/hasura/chinook/subgraph.yaml b/fixtures/hasura/globals/subgraph.yaml similarity index 75% rename from fixtures/hasura/chinook/subgraph.yaml rename to fixtures/hasura/globals/subgraph.yaml index fef4fcb2..b21faca2 100644 --- a/fixtures/hasura/chinook/subgraph.yaml +++ b/fixtures/hasura/globals/subgraph.yaml @@ -1,8 +1,8 @@ kind: Subgraph -version: v1 +version: v2 definition: + name: globals generator: rootPath: . 
includePaths: - metadata - name: chinook diff --git a/fixtures/hasura/hasura.yaml b/fixtures/hasura/hasura.yaml index b4d4e478..7f8f5cc6 100644 --- a/fixtures/hasura/hasura.yaml +++ b/fixtures/hasura/hasura.yaml @@ -1 +1 @@ -version: v2 +version: v3 diff --git a/fixtures/hasura/otel-collector-config.yaml b/fixtures/hasura/otel-collector-config.yaml new file mode 100644 index 00000000..2af072db --- /dev/null +++ b/fixtures/hasura/otel-collector-config.yaml @@ -0,0 +1,23 @@ +exporters: + otlp: + endpoint: https://gateway.otlp.hasura.io:443 + headers: + Authorization: pat ${env:HASURA_DDN_PAT} +processors: + batch: {} +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +service: + pipelines: + traces: + exporters: + - otlp + processors: + - batch + receivers: + - otlp diff --git a/fixtures/hasura/sample_mflix/.env.sample_mflix b/fixtures/hasura/sample_mflix/.env.sample_mflix deleted file mode 100644 index e003fd5a..00000000 --- a/fixtures/hasura/sample_mflix/.env.sample_mflix +++ /dev/null @@ -1 +0,0 @@ -SAMPLE_MFLIX_CONNECTOR_URL='http://localhost:7130' diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/.ddnignore b/fixtures/hasura/sample_mflix/connector/sample_mflix/.ddnignore deleted file mode 100644 index 4c49bd78..00000000 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/.ddnignore +++ /dev/null @@ -1 +0,0 @@ -.env diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/.env b/fixtures/hasura/sample_mflix/connector/sample_mflix/.env deleted file mode 100644 index fea5fc4a..00000000 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/.env +++ /dev/null @@ -1 +0,0 @@ -MONGODB_DATABASE_URI="mongodb://localhost/sample_mflix" diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/connector.yaml b/fixtures/hasura/sample_mflix/connector/sample_mflix/connector.yaml deleted file mode 100644 index 052dfcd6..00000000 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/connector.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Connector -version: v1 -definition: - name: sample_mflix - subgraph: sample_mflix - source: hasura/mongodb:v0.1.0 - context: . 
- envFile: .env diff --git a/fixtures/hasura/sample_mflix/connector/sample_mflix/native_queries/title_word_requency.json b/fixtures/hasura/sample_mflix/connector/sample_mflix/native_queries/title_word_requency.json deleted file mode 100644 index b8306b2d..00000000 --- a/fixtures/hasura/sample_mflix/connector/sample_mflix/native_queries/title_word_requency.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "title_word_frequency", - "representation": "collection", - "inputCollection": "movies", - "description": "words appearing in movie titles with counts", - "resultDocumentType": "TitleWordFrequency", - "objectTypes": { - "TitleWordFrequency": { - "fields": { - "_id": { "type": { "scalar": "string" } }, - "count": { "type": { "scalar": "int" } } - } - } - }, - "pipeline": [ - { - "$replaceWith": { - "title_words": { "$split": ["$title", " "] } - } - }, - { "$unwind": { "path": "$title_words" } }, - { - "$group": { - "_id": "$title_words", - "count": { "$count": {} } - } - } - ] -} - diff --git a/fixtures/hasura/sample_mflix/metadata/models/TitleWordFrequency.hml b/fixtures/hasura/sample_mflix/metadata/models/TitleWordFrequency.hml deleted file mode 100644 index 294e8448..00000000 --- a/fixtures/hasura/sample_mflix/metadata/models/TitleWordFrequency.hml +++ /dev/null @@ -1,94 +0,0 @@ ---- -kind: ObjectType -version: v1 -definition: - name: TitleWordFrequency - fields: - - name: word - type: String! - - name: count - type: Int! - graphql: - typeName: TitleWordFrequency - inputTypeName: TitleWordFrequencyInput - dataConnectorTypeMapping: - - dataConnectorName: sample_mflix - dataConnectorObjectType: TitleWordFrequency - fieldMapping: - word: - column: - name: _id - count: - column: - name: count - ---- -kind: TypePermissions -version: v1 -definition: - typeName: TitleWordFrequency - permissions: - - role: admin - output: - allowedFields: - - word - - count - ---- -kind: BooleanExpressionType -version: v1 -definition: - name: TitleWordFrequencyComparisonExp - operand: - object: - type: TitleWordFrequency - comparableFields: - - fieldName: word - booleanExpressionType: StringComparisonExp - - fieldName: count - booleanExpressionType: IntComparisonExp - comparableRelationships: [] - logicalOperators: - enable: true - isNull: - enable: true - graphql: - typeName: TitleWordFrequencyComparisonExp - ---- -kind: Model -version: v1 -definition: - name: TitleWordFrequency - objectType: TitleWordFrequency - source: - dataConnectorName: sample_mflix - collection: title_word_frequency - filterExpressionType: TitleWordFrequencyComparisonExp - orderableFields: - - fieldName: word - orderByDirections: - enableAll: true - - fieldName: count - orderByDirections: - enableAll: true - graphql: - selectMany: - queryRootField: title_word_frequencies - selectUniques: - - queryRootField: title_word_frequency - uniqueIdentifier: - - word - orderByExpressionType: TitleWordFrequencyOrderBy - description: words appearing in movie titles with counts - ---- -kind: ModelPermissions -version: v1 -definition: - modelName: TitleWordFrequency - permissions: - - role: admin - select: - filter: null - diff --git a/fixtures/hasura/sample_mflix/subgraph.yaml b/fixtures/hasura/sample_mflix/subgraph.yaml deleted file mode 100644 index 6b571d44..00000000 --- a/fixtures/hasura/sample_mflix/subgraph.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Subgraph -version: v1 -definition: - generator: - rootPath: . 
- includePaths: - - metadata - name: sample_mflix diff --git a/fixtures/hasura/supergraph.yaml b/fixtures/hasura/supergraph.yaml index 94840e70..0d9260e6 100644 --- a/fixtures/hasura/supergraph.yaml +++ b/fixtures/hasura/supergraph.yaml @@ -2,6 +2,5 @@ kind: Supergraph version: v2 definition: subgraphs: - - globals/subgraph.local.yaml - - chinook/subgraph.local.yaml - - sample_mflix/subgraph.local.yaml + - globals/subgraph.yaml + - app/subgraph.yaml diff --git a/fixtures/mongodb/sample_claims/import.sh b/fixtures/mongodb/sample_claims/import.sh new file mode 100755 index 00000000..f9b5e25c --- /dev/null +++ b/fixtures/mongodb/sample_claims/import.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -euo pipefail + +# Get the directory of this script file +FIXTURES=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# In v6 and later the bundled MongoDB client shell is called "mongosh". In +# earlier versions it's called "mongo". +MONGO_SH=mongosh +if ! command -v mongosh &> /dev/null; then + MONGO_SH=mongo +fi + +echo "📡 Importing claims sample data..." +mongoimport --db sample_claims --collection companies --type csv --headerline --file "$FIXTURES"/companies.csv +mongoimport --db sample_claims --collection carriers --type csv --headerline --file "$FIXTURES"/carriers.csv +mongoimport --db sample_claims --collection account_groups --type csv --headerline --file "$FIXTURES"/account_groups.csv +mongoimport --db sample_claims --collection claims --type csv --headerline --file "$FIXTURES"/claims.csv +$MONGO_SH sample_claims "$FIXTURES"/view_flat.js +$MONGO_SH sample_claims "$FIXTURES"/view_nested.js +echo "✅ Sample claims data imported..." diff --git a/fixtures/mongodb/sample_import.sh b/fixtures/mongodb/sample_import.sh index 21340366..1a9f8b9f 100755 --- a/fixtures/mongodb/sample_import.sh +++ b/fixtures/mongodb/sample_import.sh @@ -8,32 +8,7 @@ set -euo pipefail # Get the directory of this script file FIXTURES=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -# In v6 and later the bundled MongoDB client shell is called "mongosh". In -# earlier versions it's called "mongo". -MONGO_SH=mongosh -if ! command -v mongosh &> /dev/null; then - MONGO_SH=mongo -fi - -# Sample Claims Data -echo "📡 Importing claims sample data..." -mongoimport --db sample_claims --collection companies --type csv --headerline --file "$FIXTURES"/sample_claims/companies.csv -mongoimport --db sample_claims --collection carriers --type csv --headerline --file "$FIXTURES"/sample_claims/carriers.csv -mongoimport --db sample_claims --collection account_groups --type csv --headerline --file "$FIXTURES"/sample_claims/account_groups.csv -mongoimport --db sample_claims --collection claims --type csv --headerline --file "$FIXTURES"/sample_claims/claims.csv -$MONGO_SH sample_claims "$FIXTURES"/sample_claims/view_flat.js -$MONGO_SH sample_claims "$FIXTURES"/sample_claims/view_nested.js -echo "✅ Sample claims data imported..." - -# mongo_flix -echo "📡 Importing mflix sample data..." 
-mongoimport --db sample_mflix --collection comments --file "$FIXTURES"/sample_mflix/comments.json -mongoimport --db sample_mflix --collection movies --file "$FIXTURES"/sample_mflix/movies.json -mongoimport --db sample_mflix --collection sessions --file "$FIXTURES"/sample_mflix/sessions.json -mongoimport --db sample_mflix --collection theaters --file "$FIXTURES"/sample_mflix/theaters.json -mongoimport --db sample_mflix --collection users --file "$FIXTURES"/sample_mflix/users.json -$MONGO_SH sample_mflix "$FIXTURES/sample_mflix/indexes.js" -echo "✅ Mflix sample data imported..." - -# chinook +"$FIXTURES"/sample_claims/import.sh +"$FIXTURES"/sample_mflix/import.sh "$FIXTURES"/chinook/chinook-import.sh +"$FIXTURES"/test_cases/import.sh diff --git a/fixtures/mongodb/sample_mflix/import.sh b/fixtures/mongodb/sample_mflix/import.sh new file mode 100755 index 00000000..d1329dae --- /dev/null +++ b/fixtures/mongodb/sample_mflix/import.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -euo pipefail + +# Get the directory of this script file +FIXTURES=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# In v6 and later the bundled MongoDB client shell is called "mongosh". In +# earlier versions it's called "mongo". +MONGO_SH=mongosh +if ! command -v mongosh &> /dev/null; then + MONGO_SH=mongo +fi + +echo "📡 Importing mflix sample data..." +mongoimport --db sample_mflix --collection comments --file "$FIXTURES"/comments.json +mongoimport --db sample_mflix --collection movies --file "$FIXTURES"/movies.json +mongoimport --db sample_mflix --collection sessions --file "$FIXTURES"/sessions.json +mongoimport --db sample_mflix --collection theaters --file "$FIXTURES"/theaters.json +mongoimport --db sample_mflix --collection users --file "$FIXTURES"/users.json +$MONGO_SH sample_mflix "$FIXTURES/indexes.js" +echo "✅ Mflix sample data imported..." diff --git a/fixtures/mongodb/sample_mflix/movies.json b/fixtures/mongodb/sample_mflix/movies.json index c957d784..3cf5fd14 100644 --- a/fixtures/mongodb/sample_mflix/movies.json +++ b/fixtures/mongodb/sample_mflix/movies.json @@ -1,7 +1,7 @@ {"_id":{"$oid":"573a1390f29313caabcd4135"},"plot":"Three men hammer on an anvil and pass a bottle of beer around.","genres":["Short"],"runtime":{"$numberInt":"1"},"cast":["Charles Kayser","John Ott"],"num_mflix_comments":{"$numberInt":"1"},"title":"Blacksmith Scene","fullplot":"A stationary camera looks at a large anvil with a blacksmith behind it and one on either side. The smith in the middle draws a heated metal rod from the fire, places it on the anvil, and all three begin a rhythmic hammering. After several blows, the metal goes back in the fire. One smith pulls out a bottle of beer, and they each take a swig. Then, out comes the glowing metal and the hammering resumes.","countries":["USA"],"released":{"$date":{"$numberLong":"-2418768000000"}},"directors":["William K.L. 
Dickson"],"rated":"UNRATED","awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-26 00:03:50.133000000","year":{"$numberInt":"1893"},"imdb":{"rating":{"$numberDouble":"6.2"},"votes":{"$numberInt":"1189"},"id":{"$numberInt":"5"}},"type":"movie","tomatoes":{"viewer":{"rating":{"$numberInt":"3"},"numReviews":{"$numberInt":"184"},"meter":{"$numberInt":"32"}},"lastUpdated":{"$date":{"$numberLong":"1435516449000"}}}} {"_id":{"$oid":"573a1390f29313caabcd42e8"},"plot":"A group of bandits stage a brazen train hold-up, only to find a determined posse hot on their heels.","genres":["Short","Western"],"runtime":{"$numberInt":"11"},"cast":["A.C. Abadie","Gilbert M. 'Broncho Billy' Anderson","George Barnes","Justus D. Barnes"],"poster":"https://m.media-amazon.com/images/M/MV5BMTU3NjE5NzYtYTYyNS00MDVmLWIwYjgtMmYwYWIxZDYyNzU2XkEyXkFqcGdeQXVyNzQzNzQxNzI@._V1_SY1000_SX677_AL_.jpg","title":"The Great Train Robbery","fullplot":"Among the earliest existing films in American cinema - notable as the first film that presented a narrative story to tell - it depicts a group of cowboy outlaws who hold up a train and rob the passengers. They are then pursued by a Sheriff's posse. Several scenes have color included - all hand tinted.","languages":["English"],"released":{"$date":{"$numberLong":"-2085523200000"}},"directors":["Edwin S. Porter"],"rated":"TV-G","awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-13 00:27:59.177000000","year":{"$numberInt":"1903"},"imdb":{"rating":{"$numberDouble":"7.4"},"votes":{"$numberInt":"9847"},"id":{"$numberInt":"439"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.7"},"numReviews":{"$numberInt":"2559"},"meter":{"$numberInt":"75"}},"fresh":{"$numberInt":"6"},"critic":{"rating":{"$numberDouble":"7.6"},"numReviews":{"$numberInt":"6"},"meter":{"$numberInt":"100"}},"rotten":{"$numberInt":"0"},"lastUpdated":{"$date":{"$numberLong":"1439061370000"}}}} {"_id":{"$oid":"573a1390f29313caabcd4323"},"plot":"A young boy, opressed by his mother, goes on an outing in the country with a social welfare group where he dares to dream of a land where the cares of his ordinary life fade.","genres":["Short","Drama","Fantasy"],"runtime":{"$numberInt":"14"},"rated":"UNRATED","cast":["Martin Fuller","Mrs. William Bechtel","Walter Edwin","Ethel Jewett"],"num_mflix_comments":{"$numberInt":"2"},"poster":"https://m.media-amazon.com/images/M/MV5BMTMzMDcxMjgyNl5BMl5BanBnXkFtZTcwOTgxNjg4Mg@@._V1_SY1000_SX677_AL_.jpg","title":"The Land Beyond the Sunset","fullplot":"Thanks to the Fresh Air Fund, a slum child escapes his drunken mother for a day's outing in the country. Upon arriving, he and the other children are told a story about a mythical land of no pain. Rather then return to the slum at day's end, the lad seeks to journey to that beautiful land beyond the sunset.","languages":["English"],"released":{"$date":{"$numberLong":"-1804377600000"}},"directors":["Harold M. Shaw"],"writers":["Dorothy G. 
Shore"],"awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-29 00:27:45.437000000","year":{"$numberInt":"1912"},"imdb":{"rating":{"$numberDouble":"7.1"},"votes":{"$numberInt":"448"},"id":{"$numberInt":"488"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.7"},"numReviews":{"$numberInt":"53"},"meter":{"$numberInt":"67"}},"lastUpdated":{"$date":{"$numberLong":"1430161595000"}}}} -{"_id":{"$oid":"573a1390f29313caabcd446f"},"plot":"A greedy tycoon decides, on a whim, to corner the world market in wheat. This doubles the price of bread, forcing the grain's producers into charity lines and further into poverty. The film...","genres":["Short","Drama"],"runtime":{"$numberInt":"14"},"cast":["Frank Powell","Grace Henderson","James Kirkwood","Linda Arvidson"],"num_mflix_comments":{"$numberInt":"1"},"title":"A Corner in Wheat","fullplot":"A greedy tycoon decides, on a whim, to corner the world market in wheat. This doubles the price of bread, forcing the grain's producers into charity lines and further into poverty. The film continues to contrast the ironic differences between the lives of those who work to grow the wheat and the life of the man who dabbles in its sale for profit.","languages":["English"],"released":{"$date":{"$numberLong":"-1895097600000"}},"directors":["D.W. Griffith"],"rated":"G","awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-13 00:46:30.660000000","year":{"$numberInt":"1909"},"imdb":{"rating":{"$numberDouble":"6.6"},"votes":{"$numberInt":"1375"},"id":{"$numberInt":"832"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.6"},"numReviews":{"$numberInt":"109"},"meter":{"$numberInt":"73"}},"lastUpdated":{"$date":{"$numberLong":"1431369413000"}}}} +{"_id":{"$oid":"573a1390f29313caabcd446f"},"plot":"A greedy tycoon decides, on a whim, to corner the world market in wheat. This doubles the price of bread, forcing the grain's producers into charity lines and further into poverty. The film...","genres":["Short","Drama"],"runtime":{"$numberInt":"14"},"cast":["Frank Powell","Grace Henderson","James Kirkwood","Linda Arvidson"],"num_mflix_comments":{"$numberInt":"1"},"title":"A Corner in Wheat","fullplot":"A greedy tycoon decides, on a whim, to corner the world market in wheat. This doubles the price of bread, forcing the grain's producers into charity lines and further into poverty. The film continues to contrast the ironic differences between the lives of those who work to grow the wheat and the life of the man who dabbles in its sale for profit.","languages":["English"],"released":{"$date":{"$numberLong":"-1895097600000"}},"directors":["D.W. 
Griffith"],"writers":[],"rated":"G","awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-13 00:46:30.660000000","year":{"$numberInt":"1909"},"imdb":{"rating":{"$numberDouble":"6.6"},"votes":{"$numberInt":"1375"},"id":{"$numberInt":"832"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.6"},"numReviews":{"$numberInt":"109"},"meter":{"$numberInt":"73"}},"lastUpdated":{"$date":{"$numberLong":"1431369413000"}}}} {"_id":{"$oid":"573a1390f29313caabcd4803"},"plot":"Cartoon figures announce, via comic strip balloons, that they will move - and move they do, in a wildly exaggerated style.","genres":["Animation","Short","Comedy"],"runtime":{"$numberInt":"7"},"cast":["Winsor McCay"],"num_mflix_comments":{"$numberInt":"1"},"poster":"https://m.media-amazon.com/images/M/MV5BYzg2NjNhNTctMjUxMi00ZWU4LWI3ZjYtNTI0NTQxNThjZTk2XkEyXkFqcGdeQXVyNzg5OTk2OA@@._V1_SY1000_SX677_AL_.jpg","title":"Winsor McCay, the Famous Cartoonist of the N.Y. Herald and His Moving Comics","fullplot":"Cartoonist Winsor McCay agrees to create a large set of drawings that will be photographed and made into a motion picture. The job requires plenty of drawing supplies, and the cartoonist must also overcome some mishaps caused by an assistant. Finally, the work is done, and everyone can see the resulting animated picture.","languages":["English"],"released":{"$date":{"$numberLong":"-1853539200000"}},"directors":["Winsor McCay","J. Stuart Blackton"],"writers":["Winsor McCay (comic strip \"Little Nemo in Slumberland\")","Winsor McCay (screenplay)"],"awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-29 01:09:03.030000000","year":{"$numberInt":"1911"},"imdb":{"rating":{"$numberDouble":"7.3"},"votes":{"$numberInt":"1034"},"id":{"$numberInt":"1737"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.4"},"numReviews":{"$numberInt":"89"},"meter":{"$numberInt":"47"}},"lastUpdated":{"$date":{"$numberLong":"1440096684000"}}}} {"_id":{"$oid":"573a1390f29313caabcd4eaf"},"plot":"A woman, with the aid of her police officer sweetheart, endeavors to uncover the prostitution ring that has kidnapped her sister, and the philanthropist who secretly runs it.","genres":["Crime","Drama"],"runtime":{"$numberInt":"88"},"cast":["Jane Gail","Ethel Grandin","William H. 
Turner","Matt Moore"],"num_mflix_comments":{"$numberInt":"2"},"poster":"https://m.media-amazon.com/images/M/MV5BYzk0YWQzMGYtYTM5MC00NjM2LWE5YzYtMjgyNDVhZDg1N2YzXkEyXkFqcGdeQXVyMzE0MjY5ODA@._V1_SY1000_SX677_AL_.jpg","title":"Traffic in Souls","lastupdated":"2015-09-15 02:07:14.247000000","languages":["English"],"released":{"$date":{"$numberLong":"-1770508800000"}},"directors":["George Loane Tucker"],"rated":"TV-PG","awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"year":{"$numberInt":"1913"},"imdb":{"rating":{"$numberInt":"6"},"votes":{"$numberInt":"371"},"id":{"$numberInt":"3471"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberInt":"3"},"numReviews":{"$numberInt":"85"},"meter":{"$numberInt":"57"}},"dvd":{"$date":{"$numberLong":"1219708800000"}},"lastUpdated":{"$date":{"$numberLong":"1439231635000"}}}} {"_id":{"$oid":"573a1390f29313caabcd50e5"},"plot":"The cartoonist, Winsor McCay, brings the Dinosaurus back to life in the figure of his latest creation, Gertie the Dinosaur.","genres":["Animation","Short","Comedy"],"runtime":{"$numberInt":"12"},"cast":["Winsor McCay","George McManus","Roy L. McCardell"],"num_mflix_comments":{"$numberInt":"1"},"poster":"https://m.media-amazon.com/images/M/MV5BMTQxNzI4ODQ3NF5BMl5BanBnXkFtZTgwNzY5NzMwMjE@._V1_SY1000_SX677_AL_.jpg","title":"Gertie the Dinosaur","fullplot":"Winsor Z. McCay bets another cartoonist that he can animate a dinosaur. So he draws a big friendly herbivore called Gertie. Then he get into his own picture. Gertie walks through the picture, eats a tree, meets her creator, and takes him carefully on her back for a ride.","languages":["English"],"released":{"$date":{"$numberLong":"-1745020800000"}},"directors":["Winsor McCay"],"writers":["Winsor McCay"],"awards":{"wins":{"$numberInt":"1"},"nominations":{"$numberInt":"0"},"text":"1 win."},"lastupdated":"2015-08-18 01:03:15.313000000","year":{"$numberInt":"1914"},"imdb":{"rating":{"$numberDouble":"7.3"},"votes":{"$numberInt":"1837"},"id":{"$numberInt":"4008"}},"countries":["USA"],"type":"movie","tomatoes":{"viewer":{"rating":{"$numberDouble":"3.7"},"numReviews":{"$numberInt":"29"}},"lastUpdated":{"$date":{"$numberLong":"1439234403000"}}}} diff --git a/fixtures/mongodb/test_cases/departments.json b/fixtures/mongodb/test_cases/departments.json new file mode 100644 index 00000000..557e4621 --- /dev/null +++ b/fixtures/mongodb/test_cases/departments.json @@ -0,0 +1,2 @@ +{ "_id": { "$oid": "67857bc2f317ca21359981d5" }, "description": "West Valley English" } +{ "_id": { "$oid": "67857be3f317ca21359981d6" }, "description": "West Valley Math" } diff --git a/fixtures/mongodb/test_cases/import.sh b/fixtures/mongodb/test_cases/import.sh new file mode 100755 index 00000000..3c7f671f --- /dev/null +++ b/fixtures/mongodb/test_cases/import.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Populates the test_cases mongodb database. When writing integration tests we +# come up against cases where we want some specific data to test against that +# doesn't exist in the sample_mflix or chinook databases. Such data can go into +# the test_cases database as needed. + +set -euo pipefail + +# Get the directory of this script file +FIXTURES=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +echo "📡 Importing test case data..." +for fixture in "$FIXTURES"/*.json; do + collection=$(basename "$fixture" .json) + mongoimport --db test_cases --collection "$collection" --file "$fixture" +done +echo "✅ test case data imported..." 
+ diff --git a/fixtures/mongodb/test_cases/nested_collection.json b/fixtures/mongodb/test_cases/nested_collection.json new file mode 100644 index 00000000..ac89a340 --- /dev/null +++ b/fixtures/mongodb/test_cases/nested_collection.json @@ -0,0 +1,3 @@ +{ "_id": { "$oid": "6705a1c2c2df58ace3e67806" }, "institution": "Black Mesa", "staff": [{ "name": "Freeman" }, { "name": "Calhoun" }] } +{ "_id": { "$oid": "6705a1cec2df58ace3e67807" }, "institution": "Aperture Science", "staff": [{ "name": "GLaDOS" }, { "name": "Chell" }] } +{ "_id": { "$oid": "6705a1d7c2df58ace3e67808" }, "institution": "City 17", "staff": [{ "name": "Alyx" }, { "name": "Freeman" }, { "name": "Breen" }] } diff --git a/fixtures/mongodb/test_cases/nested_field_with_dollar.json b/fixtures/mongodb/test_cases/nested_field_with_dollar.json new file mode 100644 index 00000000..68ee046d --- /dev/null +++ b/fixtures/mongodb/test_cases/nested_field_with_dollar.json @@ -0,0 +1,3 @@ +{ "configuration": { "$schema": "schema1" } } +{ "configuration": { "$schema": null } } +{ "configuration": { "$schema": "schema3" } } diff --git a/fixtures/mongodb/test_cases/schools.json b/fixtures/mongodb/test_cases/schools.json new file mode 100644 index 00000000..c2cc732a --- /dev/null +++ b/fixtures/mongodb/test_cases/schools.json @@ -0,0 +1 @@ +{ "_id": { "$oid": "67857b7ef317ca21359981d4" }, "name": "West Valley", "departments": { "english_department_id": { "$oid": "67857bc2f317ca21359981d5" }, "math_department_id": { "$oid": "67857be3f317ca21359981d6" } } } diff --git a/fixtures/mongodb/test_cases/uuids.json b/fixtures/mongodb/test_cases/uuids.json new file mode 100644 index 00000000..16d6aade --- /dev/null +++ b/fixtures/mongodb/test_cases/uuids.json @@ -0,0 +1,4 @@ +{ "_id": { "$oid": "67c1fc84d5c3213534bdce10" }, "uuid": { "$binary": { "base64": "+gpObj88QmaOlr9rXJurAQ==", "subType":"04" } }, "uuid_as_string": "fa0a4e6e-3f3c-4266-8e96-bf6b5c9bab01", "name": "brassavola nodosa" } +{ "_id": { "$oid": "67c1fc84d5c3213534bdce11" }, "uuid": { "$binary": { "base64": "QKaT0MAKQl2vXFNeN/3+nA==", "subType":"04" } }, "uuid_as_string": "40a693d0-c00a-425d-af5c-535e37fdfe9c", "name": "peristeria elata" } +{ "_id": { "$oid": "67c1fc84d5c3213534bdce12" }, "uuid": { "$binary": { "base64": "CsKZiCoHTfWn7lckxrpD+Q==", "subType":"04" } }, "uuid_as_string": "0ac29988-2a07-4df5-a7ee-5724c6ba43f9", "name": "vanda coerulea" } +{ "_id": { "$oid": "67c1fc84d5c3213534bdce13" }, "uuid": { "$binary": { "base64": "BBBI52lNSUCHBlF/QKW9Vw==", "subType":"04" } }, "uuid_as_string": "041048e7-694d-4940-8706-517f40a5bd57", "name": "tuberous grasspink" } diff --git a/fixtures/mongodb/test_cases/weird_field_names.json b/fixtures/mongodb/test_cases/weird_field_names.json new file mode 100644 index 00000000..e1c1d7b5 --- /dev/null +++ b/fixtures/mongodb/test_cases/weird_field_names.json @@ -0,0 +1,4 @@ +{ "_id": { "$oid": "66cf91a0ec1dfb55954378bd" }, "$invalid.name": 1, "$invalid.object.name": { "valid_name": 1 }, "valid_object_name": { "$invalid.nested.name": 1 }, "$invalid.array": [{ "$invalid.element": 1 }] } +{ "_id": { "$oid": "66cf9230ec1dfb55954378be" }, "$invalid.name": 2, "$invalid.object.name": { "valid_name": 2 }, "valid_object_name": { "$invalid.nested.name": 2 }, "$invalid.array": [{ "$invalid.element": 2 }] } +{ "_id": { "$oid": "66cf9274ec1dfb55954378bf" }, "$invalid.name": 3, "$invalid.object.name": { "valid_name": 3 }, "valid_object_name": { "$invalid.nested.name": 3 }, "$invalid.array": [{ "$invalid.element": 3 }] } +{ "_id": { "$oid": "66cf9295ec1dfb55954378c0" 
}, "$invalid.name": 4, "$invalid.object.name": { "valid_name": 4 }, "valid_object_name": { "$invalid.nested.name": 4 }, "$invalid.array": [{ "$invalid.element": 4 }] } diff --git a/flake.lock b/flake.lock index 5251bd59..86a75d8a 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "advisory-db": { "flake": false, "locked": { - "lastModified": 1720572893, - "narHash": "sha256-EQfU1yMnebn7LoJNjjsQimyuWwz+2YzazqUZu8aX/r4=", + "lastModified": 1748950236, + "narHash": "sha256-kNiGMrXi5Bq/aWoQmnpK0v+ufQA4FOInhbkY56iUndc=", "owner": "rustsec", "repo": "advisory-db", - "rev": "97a2dc75838f19a5fd63dc3f8e3f57e0c4c8cfe6", + "rev": "a1f651cba8bf224f52c5d55d8182b3bb0ebce49e", "type": "github" }, "original": { @@ -20,17 +20,16 @@ "inputs": { "flake-parts": "flake-parts", "haskell-flake": "haskell-flake", - "hercules-ci-effects": "hercules-ci-effects", "nixpkgs": [ "nixpkgs" ] }, "locked": { - "lastModified": 1720147808, - "narHash": "sha256-hlWEQGUbIwYb+vnd8egzlW/P++yKu3HjV/rOdOPVank=", + "lastModified": 1745165725, + "narHash": "sha256-OnHV8Us04vRsWM0uL1cQez8DumhRi6yE+4K4VLtH6Ws=", "owner": "hercules-ci", "repo": "arion", - "rev": "236f9dd82d6ef6a2d9987c7a7df3e75f1bc8b318", + "rev": "4f59059633b14364b994503b179a701f5e6cfb90", "type": "github" }, "original": { @@ -40,17 +39,12 @@ } }, "crane": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, "locked": { - "lastModified": 1720546058, - "narHash": "sha256-iU2yVaPIZm5vMGdlT0+57vdB/aPq/V5oZFBRwYw+HBM=", + "lastModified": 1748970125, + "narHash": "sha256-UDyigbDGv8fvs9aS95yzFfOKkEjx1LO3PL3DsKopohA=", "owner": "ipetkov", "repo": "crane", - "rev": "2d83156f23c43598cf44e152c33a59d3892f8b29", + "rev": "323b5746d89e04b22554b061522dfce9e4c49b18", "type": "github" }, "original": { @@ -61,11 +55,11 @@ }, "flake-compat": { "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -82,11 +76,11 @@ ] }, "locked": { - "lastModified": 1719994518, - "narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -95,35 +89,32 @@ "type": "github" } }, - "flake-parts_2": { + "flake-utils": { "inputs": { - "nixpkgs-lib": [ - "arion", - "hercules-ci-effects", - "nixpkgs" - ] + "systems": "systems" }, "locked": { - "lastModified": 1712014858, - "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { - "id": "flake-parts", - "type": "indirect" + "owner": "numtide", + "repo": "flake-utils", + "type": "github" } }, "graphql-engine-source": { "flake": false, "locked": { - "lastModified": 1722615509, - "narHash": "sha256-LH10Tc/UWZ1uwxrw4tohmqR/uzVi53jHnr+ziuxJi8I=", + "lastModified": 1749050067, + "narHash": 
"sha256-EvPO+PByMDL93rpqrSGLBtvPUaxD0CKFxQE/X5awIJw=", "owner": "hasura", "repo": "graphql-engine", - "rev": "03c85f69857ef556e9bb26f8b92e9e47317991a3", + "rev": "2a7304816b40d7868b7ba4a94ba2baf09dd1d653", "type": "github" }, "original": { @@ -148,35 +139,48 @@ "type": "github" } }, - "hercules-ci-effects": { + "hasura-ddn-cli": { "inputs": { - "flake-parts": "flake-parts_2", - "nixpkgs": [ - "arion", - "nixpkgs" - ] + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1719226092, - "narHash": "sha256-YNkUMcCUCpnULp40g+svYsaH1RbSEj6s4WdZY/SHe38=", - "owner": "hercules-ci", - "repo": "hercules-ci-effects", - "rev": "11e4b8dc112e2f485d7c97e1cee77f9958f498f5", + "lastModified": 1745973480, + "narHash": "sha256-W7j07zThbZAQgF7EsXdCiMzqS7XmZV/TwfiyKJ8bhdg=", + "owner": "hasura", + "repo": "ddn-cli-nix", + "rev": "ec1fbd2a66b042bf25f7c63270cf3bbe67c75ddc", "type": "github" }, "original": { - "owner": "hercules-ci", - "repo": "hercules-ci-effects", + "owner": "hasura", + "repo": "ddn-cli-nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1720542800, - "narHash": "sha256-ZgnNHuKV6h2+fQ5LuqnUaqZey1Lqqt5dTUAiAnqH0QQ=", + "lastModified": 1723362943, + "narHash": "sha256-dFZRVSgmJkyM0bkPpaYRtG/kRMRTorUIDj8BxoOt1T4=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "a58bc8ad779655e790115244571758e8de055e3d", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1748929857, + "narHash": "sha256-lcZQ8RhsmhsK8u7LIFsJhsLh/pzR9yZ8yqpTzyGdj+Q=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "feb2849fdeb70028c70d73b848214b00d324a497", + "rev": "c2a03962b8e24e669fb37b7df10e7c79531ff1a4", "type": "github" }, "original": { @@ -193,9 +197,10 @@ "crane": "crane", "flake-compat": "flake-compat", "graphql-engine-source": "graphql-engine-source", - "nixpkgs": "nixpkgs", + "hasura-ddn-cli": "hasura-ddn-cli", + "nixpkgs": "nixpkgs_2", "rust-overlay": "rust-overlay", - "systems": "systems" + "systems": "systems_2" } }, "rust-overlay": { @@ -205,11 +210,11 @@ ] }, "locked": { - "lastModified": 1722565199, - "narHash": "sha256-2eek4vZKsYg8jip2WQWvAOGMMboQ40DIrllpsI6AlU4=", + "lastModified": 1749091064, + "narHash": "sha256-TGtYjzRX0sueFhwYsnNNFF5TTKnpnloznpIghLzxeXo=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "a9cd2009fb2eeacfea785b45bdbbc33612bba1f1", + "rev": "12419593ce78f2e8e1e89a373c6515885e218acb", "type": "github" }, "original": { @@ -232,6 +237,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index fa8f28ec..e058ed41 100644 --- a/flake.nix +++ b/flake.nix @@ -6,10 +6,9 @@ systems.url = "github:nix-systems/default"; # Nix build system for Rust projects, delegates to cargo - crane = { - url = "github:ipetkov/crane"; - inputs.nixpkgs.follows = "nixpkgs"; - }; + crane.url = "github:ipetkov/crane"; + + hasura-ddn-cli.url = "github:hasura/ddn-cli-nix"; # Allows selecting arbitrary Rust toolchain configurations by editing # `rust-toolchain.toml` @@ -44,7 +43,7 @@ # If source changes aren't picked up automatically try: # # - committing 
changes to the local engine repo - # - running `nix flake lock --update-input graphql-engine-source` in this repo + # - running `nix flake update graphql-engine-source` in this repo # - arion up -d engine # graphql-engine-source = { @@ -57,6 +56,7 @@ { self , nixpkgs , crane + , hasura-ddn-cli , rust-overlay , advisory-db , arion @@ -102,6 +102,8 @@ # compiled for Linux but with the same architecture as `localSystem`. # This is useful for building Docker images on Mac developer machines. pkgsCross.linux = mkPkgsLinux final.buildPlatform.system; + + ddn = hasura-ddn-cli.packages.${final.system}.default; }) ]; @@ -202,6 +204,7 @@ nativeBuildInputs = with pkgs; [ arion.packages.${pkgs.system}.default cargo-insta + ddn just mongosh pkg-config diff --git a/justfile b/justfile index 7c41f4e6..219b64a4 100644 --- a/justfile +++ b/justfile @@ -1,9 +1,29 @@ -# Most of these tests assume that you are running in a nix develop shell. You -# can do that by running `$ nix develop`, or by setting up nix-direnv. +# Run commands in a nix develop shell by default which provides commands like +# `arion`. +set shell := ["nix", "--experimental-features", "nix-command flakes", "develop", "--command", "bash", "-c"] +# Display available recipes default: @just --list +# Run a local development environment using docker. This makes the GraphQL +# Engine available on https://localhost:7100/ with two connected MongoDB +# connector instances. +up: + arion up -d + +# Stop the local development environment docker containers. +down: + arion down + +# Stop the local development environment docker containers, and remove volumes. +down-volumes: + arion down --volumes + +# Output logs from local development environment services. +logs: + arion logs + test: test-unit test-integration test-unit: @@ -17,9 +37,9 @@ test-e2e: (_arion "arion-compose/e2e-testing.nix" "test") # Run `just test-integration` on several MongoDB versions test-mongodb-versions: - MONGODB_IMAGE=mongo:5 just test-integration MONGODB_IMAGE=mongo:6 just test-integration MONGODB_IMAGE=mongo:7 just test-integration + MONGODB_IMAGE=mongo:8 just test-integration # Runs a specified service in a specified project config using arion (a nix # frontend for docker-compose). Propagates the exit status from that service. diff --git a/nix/docker-connector.nix b/nix/docker-connector.nix index de325cc3..faf2974b 100644 --- a/nix/docker-connector.nix +++ b/nix/docker-connector.nix @@ -1,5 +1,6 @@ # This is a function that returns a derivation for a docker image. { mongodb-connector +, cacert , dockerTools , name ? "ghcr.io/hasura/ndc-mongodb" @@ -29,10 +30,8 @@ let "OTEL_SERVICE_NAME=mongodb-connector" "OTEL_EXPORTER_OTLP_ENDPOINT=${default-otlp-endpoint}" ]; - Volumes = { - "${config-directory}" = { }; - }; } // extraConfig; + contents = [ cacert ]; # include TLS root certificate store }; in dockerTools.buildLayeredImage args diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 0329f46d..0f28fc14 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.80.0" +channel = "1.83.0" profile = "default" # see https://rust-lang.github.io/rustup/concepts/profiles.html components = [] # see https://rust-lang.github.io/rustup/concepts/components.html
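For reference, a sketch of how the new justfile recipes above are meant to be used (each recipe runs inside the `nix develop` shell because of the `set shell` setting):

    just up                      # start the GraphQL Engine and connector containers via arion
    just logs                    # print logs from the local development services
    just test-mongodb-versions   # run integration tests against mongo:6, mongo:7, and mongo:8
    just down-volumes            # stop the containers and remove their volumes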
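The weird_field_names.json fixture added above exercises field names that start with `$` or contain dots, which ordinary field paths cannot reference. As an illustration only (not necessarily how the connector itself queries them), such a field can be read in an aggregation pipeline with $getField plus $literal, assuming a locally reachable test_cases database populated by import.sh:

    mongosh test_cases --quiet --eval '
      db.weird_field_names.aggregate([
        { $project: { value: { $getField: { field: { $literal: "$invalid.name" }, input: "$$ROOT" } } } }
      ])
    '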