From 925d1bfcb1a6979613bd6a254cd852438a0fd4c1 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Fri, 9 Feb 2024 19:07:50 +0100 Subject: [PATCH 01/25] qe: Fix CI failures (#4714) --- query-engine/driver-adapters/.npmrc | 1 + .../driver-adapters/executor/package.json | 2 +- .../driver-adapters/executor/src/bench.ts | 2 +- .../migrations/cockroachdb/failure_modes.rs | 36 +++++-------------- 4 files changed, 12 insertions(+), 29 deletions(-) create mode 100644 query-engine/driver-adapters/.npmrc diff --git a/query-engine/driver-adapters/.npmrc b/query-engine/driver-adapters/.npmrc new file mode 100644 index 000000000000..61803ea5ee1e --- /dev/null +++ b/query-engine/driver-adapters/.npmrc @@ -0,0 +1 @@ +lockfile = false diff --git a/query-engine/driver-adapters/executor/package.json b/query-engine/driver-adapters/executor/package.json index cac623ac0930..290d6d8768fc 100644 --- a/query-engine/driver-adapters/executor/package.json +++ b/query-engine/driver-adapters/executor/package.json @@ -23,7 +23,7 @@ "license": "Apache-2.0", "dependencies": { "@libsql/client": "0.3.6", - "@neondatabase/serverless": "0.7.2", + "@neondatabase/serverless": "0.8.1", "@planetscale/database": "1.15.0", "query-engine-wasm-latest": "npm:@prisma/query-engine-wasm@latest", "query-engine-wasm-baseline": "npm:@prisma/query-engine-wasm@0.0.19", diff --git a/query-engine/driver-adapters/executor/src/bench.ts b/query-engine/driver-adapters/executor/src/bench.ts index 80675b7f7302..1f4ffa6ad436 100644 --- a/query-engine/driver-adapters/executor/src/bench.ts +++ b/query-engine/driver-adapters/executor/src/bench.ts @@ -23,7 +23,7 @@ import { QueryEngine as WasmBaseline } from "query-engine-wasm-baseline"; // `query-engine-wasm-latest` refers to the latest published version of the Wasm Query Engine, // rather than the latest locally built one. We're pulling in the Postgres Query Engine // because benchmarks are only run against a Postgres database. 
-import { QueryEngine as WasmLatest } from "query-engine-wasm-latest/postgresql/query_engine"; +import { QueryEngine as WasmLatest } from "query-engine-wasm-latest/postgresql/query_engine.js"; if (!global.crypto) { (global as any).crypto = webcrypto; diff --git a/schema-engine/sql-migration-tests/tests/migrations/cockroachdb/failure_modes.rs b/schema-engine/sql-migration-tests/tests/migrations/cockroachdb/failure_modes.rs index 5f3a3e2ff879..d2f56c829202 100644 --- a/schema-engine/sql-migration-tests/tests/migrations/cockroachdb/failure_modes.rs +++ b/schema-engine/sql-migration-tests/tests/migrations/cockroachdb/failure_modes.rs @@ -27,7 +27,7 @@ fn failing_migration_after_migration_dropping_data(api: TestApi) { "#, ]; let dir = write_migrations(migrations); - let err = api.apply_migrations(&dir).send_unwrap_err().to_string(); + let err = api.apply_migrations(&dir).send_unwrap_err(); let expectation = expect![[r#" A migration failed to apply. New migrations cannot be applied before the error is recovered from. 
Read more about how to resolve migration issues in a production database: https://pris.ly/d/migrate-resolve @@ -39,14 +39,8 @@ fn failing_migration_after_migration_dropping_data(api: TestApi) { ERROR: column "is_good_dog" does not exist DbError { severity: "ERROR", parsed_severity: Some(Error), code: SqlState(E42703), message: "column \"is_good_dog\" does not exist", detail: None, hint: None, position: None, where_: None, schema: None, table: None, column: None, datatype: None, constraint: None, file: Some("column_resolver.go"), line: Some(196), routine: Some("NewUndefinedColumnError") } - - 0: sql_schema_connector::apply_migration::apply_script - with migration_name=" 2" - at schema-engine/connectors/sql-schema-connector/src/apply_migration.rs:106 - 1: schema_core::commands::apply_migrations::Applying migration - with migration_name=" 2" - at schema-engine/core/src/commands/apply_migrations.rs:91"#]]; - expectation.assert_eq(&err); + "#]]; + expectation.assert_eq(err.message().unwrap()); } #[test_connector(tags(CockroachDb), exclude(CockroachDb231))] @@ -74,7 +68,7 @@ fn failing_step_in_migration_dropping_data(api: TestApi) { "#, ]; let dir = write_migrations(migrations); - let err = api.apply_migrations(&dir).send_unwrap_err().to_string(); + let err = api.apply_migrations(&dir).send_unwrap_err(); let expectation = expect![[r#" A migration failed to apply. New migrations cannot be applied before the error is recovered from. 
Read more about how to resolve migration issues in a production database: https://pris.ly/d/migrate-resolve @@ -86,14 +80,8 @@ fn failing_step_in_migration_dropping_data(api: TestApi) { ERROR: column "is_good_dog" does not exist DbError { severity: "ERROR", parsed_severity: Some(Error), code: SqlState(E42703), message: "column \"is_good_dog\" does not exist", detail: None, hint: None, position: None, where_: None, schema: None, table: None, column: None, datatype: None, constraint: None, file: Some("column_resolver.go"), line: Some(196), routine: Some("NewUndefinedColumnError") } - - 0: sql_schema_connector::apply_migration::apply_script - with migration_name=" 1" - at schema-engine/connectors/sql-schema-connector/src/apply_migration.rs:106 - 1: schema_core::commands::apply_migrations::Applying migration - with migration_name=" 1" - at schema-engine/core/src/commands/apply_migrations.rs:91"#]]; - expectation.assert_eq(&err); + "#]]; + expectation.assert_eq(err.message().unwrap()); } // Skipped on CRDB 23.1 because of https://github.com/prisma/prisma/issues/20851 @@ -181,7 +169,7 @@ fn syntax_errors_return_error_position(api: TestApi) { ); "#]; let dir = write_migrations(migrations); - let err = api.apply_migrations(&dir).send_unwrap_err().to_string(); + let err = api.apply_migrations(&dir).send_unwrap_err(); let expectation = expect![[r#" A migration failed to apply. New migrations cannot be applied before the error is recovered from. 
Read more about how to resolve migration issues in a production database: https://pris.ly/d/migrate-resolve @@ -200,14 +188,8 @@ fn syntax_errors_return_error_position(api: TestApi) { HINT: try \h CREATE TABLE DbError { severity: "ERROR", parsed_severity: Some(Error), code: SqlState(E42601), message: "at or near \"is_good_dog\": syntax error", detail: Some("source SQL:\nCREATE TABLE \"Dog\" (\n id SERIAL PRIMARY KEY,\n name TEXT NOT NULL\n is_good_dog BOOLEAN NOT NULL DEFAULT TRUE\n ^"), hint: Some("try \\h CREATE TABLE"), position: None, where_: None, schema: None, table: None, column: None, datatype: None, constraint: None, file: Some("lexer.go"), line: Some(271), routine: Some("Error") } - - 0: sql_schema_connector::apply_migration::apply_script - with migration_name=" 0" - at schema-engine/connectors/sql-schema-connector/src/apply_migration.rs:106 - 1: schema_core::commands::apply_migrations::Applying migration - with migration_name=" 0" - at schema-engine/core/src/commands/apply_migrations.rs:91"#]]; - expectation.assert_eq(&err); + "#]]; + expectation.assert_eq(err.message().unwrap()); } fn write_migrations(migrations: &[&str]) -> tempfile::TempDir { From 32c59b72145367c5396f346069c130c2a03b3421 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Fri, 9 Feb 2024 20:45:41 +0100 Subject: [PATCH 02/25] qe: `--enable-playground` automatically enables graphql protocol (#4711) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If protocol is not specified explicitly and `--enable-playground` flag is used, engine will default to GraphQL instead of JSON. 
Close #4679 Co-authored-by: Joël Galeran --- query-engine/query-engine/src/opt.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/query-engine/query-engine/src/opt.rs b/query-engine/query-engine/src/opt.rs index eff1c14d3c6c..fd5639a18573 100644 --- a/query-engine/query-engine/src/opt.rs +++ b/query-engine/query-engine/src/opt.rs @@ -224,7 +224,13 @@ impl PrismaOpt { self.engine_protocol .as_ref() .map(EngineProtocol::from) - .unwrap_or(EngineProtocol::Json) + .unwrap_or_else(|| { + if self.enable_playground { + EngineProtocol::Graphql + } else { + EngineProtocol::Json + } + }) } } From 45a0eb4a1d0047c35fac242ad1fcaa28bd699043 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Mon, 12 Feb 2024 12:32:47 +0100 Subject: [PATCH 03/25] qe-wasm: Update planetscale (#4716) --- query-engine/driver-adapters/executor/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/query-engine/driver-adapters/executor/package.json b/query-engine/driver-adapters/executor/package.json index 290d6d8768fc..8e1fd6e2a398 100644 --- a/query-engine/driver-adapters/executor/package.json +++ b/query-engine/driver-adapters/executor/package.json @@ -24,7 +24,7 @@ "dependencies": { "@libsql/client": "0.3.6", "@neondatabase/serverless": "0.8.1", - "@planetscale/database": "1.15.0", + "@planetscale/database": "1.16.0", "query-engine-wasm-latest": "npm:@prisma/query-engine-wasm@latest", "query-engine-wasm-baseline": "npm:@prisma/query-engine-wasm@0.0.19", "@prisma/adapter-libsql": "workspace:*", From e24eacf0769408713045fe802a69b2dbf9c12334 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Tue, 13 Feb 2024 10:05:33 +0100 Subject: [PATCH 04/25] qe-wasm: Remove `Buffer` usage (#4710) * qe-wasm: Remove `Buffer` usage Convert `Byte` arguments to `Uint8Array` instead. Allows us to stop polyfilling `Buffer` for `_bg.js` fiels in engine. 
* Support that for napi too --- query-engine/driver-adapters/src/napi/conversion.rs | 7 ++++++- query-engine/driver-adapters/src/wasm/conversion.rs | 13 ++----------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/query-engine/driver-adapters/src/napi/conversion.rs b/query-engine/driver-adapters/src/napi/conversion.rs index ac2dda60a279..6cfe445925e3 100644 --- a/query-engine/driver-adapters/src/napi/conversion.rs +++ b/query-engine/driver-adapters/src/napi/conversion.rs @@ -18,7 +18,12 @@ impl ToNapiValue for JSArg { match value { JSArg::Value(v) => ToNapiValue::to_napi_value(env, v), JSArg::Buffer(bytes) => { - ToNapiValue::to_napi_value(env, napi::Env::from_raw(env).create_buffer_with_data(bytes)?.into_raw()) + let env = napi::Env::from_raw(env); + let length = bytes.len(); + let buffer = env.create_arraybuffer_with_data(bytes)?.into_raw(); + let byte_array = buffer.into_typedarray(napi::TypedArrayType::Uint8, length, 0)?; + + ToNapiValue::to_napi_value(env.raw(), byte_array) } // While arrays are encodable as JSON generally, their element might not be, or may be // represented in a different way than we need. 
We use this custom logic for all arrays diff --git a/query-engine/driver-adapters/src/wasm/conversion.rs b/query-engine/driver-adapters/src/wasm/conversion.rs index c41ff8a23107..73e6a7c30331 100644 --- a/query-engine/driver-adapters/src/wasm/conversion.rs +++ b/query-engine/driver-adapters/src/wasm/conversion.rs @@ -3,16 +3,7 @@ use crate::conversion::JSArg; use super::to_js::{serde_serialize, ToJsValue}; use crate::types::Query; use js_sys::{Array, JsString, Object, Reflect, Uint8Array}; -use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; - -#[wasm_bindgen] -extern "C" { - #[wasm_bindgen(extends = Object)] - pub type Buffer; - - #[wasm_bindgen(static_method_of = Buffer)] - pub fn from(array: &Uint8Array) -> Buffer; -} +use wasm_bindgen::JsValue; impl ToJsValue for Query { fn to_js_value(&self) -> Result { @@ -36,7 +27,7 @@ impl ToJsValue for JSArg { JSArg::Value(value) => serde_serialize(value), JSArg::Buffer(buf) => { let array = Uint8Array::from(buf.as_slice()); - Ok(Buffer::from(&array).into()) + Ok(array.into()) } JSArg::Array(value) => { let array = Array::new(); From 3f0cbe126b88f1ca6476649bfdf4ecb2ea3bb9e2 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Tue, 13 Feb 2024 10:14:16 +0100 Subject: [PATCH 05/25] driver-adapters: Pin drivers to the versions, used in prisma/prisma (#4721) * driver-adapters: Pin drivers to the versions, used in prisma/prisma See https://github.com/prisma/prisma/pull/23087 Removes direct depenencies on the driver packages and picks them from the meta-package in prisma/prisma. Should avoid disaster on every update. 
Fix prisma/team-orm#940 * Read GITHUB_BRANCH in benchmarks job too * DRIVER_ADAPTERS_BRANCH=chore/bundled-js-drivers --- .github/workflows/wasm-benchmarks.yml | 11 ++++++++++- query-engine/driver-adapters/executor/package.json | 6 +----- query-engine/driver-adapters/executor/src/bench.ts | 4 ++-- query-engine/driver-adapters/executor/src/testd.ts | 12 +++++------- query-engine/driver-adapters/pnpm-workspace.yaml | 1 + 5 files changed, 19 insertions(+), 15 deletions(-) diff --git a/.github/workflows/wasm-benchmarks.yml b/.github/workflows/wasm-benchmarks.yml index 94241517f30f..1fbb5281ba4f 100644 --- a/.github/workflows/wasm-benchmarks.yml +++ b/.github/workflows/wasm-benchmarks.yml @@ -21,6 +21,8 @@ jobs: steps: - name: Checkout PR branch uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: "Setup Node.js" uses: actions/setup-node@v4 @@ -43,9 +45,16 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract Branch Name + run: | + branch="$(git show -s --format=%s | grep -o "DRIVER_ADAPTERS_BRANCH=[^ ]*" | cut -f2 -d=)" + if [ -n "$branch" ]; then + echo "Using $branch branch of driver adapters" + echo "DRIVER_ADAPTERS_BRANCH=$branch" >> "$GITHUB_ENV" + fi - uses: cachix/install-nix-action@v24 - - name: Setup benchmark run: make setup-pg-bench diff --git a/query-engine/driver-adapters/executor/package.json b/query-engine/driver-adapters/executor/package.json index 8e1fd6e2a398..3733b94193e8 100644 --- a/query-engine/driver-adapters/executor/package.json +++ b/query-engine/driver-adapters/executor/package.json @@ -22,9 +22,6 @@ "sideEffects": false, "license": "Apache-2.0", "dependencies": { - "@libsql/client": "0.3.6", - "@neondatabase/serverless": "0.8.1", - "@planetscale/database": "1.16.0", "query-engine-wasm-latest": "npm:@prisma/query-engine-wasm@latest", "query-engine-wasm-baseline": "npm:@prisma/query-engine-wasm@0.0.19", "@prisma/adapter-libsql": 
"workspace:*", @@ -32,9 +29,8 @@ "@prisma/adapter-pg": "workspace:*", "@prisma/adapter-planetscale": "workspace:*", "@prisma/driver-adapter-utils": "workspace:*", - "@types/pg": "8.10.9", + "@prisma/bundled-js-drivers": "workspace:*", "mitata": "^0.1.6", - "pg": "8.11.3", "undici": "6.0.1", "ws": "8.14.2" }, diff --git a/query-engine/driver-adapters/executor/src/bench.ts b/query-engine/driver-adapters/executor/src/bench.ts index 1f4ffa6ad436..edd1cb6530ae 100644 --- a/query-engine/driver-adapters/executor/src/bench.ts +++ b/query-engine/driver-adapters/executor/src/bench.ts @@ -9,7 +9,7 @@ import { fileURLToPath } from "node:url"; import * as qe from "./qe"; -import pgDriver from "pg"; +import { pg } from "@prisma/bundled-js-drivers"; import * as prismaPg from "@prisma/adapter-pg"; import { bindAdapter, DriverAdapter } from "@prisma/driver-adapter-utils"; @@ -176,7 +176,7 @@ async function pgAdapter(url: string): Promise { if (schemaName != null) { args.options = `--search_path="${schemaName}"`; } - const pool = new pgDriver.Pool(args); + const pool = new pg.Pool(args); return new prismaPg.PrismaPg(pool, { schema: schemaName, diff --git a/query-engine/driver-adapters/executor/src/testd.ts b/query-engine/driver-adapters/executor/src/testd.ts index ae40ee229490..4345887fe659 100644 --- a/query-engine/driver-adapters/executor/src/testd.ts +++ b/query-engine/driver-adapters/executor/src/testd.ts @@ -3,21 +3,18 @@ import * as readline from 'node:readline' import * as jsonRpc from './jsonRpc' // pg dependencies -import pgDriver from 'pg' import * as prismaPg from '@prisma/adapter-pg' // neon dependencies -import { Pool as NeonPool, neonConfig } from '@neondatabase/serverless' import { fetch } from 'undici' import { WebSocket } from 'ws' +import { pg, neon, planetScale, libSql } from '@prisma/bundled-js-drivers' import * as prismaNeon from '@prisma/adapter-neon' // libsql dependencies -import { createClient } from '@libsql/client' import { PrismaLibSQL } from 
'@prisma/adapter-libsql' // planetscale dependencies -import { Client as PlanetscaleClient } from '@planetscale/database' import { PrismaPlanetScale } from '@prisma/adapter-planetscale' @@ -256,7 +253,7 @@ function postgresSchemaName(url: string) { async function pgAdapter(url: string): Promise { const schemaName = postgresSchemaName(url) - const pool = new pgDriver.Pool(postgres_options(url)) + const pool = new pg.Pool(postgres_options(url)) return new prismaPg.PrismaPg(pool, { schema: schemaName }) @@ -264,6 +261,7 @@ async function pgAdapter(url: string): Promise { } async function neonWsAdapter(url: string): Promise { + const { neonConfig, Pool: NeonPool } = neon const proxyURL = JSON.parse(process.env.DRIVER_ADAPTER_CONFIG || '{}').proxy_url ?? '' if (proxyURL == '') { throw new Error("DRIVER_ADAPTER_CONFIG is not defined or empty, but its required for neon adapter."); @@ -281,7 +279,7 @@ async function neonWsAdapter(url: string): Promise { } async function libsqlAdapter(url: string): Promise { - const libsql = createClient({ url, intMode: 'bigint' }) + const libsql = libSql.createClient({ url, intMode: 'bigint' }) return new PrismaLibSQL(libsql) } @@ -291,7 +289,7 @@ async function planetscaleAdapter(url: string): Promise { throw new Error("DRIVER_ADAPTER_CONFIG is not defined or empty, but its required for planetscale adapter."); } - const client = new PlanetscaleClient({ + const client = new planetScale.Client({ // preserving path name so proxy url would look like real DB url url: copyPathName(url, proxyUrl), fetch, diff --git a/query-engine/driver-adapters/pnpm-workspace.yaml b/query-engine/driver-adapters/pnpm-workspace.yaml index a616622479a5..7d2cb5c6d311 100644 --- a/query-engine/driver-adapters/pnpm-workspace.yaml +++ b/query-engine/driver-adapters/pnpm-workspace.yaml @@ -5,4 +5,5 @@ packages: - '../../../prisma/packages/adapter-planetscale' - '../../../prisma/packages/driver-adapter-utils' - '../../../prisma/packages/debug' + - 
'../../../prisma/packages/bundled-js-drivers' - './executor' From 9878210aad0f85f0c2a97fce55931d2b828f288f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=ABl=20Galeran?= Date: Tue, 13 Feb 2024 10:16:20 +0100 Subject: [PATCH 06/25] ci(wasm comments): only create comments on our repo (#4723) --- .github/workflows/wasm-benchmarks.yml | 3 +++ .github/workflows/wasm-size.yml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/wasm-benchmarks.yml b/.github/workflows/wasm-benchmarks.yml index 1fbb5281ba4f..05e170b24520 100644 --- a/.github/workflows/wasm-benchmarks.yml +++ b/.github/workflows/wasm-benchmarks.yml @@ -131,6 +131,9 @@ jobs: - name: Create or update report uses: peter-evans/create-or-update-comment@v3 + # Only run on our repository + # It avoids an expected failure on forks + if: github.repository == 'prisma/prisma-engines' with: comment-id: ${{ steps.findReportComment.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} diff --git a/.github/workflows/wasm-size.yml b/.github/workflows/wasm-size.yml index 9ea9d7479adc..42d115c4f17e 100644 --- a/.github/workflows/wasm-size.yml +++ b/.github/workflows/wasm-size.yml @@ -122,6 +122,9 @@ jobs: - name: Create or update report uses: peter-evans/create-or-update-comment@v3 + # Only run on our repository + # It avoids an expected failure on forks + if: github.repository == 'prisma/prisma-engines' with: comment-id: ${{ steps.findReportComment.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} From 346196f2538220c332ae2032d0b491bd33c832f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Fern=C3=A1ndez?= Date: Tue, 13 Feb 2024 12:12:11 +0100 Subject: [PATCH 07/25] wasm-benchmarks: Fix potential collision of sql in recordings (#4689) * Fix JSON serialization of bigints * wasm-benchmarks: apply lateral joins when possible, and return rel fields in results (#4691) * Store recordings if requested and use joins * More traversal and serialization * Create 
recordings for all the different engines as they might be using different SQL queries. * Remove stale comment --- .../executor/bench/queries.json | 38 +++++++++++++-- .../executor/bench/schema.prisma | 2 +- .../driver-adapters/executor/src/bench.ts | 47 ++++++++++++++----- .../driver-adapters/executor/src/recording.ts | 28 ++++++----- 4 files changed, 85 insertions(+), 30 deletions(-) diff --git a/query-engine/driver-adapters/executor/bench/queries.json b/query-engine/driver-adapters/executor/bench/queries.json index e143da135acc..5410f162be11 100644 --- a/query-engine/driver-adapters/executor/bench/queries.json +++ b/query-engine/driver-adapters/executor/bench/queries.json @@ -1,6 +1,6 @@ [ { - "description": "movies.findMany() (all - 25000)", + "description": "movies.findMany() (all - ~50K)", "query": { "action": "findMany", "modelName": "Movie", @@ -59,7 +59,12 @@ "cast": true }, "selection": { - "$scalars": true + "$scalars": true, + "cast": { + "selection": { + "$scalars": true + } + } } } } @@ -82,7 +87,12 @@ "cast": true }, "selection": { - "$scalars": true + "$scalars": true, + "cast": { + "selection": { + "$scalars": true + } + } } } } @@ -104,7 +114,16 @@ } }, "selection": { - "$scalars": true + "$scalars": true, + "cast": { + "selection": { + "person": { + "selection": { + "$scalars": true + } + } + } + } } } } @@ -131,7 +150,16 @@ } }, "selection": { - "$scalars": true + "$scalars": true, + "cast": { + "selection": { + "person": { + "selection": { + "$scalars": true + } + } + } + } } } } diff --git a/query-engine/driver-adapters/executor/bench/schema.prisma b/query-engine/driver-adapters/executor/bench/schema.prisma index a45c1e62b4cc..6346afed158d 100644 --- a/query-engine/driver-adapters/executor/bench/schema.prisma +++ b/query-engine/driver-adapters/executor/bench/schema.prisma @@ -5,7 +5,7 @@ datasource db { generator foo { provider = "prisma-client-js" - previewFeatures = ["driverAdapters"] + previewFeatures = ["driverAdapters", "relationJoins"] } 
model Movie { diff --git a/query-engine/driver-adapters/executor/src/bench.ts b/query-engine/driver-adapters/executor/src/bench.ts index edd1cb6530ae..e168e95a9cab 100644 --- a/query-engine/driver-adapters/executor/src/bench.ts +++ b/query-engine/driver-adapters/executor/src/bench.ts @@ -45,11 +45,18 @@ async function main(): Promise { const withErrorCapturing = bindAdapter(pg); // We build two decorators for recording and replaying db queries. - const { recorder, replayer } = recording(withErrorCapturing); + const { recorder, replayer, recordings } = recording(withErrorCapturing); // We exercise the queries recording them await recordQueries(recorder, datamodel, prismaQueries); + // Dump recordings if requested + if (process.env.BENCH_RECORDINGS_FILE != null) { + const recordingsJson = JSON.stringify(recordings.data(), null, 2); + await fs.writeFile(process.env.BENCH_RECORDINGS_FILE, recordingsJson); + debug(`Recordings written to ${process.env.BENCH_RECORDINGS_FILE}`); + } + // Then we benchmark the execution of the queries but instead of hitting the DB // we fetch results from the recordings, thus isolating the performance // of the engine + driver adapter code from that of the DB IO. @@ -61,23 +68,37 @@ async function recordQueries( datamodel: string, prismaQueries: any ): Promise { - const qe = await initQeWasmBaseLine(adapter, datamodel); - await qe.connect(""); + // Different engines might have made different SQL queries to complete the same Prisma Query, + // so we record the results of all engines for the benchmarking phase. 
+ const napi = await initQeNapiCurrent(adapter, datamodel); + await napi.connect(""); + const wasmCurrent = await initQeWasmCurrent(adapter, datamodel); + await wasmCurrent.connect(""); + const wasmBaseline = await initQeWasmBaseLine(adapter, datamodel); + await wasmBaseline.connect(""); + const wasmLatest = await initQeWasmLatest(adapter, datamodel); + await wasmLatest.connect(""); try { - for (const prismaQuery of prismaQueries) { - const { description, query } = prismaQuery; - const res = await qe.query(JSON.stringify(query), "", undefined); - - const errors = JSON.parse(res).errors; - if (errors != null && errors.length > 0) { - throw new Error( - `Query failed for ${description}: ${JSON.stringify(res)}` - ); + for (const qe of [napi, wasmCurrent, wasmBaseline, wasmLatest]) { + for (const prismaQuery of prismaQueries) { + const { description, query } = prismaQuery; + const res = await qe.query(JSON.stringify(query), "", undefined); + console.log(res[9]); + + const errors = JSON.parse(res).errors; + if (errors != null) { + throw new Error( + `Query failed for ${description}: ${JSON.stringify(res)}` + ); + } } } } finally { - await qe.disconnect(""); + await napi.disconnect(""); + await wasmCurrent.disconnect(""); + await wasmBaseline.disconnect(""); + await wasmLatest.disconnect(""); } } diff --git a/query-engine/driver-adapters/executor/src/recording.ts b/query-engine/driver-adapters/executor/src/recording.ts index a4152488e985..0602cb69dc4e 100644 --- a/query-engine/driver-adapters/executor/src/recording.ts +++ b/query-engine/driver-adapters/executor/src/recording.ts @@ -13,6 +13,7 @@ export function recording(adapter: DriverAdapter) { return { recorder: recorder(adapter, recordings), replayer: replayer(adapter, recordings), + recordings: recordings, }; } @@ -31,9 +32,7 @@ function recorder(adapter: DriverAdapter, recordings: Recordings) { return result; }, executeRaw: async (params) => { - const result = await adapter.executeRaw(params); - 
recordings.addCommandResults(params, result); - return result; + throw new Error("Not implemented"); }, }; } @@ -61,18 +60,25 @@ function createInMemoryRecordings() { const queryResults: Map> = new Map(); const commandResults: Map> = new Map(); - // Recording is currently only used in benchmarks. Before we used to serialize the whole query - // (sql + args) but since bigints are not serialized by JSON.stringify, and we didn’t really need - // (sql + args) but since bigints are not serialized by JSON.stringify, and we didn't really need - // args for benchmarks, we just serialize the sql part. - // - // If this ever changes (we reuse query recording in tests) we need to make sure to serialize the - // args as well. const queryToKey = (params: Query) => { - return JSON.stringify(params.sql); + var sql = params.sql; + params.args.forEach((arg: any, i) => { + sql = sql.replace("$" + (i + 1), arg.toString()); + }); + return sql; }; return { + data: (): Map => { + const map = new Map(); + for (const [key, value] of queryResults.entries()) { + value.map((resultSet) => { + map[key] = resultSet; + }); + } + return map; + }, + addQueryResults: (params: Query, result: Result) => { const key = queryToKey(params); queryResults.set(key, result); From e9f802b49f54e4bf5bca901f406534ca72fb3cff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Fern=C3=A1ndez?= Date: Tue, 13 Feb 2024 12:13:17 +0100 Subject: [PATCH 08/25] Strangle nix (#4713) * Generate the same output as nix when measuring size * Don't use nix in wasm-size.yml, and wasm-benchmarks.yml * Fix wasm-size job * Point base branch to head * Bring back rustup in the build script. 
Should be a no-op * Get rid of nix in publish-query-engine-wasm * Add build-qe-wasm to Makefile * Also output size of all providers * Do not publish cargo docs * WASM_SIZE_OUTPUT to ENGINE_SIZE_OUTPUT * Try extracting deps for rust-wasm into a workflow * Move rust-wasm-setup * Add check-schema-wasm make target * Make rust wasm setup a composite workflow * Render prisma-schema-wasm version in package.json * add newline * Specify shell following action metadata syntax * Let the build script add targets and components * Pin nightly * Use relative path to scripts because we are always in the repo root * Try to use rustup included in action runners * add wasm-pack and wasm-opt * Driver adapter tests * Add clean wrappers * Remove unnecessary building step * use make target in build-prisma-schema-wasm workflow * update publish-prisma-schema-wasm pipeline * Fix * remove outdated comment * update PENDING.md * fix measure size * Remove things from PENDING.md * Use cargo binistall to accelerate binary installation * Fix previous bug in WASM_BUILD_PROFILE * add wasm target for schema-wasm * Use standard posix bc syntax * deduplicate wasm target * Output sizes in bytes, no KiB * Remove PENDING.md * Ping cargo binstall versions * Remove nix files except the shell * Be intentional about the use of nix * Extract toml config to rust-toolchain.toml cherry-picked from #4699 * Fix shellcheck * Fix publishing prisma-schema-wasm * Address some feedback * Revert rust-toolchain.toml * Remove engine size publishing from pipeline momentarily. 
Read more Extracted from the scope to: https://github.com/prisma/team-orm/issues/943 * Take back publishing engine sizes * Update flake.nix --------- Co-authored-by: Alexey Orlenko --- .../workflows/build-prisma-schema-wasm.yml | 6 +- .../include/rust-wasm-setup/action.yml | 25 +++ .github/workflows/on-push-to-main.yml | 3 - .../workflows/publish-prisma-schema-wasm.yml | 13 +- .../workflows/publish-query-engine-wasm.yml | 22 ++- .../test-query-engine-driver-adapters.yml | 2 +- .github/workflows/wasm-benchmarks.yml | 6 +- .github/workflows/wasm-size.yml | 32 +--- Makefile | 70 +++++-- flake.nix | 9 +- nix/README.md | 5 + nix/all-engines.nix | 170 ----------------- nix/cargo-doc.nix | 46 ----- nix/cli-prisma.nix | 44 ----- nix/cli-shell.nix | 22 --- nix/dev-vm.nix | 98 ---------- nix/memory-profiling.nix | 50 ----- nix/prisma-schema-wasm.nix | 55 ------ nix/publish-engine-size.nix | 175 +++++++++++++++++- prisma-schema-wasm/scripts/install.sh | 8 +- query-engine/query-engine-wasm/build.sh | 22 +-- 21 files changed, 305 insertions(+), 578 deletions(-) create mode 100644 .github/workflows/include/rust-wasm-setup/action.yml create mode 100644 nix/README.md delete mode 100644 nix/all-engines.nix delete mode 100644 nix/cargo-doc.nix delete mode 100644 nix/cli-prisma.nix delete mode 100644 nix/cli-shell.nix delete mode 100644 nix/dev-vm.nix delete mode 100644 nix/memory-profiling.nix delete mode 100644 nix/prisma-schema-wasm.nix diff --git a/.github/workflows/build-prisma-schema-wasm.yml b/.github/workflows/build-prisma-schema-wasm.yml index f52db55654e7..dad4877d137d 100644 --- a/.github/workflows/build-prisma-schema-wasm.yml +++ b/.github/workflows/build-prisma-schema-wasm.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - - run: nix build .#prisma-schema-wasm - - run: nix flake check + - uses: ./.github/workflows/include/rust-wasm-setup + + - run: make check-schema-wasm-package PROFILE=release 
diff --git a/.github/workflows/include/rust-wasm-setup/action.yml b/.github/workflows/include/rust-wasm-setup/action.yml new file mode 100644 index 000000000000..5a22bc1bf3cd --- /dev/null +++ b/.github/workflows/include/rust-wasm-setup/action.yml @@ -0,0 +1,25 @@ +name: Rust + WASM common deps + +runs: + using: "composite" + steps: + - name: Set default toolchain + shell: bash + run: rustup default stable + + - name: Add WASM target + shell: bash + run: rustup target add wasm32-unknown-unknown + + - uses: cargo-bins/cargo-binstall@main + + - name: Install wasm-bindgen, wasm-opt + shell: bash + run: | + cargo binstall -y \ + wasm-bindgen-cli@0.2.89 \ + wasm-opt@0.116.0 + + - name: Install bc + shell: bash + run: sudo apt update && sudo apt-get install -y bc diff --git a/.github/workflows/on-push-to-main.yml b/.github/workflows/on-push-to-main.yml index 909862708448..d1ce5822b553 100644 --- a/.github/workflows/on-push-to-main.yml +++ b/.github/workflows/on-push-to-main.yml @@ -27,8 +27,5 @@ jobs: git config user.email "prismabots@gmail.com" git config user.name "prisma-bot" - - name: Generate cargo docs for the workspace to gh-pages branch - run: nix run .#publish-cargo-docs - - name: Publish engines size to gh-pages branch run: nix run .#publish-engine-size diff --git a/.github/workflows/publish-prisma-schema-wasm.yml b/.github/workflows/publish-prisma-schema-wasm.yml index c8923944cb4a..9560ebeef3ba 100644 --- a/.github/workflows/publish-prisma-schema-wasm.yml +++ b/.github/workflows/publish-prisma-schema-wasm.yml @@ -32,10 +32,11 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.enginesHash }} - - uses: cachix/install-nix-action@v24 + + - uses: ./.github/workflows/include/rust-wasm-setup - name: Build - run: nix build .#prisma-schema-wasm + run: make build-schema-wasm PROFILE=release SCHEMA_WASM_VERSION=${{ github.event.inputs.enginesWrapperVersion }} - uses: actions/setup-node@v4 with: @@ -45,11 +46,9 @@ jobs: - name: Set up NPM token for 
publishing later run: echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > ~/.npmrc - - name: Update version in package.json & Publish @prisma/prisma-schema-wasm - run: | - # Update version in package.json and return directory for later usage - PACKAGE_DIR=$( nix run .#renderPrismaSchemaWasmPackage ${{ github.event.inputs.enginesWrapperVersion }}) - npm publish "$PACKAGE_DIR" --access public --tag ${{ github.event.inputs.npmDistTag }} + - name: Publish @prisma/prisma-schema-wasm + run: npm publish --access public --tag ${{ github.event.inputs.npmDistTag }} + working-directory: target/prisma-schema-wasm # # Failure handlers # diff --git a/.github/workflows/publish-query-engine-wasm.yml b/.github/workflows/publish-query-engine-wasm.yml index 608876d56fd0..41d5d8611b15 100644 --- a/.github/workflows/publish-query-engine-wasm.yml +++ b/.github/workflows/publish-query-engine-wasm.yml @@ -9,14 +9,14 @@ on: inputs: packageVersion: required: true - description: 'New @prisma/query-engine-wasm package version' + description: "New @prisma/query-engine-wasm package version" enginesHash: required: true - description: 'prisma-engines commit to build' + description: "prisma-engines commit to build" npmDistTag: required: true - default: 'latest' - description: 'npm dist-tag (e.g. latest or integration)' + default: "latest" + description: "npm dist-tag (e.g. 
latest or integration)" jobs: build: @@ -30,22 +30,24 @@ jobs: with: ref: ${{ github.event.inputs.enginesHash }} - - uses: cachix/install-nix-action@v24 + - uses: ./.github/workflows/include/rust-wasm-setup - name: Build @prisma/query-engine-wasm - run: nix run .#export-query-engine-wasm "${{ github.event.inputs.packageVersion }}" package + run: make build-qe-wasm + env: + QE_WASM_VERSION: ${{ github.event.inputs.packageVersion }} - name: Install Node.js uses: actions/setup-node@v4 with: - node-version: '20.x' + node-version: "20.x" - name: Set up NPM token for publishing run: echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > ~/.npmrc - name: Publish @prisma/query-engine-wasm run: npm publish --access public --tag ${{ github.event.inputs.npmDistTag }} - working-directory: package + working-directory: query-engine/query-engine-wasm/pkg # # Failure handlers @@ -57,7 +59,7 @@ jobs: if: ${{ failure() }} uses: rtCamp/action-slack-notify@v2.2.1 env: - SLACK_TITLE: 'Building and publishing @prisma/query-engine-wasm failed :x:' - SLACK_COLOR: '#FF0000' + SLACK_TITLE: "Building and publishing @prisma/query-engine-wasm failed :x:" + SLACK_COLOR: "#FF0000" SLACK_CHANNEL: feed-prisma-query-engine-wasm-publish-failures SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_WASM_FAILING }} diff --git a/.github/workflows/test-query-engine-driver-adapters.yml b/.github/workflows/test-query-engine-driver-adapters.yml index 44761da11f77..6362525c053b 100644 --- a/.github/workflows/test-query-engine-driver-adapters.yml +++ b/.github/workflows/test-query-engine-driver-adapters.yml @@ -97,7 +97,7 @@ jobs: echo "DRIVER_ADAPTERS_BRANCH=$branch" >> "$GITHUB_ENV" fi - - uses: cachix/install-nix-action@v24 + - uses: ./.github/workflows/include/rust-wasm-setup - uses: taiki-e/install-action@nextest - run: make ${{ matrix.adapter.setup_task }} diff --git a/.github/workflows/wasm-benchmarks.yml b/.github/workflows/wasm-benchmarks.yml index 05e170b24520..d0630ff90108 100644 --- 
a/.github/workflows/wasm-benchmarks.yml +++ b/.github/workflows/wasm-benchmarks.yml @@ -24,6 +24,8 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} + - uses: ./.github/workflows/include/rust-wasm-setup + - name: "Setup Node.js" uses: actions/setup-node@v4 @@ -32,9 +34,6 @@ jobs: with: version: 8 - - name: Install bc - run: sudo apt update && sudo apt-get install -y bc - - name: "Login to Docker Hub" uses: docker/login-action@v3 continue-on-error: true @@ -54,7 +53,6 @@ jobs: echo "DRIVER_ADAPTERS_BRANCH=$branch" >> "$GITHUB_ENV" fi - - uses: cachix/install-nix-action@v24 - name: Setup benchmark run: make setup-pg-bench diff --git a/.github/workflows/wasm-size.yml b/.github/workflows/wasm-size.yml index 42d115c4f17e..bfabe08e84ef 100644 --- a/.github/workflows/wasm-size.yml +++ b/.github/workflows/wasm-size.yml @@ -29,22 +29,13 @@ jobs: - name: Checkout PR branch uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - with: - # we need internet access for the moment - extra_nix_config: | - sandbox = false + - uses: ./.github/workflows/include/rust-wasm-setup - name: Build and measure PR branch id: measure run: | - nix build -L .#query-engine-wasm-gz - - for provider in "postgresql" "mysql" "sqlite"; do - echo "${provider}_size=$(wc --bytes < ./result/query-engine-$provider.wasm)" >> $GITHUB_OUTPUT - echo "${provider}_size_gz=$(wc --bytes < ./result/query-engine-$provider.wasm.gz)" >> $GITHUB_OUTPUT - done - + export ENGINE_SIZE_OUTPUT=$GITHUB_OUTPUT + make measure-qe-wasm base-wasm-size: name: calculate module sizes (base branch) @@ -59,23 +50,16 @@ jobs: steps: - name: Checkout base branch uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.base.sha }} + # with: + # ref: ${{ github.event.pull_request.base.sha }} - - uses: cachix/install-nix-action@v24 - with: - extra_nix_config: | - sandbox = false + - uses: ./.github/workflows/include/rust-wasm-setup - name: Build and measure base branch id: measure run: | - nix build -L 
.#query-engine-wasm-gz - - for provider in "postgresql" "mysql" "sqlite"; do - echo "${provider}_size=$(wc --bytes < ./result/query-engine-$provider.wasm)" >> $GITHUB_OUTPUT - echo "${provider}_size_gz=$(wc --bytes < ./result/query-engine-$provider.wasm.gz)" >> $GITHUB_OUTPUT - done + export ENGINE_SIZE_OUTPUT=$GITHUB_OUTPUT + make measure-qe-wasm report-diff: name: report module size diff --git a/Makefile b/Makefile index ace0423a7691..e55627f7a436 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,13 @@ +REPO_ROOT := $(shell git rev-parse --show-toplevel) + CONFIG_PATH = ./query-engine/connector-test-kit-rs/test-configs CONFIG_FILE = .test_config SCHEMA_EXAMPLES_PATH = ./query-engine/example_schemas DEV_SCHEMA_FILE = dev_datamodel.prisma DRIVER_ADAPTERS_BRANCH ?= main - -ifndef DISABLE_NIX -NIX := $(shell type nix 2> /dev/null) -endif +ENGINE_SIZE_OUTPUT ?= /dev/stdout +QE_WASM_VERSION ?= 0.0.0 +SCHEMA_WASM_VERSION ?= 0.0.0 LIBRARY_EXT := $(shell \ case "$$(uname -s)" in \ @@ -19,6 +20,20 @@ PROFILE ?= dev default: build +############### +# clean tasks # +############### + +clean-qe-wasm: + @echo "Cleaning query-engine/query-engine-wasm/pkg" && \ + cd query-engine/query-engine-wasm/pkg && find . ! -name '.' ! -name '..' ! 
-name 'README.md' -exec rm -rf {} + + +clean-cargo: + @echo "Cleaning cargo" && \ + cargo clean + +clean: clean-qe-wasm clean-cargo + ################### # script wrappers # ################### @@ -39,6 +54,29 @@ build: build-qe: cargo build --package query-engine +build-qe-napi: + cargo build --package query-engine-node-api --profile $(PROFILE) + +build-qe-wasm: + cd query-engine/query-engine-wasm && \ + ./build.sh $(QE_WASM_VERSION) query-engine/query-engine-wasm/pkg + +build-qe-wasm-gz: build-qe-wasm + @cd query-engine/query-engine-wasm/pkg && \ + for provider in postgresql mysql sqlite; do \ + tar -zcvf $$provider.gz $$provider; \ + done; + +build-schema-wasm: + @printf '%s\n' "🛠️ Building the Rust crate" + cargo build --profile $(PROFILE) --target=wasm32-unknown-unknown -p prisma-schema-build + + @printf '\n%s\n' "📦 Creating the npm package" + WASM_BUILD_PROFILE=$(PROFILE) \ + NPM_PACKAGE_VERSION=$(SCHEMA_WASM_VERSION) \ + out="$(REPO_ROOT)/target/prisma-schema-wasm" \ + ./prisma-schema-wasm/scripts/install.sh + # Emulate pedantic CI compilation. 
pedantic: RUSTFLAGS="-D warnings" cargo fmt -- --check && RUSTFLAGS="-D warnings" cargo clippy --all-targets @@ -77,6 +115,11 @@ test-qe-verbose-st: test-qe-black-box: build-qe cargo test --package black-box-tests -- --test-threads 1 +check-schema-wasm-package: build-schema-wasm + PRISMA_SCHEMA_WASM="$(REPO_ROOT)/target/prisma-schema-wasm" \ + out=$(shell mktemp -d) \ + NODE=$(shell which node) \ + ./prisma-schema-wasm/scripts/check.sh ########################### # Database setup commands # @@ -326,21 +369,12 @@ test-driver-adapter-planetscale-wasm: test-planetscale-wasm # Local dev commands # ###################### -build-qe-napi: - cargo build --package query-engine-node-api --profile $(PROFILE) - -build-qe-wasm: -ifdef NIX - @echo "Building wasm engine on nix" - rm -rf query-engine/query-engine-wasm/pkg - nix run .#export-query-engine-wasm 0.0.0 query-engine/query-engine-wasm/pkg -else - cd query-engine/query-engine-wasm && ./build.sh 0.0.0 query-engine/query-engine-wasm/pkg -endif - -measure-qe-wasm: build-qe-wasm +measure-qe-wasm: build-qe-wasm-gz @cd query-engine/query-engine-wasm/pkg; \ - gzip -k -c query_engine_bg.wasm | wc -c | awk '{$$1/=(1024*1024); printf "Current wasm query-engine size compressed: %.3fMB\n", $$1}' + for provider in postgresql mysql sqlite; do \ + echo "$${provider}_size=$$(cat $$provider/* | wc -c | tr -d ' ')" >> $(ENGINE_SIZE_OUTPUT); \ + echo "$${provider}_size_gz=$$(cat $$provider.gz | wc -c | tr -d ' ')" >> $(ENGINE_SIZE_OUTPUT); \ + done; build-driver-adapters-kit: build-driver-adapters cd query-engine/driver-adapters && pnpm i && pnpm build diff --git a/flake.nix b/flake.nix index e62a09803d3d..a76ab2edbea8 100644 --- a/flake.nix +++ b/flake.nix @@ -31,16 +31,9 @@ perSystem = { config, system, pkgs, craneLib, ... 
}: { config._module.args.flakeInputs = inputs; imports = [ - ./nix/all-engines.nix ./nix/args.nix - ./nix/cargo-doc.nix - ./nix/cli-shell.nix - ./nix/cli-prisma.nix - ./nix/dev-vm.nix - ./nix/memory-profiling.nix - ./nix/prisma-schema-wasm.nix - ./nix/publish-engine-size.nix ./nix/shell.nix + ./nix/publish-engine-size.nix ]; }; }; diff --git a/nix/README.md b/nix/README.md new file mode 100644 index 000000000000..8f95176511b7 --- /dev/null +++ b/nix/README.md @@ -0,0 +1,5 @@ +This directory contains a nix shell that is convenient to streamline developement, however, +contributors must not require to depend on nix for any specific workflow. + +Instead, automation should be provided in a combination of bash scripts and docker, exposed over +tasks in the [root's Makefile](/Makefile) diff --git a/nix/all-engines.nix b/nix/all-engines.nix deleted file mode 100644 index b509d67229ad..000000000000 --- a/nix/all-engines.nix +++ /dev/null @@ -1,170 +0,0 @@ -{ pkgs, flakeInputs, lib, self', rustToolchain, ... 
}: - -let - stdenv = pkgs.clangStdenv; - srcPath = ../.; - srcFilter = flakeInputs.gitignore.lib.gitignoreFilterWith { - basePath = srcPath; - extraRules = '' - /nix - /flake.* - ''; - }; - src = lib.cleanSourceWith { - filter = srcFilter; - src = srcPath; - name = "prisma-engines-source"; - }; - craneLib = (flakeInputs.crane.mkLib pkgs).overrideToolchain rustToolchain; - deps = craneLib.vendorCargoDeps { inherit src; }; - libSuffix = stdenv.hostPlatform.extensions.sharedLibrary; -in -{ - packages.prisma-engines = stdenv.mkDerivation { - name = "prisma-engines"; - inherit src; - - buildInputs = [ pkgs.openssl.out ]; - nativeBuildInputs = with pkgs; [ - rustToolchain - git # for our build scripts that bake in the git hash - protobuf # for tonic - openssl.dev - pkg-config - ] ++ lib.optionals stdenv.isDarwin [ - perl # required to build openssl - darwin.apple_sdk.frameworks.Security - iconv - ]; - - configurePhase = '' - mkdir .cargo - ln -s ${deps}/config.toml .cargo/config.toml - ''; - - buildPhase = '' - cargo build --release --bins - cargo build --release -p query-engine-node-api - ''; - - installPhase = '' - mkdir -p $out/bin $out/lib - cp target/release/query-engine $out/bin/ - cp target/release/schema-engine $out/bin/ - cp target/release/prisma-fmt $out/bin/ - cp target/release/libquery_engine${libSuffix} $out/lib/libquery_engine.node - ''; - - dontStrip = true; - }; - - packages.test-cli = lib.makeOverridable - ({ profile }: stdenv.mkDerivation { - name = "test-cli"; - inherit src; - inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; - - buildPhase = "cargo build --profile=${profile} --bin=test-cli"; - - installPhase = '' - set -eu - mkdir -p $out/bin - QE_PATH=$(find target -name 'test-cli') - cp $QE_PATH $out/bin - ''; - }) - { profile = "release"; }; - - packages.query-engine-bin = lib.makeOverridable - ({ profile }: stdenv.mkDerivation { - name = "query-engine-bin"; - inherit src; - inherit 
(self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; - - buildPhase = "cargo build --profile=${profile} --bin=query-engine"; - - installPhase = '' - set -eu - mkdir -p $out/bin - QE_PATH=$(find target -name 'query-engine') - cp $QE_PATH $out/bin - ''; - }) - { profile = "release"; }; - - # TODO: try to make caching and sharing the build artifacts work with crane. There should be - # separate `query-engine-lib` and `query-engine-bin` derivations instead, but we use this for now - # to make the CI job that uses it faster. - packages.query-engine-bin-and-lib = lib.makeOverridable - ({ profile }: stdenv.mkDerivation { - name = "query-engine-bin-and-lib"; - inherit src; - inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; - - buildPhase = '' - cargo build --profile=${profile} --bin=query-engine - cargo build --profile=${profile} -p query-engine-node-api - ''; - - installPhase = '' - set -eu - mkdir -p $out/bin $out/lib - cp target/${profile}/query-engine $out/bin/query-engine - cp target/${profile}/libquery_engine${libSuffix} $out/lib/libquery_engine.node - ''; - }) - { profile = "release"; }; - - packages.build-engine-wasm = pkgs.writeShellApplication { - name = "build-engine-wasm"; - runtimeInputs = with pkgs; [ git rustup wasm-bindgen-cli binaryen jq iconv ]; - text = '' - cd query-engine/query-engine-wasm - WASM_BUILD_PROFILE=release ./build.sh "$1" "$2" - ''; - }; - - packages.query-engine-wasm-gz = lib.makeOverridable - ({ profile }: stdenv.mkDerivation { - name = "query-engine-wasm-gz"; - inherit src; - buildInputs = with pkgs; [ iconv ]; - - buildPhase = '' - export HOME=$(mktemp -dt wasm-engine-home-XXXX) - - OUT_FOLDER=$(mktemp -dt wasm-engine-out-XXXX) - ${self'.packages.build-engine-wasm}/bin/build-engine-wasm "0.0.0" "$OUT_FOLDER" - - for provider in "postgresql" "mysql" "sqlite"; do - gzip -ckn "$OUT_FOLDER/$provider/query_engine_bg.wasm" > "query-engine-$provider.wasm.gz" - 
done - ''; - - installPhase = '' - set +x - mkdir -p $out - for provider in "postgresql" "mysql" "sqlite"; do - cp "$OUT_FOLDER/$provider/query_engine_bg.wasm" "$out/query-engine-$provider.wasm" - cp "query-engine-$provider.wasm.gz" "$out/" - done - ''; - }) - { profile = "release"; }; - - packages.export-query-engine-wasm = - pkgs.writeShellApplication { - name = "export-query-engine-wasm"; - runtimeInputs = with pkgs; [ jq ]; - text = '' - OUT_VERSION="$1" - OUT_FOLDER="$2" - - mkdir -p "$OUT_FOLDER" - ${self'.packages.build-engine-wasm}/bin/build-engine-wasm "$OUT_VERSION" "$OUT_FOLDER" - chmod -R +rw "$OUT_FOLDER" - mv "$OUT_FOLDER/package.json" "$OUT_FOLDER/package.json.bak" - jq --arg new_version "$OUT_VERSION" '.version = $new_version' "$OUT_FOLDER/package.json.bak" > "$OUT_FOLDER/package.json" - ''; - }; -} diff --git a/nix/cargo-doc.nix b/nix/cargo-doc.nix deleted file mode 100644 index 5e37148ef030..000000000000 --- a/nix/cargo-doc.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ pkgs, self', ... }: - -{ - packages.cargo-docs = pkgs.clangStdenv.mkDerivation { - name = "prisma-engines-cargo-docs"; - inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs src configurePhase; - - buildPhase = "cargo doc --workspace"; - - installPhase = '' - mkdir -p $out/share - mv target/doc/ $out/share/docs - ''; - }; - - packages.publish-cargo-docs = pkgs.writeShellApplication { - name = "publish-cargo-docs"; - text = '' - set -euxo pipefail - - if ! git diff --exit-code 1> /dev/null; then - : "The workspace is not clean. Please commit or reset, then try again". 
- exit 1 - fi - - CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD) - CURRENT_COMMIT=$(git rev-parse --short HEAD) - REPO_ROOT=$(git rev-parse --show-toplevel) - - pushd "$REPO_ROOT" - git fetch --depth=1 origin gh-pages - git checkout origin/gh-pages - rm -rf ./doc - cp \ - --recursive \ - --no-preserve=mode,ownership \ - ${self'.packages.cargo-docs}/share/docs \ - ./doc - git add doc - git commit --quiet -m "cargo docs for $CURRENT_COMMIT" - git push origin '+HEAD:gh-pages' - git checkout "$CURRENT_BRANCH" - popd - ''; - }; -} diff --git a/nix/cli-prisma.nix b/nix/cli-prisma.nix deleted file mode 100644 index fcbc6756ee95..000000000000 --- a/nix/cli-prisma.nix +++ /dev/null @@ -1,44 +0,0 @@ -{ config, pkgs, self', ... }: - -# This is an impure build for prisma/prisma. We need this because of the way we -# package `prisma-schema-wasm` and the fact that there's no `pnpm2nix`. -# See https://zimbatm.com/notes/nix-packaging-the-heretic-way for more details -# on impure builds. -let - schema-wasm = self'.packages.prisma-schema-wasm; - version = "4.11.0"; -in -{ - packages.cli-prisma = pkgs.runCommand "prisma-cli-${version}" - { - # Disable the Nix build sandbox for this specific build. - # This means the build can freely talk to the Internet. - __noChroot = true; - - buildInputs = [ - pkgs.nodejs - ]; - } - '' - # NIX sets this to something that doesn't exist for purity reasons. - export HOME=$(mktemp -d) - - # Install prisma locally, and impurely. - npm install prisma@${version} - - # Fix shebang scripts recursively. - patchShebangs . - - # Remove prisma-fmt and copy it over from our local build. - rm node_modules/prisma/build/prisma_schema_build_bg.wasm - cp ${schema-wasm}/src/prisma_schema_build_bg.wasm node_modules/prisma/build/prisma_schema_build_bg.wasm - - # Copy node_modules and everything else. - mkdir -p $out/share - cp -r . $out/share/$name - - # Add a symlink to the binary. 
- mkdir $out/bin - ln -s $out/share/$name/node_modules/.bin/prisma $out/bin/prisma - ''; -} diff --git a/nix/cli-shell.nix b/nix/cli-shell.nix deleted file mode 100644 index 4c98c8078ec5..000000000000 --- a/nix/cli-shell.nix +++ /dev/null @@ -1,22 +0,0 @@ -{ config, pkgs, self', ... }: - -# Run it with -# > nix develop --no-sandbox .#cli-shell -let - engines = self'.packages.prisma-engines; - prisma = self'.packages.cli-prisma; -in -{ - devShells.cli-shell = pkgs.mkShell { - packages = [ pkgs.cowsay pkgs.nodejs engines prisma ]; - shellHook = '' - cowsay -f turtle "Run prisma by just typing 'prisma ', e.g. 'prisma --version'" - - export PRISMA_SCHEMA_ENGINE_BINARY=${engines}/bin/schema-engine - export PRISMA_QUERY_ENGINE_BINARY=${engines}/bin/query-engine - export PRISMA_QUERY_ENGINE_LIBRARY=${engines}/lib/libquery_engine.node - # Does this even do anything anymore? - export PRISMA_FMT_BINARY=${engines}/bin/prisma-fmt - ''; - }; -} diff --git a/nix/dev-vm.nix b/nix/dev-vm.nix deleted file mode 100644 index f228488a24ff..000000000000 --- a/nix/dev-vm.nix +++ /dev/null @@ -1,98 +0,0 @@ -{ pkgs, flakeInputs, system, self', ... }: - -# Run qemu with a disk image containing prisma-engines repo, docker and all the -# packages to build and test engines. -# -# This is useful for testing engines with e.g. artificial memory or cpu limits. -# -# You can run it using: -# -# ``` -# $ nix run .#dev-vm -# ``` -# -# This will boot the VM and create a nixos.qcow2 VM image file, or reuse it if -# it is already there. -# -# You can pass extra arguments to the qemu command line, they will be forwarded -# (see --help for example). That lets you easily constrain the VM's resources -# (CPU, RAM, network, disk IO), among other things. -# -# The recommended way to interact with the vm is through SSH. It listens on -# 2222 on the host's localhost: -# -# ``` -# $ ssh prisma@localhost -p 2222 -# ``` -# -# Both the username and password are "prisma". 
-# -# Links: -# - https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/qemu-vm.nix -let - evalConfig = import "${flakeInputs.nixpkgs}/nixos/lib/eval-config.nix"; - prisma-engines = self'.packages.prisma-engines; - prisma-engines-inputs = prisma-engines.buildInputs ++ prisma-engines.nativeBuildInputs; - vmConfig = (evalConfig { - modules = [ - { - system.stateVersion = "23.05"; - virtualisation.docker.enable = true; - - virtualisation.vmVariant.virtualisation = { - diskSize = 1024 * 8; # 8GB - forwardPorts = [ - { - from = "host"; - host.port = 2222; - guest.port = 22; - } - ]; - writableStore = true; - writableStoreUseTmpfs = false; - sharedDirectories.prisma-engines = { - source = "${prisma-engines.src}"; - target = "/home/prisma/prisma-engines"; - }; - }; - - # Enable flakes in the host vm - nix = { - # package = pkgs.nixUnstable; - extraOptions = "experimental-features = nix-command flakes"; - }; - - environment.systemPackages = with pkgs; [ - tmux - git - coreutils - gnumake - ] ++ prisma-engines-inputs; - - services.openssh = { - listenAddresses = [{ - addr = "0.0.0.0"; - port = 22; - }]; - enable = true; - settings.PasswordAuthentication = true; - }; - - users.users.prisma = { - isNormalUser = true; - extraGroups = [ - "docker" - "wheel" # Enable ‘sudo’ for the user. - ]; - password = "prisma"; - }; - - } - ]; - system = "x86_64-linux"; - } - ).config; -in -{ - packages.dev-vm = vmConfig.system.build.vm; -} diff --git a/nix/memory-profiling.nix b/nix/memory-profiling.nix deleted file mode 100644 index 693fb108d2e1..000000000000 --- a/nix/memory-profiling.nix +++ /dev/null @@ -1,50 +0,0 @@ -{ pkgs, self', ... }: - -let - # A convenience package to open the DHAT Visualizer - # (https://valgrind.org/docs/manual/dh-manual.html) in a browser. 
- dhat-viewer = pkgs.writeShellScriptBin "dhat-viewer" '' - xdg-open ${valgrind}/libexec/valgrind/dh_view.html - ''; - - # DHAT (https://valgrind.org/docs/manual/dh-manual.html) and Massif - # (https://valgrind.org/docs/manual/ms-manual.html#ms-manual.overview) - # profiles for schema-builder::build() with the odoo.prisma example schema. - # This is just the data, please read the docs of both tools to understand how - # to use that data. - # - # Usage example: - # - # $ nix build .#schema-builder-odoo-memory-profile - # $ nix run .#dhat-viewer - # $ : At this point your browser will open on the DHAT UI and you can - # $ : open the dhat-profile.json file in ./result. - schema-builder-odoo-memory-profile = stdenv.mkDerivation { - name = "schema-builder-odoo-memory-profile"; - inherit (self'.packages.prisma-engines) nativeBuildInputs configurePhase src; - buildInputs = self'.packages.prisma-engines.buildInputs ++ [ valgrind ]; - - buildPhase = '' - cargo build --profile=release --example schema_builder_build_odoo - valgrind --tool=dhat --dhat-out-file=dhat-profile.json \ - ./target/release/examples/schema_builder_build_odoo - valgrind --tool=massif --massif-out-file=massif-profile \ - ./target/release/examples/schema_builder_build_odoo - ''; - - installPhase = '' - mkdir $out - mv dhat-profile.json massif-profile $out/ - ''; - }; - - # Valgrind is not available on all platforms. We substitute the memory - # profiling derivations with an error in these scenarios. 
- wrongSystem = runCommand "wrongSystem" { } "echo 'Not available on this system'; exit 1"; - - inherit (pkgs) stdenv runCommand valgrind; -in -{ - packages.dhat-viewer = if stdenv.isLinux then dhat-viewer else wrongSystem; - packages.schema-builder-odoo-memory-profile = if stdenv.isLinux then schema-builder-odoo-memory-profile else wrongSystem; -} diff --git a/nix/prisma-schema-wasm.nix b/nix/prisma-schema-wasm.nix deleted file mode 100644 index 602e59b48ea5..000000000000 --- a/nix/prisma-schema-wasm.nix +++ /dev/null @@ -1,55 +0,0 @@ -{ pkgs, lib, self', ... }: - -let - toolchain = pkgs.rust-bin.fromRustupToolchainFile ../prisma-schema-wasm/rust-toolchain.toml; - scriptsDir = ../prisma-schema-wasm/scripts; - inherit (pkgs) jq nodejs coreutils wasm-bindgen-cli stdenv; - inherit (builtins) readFile replaceStrings; -in -{ - packages.prisma-schema-wasm = lib.makeOverridable - ({ profile }: stdenv.mkDerivation { - name = "prisma-schema-wasm"; - nativeBuildInputs = with pkgs; [ git wasm-bindgen-cli toolchain ]; - inherit (self'.packages.prisma-engines) configurePhase src; - - buildPhase = "cargo build --profile=${profile} --target=wasm32-unknown-unknown -p prisma-schema-build"; - installPhase = readFile "${scriptsDir}/install.sh"; - - WASM_BUILD_PROFILE = profile; - - passthru = { - dev = self'.packages.prisma-schema-wasm.override { profile = "dev"; }; - }; - }) - { profile = "release"; }; - - # Takes a package version as its single argument, and produces - # prisma-schema-wasm with the right package.json in a temporary directory, - # then prints the directory's path. This is used by the publish pipeline in CI. 
- packages.renderPrismaSchemaWasmPackage = - pkgs.writeShellApplication { - name = "renderPrismaSchemaWasmPackage"; - runtimeInputs = [ jq ]; - text = '' - set -euxo pipefail - - PACKAGE_DIR=$(mktemp -d) - cp -r --no-target-directory ${self'.packages.prisma-schema-wasm} "$PACKAGE_DIR" - rm -f "$PACKAGE_DIR/package.json" - jq ".version = \"$1\"" ${self'.packages.prisma-schema-wasm}/package.json > "$PACKAGE_DIR/package.json" - echo "$PACKAGE_DIR" - ''; - }; - - packages.syncWasmBindgenVersions = let template = readFile "${scriptsDir}/syncWasmBindgenVersions.sh"; in - pkgs.writeShellApplication { - name = "syncWasmBindgenVersions"; - runtimeInputs = [ coreutils toolchain ]; - text = replaceStrings [ "$WASM_BINDGEN_VERSION" ] [ wasm-bindgen-cli.version ] template; - }; - - checks.prismaSchemaWasmE2E = pkgs.runCommand "prismaSchemaWasmE2E" - { PRISMA_SCHEMA_WASM = self'.packages.prisma-schema-wasm; NODE = "${nodejs}/bin/node"; } - (readFile "${scriptsDir}/check.sh"); -} diff --git a/nix/publish-engine-size.nix b/nix/publish-engine-size.nix index b49c093d9a31..11a63d7de7e8 100644 --- a/nix/publish-engine-size.nix +++ b/nix/publish-engine-size.nix @@ -1,7 +1,178 @@ -{ pkgs, self', ... }: +/* +* Deprecated: This file is deprecated and will be removed soon. +* See https://github.com/prisma/team-orm/issues/943 +*/ +{ pkgs, flakeInputs, lib, self', rustToolchain, ... 
}: +let + stdenv = pkgs.clangStdenv; + srcPath = ../.; + srcFilter = flakeInputs.gitignore.lib.gitignoreFilterWith { + basePath = srcPath; + extraRules = '' + /nix + /flake.* + ''; + }; + src = lib.cleanSourceWith { + filter = srcFilter; + src = srcPath; + name = "prisma-engines-source"; + }; + craneLib = (flakeInputs.crane.mkLib pkgs).overrideToolchain rustToolchain; + deps = craneLib.vendorCargoDeps { inherit src; }; + libSuffix = stdenv.hostPlatform.extensions.sharedLibrary; +in { - /* Publish the size of the Query Engine binary and library to the CSV file + packages.prisma-engines = stdenv.mkDerivation { + name = "prisma-engines"; + inherit src; + + buildInputs = [ pkgs.openssl.out ]; + nativeBuildInputs = with pkgs; [ + rustToolchain + git # for our build scripts that bake in the git hash + protobuf # for tonic + openssl.dev + pkg-config + ] ++ lib.optionals stdenv.isDarwin [ + perl # required to build openssl + darwin.apple_sdk.frameworks.Security + iconv + ]; + + configurePhase = '' + mkdir .cargo + ln -s ${deps}/config.toml .cargo/config.toml + ''; + + buildPhase = '' + cargo build --release --bins + cargo build --release -p query-engine-node-api + ''; + + installPhase = '' + mkdir -p $out/bin $out/lib + cp target/release/query-engine $out/bin/ + cp target/release/schema-engine $out/bin/ + cp target/release/prisma-fmt $out/bin/ + cp target/release/libquery_engine${libSuffix} $out/lib/libquery_engine.node + ''; + + dontStrip = true; + }; + + packages.test-cli = lib.makeOverridable + ({ profile }: stdenv.mkDerivation { + name = "test-cli"; + inherit src; + inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; + + buildPhase = "cargo build --profile=${profile} --bin=test-cli"; + + installPhase = '' + set -eu + mkdir -p $out/bin + QE_PATH=$(find target -name 'test-cli') + cp $QE_PATH $out/bin + ''; + }) + { profile = "release"; }; + + packages.query-engine-bin = lib.makeOverridable + ({ profile }: stdenv.mkDerivation { 
+ name = "query-engine-bin"; + inherit src; + inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; + + buildPhase = "cargo build --profile=${profile} --bin=query-engine"; + + installPhase = '' + set -eu + mkdir -p $out/bin + QE_PATH=$(find target -name 'query-engine') + cp $QE_PATH $out/bin + ''; + }) + { profile = "release"; }; + + # TODO: try to make caching and sharing the build artifacts work with crane. There should be + # separate `query-engine-lib` and `query-engine-bin` derivations instead, but we use this for now + # to make the CI job that uses it faster. + packages.query-engine-bin-and-lib = lib.makeOverridable + ({ profile }: stdenv.mkDerivation { + name = "query-engine-bin-and-lib"; + inherit src; + inherit (self'.packages.prisma-engines) buildInputs nativeBuildInputs configurePhase dontStrip; + + buildPhase = '' + cargo build --profile=${profile} --bin=query-engine + cargo build --profile=${profile} -p query-engine-node-api + ''; + + installPhase = '' + set -eu + mkdir -p $out/bin $out/lib + cp target/${profile}/query-engine $out/bin/query-engine + cp target/${profile}/libquery_engine${libSuffix} $out/lib/libquery_engine.node + ''; + }) + { profile = "release"; }; + + packages.build-engine-wasm = pkgs.writeShellApplication { + name = "build-engine-wasm"; + runtimeInputs = with pkgs; [ git rustup wasm-bindgen-cli binaryen jq iconv ]; + text = '' + cd query-engine/query-engine-wasm + WASM_BUILD_PROFILE=release ./build.sh "$1" "$2" + ''; + }; + + packages.query-engine-wasm-gz = lib.makeOverridable + ({ profile }: stdenv.mkDerivation { + name = "query-engine-wasm-gz"; + inherit src; + buildInputs = with pkgs; [ iconv ]; + + buildPhase = '' + export HOME=$(mktemp -dt wasm-engine-home-XXXX) + + OUT_FOLDER=$(mktemp -dt wasm-engine-out-XXXX) + ${self'.packages.build-engine-wasm}/bin/build-engine-wasm "0.0.0" "$OUT_FOLDER" + + for provider in "postgresql" "mysql" "sqlite"; do + gzip -ckn 
"$OUT_FOLDER/$provider/query_engine_bg.wasm" > "query-engine-$provider.wasm.gz" + done + ''; + + installPhase = '' + set +x + mkdir -p $out + for provider in "postgresql" "mysql" "sqlite"; do + cp "$OUT_FOLDER/$provider/query_engine_bg.wasm" "$out/query-engine-$provider.wasm" + cp "query-engine-$provider.wasm.gz" "$out/" + done + ''; + }) + { profile = "release"; }; + + packages.export-query-engine-wasm = + pkgs.writeShellApplication { + name = "export-query-engine-wasm"; + runtimeInputs = with pkgs; [ jq ]; + text = '' + OUT_VERSION="$1" + OUT_FOLDER="$2" + + mkdir -p "$OUT_FOLDER" + ${self'.packages.build-engine-wasm}/bin/build-engine-wasm "$OUT_VERSION" "$OUT_FOLDER" + chmod -R +rw "$OUT_FOLDER" + mv "$OUT_FOLDER/package.json" "$OUT_FOLDER/package.json.bak" + jq --arg new_version "$OUT_VERSION" '.version = $new_version' "$OUT_FOLDER/package.json.bak" > "$OUT_FOLDER/package.json" + ''; + }; + + /* Publish the size of the Query Engine binary and library to the CSV file in the `gh-pages` branch of the repository. Data: https://github.com/prisma/prisma-engines/blob/gh-pages/engines-size/data.csv diff --git a/prisma-schema-wasm/scripts/install.sh b/prisma-schema-wasm/scripts/install.sh index 992dbd1ac380..aafc335956d7 100755 --- a/prisma-schema-wasm/scripts/install.sh +++ b/prisma-schema-wasm/scripts/install.sh @@ -6,6 +6,10 @@ if [[ -z "${WASM_BUILD_PROFILE:-}" ]]; then WASM_BUILD_PROFILE="release" fi +if [[ -z "${NPM_PACKAGE_VERSION:-}" ]]; then + NPM_PACKAGE_VERSION="0.0.0" +fi + if [[ $WASM_BUILD_PROFILE == "dev" ]]; then TARGET_DIR="debug" else @@ -18,8 +22,8 @@ printf '%s\n' " -> Creating out dir..." 
# shellcheck disable=SC2154 mkdir -p "$out"/src -printf '%s\n' " -> Copying package.json" -cp ./prisma-schema-wasm/package.json "$out"/ +printf '%s\n' " -> Copying package.json and updating version to $NPM_PACKAGE_VERSION" +jq ".version = \"$NPM_PACKAGE_VERSION\"" ./prisma-schema-wasm/package.json > "$out/package.json" printf '%s\n' " -> Copying README.md" cp ./prisma-schema-wasm/README.md "$out"/ diff --git a/query-engine/query-engine-wasm/build.sh b/query-engine/query-engine-wasm/build.sh index c3f129bb276b..0db1aad5bf08 100755 --- a/query-engine/query-engine-wasm/build.sh +++ b/query-engine/query-engine-wasm/build.sh @@ -34,7 +34,6 @@ echo "ℹ️ target version: $OUT_VERSION" echo "ℹ️ out folder: $OUT_FOLDER" if [[ -z "${WASM_BUILD_PROFILE:-}" ]]; then - # use `wasm-pack build --release` by default on CI only if [[ -z "${BUILDKITE:-}" ]] && [[ -z "${GITHUB_ACTIONS:-}" ]]; then WASM_BUILD_PROFILE="dev" else @@ -45,22 +44,23 @@ fi if [ "$WASM_BUILD_PROFILE" = "dev" ]; then WASM_TARGET_SUBDIR="debug" else - WASM_TARGET_SUBDIR="release" + WASM_TARGET_SUBDIR="$WASM_BUILD_PROFILE" fi -echo "Using build profile: \"${WASM_BUILD_PROFILE}\"" -echo "ℹ️ Configuring rust toolchain to use nightly and rust-src component" -rustup default nightly-2024-01-25 -rustup target add wasm32-unknown-unknown -rustup component add rust-src --target wasm32-unknown-unknown -export RUSTFLAGS="-Zlocation-detail=none" -CARGO_TARGET_DIR=$(cargo metadata --format-version 1 | jq -r .target_directory) build() { + echo "ℹ️ Configuring rust toolchain to use nightly and rust-src component" + rustup default nightly-2024-01-25 + rustup target add wasm32-unknown-unknown + rustup component add rust-std --target wasm32-unknown-unknown + rustup component add rust-src --target wasm32-unknown-unknown + local CONNECTOR="$1" - echo "🔨 Building $CONNECTOR" - CARGO_PROFILE_RELEASE_OPT_LEVEL="z" cargo build \ + local CARGO_TARGET_DIR + CARGO_TARGET_DIR=$(cargo metadata --format-version 1 | jq -r .target_directory) + 
echo "🔨 Building $CONNECTOR" + RUSTFLAGS="-Zlocation-detail=none" CARGO_PROFILE_RELEASE_OPT_LEVEL="z" cargo build \ -p query-engine-wasm \ --profile "$WASM_BUILD_PROFILE" \ --features "$CONNECTOR" \ From 7efa9cb6a66a6343e70ddbb91b41924713533029 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 13 Feb 2024 13:48:01 +0100 Subject: [PATCH 09/25] qe: reuse lateral joins for relation aggregations (#4693) The initial implementation always adds LEFT JOINs for relation aggregations. This is not necessary if there is an existing JOIN for fetching the relation data with the same filters (which includes no filters in both cases). This PR optimizes the query by reusing the existing lateral joins that load relation data and aggregating the relation in the same subquery when possible. Ref: https://github.com/prisma/team-orm/issues/700 Closes: https://github.com/prisma/team-orm/issues/903 --- .../aggregation/many_count_relation.rs | 244 +++++++++++++++++- .../aggregation/uniq_count_relation.rs | 126 ++++++++- .../src/query_builder/select/lateral.rs | 100 +++++-- .../src/query_builder/select/mod.rs | 61 +++-- .../src/query_builder/select/subquery.rs | 21 +- .../query-structure/src/field_selection.rs | 12 +- 6 files changed, 521 insertions(+), 43 deletions(-) diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/many_count_relation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/many_count_relation.rs index 54dbccefe7cc..312463f19b15 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/many_count_relation.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/many_count_relation.rs @@ -84,9 +84,9 @@ mod many_count_rel { Ok(()) } - // "Counting with some records and filters" should "not affect the count" + // Counting with cursor should not affect the count #[connector_test] - async fn count_with_filters(runner: Runner) -> 
TestResult<()> { + async fn count_with_cursor(runner: Runner) -> TestResult<()> { // 4 comment / 4 categories create_row( &runner, @@ -113,6 +113,128 @@ mod many_count_rel { Ok(()) } + // Counting with take should not affect the count + #[connector_test] + async fn count_with_take(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] } + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyPost(where: { id: 1 }) { + comments(take: 1) { id } + categories(take: 1) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findManyPost":[{"comments":[{"id":1}],"categories":[{"id":1}],"_count":{"comments":4,"categories":4}}]}}"### + ); + + Ok(()) + } + + // Counting with skip should not affect the count + #[connector_test] + async fn count_with_skip(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] } + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyPost(where: { id: 1 }) { + comments(skip: 3) { id } + categories(skip: 3) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findManyPost":[{"comments":[{"id":4}],"categories":[{"id":4}],"_count":{"comments":4,"categories":4}}]}}"### + ); + + Ok(()) + } + + // Counting with filters should not affect the count + #[connector_test] + async fn count_with_filters(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] } + }"#, + ) + .await?; + + 
insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyPost(where: { id: 1 }) { + comments(where: { id: 2 }) { id } + categories(where: { id: 2 }) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findManyPost":[{"comments":[{"id":2}],"categories":[{"id":2}],"_count":{"comments":4,"categories":4}}]}}"### + ); + + Ok(()) + } + + // Counting with distinct should not affect the count + #[connector_test] + async fn count_with_distinct(runner: Runner) -> TestResult<()> { + create_row( + &runner, + r#"{ + id: 1, + title: "a", + categories: { create: { id: 1 } } + }"#, + ) + .await?; + create_row( + &runner, + r#"{ + id: 2, + title: "a", + categories: { connect: { id: 1 } } + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyCategory { + posts(distinct: title) { id } + _count { posts } + } + }"#), + @r###"{"data":{"findManyCategory":[{"posts":[{"id":1}],"_count":{"posts":2}}]}}"### + ); + + Ok(()) + } + fn schema_nested() -> String { let schema = indoc! { r#"model User { @@ -214,6 +336,124 @@ mod many_count_rel { Ok(()) } + #[connector_test(schema(schema_nested))] + async fn nested_count_same_field_on_many_levels(runner: Runner) -> TestResult<()> { + run_query!( + runner, + r#" + mutation { + createOneUser( + data: { + id: 1, + name: "Author", + posts: { + create: [ + { + id: 1, + title: "good post", + comments: { + create: [ + { id: 1, body: "insightful!" 
}, + { id: 2, body: "deep lore uncovered" } + ] + } + }, + { + id: 2, + title: "boring post" + } + ] + } + } + ) { + id + } + } + "# + ); + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findManyPost { + comments { + post { + _count { comments } + } + } + _count { comments } + } + } + "# + ), + @r###"{"data":{"findManyPost":[{"comments":[{"post":{"_count":{"comments":2}}},{"post":{"_count":{"comments":2}}}],"_count":{"comments":2}},{"comments":[],"_count":{"comments":0}}]}}"### + ); + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findManyPost { + comments { + post { + comments { id } + _count { comments } + } + } + _count { comments } + } + } + "# + ), + @r###"{"data":{"findManyPost":[{"comments":[{"post":{"comments":[{"id":1},{"id":2}],"_count":{"comments":2}}},{"post":{"comments":[{"id":1},{"id":2}],"_count":{"comments":2}}}],"_count":{"comments":2}},{"comments":[],"_count":{"comments":0}}]}}"### + ); + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findManyPost { + comments { + post { + comments(where: { id: 1 }) { id } + _count { comments } + } + } + _count { comments } + } + } + "# + ), + @r###"{"data":{"findManyPost":[{"comments":[{"post":{"comments":[{"id":1}],"_count":{"comments":2}}},{"post":{"comments":[{"id":1}],"_count":{"comments":2}}}],"_count":{"comments":2}},{"comments":[],"_count":{"comments":0}}]}}"### + ); + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findManyPost { + comments(where: { id: 1}) { + post { + comments { id } + _count { comments } + } + } + _count { comments } + } + } + "# + ), + @r###"{"data":{"findManyPost":[{"comments":[{"post":{"comments":[{"id":1},{"id":2}],"_count":{"comments":2}}}],"_count":{"comments":2}},{"comments":[],"_count":{"comments":0}}]}}"### + ); + + Ok(()) + } + fn m_n_self_rel() -> String { let schema = indoc! 
{ r#"model User { diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/uniq_count_relation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/uniq_count_relation.rs index 4d21189bf125..45c49150e474 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/uniq_count_relation.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/aggregation/uniq_count_relation.rs @@ -84,9 +84,9 @@ mod uniq_count_rel { Ok(()) } - // "Counting with some records and filters" should "not affect the count" + // Counting with cursor should not affect the count #[connector_test] - async fn count_with_filters(runner: Runner) -> TestResult<()> { + async fn count_with_cursor(runner: Runner) -> TestResult<()> { // 4 comment / 4 categories create_row( &runner, @@ -113,6 +113,128 @@ mod uniq_count_rel { Ok(()) } + // Counting with take should not affect the count + #[connector_test] + async fn count_with_take(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findUniquePost(where: { id: 1 }) { + comments(take: 1) { id } + categories(take: 1) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findUniquePost":{"comments":[{"id":1}],"categories":[{"id":1}],"_count":{"comments":4,"categories":4}}}}"### + ); + + Ok(()) + } + + // Counting with skip should not affect the count + #[connector_test] + async fn count_with_skip(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + }"#, + ) + 
.await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findUniquePost(where: { id: 1 }) { + comments(skip: 2) { id } + categories(skip: 2) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findUniquePost":{"comments":[{"id":3},{"id":4}],"categories":[{"id":3},{"id":4}],"_count":{"comments":4,"categories":4}}}}"### + ); + + Ok(()) + } + + // Counting with filters should not affect the count + #[connector_test] + async fn count_with_filters(runner: Runner) -> TestResult<()> { + // 4 comment / 4 categories + create_row( + &runner, + r#"{ + id: 1, + title: "a", + comments: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + categories: { create: [{id: 1}, {id: 2}, {id: 3}, {id: 4}] }, + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findUniquePost(where: { id: 1 }) { + comments(where: { id: 2}) { id } + categories(where: { id: 2}) { id } + _count { comments categories } + } + }"#), + @r###"{"data":{"findUniquePost":{"comments":[{"id":2}],"categories":[{"id":2}],"_count":{"comments":4,"categories":4}}}}"### + ); + + Ok(()) + } + + // Counting with distinct should not affect the count + #[connector_test] + async fn count_with_distinct(runner: Runner) -> TestResult<()> { + create_row( + &runner, + r#"{ + id: 1, + title: "a", + categories: { create: { id: 1 } } + }"#, + ) + .await?; + create_row( + &runner, + r#"{ + id: 2, + title: "a", + categories: { connect: { id: 1 } } + }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findUniqueCategory(where: { id: 1 }) { + posts(distinct: title) { id } + _count { posts } + } + }"#), + @r###"{"data":{"findUniqueCategory":{"posts":[{"id":1}],"_count":{"posts":2}}}}"### + ); + + Ok(()) + } + fn schema_nested() -> String { let schema = indoc! 
{ r#"model User { diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs index 5b86bfaa581b..af3b73271aa9 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs @@ -6,13 +6,31 @@ use crate::{ model_extensions::AsColumn, }; +use std::collections::HashMap; + use quaint::ast::*; use query_structure::*; +/// Represents a projection of a virtual field that is cheap to clone and compare but still has +/// enough information to determine whether it refers to the same field. +#[derive(PartialEq, Eq, Hash, Debug)] +enum VirtualSelectionKey { + RelationCount(RelationField), +} + +impl From<&VirtualSelection> for VirtualSelectionKey { + fn from(vs: &VirtualSelection) -> Self { + match vs { + VirtualSelection::RelationCount(rf, _) => Self::RelationCount(rf.clone()), + } + } +} + /// Select builder for joined queries. Relations are resolved using LATERAL JOINs. 
#[derive(Debug, Default)] pub(crate) struct LateralJoinSelectBuilder { alias: Alias, + visited_virtuals: HashMap, } impl JoinSelectBuilder for LateralJoinSelectBuilder { @@ -29,10 +47,18 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { /// ``` fn build(&mut self, args: QueryArguments, selected_fields: &FieldSelection, ctx: &Context<'_>) -> Select<'static> { let (select, parent_alias) = self.build_default_select(&args, ctx); - let select = self.with_selection(select, selected_fields, parent_alias, ctx); - let select = self.with_relations(select, selected_fields.relations(), parent_alias, ctx); + let select = self.with_relations( + select, + selected_fields.relations(), + selected_fields.virtuals(), + parent_alias, + ctx, + ); + let select = self.with_virtual_selections(select, selected_fields.virtuals(), parent_alias, ctx); - self.with_virtual_selections(select, selected_fields.virtuals(), parent_alias, ctx) + // Build selection as the last step utilizing the information we collected in + // `with_relations` and `with_virtual_selections`. + self.with_selection(select, selected_fields, parent_alias, ctx) } fn build_selection<'a>( @@ -67,38 +93,52 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { - let (subselect, child_alias) = - self.build_to_one_select(rs, parent_alias, |expr: Expression<'_>| expr.alias(JSON_AGG_IDENT), ctx); - let subselect = self.with_relations(subselect, rs.relations(), child_alias, ctx); + let (subselect, child_alias) = self.build_to_one_select(rs, parent_alias, ctx); + + let subselect = self.with_relations(subselect, rs.relations(), rs.virtuals(), child_alias, ctx); let subselect = self.with_virtual_selections(subselect, rs.virtuals(), child_alias, ctx); + // Build the JSON object using the information we collected before in `with_relations` and + // `with_virtual_selections`. 
+ let subselect = subselect.value(self.build_json_obj_fn(rs, child_alias, ctx).alias(JSON_AGG_IDENT)); + let join_table = Table::from(subselect).alias(join_alias_name(&rs.field)); + // LEFT JOIN LATERAL ( ) AS ON TRUE select.left_join(join_table.on(ConditionTree::single(true.raw())).lateral()) } - fn add_to_many_relation<'a>( + fn add_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { let join_table_alias = join_alias_name(&rs.field); - let join_table = Table::from(self.build_to_many_select(rs, parent_alias, ctx)).alias(join_table_alias); + let mut to_many_select = self.build_to_many_select(rs, parent_alias, ctx); + + if let Some(vs) = self.find_compatible_virtual_for_relation(rs, parent_virtuals) { + self.visited_virtuals.insert(vs.into(), join_table_alias.clone()); + to_many_select = to_many_select.value(build_inline_virtual_selection(vs)); + } + + let join_table = Table::from(to_many_select).alias(join_table_alias); // LEFT JOIN LATERAL ( ) AS ON TRUE select.left_join(join_table.on(ConditionTree::single(true.raw())).lateral()) } - fn add_many_to_many_relation<'a>( + fn add_many_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { - let m2m_join = self.build_m2m_join(rs, parent_alias, ctx); + let m2m_join = self.build_m2m_join(rs, parent_virtuals, parent_alias, ctx); select.left_join(m2m_join) } @@ -110,8 +150,11 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { + let alias = self.next_alias(); let relation_count_select = self.build_virtual_select(vs, parent_alias, ctx); - let table = Table::from(relation_count_select).alias(relation_count_alias_name(vs.relation_field())); + let table = Table::from(relation_count_select).alias(alias.to_table_string()); + + 
self.visited_virtuals.insert(vs.into(), alias.to_table_string()); select.left_join_lateral(table.on(ConditionTree::single(true.raw()))) } @@ -155,10 +198,13 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { _parent_alias: Alias, _ctx: &Context<'_>, ) -> Expression<'static> { - let rf = vs.relation_field(); + let virtual_selection_alias = self + .visited_virtuals + .remove(&vs.into()) + .expect("All virtual fields must be visited before calling build_virtual_expr"); coalesce([ - Expression::from(Column::from((relation_count_alias_name(rf), vs.db_alias()))), + Expression::from(Column::from((virtual_selection_alias, vs.db_alias()))), Expression::from(0.raw()), ]) .into() @@ -168,14 +214,25 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { self.alias = self.alias.inc(AliasMode::Table); self.alias } + + fn was_virtual_processed_in_relation(&self, vs: &VirtualSelection) -> bool { + self.visited_virtuals.contains_key(&vs.into()) + } } impl LateralJoinSelectBuilder { - fn build_m2m_join<'a>(&mut self, rs: &RelationSelection, parent_alias: Alias, ctx: &Context<'_>) -> JoinData<'a> { + fn build_m2m_join<'a, 'b>( + &mut self, + rs: &RelationSelection, + parent_virtuals: impl Iterator, + parent_alias: Alias, + ctx: &Context<'_>, + ) -> JoinData<'a> { let rf = rs.field.clone(); let m2m_table_alias = self.next_alias(); let m2m_join_alias = self.next_alias(); let outer_alias = self.next_alias(); + let json_data_alias = m2m_join_alias_name(&rf); let m2m_join_data = Table::from(self.build_to_many_select(rs, m2m_table_alias, ctx)) .alias(m2m_join_alias.to_table_string()) @@ -194,13 +251,24 @@ impl LateralJoinSelectBuilder { .with_pagination(rs.args.take_abs(), rs.args.skip) .comment("inner"); // adds pagination - let outer = Select::from_table(Table::from(inner).alias(outer_alias.to_table_string())) + let mut outer = Select::from_table(Table::from(inner).alias(outer_alias.to_table_string())) .value(json_agg()) .comment("outer"); + if let Some(vs) = 
self.find_compatible_virtual_for_relation(rs, parent_virtuals) { + self.visited_virtuals.insert(vs.into(), json_data_alias.clone()); + outer = outer.value(build_inline_virtual_selection(vs)); + } + Table::from(outer) - .alias(m2m_join_alias_name(&rf)) + .alias(json_data_alias) .on(ConditionTree::single(true.raw())) .lateral() } } + +fn build_inline_virtual_selection<'a>(vs: &VirtualSelection) -> Expression<'a> { + match vs { + VirtualSelection::RelationCount(..) => count(Column::from(JSON_AGG_IDENT)).alias(vs.db_alias()).into(), + } +} diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs index d878ad63ec18..766c102b5b69 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs @@ -45,18 +45,20 @@ pub(crate) trait JoinSelectBuilder { ctx: &Context<'_>, ) -> Select<'a>; /// Adds to `select` the SQL statements to fetch a 1-m relation. - fn add_to_many_relation<'a>( + fn add_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a>; /// Adds to `select` the SQL statements to fetch a m-n relation. - fn add_many_to_many_relation<'a>( + fn add_many_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a>; @@ -89,6 +91,9 @@ pub(crate) trait JoinSelectBuilder { ) -> Expression<'static>; /// Get the next alias for a table. fn next_alias(&mut self) -> Alias; + /// Checks if a virtual selection has already been added to the query at an earlier stage + /// as a part of a relation query for a matching relation field. 
+ fn was_virtual_processed_in_relation(&self, vs: &VirtualSelection) -> bool; fn with_selection<'a>( &mut self, @@ -107,11 +112,12 @@ pub(crate) trait JoinSelectBuilder { } /// Builds the core select for a 1-1 relation. + /// Note: it does not add the JSON object selection because there are additional steps to + /// perform before that depending on the `JoinSelectBuilder` implementation. fn build_to_one_select( &mut self, rs: &RelationSelection, parent_alias: Alias, - selection_modifier: impl FnOnce(Expression<'static>) -> Expression<'static>, ctx: &Context<'_>, ) -> (Select<'static>, Alias) { let rf = &rs.field; @@ -121,12 +127,10 @@ pub(crate) trait JoinSelectBuilder { .related_field() .as_table(ctx) .alias(child_table_alias.to_table_string()); - let json_expr = self.build_json_obj_fn(rs, child_table_alias, ctx); let select = Select::from_table(table) .with_join_conditions(rf, parent_alias, child_table_alias, ctx) .with_filters(rs.args.filter.clone(), Some(child_table_alias), ctx) - .value(selection_modifier(json_expr)) .limit(1); (select, child_table_alias) @@ -155,11 +159,14 @@ pub(crate) trait JoinSelectBuilder { .comment("root select"); // SELECT JSON_BUILD_OBJECT() FROM ( ) - let inner = Select::from_table(Table::from(root).alias(root_alias.to_table_string())) - .value(self.build_json_obj_fn(rs, root_alias, ctx).alias(JSON_AGG_IDENT)); - let inner = self.with_relations(inner, rs.relations(), root_alias, ctx); + let inner = Select::from_table(Table::from(root).alias(root_alias.to_table_string())); + let inner = self.with_relations(inner, rs.relations(), rs.virtuals(), root_alias, ctx); let inner = self.with_virtual_selections(inner, rs.virtuals(), root_alias, ctx); + // Build the JSON object utilizing the information we collected in `with_relations` and + // `with_virtual_selections`. 
+ let inner = inner.value(self.build_json_obj_fn(rs, root_alias, ctx).alias(JSON_AGG_IDENT)); + let linking_fields = rs.field.related_field().linking_fields(); if rs.field.relation().is_many_to_many() { @@ -212,16 +219,17 @@ pub(crate) trait JoinSelectBuilder { } } - fn with_relation<'a>( + fn with_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { match (rs.field.is_list(), rs.field.relation().is_many_to_many()) { - (true, true) => self.add_many_to_many_relation(select, rs, parent_alias, ctx), - (true, false) => self.add_to_many_relation(select, rs, parent_alias, ctx), + (true, true) => self.add_many_to_many_relation(select, rs, parent_virtuals, parent_alias, ctx), + (true, false) => self.add_to_many_relation(select, rs, parent_virtuals, parent_alias, ctx), (false, _) => self.add_to_one_relation(select, rs, parent_alias, ctx), } } @@ -230,10 +238,15 @@ pub(crate) trait JoinSelectBuilder { &mut self, input: Select<'a>, relation_selections: impl Iterator, + virtual_selections: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { - relation_selections.fold(input, |acc, rs| self.with_relation(acc, rs, parent_alias, ctx)) + let virtual_selections = virtual_selections.collect::>(); + + relation_selections.fold(input, |acc, rs| { + self.with_relation(acc, rs, virtual_selections.iter().copied(), parent_alias, ctx) + }) } fn build_default_select(&mut self, args: &QueryArguments, ctx: &Context<'_>) -> (Select<'static>, Alias) { @@ -258,7 +271,13 @@ pub(crate) trait JoinSelectBuilder { parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { - selections.fold(select, |acc, vs| self.add_virtual_selection(acc, vs, parent_alias, ctx)) + selections.fold(select, |acc, vs| { + if self.was_virtual_processed_in_relation(vs) { + acc + } else { + self.add_virtual_selection(acc, vs, parent_alias, ctx) + } + }) } fn build_virtual_select( @@ -372,6 
+391,18 @@ pub(crate) trait JoinSelectBuilder { select } + + fn find_compatible_virtual_for_relation<'a>( + &self, + rs: &RelationSelection, + mut parent_virtuals: impl Iterator, + ) -> Option<&'a VirtualSelection> { + if rs.args.take.is_some() || rs.args.skip.is_some() || rs.args.cursor.is_some() || rs.args.distinct.is_some() { + return None; + } + + parent_virtuals.find(|vs| *vs.relation_field() == rs.field && vs.filter() == rs.args.filter.as_ref()) + } } pub(crate) trait SelectBuilderExt<'a> { @@ -613,7 +644,3 @@ fn supports_lateral_join(args: &QueryArguments) -> bool { .connector .has_capability(ConnectorCapability::LateralJoin) } - -fn relation_count_alias_name(rf: &RelationField) -> String { - format!("aggr_count_{}_{}", rf.model().name(), rf.name()) -} diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs index 202d42780e8b..437ee9f075a8 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs @@ -46,7 +46,7 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { .table(parent_alias.to_table_string()) .set_is_selected(true), ), - SelectedField::Relation(rs) => self.with_relation(select, rs, parent_alias, ctx), + SelectedField::Relation(rs) => self.with_relation(select, rs, Vec::new().iter(), parent_alias, ctx), _ => select, } } @@ -58,15 +58,17 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { - let (subselect, _) = self.build_to_one_select(rs, parent_alias, |x| x, ctx); + let (subselect, child_alias) = self.build_to_one_select(rs, parent_alias, ctx); + let subselect = subselect.value(self.build_json_obj_fn(rs, child_alias, ctx)); select.value(Expression::from(subselect).alias(rs.field.name().to_owned())) } - fn add_to_many_relation<'a>( + fn 
add_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + _parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { @@ -75,10 +77,11 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { select.value(Expression::from(subselect).alias(rs.field.name().to_owned())) } - fn add_many_to_many_relation<'a>( + fn add_many_to_many_relation<'a, 'b>( &mut self, select: Select<'a>, rs: &RelationSelection, + _parent_virtuals: impl Iterator, parent_alias: Alias, ctx: &Context<'_>, ) -> Select<'a> { @@ -117,7 +120,7 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { )), SelectedField::Relation(rs) => Some(( Cow::from(rs.field.name().to_owned()), - Expression::from(self.with_relation(Select::default(), rs, parent_alias, ctx)), + Expression::from(self.with_relation(Select::default(), rs, Vec::new().iter(), parent_alias, ctx)), )), _ => None, }) @@ -144,6 +147,10 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { self.alias = self.alias.inc(AliasMode::Table); self.alias } + + fn was_virtual_processed_in_relation(&self, _vs: &VirtualSelection) -> bool { + false + } } impl SubqueriesSelectBuilder { @@ -188,3 +195,7 @@ impl SubqueriesSelectBuilder { .comment("outer") } } + +fn relation_count_alias_name(rf: &RelationField) -> String { + format!("aggr_count_{}_{}", rf.model().name(), rf.name()) +} diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index 4558eb77f335..086c80a2c785 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -332,11 +332,21 @@ impl VirtualSelection { VirtualSelection::RelationCount(rf, _) => rf, } } + + pub fn filter(&self) -> Option<&Filter> { + match self { + VirtualSelection::RelationCount(_, filter) => filter.as_ref(), + } + } } impl Display for VirtualSelection { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", 
self.db_alias()) + let model = self.relation_field().model(); + let model_name = model.name(); + let (obj, field) = self.serialized_name(); + + write!(f, "{model_name}.{obj}.{field}") } } From 57cc2f60457084432d52d2f1e6f7af493a93a263 Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Tue, 13 Feb 2024 14:40:41 +0100 Subject: [PATCH 10/25] qe: Map "Too many connections" errors to code P2037 (#4724) * qe: Map "Too many connections" errors to code P2037 Contributes to prisma/team-orm#945 * Update libs/user-facing-errors/src/query_engine/mod.rs Co-authored-by: Flavian Desverne * Preserve original error --------- Co-authored-by: Flavian Desverne --- libs/user-facing-errors/src/query_engine/mod.rs | 6 ++++++ quaint/src/connector/mssql/native/error.rs | 7 +++++++ quaint/src/connector/mysql/error.rs | 6 ++++++ quaint/src/connector/postgres/error.rs | 10 ++++++++++ quaint/src/error/mod.rs | 3 +++ query-engine/connectors/query-connector/src/error.rs | 9 +++++++++ .../connectors/sql-query-connector/src/error.rs | 5 +++++ 7 files changed, 46 insertions(+) diff --git a/libs/user-facing-errors/src/query_engine/mod.rs b/libs/user-facing-errors/src/query_engine/mod.rs index 48833cb490a0..e42fbcb03f56 100644 --- a/libs/user-facing-errors/src/query_engine/mod.rs +++ b/libs/user-facing-errors/src/query_engine/mod.rs @@ -335,3 +335,9 @@ pub struct ExternalError { /// id of the error in external system, which would allow to retrieve it later pub id: i32, } + +#[derive(Debug, UserFacingError, Serialize)] +#[user_facing(code = "P2037", message = "Too many database connections opened: {message}")] +pub struct TooManyConnections { + pub message: String, +} diff --git a/quaint/src/connector/mssql/native/error.rs b/quaint/src/connector/mssql/native/error.rs index 9c16bf9f2952..bbccffca8435 100644 --- a/quaint/src/connector/mssql/native/error.rs +++ b/quaint/src/connector/mssql/native/error.rs @@ -234,6 +234,13 @@ impl From for Error { builder.build() } + tiberius::error::Error::Server(e) 
if e.code() == 5828 => { + let mut builder = Error::builder(ErrorKind::TooManyConnections(e.clone().into())); + builder.set_original_code(format!("{}", e.code())); + builder.set_original_message(e.message().to_string()); + + builder.build() + } tiberius::error::Error::Server(e) => { let kind = ErrorKind::QueryError(e.clone().into()); diff --git a/quaint/src/connector/mysql/error.rs b/quaint/src/connector/mysql/error.rs index 7b4813bf0223..bb5edf957801 100644 --- a/quaint/src/connector/mysql/error.rs +++ b/quaint/src/connector/mysql/error.rs @@ -231,6 +231,12 @@ impl From for Error { builder.set_original_message(error.message); builder.build() } + 1040 | 1203 => { + let mut builder = Error::builder(ErrorKind::TooManyConnections(error.clone().into())); + builder.set_original_code(format!("{code}")); + builder.set_original_message(error.message); + builder.build() + } _ => { let kind = ErrorKind::QueryError( MysqlAsyncError::Server(MysqlError { diff --git a/quaint/src/connector/postgres/error.rs b/quaint/src/connector/postgres/error.rs index ab6ec7b07847..3dcc481eccba 100644 --- a/quaint/src/connector/postgres/error.rs +++ b/quaint/src/connector/postgres/error.rs @@ -218,6 +218,16 @@ impl From for Error { builder.build() } + "53300" => { + let code = value.code.to_owned(); + let message = value.to_string(); + let kind = ErrorKind::TooManyConnections(value.into()); + let mut builder = Error::builder(kind); + builder.set_original_code(code); + builder.set_original_message(message); + builder.build() + } + _ => { let code = value.code.to_owned(); let message = value.to_string(); diff --git a/quaint/src/error/mod.rs b/quaint/src/error/mod.rs index c28e97970ebc..661eb4d344ff 100644 --- a/quaint/src/error/mod.rs +++ b/quaint/src/error/mod.rs @@ -148,6 +148,9 @@ pub enum ErrorKind { #[error("Error querying the database: {}", _0)] QueryError(Box), + #[error("Too many DB connections opened")] + TooManyConnections(Box), + #[error("Invalid input provided to query: {}", _0)] 
QueryInvalidInput(String), diff --git a/query-engine/connectors/query-connector/src/error.rs b/query-engine/connectors/query-connector/src/error.rs index 24e64a6c587f..1d9937ee55aa 100644 --- a/query-engine/connectors/query-connector/src/error.rs +++ b/query-engine/connectors/query-connector/src/error.rs @@ -119,6 +119,12 @@ impl ConnectorError { ErrorKind::RecordDoesNotExist { cause } => Some(KnownError::new( user_facing_errors::query_engine::RecordRequiredButNotFound { cause: cause.clone() }, )), + + ErrorKind::TooManyConnections(e) => Some(user_facing_errors::KnownError::new( + user_facing_errors::query_engine::TooManyConnections { + message: format!("{}", e), + }, + )), _ => None, }; @@ -278,6 +284,9 @@ pub enum ErrorKind { #[error("Invalid driver adapter: {0}")] InvalidDriverAdapter(String), + + #[error("Too many DB connections opened: {}", _0)] + TooManyConnections(Box), } impl From for ConnectorError { diff --git a/query-engine/connectors/sql-query-connector/src/error.rs b/query-engine/connectors/sql-query-connector/src/error.rs index feb47fcbbb44..1156ed13a59a 100644 --- a/query-engine/connectors/sql-query-connector/src/error.rs +++ b/query-engine/connectors/sql-query-connector/src/error.rs @@ -200,6 +200,9 @@ pub enum SqlError { #[error("External connector error")] ExternalError(i32), + + #[error("Too many DB connections opened")] + TooManyConnections(Box), } impl SqlError { @@ -282,6 +285,7 @@ impl SqlError { SqlError::MissingFullTextSearchIndex => ConnectorError::from_kind(ErrorKind::MissingFullTextSearchIndex), SqlError::InvalidIsolationLevel(msg) => ConnectorError::from_kind(ErrorKind::InternalConversionError(msg)), SqlError::ExternalError(error_id) => ConnectorError::from_kind(ErrorKind::ExternalError(error_id)), + SqlError::TooManyConnections(e) => ConnectorError::from_kind(ErrorKind::TooManyConnections(e)), } } } @@ -336,6 +340,7 @@ impl From for SqlError { QuaintKind::TransactionWriteConflict => Self::TransactionWriteConflict, 
QuaintKind::RollbackWithoutBegin => Self::RollbackWithoutBegin, QuaintKind::ExternalError(error_id) => Self::ExternalError(error_id), + QuaintKind::TooManyConnections(e) => Self::TooManyConnections(e), e @ QuaintKind::UnsupportedColumnType { .. } => SqlError::ConversionError(e.into()), e @ QuaintKind::TransactionAlreadyClosed(_) => SqlError::TransactionAlreadyClosed(format!("{e}")), e @ QuaintKind::IncorrectNumberOfParameters { .. } => SqlError::QueryError(e.into()), From af793325dfe4f8f32cc15f3617a1b0b0d5199e7d Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 13 Feb 2024 15:43:23 +0100 Subject: [PATCH 11/25] nix: update flake (#4725) --- flake.lock | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/flake.lock b/flake.lock index d0bc10636991..7a2ebc464417 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1703439018, - "narHash": "sha256-VT+06ft/x3eMZ1MJxWzQP3zXFGcrxGo5VR2rB7t88hs=", + "lastModified": 1707685877, + "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=", "owner": "ipetkov", "repo": "crane", - "rev": "afdcd41180e3dfe4dac46b5ee396e3b12ccc967a", + "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" }, "original": { @@ -27,11 +27,11 @@ ] }, "locked": { - "lastModified": 1704152458, - "narHash": "sha256-DS+dGw7SKygIWf9w4eNBUZsK+4Ug27NwEWmn2tnbycg=", + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "88a2cd8166694ba0b6cb374700799cec53aef527", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", "type": "github" }, "original": { @@ -47,11 +47,11 @@ ] }, "locked": { - "lastModified": 1701680307, - "narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=", + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", "owner": "numtide", "repo": "flake-utils", - "rev": 
"4022d587cbbfd70fe950c1e2083a02621806a725", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", "type": "github" }, "original": { @@ -82,11 +82,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1703961334, - "narHash": "sha256-M1mV/Cq+pgjk0rt6VxoyyD+O8cOUiai8t9Q6Yyq4noY=", + "lastModified": 1707689078, + "narHash": "sha256-UUGmRa84ZJHpGZ1WZEBEUOzaPOWG8LZ0yPg1pdDF/yM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "b0d36bd0a420ecee3bc916c91886caca87c894e9", + "rev": "f9d39fb9aff0efee4a3d5f4a6d7c17701d38a1d8", "type": "github" }, "original": { @@ -116,11 +116,11 @@ ] }, "locked": { - "lastModified": 1704075545, - "narHash": "sha256-L3zgOuVKhPjKsVLc3yTm2YJ6+BATyZBury7wnhyc8QU=", + "lastModified": 1707790272, + "narHash": "sha256-KQXPNl3BLdRbz7xx+mwIq/017fxLRk6JhXHxVWCKsTU=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "a0df72e106322b67e9c6e591fe870380bd0da0d5", + "rev": "8dfbe2dffc28c1a18a29ffa34d5d0b269622b158", "type": "github" }, "original": { From 7e5bcbefc94df2855deae774ff61a27c67200ad2 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 13 Feb 2024 15:43:35 +0100 Subject: [PATCH 12/25] qe: fix mismatch between selection indexes for joins (#4705) Both the old query builder and the new query builder append virtual selections after all other selections in the query. This means we have to take it into account instead of relying on the fields in the result set to be in the same order as in `FieldSelection`. The old code path converted the selection into virtuals-last form but the new one didn't, which resulted in the cached indexes for the relations and virtuals to be mixed up when selecting relation aggregations before the relations in the query. Now the new code path does the same transformation. 
Fixes: https://github.com/prisma/team-orm/issues/927 --- .../tests/new/regressions/mod.rs | 1 + .../tests/new/regressions/team_orm_927.rs | 90 +++++++++++++++++++ .../src/database/operations/read.rs | 6 +- .../query-structure/src/field_selection.rs | 4 + 4 files changed, 99 insertions(+), 2 deletions(-) create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/team_orm_927.rs diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs index deaaa7e84313..4b4aa97479d6 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs @@ -30,3 +30,4 @@ mod prisma_7072; mod prisma_7434; mod prisma_8265; mod prisma_engines_4286; +mod team_orm_927; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/team_orm_927.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/team_orm_927.rs new file mode 100644 index 000000000000..45d7eba7aad9 --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/team_orm_927.rs @@ -0,0 +1,90 @@ +//! Regression test for https://github.com/prisma/team-orm/issues/927 + +use query_engine_tests::*; + +#[test_suite(schema(schema))] +mod count_before_relation { + fn schema() -> String { + indoc! 
{ + r#" + model Parent { + #id(id, Int, @id) + children Child[] + } + + model Child { + #id(id, Int, @id) + parentId Int + parent Parent @relation(fields: [parentId], references: [id]) + } + "# + } + .to_owned() + } + + #[connector_test] + async fn find_unique(runner: Runner) -> TestResult<()> { + seed(&runner).await?; + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findUniqueParent( + where: { id: 1 } + ) { + _count { children } + children { id } + } + } + "# + ), + @r###"{"data":{"findUniqueParent":{"_count":{"children":1},"children":[{"id":1}]}}}"### + ); + + Ok(()) + } + + #[connector_test] + async fn find_many(runner: Runner) -> TestResult<()> { + seed(&runner).await?; + + insta::assert_snapshot!( + run_query!( + runner, + r#" + query { + findManyParent { + _count { children } + children { id } + } + } + "# + ), + @r###"{"data":{"findManyParent":[{"_count":{"children":1},"children":[{"id":1}]}]}}"### + ); + + Ok(()) + } + + async fn seed(runner: &Runner) -> TestResult<()> { + run_query!( + runner, + r#" + mutation { + createOneParent( + data: { + id: 1, + children: { + create: { id: 1 } + } + } + ) { id } + } + "# + ); + + Ok(()) + } +} diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs index f9041c6dcd78..13206f560776 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs @@ -33,6 +33,7 @@ pub(crate) async fn get_single_record_joins( selected_fields: &FieldSelection, ctx: &Context<'_>, ) -> crate::Result> { + let selected_fields = selected_fields.to_virtuals_last(); let field_names: Vec<_> = selected_fields.db_names_grouping_virtuals().collect(); let idents = selected_fields.type_identifiers_with_arities_grouping_virtuals(); @@ -44,7 +45,7 @@ pub(crate) async fn get_single_record_joins( let query = 
query_builder::select::SelectBuilder::build( QueryArguments::from((model.clone(), filter.clone())), - selected_fields, + &selected_fields, ctx, ); @@ -130,6 +131,7 @@ pub(crate) async fn get_many_records_joins( selected_fields: &FieldSelection, ctx: &Context<'_>, ) -> crate::Result { + let selected_fields = selected_fields.to_virtuals_last(); let field_names: Vec<_> = selected_fields.db_names_grouping_virtuals().collect(); let idents = selected_fields.type_identifiers_with_arities_grouping_virtuals(); let meta = column_metadata::create(field_names.as_slice(), idents.as_slice()); @@ -155,7 +157,7 @@ pub(crate) async fn get_many_records_joins( _ => (), }; - let query = query_builder::select::SelectBuilder::build(query_arguments.clone(), selected_fields, ctx); + let query = query_builder::select::SelectBuilder::build(query_arguments.clone(), &selected_fields, ctx); for item in conn.filter(query.into(), meta.as_slice(), ctx).await?.into_iter() { let mut record = Record::from(item); diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index 086c80a2c785..b6d9bcb883e9 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -68,6 +68,10 @@ impl FieldSelection { FieldSelection::new(non_virtuals.into_iter().chain(virtuals).collect()) } + pub fn to_virtuals_last(&self) -> Self { + self.clone().into_virtuals_last() + } + /// Returns the selections, grouping the virtual fields that are wrapped into objects in the /// query (like `_count`) and returning only the first virtual field in each of those groups. 
/// This is useful when we want to treat the group as a whole but we don't need the information From 9308f18a8ff04428662b14c89b19249b48139267 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 13 Feb 2024 18:48:26 +0100 Subject: [PATCH 13/25] ci(wasm-size): compare with the base branch and not self (#4728) This was commented out in https://github.com/prisma/prisma-engines/pull/4713 to make the pipeline pass in the PR because the changes weren't compatible with the pipeline on `main` with the intention to uncomment immediately after landing the changes, see https://github.com/prisma/prisma-engines/pull/4713#discussion_r1486369523. --- .github/workflows/wasm-size.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wasm-size.yml b/.github/workflows/wasm-size.yml index bfabe08e84ef..4bf6cb7bd518 100644 --- a/.github/workflows/wasm-size.yml +++ b/.github/workflows/wasm-size.yml @@ -50,8 +50,8 @@ jobs: steps: - name: Checkout base branch uses: actions/checkout@v4 - # with: - # ref: ${{ github.event.pull_request.base.sha }} + with: + ref: ${{ github.event.pull_request.base.sha }} - uses: ./.github/workflows/include/rust-wasm-setup From 1eb2deb601a5ecb45ed15fcc800002e72e6db313 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=ABl=20Galeran?= Date: Tue, 13 Feb 2024 18:57:11 +0100 Subject: [PATCH 14/25] ci(wasm comments): only create comments if the branch is from the repo (#4726) --- .github/workflows/wasm-benchmarks.yml | 4 ++-- .github/workflows/wasm-size.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/wasm-benchmarks.yml b/.github/workflows/wasm-benchmarks.yml index d0630ff90108..8e5a3124be79 100644 --- a/.github/workflows/wasm-benchmarks.yml +++ b/.github/workflows/wasm-benchmarks.yml @@ -129,9 +129,9 @@ jobs: - name: Create or update report uses: peter-evans/create-or-update-comment@v3 - # Only run on our repository + # Only run on branches from our repository # It avoids an 
expected failure on forks - if: github.repository == 'prisma/prisma-engines' + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} with: comment-id: ${{ steps.findReportComment.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} diff --git a/.github/workflows/wasm-size.yml b/.github/workflows/wasm-size.yml index 4bf6cb7bd518..82d7a2d7e141 100644 --- a/.github/workflows/wasm-size.yml +++ b/.github/workflows/wasm-size.yml @@ -106,9 +106,9 @@ jobs: - name: Create or update report uses: peter-evans/create-or-update-comment@v3 - # Only run on our repository + # Only run on branches from our repository # It avoids an expected failure on forks - if: github.repository == 'prisma/prisma-engines' + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} with: comment-id: ${{ steps.findReportComment.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} From ed5ca1a26888e528edca130cd43914e3936d6561 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 13 Feb 2024 22:23:14 +0100 Subject: [PATCH 15/25] qe: fix a compiler warning when driver adapters are disabled (#4727) Fixes the following warning when compiling the tests: ``` warning: unused variable: `adapter` --> query-engine/request-handlers/src/load_executor.rs:28:29 | 28 | ConnectorKind::Js { adapter, _phantom } => { | ^^^^^^^ help: try ignoring the field: `adapter: _` | = note: `#[warn(unused_variables)]` on by default ``` --- query-engine/request-handlers/src/load_executor.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/query-engine/request-handlers/src/load_executor.rs b/query-engine/request-handlers/src/load_executor.rs index 97f029cc60ed..6cb112383f41 100644 --- a/query-engine/request-handlers/src/load_executor.rs +++ b/query-engine/request-handlers/src/load_executor.rs @@ -25,14 +25,14 @@ pub async fn load( features: PreviewFeatures, ) -> query_core::Result> { match connector_kind { - 
ConnectorKind::Js { adapter, _phantom } => { - #[cfg(not(feature = "driver-adapters"))] + #[cfg(not(feature = "driver-adapters"))] + ConnectorKind::Js { .. } => { panic!("Driver adapters are not enabled, but connector mode is set to JS"); - - #[cfg(feature = "driver-adapters")] - driver_adapter(adapter, features).await } + #[cfg(feature = "driver-adapters")] + ConnectorKind::Js { adapter, _phantom } => driver_adapter(adapter, features).await, + #[cfg(feature = "native")] ConnectorKind::Rust { url, datasource } => { if let Ok(value) = env::var("PRISMA_DISABLE_QUAINT_EXECUTORS") { From 69458aec3eb674d593c1d7bb6014522aa04c605d Mon Sep 17 00:00:00 2001 From: Serhii Tatarintsev Date: Wed, 14 Feb 2024 11:28:35 +0100 Subject: [PATCH 16/25] crosstarget-utils: Propery conditionally check for `performance` global (#4730) wasm-bindgen code, generated by `Option` binding still does not account for global not existing at all (it just handles undefined/null case). Replacing that with hand-written code that would check it existence manually. 
--- libs/crosstarget-utils/src/wasm/time.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/libs/crosstarget-utils/src/wasm/time.rs b/libs/crosstarget-utils/src/wasm/time.rs index 1c230ba1eecc..5d2f7d64b59b 100644 --- a/libs/crosstarget-utils/src/wasm/time.rs +++ b/libs/crosstarget-utils/src/wasm/time.rs @@ -1,4 +1,4 @@ -use js_sys::{Date, Function, Promise}; +use js_sys::{Date, Function, Promise, Reflect}; use std::future::Future; use std::time::Duration; use wasm_bindgen::prelude::*; @@ -8,10 +8,7 @@ use crate::common::TimeoutError; #[wasm_bindgen] extern "C" { - type Performance; - #[wasm_bindgen(js_name = "performance")] - static PERFORMANCE: Option; #[wasm_bindgen(method)] fn now(this: &Performance) -> f64; @@ -53,5 +50,16 @@ where } fn now() -> f64 { - PERFORMANCE.as_ref().map(|p| p.now()).unwrap_or_else(Date::now) + let global = js_sys::global(); + Reflect::get(&global, &"performance".into()) + .ok() + .and_then(|value| { + if value.is_undefined() { + None + } else { + Some(Performance::from(value)) + } + }) + .map(|p| p.now()) + .unwrap_or_else(Date::now) } From ced8cab49ce5e2ce1c7376d852b5cf8c5846bc9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Fern=C3=A1ndez?= Date: Wed, 14 Feb 2024 12:10:47 +0100 Subject: [PATCH 17/25] Use non-default gzip options when compressing (#4729) * Use non-default gzip options when compressing * Reduce non-determinism in tar * use updated makefile for both builds * try ustar format to discard pax headers * remove time zone since it prints a warning * add more options * Revert changes * Compress and measure only the wasm file for the QE --------- Co-authored-by: Alexey Orlenko --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index e55627f7a436..b6921cc24f04 100644 --- a/Makefile +++ b/Makefile @@ -64,7 +64,7 @@ build-qe-wasm: build-qe-wasm-gz: build-qe-wasm @cd query-engine/query-engine-wasm/pkg && \ for provider in postgresql mysql 
sqlite; do \ - tar -zcvf $$provider.gz $$provider; \ + gzip -knc $$provider/query_engine_bg.wasm > $$provider.gz; \ done; build-schema-wasm: @@ -372,7 +372,7 @@ test-driver-adapter-planetscale-wasm: test-planetscale-wasm measure-qe-wasm: build-qe-wasm-gz @cd query-engine/query-engine-wasm/pkg; \ for provider in postgresql mysql sqlite; do \ - echo "$${provider}_size=$$(cat $$provider/* | wc -c | tr -d ' ')" >> $(ENGINE_SIZE_OUTPUT); \ + echo "$${provider}_size=$$(cat $$provider/query_engine_bg.wasm | wc -c | tr -d ' ')" >> $(ENGINE_SIZE_OUTPUT); \ echo "$${provider}_size_gz=$$(cat $$provider.gz | wc -c | tr -d ' ')" >> $(ENGINE_SIZE_OUTPUT); \ done; From 93768800da47b2505e033a0c7c7602426e3b9088 Mon Sep 17 00:00:00 2001 From: Alberto Schiabel Date: Wed, 14 Feb 2024 15:28:43 +0100 Subject: [PATCH 18/25] feat(driver-adapters): enable some PlanetScale/LibSQL tests, and add comments (#4733) * feat(driver-adapters): uncomment PlanetScale tests * feat(driver-adapters): uncomment LibSQL tests * chore(driver-adapters): add comments --- .../tests/queries/filters/bigint_filter.rs | 2 +- .../tests/queries/filters/bytes_filter.rs | 8 ++++---- .../queries/filters/field_reference/bigint_filter.rs | 10 ++-------- .../queries/filters/field_reference/json_filter.rs | 4 ++-- .../queries/filters/field_reference/string_filter.rs | 6 +++--- .../tests/queries/filters/json_filters.rs | 12 ++++++------ .../tests/queries/filters/search_filter.rs | 2 ++ .../order_and_pagination/nested_pagination.rs | 6 +++--- .../tests/queries/order_and_pagination/pagination.rs | 6 +++--- .../query-engine-tests/tests/raw/sql/errors.rs | 8 ++++++-- .../query-engine-tests/tests/raw/sql/typed_output.rs | 2 +- 11 files changed, 33 insertions(+), 33 deletions(-) diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bigint_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bigint_filter.rs index 88816553094d..8230c7e2f04b 100644 --- 
a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bigint_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bigint_filter.rs @@ -1,7 +1,7 @@ use super::common_test_data; use query_engine_tests::*; -#[test_suite(schema(schemas::common_nullable_types), exclude(Sqlite("libsql.js.wasm")))] +#[test_suite(schema(schemas::common_nullable_types))] mod bigint_filter_spec { use query_engine_tests::run_query; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bytes_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bytes_filter.rs index 58ec7e08f8c8..fcb04d572f53 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bytes_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/bytes_filter.rs @@ -1,10 +1,10 @@ use super::common_test_data; use query_engine_tests::*; -#[test_suite( - schema(schemas::common_nullable_types), - exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")) -)] +// On PlanetScale (wasm), this fails with: +// "TypeError: The encoded data was not valid for encoding utf-8" +// at "TextDecoder.decode" +#[test_suite(schema(schemas::common_nullable_types), exclude(Vitess("planetscale.js.wasm")))] mod bytes_filter_spec { use query_engine_tests::run_query; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/bigint_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/bigint_filter.rs index 7234fcc253d0..bcd12fb1b5b7 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/bigint_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/bigint_filter.rs @@ -6,10 +6,7 @@ mod bigint_filter { use super::setup; use query_engine_tests::run_query; - 
#[connector_test( - schema(setup::common_types), - exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")) - )] + #[connector_test(schema(setup::common_types))] async fn basic_where(runner: Runner) -> TestResult<()> { setup::test_data_common_types(&runner).await?; @@ -31,10 +28,7 @@ mod bigint_filter { Ok(()) } - #[connector_test( - schema(setup::common_types), - exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")) - )] + #[connector_test(schema(setup::common_types))] async fn numeric_comparison_filters(runner: Runner) -> TestResult<()> { setup::test_data_common_types(&runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/json_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/json_filter.rs index 1b06e71b75a3..53d27624b77d 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/json_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/json_filter.rs @@ -147,7 +147,7 @@ mod json_filter { Ok(()) } - #[connector_test(schema(schema), exclude(MySQL(5.6), Vitess("planetscale.js")))] + #[connector_test(schema(schema), exclude(MySQL(5.6)))] async fn string_comparison_filters(runner: Runner) -> TestResult<()> { test_string_data(&runner).await?; @@ -190,7 +190,7 @@ mod json_filter { Ok(()) } - #[connector_test(schema(schema), exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(schema(schema), exclude(MySQL(5.6)))] async fn array_comparison_filters(runner: Runner) -> TestResult<()> { test_array_data(&runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/string_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/string_filter.rs index 708f2d15e83e..f9c2e6e06acc 100644 --- 
a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/string_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/string_filter.rs @@ -6,7 +6,7 @@ mod string_filter { use super::setup; use query_engine_tests::run_query; - #[connector_test(exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")))] + #[connector_test] async fn basic_where_sensitive(runner: Runner) -> TestResult<()> { setup::test_data_common_types(&runner).await?; @@ -50,7 +50,7 @@ mod string_filter { Ok(()) } - #[connector_test(exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")))] + #[connector_test] async fn numeric_comparison_filters_sensitive(runner: Runner) -> TestResult<()> { setup::test_data_common_types(&runner).await?; @@ -225,7 +225,7 @@ mod string_filter { Ok(()) } - #[connector_test(exclude(Sqlite("libsql.js.wasm"), Vitess("planetscale.js.wasm")))] + #[connector_test] async fn string_comparison_filters_sensitive(runner: Runner) -> TestResult<()> { setup::test_data_common_types(&runner).await?; run_query!( diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/json_filters.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/json_filters.rs index 87ff655d7b34..eb7dd531deb1 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/json_filters.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/json_filters.rs @@ -280,7 +280,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(exclude(MySQL(5.6)))] async fn array_contains(runner: Runner) -> TestResult<()> { array_contains_runner(runner).await?; @@ -389,7 +389,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + 
#[connector_test(exclude(MySQL(5.6)))] async fn array_starts_with(runner: Runner) -> TestResult<()> { array_starts_with_runner(runner).await?; @@ -496,7 +496,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(exclude(MySQL(5.6)))] async fn array_ends_with(runner: Runner) -> TestResult<()> { array_ends_with_runner(runner).await?; @@ -535,7 +535,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(exclude(MySQL(5.6)))] async fn string_contains(runner: Runner) -> TestResult<()> { string_contains_runner(runner).await?; @@ -575,7 +575,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(exclude(MySQL(5.6)))] async fn string_starts_with(runner: Runner) -> TestResult<()> { string_starts_with_runner(runner).await?; @@ -614,7 +614,7 @@ mod json_filters { Ok(()) } - #[connector_test(exclude(MySQL(5.6), Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test(exclude(MySQL(5.6)))] async fn string_ends_with(runner: Runner) -> TestResult<()> { string_ends_with_runner(runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/search_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/search_filter.rs index abf7f04efdf3..a86bcf176cb6 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/search_filter.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/search_filter.rs @@ -229,6 +229,8 @@ mod search_filter_with_index { super::ensure_filter_tree_shake_works(runner).await } + // This test correctly fails on PlanetScale, but its message is not the same as the one in the test: + // "DatabaseError: Can't find FULLTEXT index matching the column list (errno 1191) 
(sqlstate HY000)" #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] async fn throws_error_on_missing_index(runner: Runner) -> TestResult<()> { super::create_test_data(&runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs index 34af3fc21ed9..a8f06e45b84c 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs @@ -80,7 +80,7 @@ mod nested_pagination { ***************/ // should skip the first item - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn mid_lvl_skip_1(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -102,7 +102,7 @@ mod nested_pagination { } // should "skip all items" - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn mid_lvl_skip_3(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -124,7 +124,7 @@ mod nested_pagination { } // should "skip all items" - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn mid_lvl_skip_4(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/pagination.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/pagination.rs index e6cbee21d9b7..f0874cae02c8 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/pagination.rs +++ 
b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/pagination.rs @@ -277,7 +277,7 @@ mod pagination { ********************/ // "A skip" should "return all records after the offset specified" - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn skip_returns_all_after_offset(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -296,7 +296,7 @@ mod pagination { } // "A skip with order reversed" should "return all records after the offset specified" - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn skip_reversed_order(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -315,7 +315,7 @@ mod pagination { } // "A skipping beyond all records" should "return no records" - #[connector_test(exclude(Vitess("planetscale.js", "planetscale.js.wasm")))] + #[connector_test] async fn skipping_beyond_all_records(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/errors.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/errors.rs index d25b94e985d9..10431e0db3c9 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/errors.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/errors.rs @@ -35,7 +35,8 @@ mod raw_errors { } // On driver-adapters, this fails with: - // Raw query failed. Code: `22P02`. Message: `ERROR: invalid input syntax for type integer: \\\"{\\\"1\\\"}\\\"` + // Raw query failed. Code: `22P02`. Message: `ERROR: invalid input syntax for type integer: \\\"{\\\"1\\\"}\\\"`. 
+ // See: `list_param_for_scalar_column_should_not_panic_pg_js` #[connector_test( schema(common_nullable_types), only(Postgres), @@ -55,7 +56,10 @@ mod raw_errors { Ok(()) } - #[connector_test(schema(common_nullable_types), only(Postgres("neon.js"), Postgres("pg.js")))] + #[connector_test( + schema(common_nullable_types), + only(Postgres("neon.js", "neon.js.wasm", "pg.js", "pg.js.wasm")) + )] async fn list_param_for_scalar_column_should_not_panic_pg_js(runner: Runner) -> TestResult<()> { assert_error!( runner, diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/typed_output.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/typed_output.rs index 9b7ff37f256c..c3687ddd9f3e 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/typed_output.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/raw/sql/typed_output.rs @@ -483,7 +483,7 @@ mod typed_output { schema.to_owned() } - #[connector_test(schema(schema_sqlite), only(Sqlite), exclude(Sqlite("libsql.js.wasm")))] + #[connector_test(schema(schema_sqlite), only(Sqlite))] async fn all_scalars_sqlite(runner: Runner) -> TestResult<()> { create_row( &runner, From 58fef65a004e8cdbe4a2e3cd0847738ab8b6e331 Mon Sep 17 00:00:00 2001 From: Alberto Schiabel Date: Wed, 14 Feb 2024 15:43:49 +0100 Subject: [PATCH 19/25] feat: unify dependencies' versions, moving them to `workspace` (#4718) * chore: unify serde_json versions * chore: move enumflags2 to workspace deps * chore: move uuid + async-trait to workspace deps * chore: unify chrono versions * chore: unify url versions * chore: move url dep to workspace * chore: update indexmap, replace deprecated ".remove()" with ".swap_remove()" * chore: fix compilation * chore: move tracing to workspace deps --- Cargo.lock | 151 ++++++++++-------- Cargo.toml | 9 +- libs/prisma-value/Cargo.toml | 2 +- libs/query-engine-common/Cargo.toml | 6 +- libs/test-cli/Cargo.toml | 8 +- 
libs/test-setup/Cargo.toml | 6 +- libs/user-facing-errors/Cargo.toml | 2 +- prisma-fmt/Cargo.toml | 2 +- psl/parser-database/Cargo.toml | 5 +- psl/psl-core/Cargo.toml | 6 +- quaint/Cargo.toml | 12 +- quaint/quaint-test-setup/Cargo.toml | 2 +- query-engine/black-box-tests/Cargo.toml | 4 +- .../connector-test-kit-rs/qe-setup/Cargo.toml | 4 +- .../query-engine-tests/Cargo.toml | 8 +- .../query-tests-setup/Cargo.toml | 8 +- .../mongodb-query-connector/Cargo.toml | 10 +- .../connectors/query-connector/Cargo.toml | 8 +- .../query-connector/src/write_args.rs | 2 +- .../connectors/sql-query-connector/Cargo.toml | 8 +- query-engine/core/Cargo.toml | 12 +- .../extractors/filters/mod.rs | 4 +- .../extractors/filters/scalar.rs | 8 +- .../extractors/query_arguments.rs | 6 +- .../write/nested/connect_or_create_nested.rs | 16 +- .../write/nested/create_nested.rs | 4 +- .../write/nested/update_nested.rs | 12 +- .../write/nested/upsert_nested.rs | 6 +- .../write/write_args_parser.rs | 10 +- query-engine/dmmf/Cargo.toml | 2 +- query-engine/driver-adapters/Cargo.toml | 6 +- query-engine/metrics/Cargo.toml | 4 +- query-engine/query-engine-node-api/Cargo.toml | 6 +- query-engine/query-engine-wasm/Cargo.toml | 6 +- query-engine/query-engine/Cargo.toml | 8 +- query-engine/query-structure/Cargo.toml | 2 +- query-engine/request-handlers/Cargo.toml | 6 +- query-engine/request-handlers/src/response.rs | 2 +- schema-engine/cli/Cargo.toml | 4 +- .../mongodb-schema-connector/Cargo.toml | 8 +- .../connectors/schema-connector/Cargo.toml | 6 +- .../sql-schema-connector/Cargo.toml | 12 +- schema-engine/core/Cargo.toml | 10 +- .../sql-introspection-tests/Cargo.toml | 6 +- schema-engine/sql-migration-tests/Cargo.toml | 8 +- schema-engine/sql-schema-describer/Cargo.toml | 8 +- 46 files changed, 236 insertions(+), 209 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e9a74719224..446a80ea2891 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,14 +30,15 @@ dependencies = [ [[package]] name = "ahash" 
-version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom 0.2.11", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -149,18 +150,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -403,7 +404,7 @@ version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88c18b51216e1f74b9d769cead6ace2f82b965b807e3d73330aabe9faec31c84" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.7", "base64 0.13.1", "bitvec", "chrono", @@ -854,7 +855,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f34ba9a9bcb8645379e9de8cb3ecfcf4d1c85ba66d90deb3259206fa5aa193b" dependencies = [ "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -966,7 +967,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", - "hashbrown 0.14.0", + "hashbrown 0.14.3", "lock_api", "once_cell", "parking_lot_core 0.9.8", @@ -1069,7 +1070,7 @@ dependencies = [ "colored", "expect-test", "flate2", - "indexmap 1.9.3", + "indexmap 2.2.2", "indoc 2.0.3", "itertools 0.12.0", "pretty_assertions", @@ -1230,7 +1231,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -1345,9 +1346,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -1377,7 +1378,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -1389,7 +1390,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -1401,7 +1402,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -1472,7 +1473,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -1592,7 +1593,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.0.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util 0.7.8", @@ -1629,16 +1630,16 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.7", ] [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.7", "allocator-api2", ] @@ -1648,7 +1649,7 @@ version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.3", ] [[package]] @@ -1841,9 +1842,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1857,17 +1858,17 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] name = "indexmap" -version = "2.0.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.3", + "serde", ] [[package]] @@ -2486,7 +2487,7 @@ dependencies = [ "cuid", "derive_more", "futures", - "indexmap 1.9.3", + "indexmap 2.2.2", "itertools 0.12.0", "mongodb", "mongodb-client", @@ -2496,7 +2497,7 @@ dependencies = [ "query-connector", "query-engine-metrics", "query-structure", - "rand 0.7.3", + "rand 0.8.5", "regex", "serde", "serde_json", @@ -2679,7 +2680,7 @@ dependencies = [ "napi-derive-backend", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -2694,7 +2695,7 @@ dependencies = [ "quote", "regex", "semver 1.0.18", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -2889,7 +2890,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -3068,7 +3069,7 @@ dependencies = [ "diagnostics", "either", "enumflags2", - 
"indexmap 1.9.3", + "indexmap 2.2.2", "rustc-hash", "schema-ast", ] @@ -3105,9 +3106,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -3139,7 +3140,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -3208,7 +3209,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -3415,9 +3416,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -3644,7 +3645,7 @@ dependencies = [ "async-trait", "chrono", "futures", - "indexmap 1.9.3", + "indexmap 2.2.2", "itertools 0.12.0", "prisma-value", "query-structure", @@ -3668,7 +3669,7 @@ dependencies = [ "cuid", "enumflags2", "futures", - "indexmap 1.9.3", + "indexmap 2.2.2", "itertools 0.12.0", "lru 0.7.8", "once_cell", @@ -3900,7 +3901,7 @@ dependencies = [ "colored", "enumflags2", "hyper", - "indexmap 1.9.3", + "indexmap 2.2.2", "indoc 2.0.3", "insta", "itertools 0.12.0", @@ -3938,9 +3939,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.32" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = 
"291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -4191,7 +4192,7 @@ dependencies = [ "dmmf", "futures", "graphql-parser", - "indexmap 1.9.3", + "indexmap 2.2.2", "insta", "itertools 0.12.0", "mongodb-query-connector", @@ -4684,7 +4685,7 @@ checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -4695,7 +4696,7 @@ checksum = "e578a843d40b4189a4d66bba51d7684f57da5bd7c304c64e14bd63efbef49509" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -4704,7 +4705,7 @@ version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.2.2", "itoa", "ryu", "serde", @@ -4718,7 +4719,7 @@ checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -4777,7 +4778,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -5015,7 +5016,7 @@ dependencies = [ "quaint", "query-connector", "query-structure", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_json", "thiserror", @@ -5067,7 +5068,7 @@ dependencies = [ "either", "enumflags2", "expect-test", - "indexmap 1.9.3", + "indexmap 2.2.2", "indoc 2.0.3", "once_cell", "pretty_assertions", @@ -5200,9 +5201,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -5323,7 +5324,7 @@ 
checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -5468,7 +5469,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -5673,7 +5674,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -5836,7 +5837,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.28", + "syn 2.0.48", ] [[package]] @@ -5947,12 +5948,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", "serde", ] @@ -6121,7 +6122,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", "wasm-bindgen-shared", ] @@ -6155,7 +6156,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6522,3 +6523,23 @@ name = "yansi" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" 
+version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] diff --git a/Cargo.toml b/Cargo.toml index 08f0739380a4..bf60948855a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,8 @@ members = [ ] [workspace.dependencies] +async-trait = { version = "0.1.77" } +enumflags2 = { version = "0.7", features = ["serde"] } psl = { path = "./psl/psl" } serde_json = { version = "1", features = ["float_roundtrip", "preserve_order"] } serde = { version = "1", features = ["derive"] } @@ -47,9 +49,11 @@ tokio = { version = "1.25", features = [ "parking_lot", "time", ] } +chrono = { version = "0.4", features = ["serde"] } user-facing-errors = { path = "./libs/user-facing-errors" } -uuid = { version = "1", features = ["serde"] } +uuid = { version = "1", features = ["serde", "v4"] } indoc = "2.0.1" +indexmap = { version = "2.2.2", features = ["serde"] } itertools = "0.12" connection-string = "0.2" napi = { version = "2.15.1", default-features = false, features = [ @@ -59,13 +63,16 @@ napi = { version = "2.15.1", default-features = false, features = [ ] } napi-derive = "2.15.0" js-sys = { version = "0.3" } +rand = { version = "0.8" } serde_repr = { version = "0.1.17" } serde-wasm-bindgen = { version = "0.5" } +tracing = { version = "0.1" } tsify = { version = "0.4.5" } wasm-bindgen = { version = "0.2.89" } wasm-bindgen-futures = { version = "0.4" } wasm-rs-dbg = { version = "0.1.2" } wasm-bindgen-test = { version = "0.3.0" } +url = { version = "2.5.0" } [workspace.dependencies.quaint] path = "quaint" diff --git a/libs/prisma-value/Cargo.toml b/libs/prisma-value/Cargo.toml index c5ede20c91c6..1a0d28e06db3 100644 --- a/libs/prisma-value/Cargo.toml +++ b/libs/prisma-value/Cargo.toml @@ -5,7 +5,7 @@ version = "0.1.0" [dependencies] base64 = "0.13" -chrono = { version = "0.4", features = ["serde"] } 
+chrono.workspace = true once_cell = "1.3" regex = "1.2" bigdecimal = "0.3" diff --git a/libs/query-engine-common/Cargo.toml b/libs/query-engine-common/Cargo.toml index 215897a3aa45..e2fb3b4bfe48 100644 --- a/libs/query-engine-common/Cargo.toml +++ b/libs/query-engine-common/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] thiserror = "1" -url = "2" +url.workspace = true query-connector = { path = "../../query-engine/connectors/query-connector" } query-core = { path = "../../query-engine/core" } user-facing-errors = { path = "../user-facing-errors" } @@ -15,8 +15,8 @@ serde_json.workspace = true serde.workspace = true connection-string.workspace = true psl.workspace = true -async-trait = "0.1" -tracing = "0.1" +async-trait.workspace = true +tracing.workspace = true tracing-subscriber = { version = "0.3" } tracing-futures = "0.2" tracing-opentelemetry = "0.17.3" diff --git a/libs/test-cli/Cargo.toml b/libs/test-cli/Cargo.toml index fe4905dd0110..936ff3d9ee46 100644 --- a/libs/test-cli/Cargo.toml +++ b/libs/test-cli/Cargo.toml @@ -7,14 +7,14 @@ edition = "2021" anyhow = "1.0.26" colored = "2" structopt = "0.3.8" -enumflags2 = "0.7" +enumflags2.workspace = true dmmf = { path = "../../query-engine/dmmf" } schema-core = { path = "../../schema-engine/core" } schema-connector = { path = "../../schema-engine/connectors/schema-connector" } psl.workspace = true tokio.workspace = true -serde_json = { version = "1.0", features = ["float_roundtrip"] } -tracing = "0.1" +serde_json.workspace = true +tracing.workspace = true tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-error = "0.2" -async-trait = "0.1.52" +async-trait.workspace = true diff --git a/libs/test-setup/Cargo.toml b/libs/test-setup/Cargo.toml index 22fec3cf7355..25fd1849d01f 100644 --- a/libs/test-setup/Cargo.toml +++ b/libs/test-setup/Cargo.toml @@ -6,13 +6,13 @@ edition = "2021" [dependencies] connection-string.workspace = true dissimilar = "1.0.3" -enumflags2 = "0.7" 
+enumflags2.workspace = true once_cell = "1.3.1" tokio = { workspace = true, optional = true } -tracing = "0.1" +tracing.workspace = true tracing-error = "0.2" tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } -url = "2.1.1" +url.workspace = true quaint = { workspace = true, optional = true } [features] diff --git a/libs/user-facing-errors/Cargo.toml b/libs/user-facing-errors/Cargo.toml index cbfdde2ead48..c679567cf931 100644 --- a/libs/user-facing-errors/Cargo.toml +++ b/libs/user-facing-errors/Cargo.toml @@ -8,7 +8,7 @@ user-facing-error-macros = { path = "../user-facing-error-macros" } serde_json.workspace = true serde.workspace = true backtrace = "0.3.40" -tracing = "0.1" +tracing.workspace = true indoc.workspace = true itertools.workspace = true quaint = { path = "../../quaint", default-features = false, optional = true } diff --git a/prisma-fmt/Cargo.toml b/prisma-fmt/Cargo.toml index aa3d7a9580bb..be8f8f1eb87c 100644 --- a/prisma-fmt/Cargo.toml +++ b/prisma-fmt/Cargo.toml @@ -12,7 +12,7 @@ serde.workspace = true indoc.workspace = true lsp-types = "0.91.1" log = "0.4.14" -enumflags2 = "0.7" +enumflags2.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] structopt = "0.3" diff --git a/psl/parser-database/Cargo.toml b/psl/parser-database/Cargo.toml index 5ee12731a010..2ae58c8b3850 100644 --- a/psl/parser-database/Cargo.toml +++ b/psl/parser-database/Cargo.toml @@ -6,8 +6,7 @@ edition = "2021" [dependencies] diagnostics = { path = "../diagnostics" } schema-ast = { path = "../schema-ast" } - +indexmap.workspace = true +enumflags2.workspace = true either = "1.6.1" -enumflags2 = "0.7" -indexmap = "1.8.0" rustc-hash = "1.1.0" diff --git a/psl/psl-core/Cargo.toml b/psl/psl-core/Cargo.toml index 64343301c2c7..c7f6a7b22340 100644 --- a/psl/psl-core/Cargo.toml +++ b/psl/psl-core/Cargo.toml @@ -10,18 +10,18 @@ prisma-value = { path = "../../libs/prisma-value" } schema-ast = { path = "../schema-ast" } bigdecimal = "0.3" 
-chrono = { version = "0.4.6", default_features = false } +chrono = { workspace = true } connection-string.workspace = true itertools.workspace = true once_cell = "1.3.1" regex = "1.3.7" serde.workspace = true serde_json.workspace = true -enumflags2 = "0.7" +enumflags2.workspace = true indoc.workspace = true either = "1.8.1" hex = "0.4" # For the connector API. lsp-types = "0.91.1" -url = "2.2.1" +url.workspace = true diff --git a/quaint/Cargo.toml b/quaint/Cargo.toml index d7387df23208..018de045bc4c 100644 --- a/quaint/Cargo.toml +++ b/quaint/Cargo.toml @@ -67,29 +67,29 @@ fmt-sql = ["sqlformat"] [dependencies] connection-string = "0.2" percent-encoding = "2" -tracing = "0.1" +tracing.workspace = true tracing-core = "0.1" -async-trait = "0.1" +async-trait.workspace = true thiserror = "1.0" num_cpus = "1.12" metrics = "0.18" futures = "0.3" -url = "2.1" +url.workspace = true hex = "0.4" itertools.workspace = true either = { version = "1.6" } base64 = { version = "0.12.3" } -chrono = { version = "0.4", default-features = false, features = ["serde"] } +chrono.workspace = true lru-cache = { version = "0.1", optional = true } -serde_json = { version = "1.0.48", features = ["float_roundtrip"] } +serde_json.workspace = true native-tls = { version = "0.2", optional = true } bit-vec = { version = "0.6.1", optional = true } bytes = { version = "1.0", optional = true } mobc = { version = "0.8", optional = true } serde = { version = "1.0", optional = true } sqlformat = { version = "0.2.3", optional = true } -uuid = { version = "1", features = ["v4"] } +uuid.workspace = true crosstarget-utils = { path = "../libs/crosstarget-utils" } [dev-dependencies] diff --git a/quaint/quaint-test-setup/Cargo.toml b/quaint/quaint-test-setup/Cargo.toml index b7ad87fed8fc..a5ef732f6dfe 100644 --- a/quaint/quaint-test-setup/Cargo.toml +++ b/quaint/quaint-test-setup/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] once_cell = "1.3.1" bitflags = "1.2.1" -async-trait = "0.1" 
+async-trait.workspace = true names = "0.11" tokio = { version = "1.0", features = ["rt-multi-thread"] } quaint = { path = "..", features = ["all"] } diff --git a/query-engine/black-box-tests/Cargo.toml b/query-engine/black-box-tests/Cargo.toml index cc9e99b8ca3c..c5f88c844dc7 100644 --- a/query-engine/black-box-tests/Cargo.toml +++ b/query-engine/black-box-tests/Cargo.toml @@ -8,11 +8,11 @@ query-engine-tests = { path = "../connector-test-kit-rs/query-engine-tests" } query-tests-setup = { path = "../connector-test-kit-rs/query-tests-setup" } reqwest = "0.11" anyhow = "1.0" -serde_json = "1.0" +serde_json.workspace = true indoc.workspace = true tokio.workspace = true user-facing-errors.workspace = true insta = "1.7.1" -enumflags2 = "0.7" +enumflags2.workspace = true query-engine-metrics = {path = "../metrics"} regex = "1.9.3" diff --git a/query-engine/connector-test-kit-rs/qe-setup/Cargo.toml b/query-engine/connector-test-kit-rs/qe-setup/Cargo.toml index f13c3a6e9487..322c9559c6f8 100644 --- a/query-engine/connector-test-kit-rs/qe-setup/Cargo.toml +++ b/query-engine/connector-test-kit-rs/qe-setup/Cargo.toml @@ -10,9 +10,9 @@ mongodb-client = { path = "../../../libs/mongodb-client" } schema-core = { path = "../../../schema-engine/core" } sql-schema-connector = { path = "../../../schema-engine/connectors/sql-schema-connector" } test-setup = { path = "../../../libs/test-setup" } +enumflags2.workspace = true connection-string = "*" -enumflags2 = "*" mongodb = "2.8.0" -url = "2" +url.workspace = true once_cell = "1.17.0" diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/Cargo.toml b/query-engine/connector-test-kit-rs/query-engine-tests/Cargo.toml index 488de7ac4240..2ac097a7a187 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/Cargo.toml +++ b/query-engine/connector-test-kit-rs/query-engine-tests/Cargo.toml @@ -4,16 +4,16 @@ name = "query-engine-tests" version = "0.1.0" [dependencies] -enumflags2 = "0.7" +enumflags2.workspace = true 
anyhow = "1.0" -serde_json = "1.0" +serde_json.workspace = true query-test-macros = { path = "../query-test-macros" } query-tests-setup = { path = "../query-tests-setup" } indoc.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-futures = "0.2" colored = "2" -chrono = "0.4" +chrono.workspace = true psl.workspace = true base64 = "0.13" uuid.workspace = true diff --git a/query-engine/connector-test-kit-rs/query-tests-setup/Cargo.toml b/query-engine/connector-test-kit-rs/query-tests-setup/Cargo.toml index e0f33b626b25..cd8abc07331c 100644 --- a/query-engine/connector-test-kit-rs/query-tests-setup/Cargo.toml +++ b/query-engine/connector-test-kit-rs/query-tests-setup/Cargo.toml @@ -16,20 +16,20 @@ query-engine = { path = "../../query-engine" } psl.workspace = true user-facing-errors = { path = "../../../libs/user-facing-errors" } thiserror = "1.0" -async-trait = "0.1" +async-trait.workspace = true nom = "7.1" itertools.workspace = true regex = "1" serde.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-futures = "0.2" tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } tracing-error = "0.2" colored = "2" indoc.workspace = true -enumflags2 = "0.7" +enumflags2.workspace = true hyper = { version = "0.14", features = ["full"] } -indexmap = { version = "1.0", features = ["serde-1"] } +indexmap.workspace = true query-engine-metrics = { path = "../../metrics" } quaint.workspace = true jsonrpc-core = "17" diff --git a/query-engine/connectors/mongodb-query-connector/Cargo.toml b/query-engine/connectors/mongodb-query-connector/Cargo.toml index ce7aeae89119..8c801f803550 100644 --- a/query-engine/connectors/mongodb-query-connector/Cargo.toml +++ b/query-engine/connectors/mongodb-query-connector/Cargo.toml @@ -5,22 +5,22 @@ version = "0.1.0" [dependencies] anyhow = "1.0" -async-trait = "0.1" +async-trait.workspace = true bigdecimal = "0.3" # bson = {version = "1.1.0", features = ["decimal128"]} futures = "0.3" 
itertools.workspace = true mongodb = "2.8.0" bson = { version = "2.4.0", features = ["chrono-0_4", "uuid-1"] } -rand = "0.7" +rand.workspace = true regex = "1" -serde_json = { version = "1.0", features = ["float_roundtrip"] } +serde_json.workspace = true thiserror = "1.0" tokio.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-futures = "0.2" uuid.workspace = true -indexmap = "1.7" +indexmap.workspace = true query-engine-metrics = { path = "../../metrics" } cuid = { git = "https://github.com/prisma/cuid-rust", branch = "wasm32-support" } derive_more = "0.99.17" diff --git a/query-engine/connectors/query-connector/Cargo.toml b/query-engine/connectors/query-connector/Cargo.toml index 651cbd027e92..52555d256baa 100644 --- a/query-engine/connectors/query-connector/Cargo.toml +++ b/query-engine/connectors/query-connector/Cargo.toml @@ -5,8 +5,8 @@ version = "0.1.0" [dependencies] anyhow = "1.0" -async-trait = "0.1.31" -chrono = {version = "0.4", features = ["serde"]} +async-trait.workspace = true +chrono.workspace = true futures = "0.3" itertools.workspace = true query-structure = {path = "../../query-structure"} @@ -15,5 +15,5 @@ serde.workspace = true serde_json.workspace = true thiserror = "1.0" user-facing-errors = {path = "../../../libs/user-facing-errors", features = ["sql"]} -uuid = "1" -indexmap = "1.7" +uuid.workspace = true +indexmap.workspace = true diff --git a/query-engine/connectors/query-connector/src/write_args.rs b/query-engine/connectors/query-connector/src/write_args.rs index c881ee9c2cfa..445037bdbbe2 100644 --- a/query-engine/connectors/query-connector/src/write_args.rs +++ b/query-engine/connectors/query-connector/src/write_args.rs @@ -376,7 +376,7 @@ impl WriteArgs { } pub fn take_field_value(&mut self, field: &str) -> Option { - self.args.remove(field) + self.args.swap_remove(field) } pub fn keys(&self) -> Keys { diff --git a/query-engine/connectors/sql-query-connector/Cargo.toml 
b/query-engine/connectors/sql-query-connector/Cargo.toml index aae27b671c44..7a4baeefe678 100644 --- a/query-engine/connectors/sql-query-connector/Cargo.toml +++ b/query-engine/connectors/sql-query-connector/Cargo.toml @@ -16,16 +16,16 @@ driver-adapters = [] [dependencies] psl.workspace = true anyhow = "1.0" -async-trait = "0.1" +async-trait.workspace = true bigdecimal = "0.3" futures = "0.3" itertools.workspace = true once_cell = "1.3" -rand = "0.7" -serde_json = { version = "1.0", features = ["float_roundtrip"] } +rand.workspace = true +serde_json.workspace = true thiserror = "1.0" tokio = { version = "1.0", features = ["macros", "time"] } -tracing = "0.1" +tracing.workspace = true tracing-futures = "0.2" uuid.workspace = true opentelemetry = { version = "0.17", features = ["tokio"] } diff --git a/query-engine/core/Cargo.toml b/query-engine/core/Cargo.toml index 5c516baf8c18..bd5df5ca166e 100644 --- a/query-engine/core/Cargo.toml +++ b/query-engine/core/Cargo.toml @@ -8,15 +8,15 @@ metrics = ["query-engine-metrics"] graphql-protocol = [] [dependencies] -async-trait = "0.1" +async-trait.workspace = true bigdecimal = "0.3" -chrono = "0.4" +chrono.workspace = true connection-string.workspace = true connector = { path = "../connectors/query-connector", package = "query-connector" } crossbeam-channel = "0.5.6" psl.workspace = true futures = "0.3" -indexmap = { version = "1.7", features = ["serde-1"] } +indexmap.workspace = true itertools.workspace = true once_cell = "1" petgraph = "0.4" @@ -29,14 +29,14 @@ serde.workspace = true serde_json.workspace = true thiserror = "1.0" tokio = { version = "1.0", features = ["macros", "time"] } -tracing = { version = "0.1", features = ["attributes"] } +tracing = { workspace = true, features = ["attributes"] } tracing-futures = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-opentelemetry = "0.17.4" user-facing-errors = { path = "../../libs/user-facing-errors" } -uuid = "1" +uuid.workspace = true 
cuid = { git = "https://github.com/prisma/cuid-rust", branch = "wasm32-support" } schema = { path = "../schema" } crosstarget-utils = { path = "../../libs/crosstarget-utils" } lru = "0.7.7" -enumflags2 = "0.7" +enumflags2.workspace = true diff --git a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs index cb9e4e7f8025..c87da451ff2b 100644 --- a/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs +++ b/query-engine/core/src/query_graph_builder/extractors/filters/mod.rs @@ -73,7 +73,7 @@ fn handle_compound_field(fields: Vec, value: ParsedInputValue<'_ let filters: Vec = fields .into_iter() .map(|sf| { - let pv: PrismaValue = input_map.remove(sf.name()).unwrap().try_into()?; + let pv: PrismaValue = input_map.swap_remove(sf.name()).unwrap().try_into()?; Ok(sf.equals(pv)) }) .collect::>>()?; @@ -275,7 +275,7 @@ fn extract_scalar_filters(field: &ScalarFieldRef, value: ParsedInputValue<'_>) - match value { ParsedInputValue::Single(pv) => Ok(vec![field.equals(pv)]), ParsedInputValue::Map(mut filter_map) => { - let mode = match filter_map.remove(filters::MODE) { + let mode = match filter_map.swap_remove(filters::MODE) { Some(i) => parse_query_mode(i)?, None => QueryMode::Default, }; diff --git a/query-engine/core/src/query_graph_builder/extractors/filters/scalar.rs b/query-engine/core/src/query_graph_builder/extractors/filters/scalar.rs index ac84ce06aa21..aec2932cbe37 100644 --- a/query-engine/core/src/query_graph_builder/extractors/filters/scalar.rs +++ b/query-engine/core/src/query_graph_builder/extractors/filters/scalar.rs @@ -42,7 +42,7 @@ impl<'a> ScalarFilterParser<'a> { } pub fn parse(&self, mut filter_map: ParsedInputMap<'_>) -> QueryGraphBuilderResult> { - let json_path: Option = match filter_map.remove(filters::PATH) { + let json_path: Option = match filter_map.swap_remove(filters::PATH) { Some(v) => Some(parse_json_path(v)?), _ => None, }; @@ -421,11 +421,11 
@@ impl<'a> ScalarFilterParser<'a> { match input { ParsedInputValue::Map(mut map) => { - let field_ref_name = map.remove(filters::UNDERSCORE_REF).unwrap(); + let field_ref_name = map.swap_remove(filters::UNDERSCORE_REF).unwrap(); let field_ref_name = PrismaValue::try_from(field_ref_name)?.into_string().unwrap(); let field_ref = field.container().find_field(&field_ref_name); - let container_ref_name = map.remove(filters::UNDERSCORE_CONTAINER).unwrap(); + let container_ref_name = map.swap_remove(filters::UNDERSCORE_CONTAINER).unwrap(); let container_ref_name = PrismaValue::try_from(container_ref_name)?.into_string().unwrap(); if container_ref_name != field.container().name() { @@ -499,7 +499,7 @@ impl<'a> ScalarFilterParser<'a> { match input { ParsedInputValue::Map(mut map) => { - let field_ref_name = map.remove(filters::UNDERSCORE_REF).unwrap(); + let field_ref_name = map.swap_remove(filters::UNDERSCORE_REF).unwrap(); let field_ref_name = PrismaValue::try_from(field_ref_name)?.into_string().unwrap(); let field_ref = field.container().find_field(&field_ref_name); diff --git a/query-engine/core/src/query_graph_builder/extractors/query_arguments.rs b/query-engine/core/src/query_graph_builder/extractors/query_arguments.rs index 87da325e2ce2..38f166fabc56 100644 --- a/query-engine/core/src/query_graph_builder/extractors/query_arguments.rs +++ b/query-engine/core/src/query_graph_builder/extractors/query_arguments.rs @@ -217,10 +217,10 @@ fn extract_order_by_args( ) -> QueryGraphBuilderResult<(SortOrder, Option)> { match field_value { ParsedInputValue::Map(mut map) => { - let sort: PrismaValue = map.remove(ordering::SORT).unwrap().try_into()?; + let sort: PrismaValue = map.swap_remove(ordering::SORT).unwrap().try_into()?; let sort = pv_to_sort_order(sort)?; let nulls = map - .remove(ordering::NULLS) + .swap_remove(ordering::NULLS) .map(PrismaValue::try_from) .transpose()? 
.map(pv_to_nulls_order) @@ -324,7 +324,7 @@ fn extract_compound_cursor_field( let mut pairs = vec![]; for field in fields { - let value = map.remove(field.name()).unwrap(); + let value = map.swap_remove(field.name()).unwrap(); pairs.extend(extract_cursor_field(field, value)?); } diff --git a/query-engine/core/src/query_graph_builder/write/nested/connect_or_create_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/connect_or_create_nested.rs index bcaacc1f5811..df3991e4bba8 100644 --- a/query-engine/core/src/query_graph_builder/write/nested/connect_or_create_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/connect_or_create_nested.rs @@ -98,10 +98,10 @@ fn handle_many_to_many( for value in values { let mut value: ParsedInputMap<'_> = value.try_into()?; - let where_arg = value.remove(args::WHERE).unwrap(); + let where_arg = value.swap_remove(args::WHERE).unwrap(); let where_map: ParsedInputMap<'_> = where_arg.try_into()?; - let create_arg = value.remove(args::CREATE).unwrap(); + let create_arg = value.swap_remove(args::CREATE).unwrap(); let create_map: ParsedInputMap<'_> = create_arg.try_into()?; let filter = extract_unique_filter(where_map, child_model)?; @@ -185,10 +185,10 @@ fn handle_one_to_one( let value = values.pop().unwrap(); let mut value: ParsedInputMap<'_> = value.try_into()?; - let where_arg = value.remove(args::WHERE).unwrap(); + let where_arg = value.swap_remove(args::WHERE).unwrap(); let where_map: ParsedInputMap<'_> = where_arg.try_into()?; - let create_arg = value.remove(args::CREATE).unwrap(); + let create_arg = value.swap_remove(args::CREATE).unwrap(); let create_data: ParsedInputMap<'_> = create_arg.try_into()?; let filter = extract_unique_filter(where_map, child_model)?; @@ -259,10 +259,10 @@ fn one_to_many_inlined_child( let mut value: ParsedInputMap<'_> = value.try_into()?; - let where_arg = value.remove(args::WHERE).unwrap(); + let where_arg = value.swap_remove(args::WHERE).unwrap(); let where_map: 
ParsedInputMap<'_> = where_arg.try_into()?; - let create_arg = value.remove(args::CREATE).unwrap(); + let create_arg = value.swap_remove(args::CREATE).unwrap(); let create_map: ParsedInputMap<'_> = create_arg.try_into()?; let filter = extract_unique_filter(where_map, child_model)?; @@ -398,10 +398,10 @@ fn one_to_many_inlined_parent( let value = values.pop().unwrap(); let mut value: ParsedInputMap<'_> = value.try_into()?; - let where_arg = value.remove(args::WHERE).unwrap(); + let where_arg = value.swap_remove(args::WHERE).unwrap(); let where_map: ParsedInputMap<'_> = where_arg.try_into()?; - let create_arg = value.remove(args::CREATE).unwrap(); + let create_arg = value.swap_remove(args::CREATE).unwrap(); let create_map: ParsedInputMap<'_> = create_arg.try_into()?; let filter = extract_unique_filter(where_map, child_model)?; diff --git a/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs index d0f649c3ecf6..08704ce4a674 100644 --- a/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/create_nested.rs @@ -426,8 +426,8 @@ pub fn nested_create_many( // Nested input is an object of { data: [...], skipDuplicates: bool } let mut obj: ParsedInputMap<'_> = value.try_into()?; - let data_list: ParsedInputList<'_> = utils::coerce_vec(obj.remove(args::DATA).unwrap()); - let skip_duplicates: bool = match obj.remove(args::SKIP_DUPLICATES) { + let data_list: ParsedInputList<'_> = utils::coerce_vec(obj.swap_remove(args::DATA).unwrap()); + let skip_duplicates: bool = match obj.swap_remove(args::SKIP_DUPLICATES) { Some(val) => val.try_into()?, None => false, }; diff --git a/query-engine/core/src/query_graph_builder/write/nested/update_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/update_nested.rs index 78bf69af2f79..735bd0a88c31 100644 --- 
a/query-engine/core/src/query_graph_builder/write/nested/update_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/update_nested.rs @@ -50,17 +50,17 @@ pub fn nested_update( // This is used to read the children first, to make sure they're actually connected. // The update itself operates on the record found by the read check. let mut map: ParsedInputMap<'_> = value.try_into()?; - let where_arg: ParsedInputMap<'_> = map.remove(args::WHERE).unwrap().try_into()?; + let where_arg: ParsedInputMap<'_> = map.swap_remove(args::WHERE).unwrap().try_into()?; let filter = extract_unique_filter(where_arg, child_model)?; - let data_value = map.remove(args::DATA).unwrap(); + let data_value = map.swap_remove(args::DATA).unwrap(); (data_value, filter) } else { match value { // If the update input is of shape { where?: WhereInput, data: DataInput } ParsedInputValue::Map(mut map) if map.is_nested_to_one_update_envelope() => { - let filter = if let Some(where_arg) = map.remove(args::WHERE) { + let filter = if let Some(where_arg) = map.swap_remove(args::WHERE) { let where_arg: ParsedInputMap<'_> = where_arg.try_into()?; extract_filter(where_arg, child_model)? 
@@ -68,7 +68,7 @@ pub fn nested_update( Filter::empty() }; - let data_value = map.remove(args::DATA).unwrap(); + let data_value = map.swap_remove(args::DATA).unwrap(); (data_value, filter) } @@ -131,8 +131,8 @@ pub fn nested_update_many( ) -> QueryGraphBuilderResult<()> { for value in utils::coerce_vec(value) { let mut map: ParsedInputMap<'_> = value.try_into()?; - let where_arg = map.remove(args::WHERE).unwrap(); - let data_value = map.remove(args::DATA).unwrap(); + let where_arg = map.swap_remove(args::WHERE).unwrap(); + let data_value = map.swap_remove(args::DATA).unwrap(); let data_map: ParsedInputMap<'_> = data_value.try_into()?; let where_map: ParsedInputMap<'_> = where_arg.try_into()?; let child_model_identifier = parent_relation_field.related_model().primary_identifier(); diff --git a/query-engine/core/src/query_graph_builder/write/nested/upsert_nested.rs b/query-engine/core/src/query_graph_builder/write/nested/upsert_nested.rs index 0e72e1fa141c..468d13a82d46 100644 --- a/query-engine/core/src/query_graph_builder/write/nested/upsert_nested.rs +++ b/query-engine/core/src/query_graph_builder/write/nested/upsert_nested.rs @@ -107,9 +107,9 @@ pub fn nested_upsert( let child_link = parent_relation_field.related_field().linking_fields(); let mut as_map: ParsedInputMap<'_> = value.try_into()?; - let create_input = as_map.remove(args::CREATE).expect("create argument is missing"); - let update_input = as_map.remove(args::UPDATE).expect("update argument is missing"); - let where_input = as_map.remove(args::WHERE); + let create_input = as_map.swap_remove(args::CREATE).expect("create argument is missing"); + let update_input = as_map.swap_remove(args::UPDATE).expect("update argument is missing"); + let where_input = as_map.swap_remove(args::WHERE); // Read child(ren) node let filter = match (where_input, parent_relation_field.is_list()) { diff --git a/query-engine/core/src/query_graph_builder/write/write_args_parser.rs 
b/query-engine/core/src/query_graph_builder/write/write_args_parser.rs index 255247e4cee9..5e5cc464fa51 100644 --- a/query-engine/core/src/query_graph_builder/write/write_args_parser.rs +++ b/query-engine/core/src/query_graph_builder/write/write_args_parser.rs @@ -162,10 +162,10 @@ fn parse_composite_update_many( mut value: ParsedInputMap<'_>, path: &mut [DatasourceFieldName], ) -> QueryGraphBuilderResult { - let where_map: ParsedInputMap<'_> = value.remove(args::WHERE).unwrap().try_into()?; + let where_map: ParsedInputMap<'_> = value.swap_remove(args::WHERE).unwrap().try_into()?; let filter = extract_filter(where_map, cf.typ())?; - let update_map: ParsedInputMap<'_> = value.remove(args::DATA).unwrap().try_into()?; + let update_map: ParsedInputMap<'_> = value.swap_remove(args::DATA).unwrap().try_into()?; let update = parse_composite_updates(cf, update_map, path)? .try_into_composite() .unwrap(); @@ -177,7 +177,7 @@ fn parse_composite_delete_many( cf: &CompositeFieldRef, mut value: ParsedInputMap<'_>, ) -> QueryGraphBuilderResult { - let where_map: ParsedInputMap<'_> = value.remove(args::WHERE).unwrap().try_into()?; + let where_map: ParsedInputMap<'_> = value.swap_remove(args::WHERE).unwrap().try_into()?; let filter = extract_filter(where_map, cf.typ())?; Ok(WriteOperation::composite_delete_many(filter)) @@ -188,9 +188,9 @@ fn parse_composite_upsert( mut value: ParsedInputMap<'_>, path: &mut Vec, ) -> QueryGraphBuilderResult { - let set = value.remove(operations::SET).unwrap(); + let set = value.swap_remove(operations::SET).unwrap(); let set = parse_composite_writes(cf, set, path)?.try_into_composite().unwrap(); - let update: ParsedInputMap<'_> = value.remove(operations::UPDATE).unwrap().try_into()?; + let update: ParsedInputMap<'_> = value.swap_remove(operations::UPDATE).unwrap().try_into()?; let update = parse_composite_updates(cf, update, path)?.try_into_composite().unwrap(); Ok(WriteOperation::composite_upsert(set, update)) diff --git 
a/query-engine/dmmf/Cargo.toml b/query-engine/dmmf/Cargo.toml index 288fc86357d5..b2ad4fc4c88c 100644 --- a/query-engine/dmmf/Cargo.toml +++ b/query-engine/dmmf/Cargo.toml @@ -9,7 +9,7 @@ psl.workspace = true serde.workspace = true serde_json.workspace = true schema = { path = "../schema" } -indexmap = { version = "1.7", features = ["serde-1"] } +indexmap.workspace = true query-structure = { path = "../query-structure", features = ["default_generators"] } [dev-dependencies] diff --git a/query-engine/driver-adapters/Cargo.toml b/query-engine/driver-adapters/Cargo.toml index e2c051204cf0..5bda20fc10a2 100644 --- a/query-engine/driver-adapters/Cargo.toml +++ b/query-engine/driver-adapters/Cargo.toml @@ -9,14 +9,14 @@ sqlite = ["quaint/sqlite"] postgresql = ["quaint/postgresql"] [dependencies] -async-trait = "0.1" +async-trait.workspace = true once_cell = "1.15" serde.workspace = true serde_json.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-core = "0.1" metrics = "0.18" -uuid = { version = "1", features = ["v4"] } +uuid.workspace = true pin-project = "1" serde_repr.workspace = true diff --git a/query-engine/metrics/Cargo.toml b/query-engine/metrics/Cargo.toml index 2de079895fb9..5593b246c093 100644 --- a/query-engine/metrics/Cargo.toml +++ b/query-engine/metrics/Cargo.toml @@ -9,8 +9,8 @@ metrics-util = "0.12.1" metrics-exporter-prometheus = "0.10.0" once_cell = "1.3" serde.workspace = true -serde_json = "1" -tracing = { version = "0.1" } +serde_json.workspace = true +tracing.workspace = true tracing-futures = "0.2" tracing-subscriber = "0.3.11" parking_lot = "0.12" diff --git a/query-engine/query-engine-node-api/Cargo.toml b/query-engine/query-engine-node-api/Cargo.toml index e477626702fe..7acc8f98336e 100644 --- a/query-engine/query-engine-node-api/Cargo.toml +++ b/query-engine/query-engine-node-api/Cargo.toml @@ -18,7 +18,7 @@ driver-adapters = [ [dependencies] anyhow = "1" -async-trait = "0.1" +async-trait.workspace = true query-core = { path 
= "../core", features = ["metrics"] } request-handlers = { path = "../request-handlers" } query-connector = { path = "../connectors/query-connector" } @@ -37,11 +37,11 @@ napi-derive.workspace = true thiserror = "1" connection-string.workspace = true -url = "2" +url.workspace = true serde_json.workspace = true serde.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-subscriber = { version = "0.3" } tracing-futures = "0.2" tracing-opentelemetry = "0.17.3" diff --git a/query-engine/query-engine-wasm/Cargo.toml b/query-engine/query-engine-wasm/Cargo.toml index 931f04c399f0..89c3b3d7de3e 100644 --- a/query-engine/query-engine-wasm/Cargo.toml +++ b/query-engine/query-engine-wasm/Cargo.toml @@ -18,7 +18,7 @@ mysql = ["driver-adapters/mysql", "sql-connector/mysql"] query-connector = { path = "../connectors/query-connector" } query-engine-common = { path = "../../libs/query-engine-common" } anyhow = "1" -async-trait = "0.1" +async-trait.workspace = true user-facing-errors = { path = "../../libs/user-facing-errors" } psl.workspace = true query-structure = { path = "../query-structure" } @@ -41,12 +41,12 @@ wasm-bindgen-futures.workspace = true wasm-rs-dbg.workspace = true thiserror = "1" -url = "2" +url.workspace = true serde.workspace = true tokio = { version = "1.25", features = ["macros", "sync", "io-util", "time"] } futures = "0.3" -tracing = "0.1" +tracing.workspace = true tracing-subscriber = { version = "0.3" } tracing-futures = "0.2" tracing-opentelemetry = "0.17.3" diff --git a/query-engine/query-engine/Cargo.toml b/query-engine/query-engine/Cargo.toml index c70d8590d0ff..45f433b892ea 100644 --- a/query-engine/query-engine/Cargo.toml +++ b/query-engine/query-engine/Cargo.toml @@ -12,11 +12,11 @@ vendored-openssl = ["sql-connector/vendored-openssl"] [dependencies] tokio.workspace = true anyhow = "1.0" -async-trait = "0.1" +async-trait.workspace = true base64 = "0.13" connection-string.workspace = true connector = { path = 
"../connectors/query-connector", package = "query-connector" } -enumflags2 = { version = "0.7"} +enumflags2.workspace = true psl.workspace = true graphql-parser = { git = "https://github.com/prisma/graphql-parser" } mongodb-connector = { path = "../connectors/mongodb-query-connector", optional = true, package = "mongodb-query-connector" } @@ -27,9 +27,9 @@ serde_json.workspace = true sql-connector = { path = "../connectors/sql-query-connector", optional = true, package = "sql-query-connector" } structopt = "0.3" thiserror = "1.0" -url = "2.1" +url.workspace = true hyper = { version = "0.14", features = ["server", "http1", "http2", "runtime"] } -tracing = "0.1" +tracing.workspace = true tracing-opentelemetry = "0.17.3" tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } opentelemetry = { version = "0.17.0", features = ["rt-tokio"] } diff --git a/query-engine/query-structure/Cargo.toml b/query-engine/query-structure/Cargo.toml index fad0add06ed1..f990c48ffd4b 100644 --- a/query-engine/query-structure/Cargo.toml +++ b/query-engine/query-structure/Cargo.toml @@ -14,7 +14,7 @@ getrandom = { version = "0.2" } uuid = { workspace = true, optional = true } cuid = { git = "https://github.com/prisma/cuid-rust", branch = "wasm32-support", optional = true } nanoid = { version = "0.4.0", optional = true } -chrono = { version = "0.4.6", features = ["serde"] } +chrono.workspace = true [target.'cfg(target_arch = "wasm32")'.dependencies.getrandom] version = "0.2" diff --git a/query-engine/request-handlers/Cargo.toml b/query-engine/request-handlers/Cargo.toml index 4200980202c5..133f670c2b06 100644 --- a/query-engine/request-handlers/Cargo.toml +++ b/query-engine/request-handlers/Cargo.toml @@ -15,11 +15,11 @@ graphql-parser = { git = "https://github.com/prisma/graphql-parser", optional = serde.workspace = true serde_json.workspace = true futures = "0.3" -indexmap = { version = "1.7", features = ["serde-1"] } +indexmap.workspace = true bigdecimal = "0.3" 
thiserror = "1" -tracing = "0.1" -url = "2" +tracing.workspace = true +url.workspace = true connection-string.workspace = true once_cell = "1.15" diff --git a/query-engine/request-handlers/src/response.rs b/query-engine/request-handlers/src/response.rs index a196daade4be..d04f4808721b 100644 --- a/query-engine/request-handlers/src/response.rs +++ b/query-engine/request-handlers/src/response.rs @@ -97,7 +97,7 @@ impl GQLResponse { } pub fn take_data(&mut self, key: impl AsRef) -> Option { - self.data.remove(key.as_ref()) + self.data.swap_remove(key.as_ref()) } pub fn errors(&self) -> impl Iterator { diff --git a/schema-engine/cli/Cargo.toml b/schema-engine/cli/Cargo.toml index e396db738f4f..fcb52f71d60c 100644 --- a/schema-engine/cli/Cargo.toml +++ b/schema-engine/cli/Cargo.toml @@ -15,7 +15,7 @@ structopt = "0.3.8" serde_json.workspace = true serde.workspace = true tokio.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-error = "0.2" tracing-subscriber = { version = "0.3", features = [ "fmt", "json", "time", "env-filter" ] } @@ -23,7 +23,7 @@ tracing-subscriber = { version = "0.3", features = [ "fmt", "json", "time", "env tempfile = "3.1.0" test-setup = { path = "../../libs/test-setup" } test-macros = { path = "../../libs/test-macros" } -url = "2.1.1" +url.workspace = true indoc.workspace = true connection-string.workspace = true expect-test = "1.4.0" diff --git a/schema-engine/connectors/mongodb-schema-connector/Cargo.toml b/schema-engine/connectors/mongodb-schema-connector/Cargo.toml index 12a4a91ac5ca..7157ba122fc6 100644 --- a/schema-engine/connectors/mongodb-schema-connector/Cargo.toml +++ b/schema-engine/connectors/mongodb-schema-connector/Cargo.toml @@ -11,12 +11,12 @@ datamodel-renderer = { path = "../../datamodel-renderer" } schema-connector = { path = "../schema-connector" } user-facing-errors = { path = "../../../libs/user-facing-errors" } -enumflags2 = "0.7" +enumflags2.workspace = true futures = "0.3" mongodb = "2.8.0" -serde_json = 
"1" +serde_json.workspace = true tokio.workspace = true -tracing = "0.1" +tracing.workspace = true convert_case = "0.6.0" once_cell = "1.8.0" regex = "1.7.3" @@ -26,6 +26,6 @@ indoc.workspace = true serde.workspace = true dissimilar = "1.0.3" once_cell = "1.8.0" -url = "2" +url.workspace = true expect-test = "1" names = { version = "0.12", default-features = false } diff --git a/schema-engine/connectors/schema-connector/Cargo.toml b/schema-engine/connectors/schema-connector/Cargo.toml index b75f95aa8c45..18bbc0059874 100644 --- a/schema-engine/connectors/schema-connector/Cargo.toml +++ b/schema-engine/connectors/schema-connector/Cargo.toml @@ -10,8 +10,8 @@ serde.workspace = true serde_json.workspace = true user-facing-errors = { path = "../../../libs/user-facing-errors" } -chrono = "0.4" -enumflags2 = "0.7" +chrono.workspace = true +enumflags2.workspace = true sha2 = "0.9.1" -tracing = "0.1" +tracing.workspace = true tracing-error = "0.2" diff --git a/schema-engine/connectors/sql-schema-connector/Cargo.toml b/schema-engine/connectors/sql-schema-connector/Cargo.toml index 767014b22bf8..3127ed51d16c 100644 --- a/schema-engine/connectors/sql-schema-connector/Cargo.toml +++ b/schema-engine/connectors/sql-schema-connector/Cargo.toml @@ -12,7 +12,7 @@ quaint.workspace = true tokio.workspace = true serde.workspace = true indoc.workspace = true -uuid = { workspace = true, features = ["v4"] } +uuid.workspace = true prisma-value = { path = "../../../libs/prisma-value" } schema-connector = { path = "../schema-connector" } @@ -21,15 +21,15 @@ datamodel-renderer = { path = "../../datamodel-renderer" } sql-ddl = { path = "../../../libs/sql-ddl" } user-facing-errors = { path = "../../../libs/user-facing-errors", features = ["sql"] } -chrono = { version = "0.4" } +chrono.workspace = true connection-string.workspace = true -enumflags2 = "0.7.7" +enumflags2.workspace = true once_cell = "1.3" regex = "1" -serde_json = { version = "1.0" } -tracing = "0.1" +serde_json.workspace = true 
+tracing.workspace = true tracing-futures = "0.2" -url = "2.1.1" +url.workspace = true either = "1.6" sqlformat = "0.2.1" sqlparser = "0.32.0" diff --git a/schema-engine/core/Cargo.toml b/schema-engine/core/Cargo.toml index ac296bf6f143..215a4a7e8e97 100644 --- a/schema-engine/core/Cargo.toml +++ b/schema-engine/core/Cargo.toml @@ -10,17 +10,17 @@ mongodb-schema-connector = { path = "../connectors/mongodb-schema-connector" } sql-schema-connector = { path = "../connectors/sql-schema-connector" } user-facing-errors = { path = "../../libs/user-facing-errors" } -async-trait = "0.1.17" -chrono = { version = "0.4", features = ["serde"] } -enumflags2 = "0.7.7" +async-trait.workspace = true +chrono.workspace = true +enumflags2.workspace = true jsonrpc-core = "17.0" serde.workspace = true serde_json.workspace = true tokio.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-subscriber = "0.3" tracing-futures = "0.2" -url = "2.1.1" +url.workspace = true [build-dependencies] json-rpc-api-build = { path = "../json-rpc-api-build" } diff --git a/schema-engine/sql-introspection-tests/Cargo.toml b/schema-engine/sql-introspection-tests/Cargo.toml index fd04c30184cf..8ec7f33f6aea 100644 --- a/schema-engine/sql-introspection-tests/Cargo.toml +++ b/schema-engine/sql-introspection-tests/Cargo.toml @@ -12,15 +12,15 @@ test-macros = { path = "../../libs/test-macros" } user-facing-errors = { path = "../../libs/user-facing-errors" } test-setup = { path = "../../libs/test-setup" } -enumflags2 = "0.7" +enumflags2.workspace = true connection-string.workspace = true pretty_assertions = "1" tracing-futures = "0.2" tokio.workspace = true -tracing = "0.1" +tracing.workspace = true indoc.workspace = true expect-test = "1.1.0" -url = "2" +url.workspace = true quaint.workspace = true [dependencies.barrel] diff --git a/schema-engine/sql-migration-tests/Cargo.toml b/schema-engine/sql-migration-tests/Cargo.toml index 1f7f56b06a8c..c3dbebab0432 100644 --- 
a/schema-engine/sql-migration-tests/Cargo.toml +++ b/schema-engine/sql-migration-tests/Cargo.toml @@ -14,10 +14,10 @@ test-setup = { path = "../../libs/test-setup" } prisma-value = { path = "../../libs/prisma-value" } bigdecimal = "0.3" -chrono = "0.4.15" +chrono.workspace = true colored = "2" connection-string.workspace = true -enumflags2 = "0.7" +enumflags2.workspace = true expect-test = "1.1.0" indoc.workspace = true jsonrpc-core = "17.0.0" @@ -27,7 +27,7 @@ serde.workspace = true serde_json.workspace = true tempfile = "3.1.0" tokio.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-futures = "0.2" -url = "2.1.1" +url.workspace = true quaint.workspace = true diff --git a/schema-engine/sql-schema-describer/Cargo.toml b/schema-engine/sql-schema-describer/Cargo.toml index 51d892b0018d..8bfdfaad59b9 100644 --- a/schema-engine/sql-schema-describer/Cargo.toml +++ b/schema-engine/sql-schema-describer/Cargo.toml @@ -7,15 +7,15 @@ version = "0.1.0" prisma-value = { path = "../../libs/prisma-value" } psl.workspace = true -async-trait = "0.1.17" +async-trait.workspace = true bigdecimal = "0.3" -enumflags2 = { version = "0.7", features = ["serde"] } -indexmap = { version = "1.9.1", default_features = false } +enumflags2.workspace = true +indexmap.workspace = true indoc.workspace = true once_cell = "1.3" regex = "1.2" serde.workspace = true -tracing = "0.1" +tracing.workspace = true tracing-error = "0.2" tracing-futures = "0.2" quaint.workspace = true From 3d78035b979b676786e5afa32cbae45604e161ba Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 15 Feb 2024 09:48:49 -0500 Subject: [PATCH 20/25] schema-engine: try force drop in shadow databases (#4722) --- .../src/flavour/postgres.rs | 40 ++++++++++++++++++- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/schema-engine/connectors/sql-schema-connector/src/flavour/postgres.rs b/schema-engine/connectors/sql-schema-connector/src/flavour/postgres.rs index 83c641797872..b67adcb4d0c2 100644 --- 
a/schema-engine/connectors/sql-schema-connector/src/flavour/postgres.rs +++ b/schema-engine/connectors/sql-schema-connector/src/flavour/postgres.rs @@ -98,6 +98,10 @@ impl PostgresFlavour { .unwrap_or(false) } + pub(crate) fn is_postgres(&self) -> bool { + self.provider == PostgresProvider::PostgreSql && !self.is_cockroachdb() + } + pub(crate) fn schema_name(&self) -> &str { self.state.params().map(|p| p.url.schema()).unwrap_or("public") } @@ -430,6 +434,7 @@ impl SqlFlavour for PostgresFlavour { shadow_db::sql_schema_from_migrations_history(migrations, shadow_database, namespaces).await }), None => { + let is_postgres = self.is_postgres(); with_connection(self, move |params, _circumstances, main_connection| async move { let shadow_database_name = crate::new_shadow_database_name(); @@ -462,8 +467,12 @@ impl SqlFlavour for PostgresFlavour { let ret = shadow_db::sql_schema_from_migrations_history(migrations, shadow_database, namespaces).await; - let drop_database = format!("DROP DATABASE IF EXISTS \"{shadow_database_name}\""); - main_connection.raw_cmd(&drop_database, ¶ms.url).await?; + if is_postgres { + drop_db_try_force(main_connection, ¶ms.url, &shadow_database_name).await?; + } else { + let drop_database = format!("DROP DATABASE IF EXISTS \"{shadow_database_name}\""); + main_connection.raw_cmd(&drop_database, ¶ms.url).await?; + } ret }) @@ -482,6 +491,33 @@ impl SqlFlavour for PostgresFlavour { } } +/// Drop a database using `WITH (FORCE)` syntax. +/// +/// When drop database is routed through pgbouncer, the database may still be used in other pooled connections. +/// In this case, given that we (as a user) know the database will not be used any more, we can forcefully drop +/// the database. Note that `with (force)` is added in Postgres 13, and therefore we will need to +/// fallback to the normal drop if it errors with syntax error. +/// +/// TL;DR, +/// 1. pg >= 13 -> it works. +/// 2. 
pg < 13 -> syntax error on WITH (FORCE), and then fail with db in use if pgbouncer is used. +async fn drop_db_try_force(conn: &mut Connection, url: &PostgresUrl, database_name: &str) -> ConnectorResult<()> { + let drop_database = format!("DROP DATABASE IF EXISTS \"{database_name}\" WITH (FORCE)"); + if let Err(err) = conn.raw_cmd(&drop_database, url).await { + if let Some(msg) = err.message() { + if msg.contains("syntax error") { + let drop_database_alt = format!("DROP DATABASE IF EXISTS \"{database_name}\""); + conn.raw_cmd(&drop_database_alt, url).await?; + } else { + return Err(err); + } + } else { + return Err(err); + } + } + Ok(()) +} + fn strip_schema_param_from_url(url: &mut Url) { let mut params: HashMap = url.query_pairs().into_owned().collect(); params.remove("schema"); From ca42e86a7b1bc9a4d32728da72fc42feaa05f011 Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Fri, 16 Feb 2024 14:13:43 +0100 Subject: [PATCH 21/25] feat: exclude MySQL < 8.0.14 from joins (#4704) --- Cargo.lock | 39 ++++---- Makefile | 6 ++ docker-compose.yml | 13 +++ .../mysql_datamodel_connector.rs | 13 ++- psl/psl-core/src/datamodel_connector.rs | 30 +++++++ quaint/src/connector/connection_info.rs | 20 +++++ quaint/src/connector/mysql/native/mod.rs | 12 ++- quaint/src/connector/mysql/url.rs | 15 +++- quaint/src/connector/native.rs | 9 ++ .../tests/new/relation_load_strategy.rs | 89 +++++++++++++------ .../src/connector_tag/mod.rs | 5 -- .../src/datamodel_rendering/mod.rs | 17 ++-- .../query-tests-setup/src/runner/mod.rs | 26 ++++-- .../src/interface/connection.rs | 4 + .../src/interface/transaction.rs | 4 + .../connectors/query-connector/src/error.rs | 3 + .../query-connector/src/interface.rs | 4 + .../src/database/connection.rs | 8 +- .../sql-query-connector/src/database/js.rs | 2 +- .../src/database/native/mssql.rs | 2 +- .../src/database/native/mysql.rs | 8 +- .../src/database/native/postgresql.rs | 2 +- .../src/database/native/sqlite.rs | 2 +- .../src/database/transaction.rs 
| 4 + query-engine/core/src/lib.rs | 1 + .../core/src/query_graph_builder/read/many.rs | 2 +- .../core/src/query_graph_builder/read/one.rs | 2 +- .../src/query_graph_builder/read/utils.rs | 47 ++++++++-- .../core/src/relation_load_strategy.rs | 70 +++++++++++++++ .../query-engine-node-api/src/engine.rs | 22 ++--- .../query-engine-wasm/src/wasm/engine.rs | 11 ++- query-engine/query-engine/src/context.rs | 23 +++-- .../query-structure/src/query_arguments.rs | 1 + query-engine/schema/src/build.rs | 2 + query-engine/schema/src/build/enum_types.rs | 2 +- query-engine/schema/src/query_schema.rs | 40 ++++++++- 36 files changed, 431 insertions(+), 129 deletions(-) create mode 100644 query-engine/core/src/relation_load_strategy.rs diff --git a/Cargo.lock b/Cargo.lock index 446a80ea2891..025ea5a9789e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -360,7 +360,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" dependencies = [ "borsh-derive", - "hashbrown 0.13.2", + "hashbrown 0.12.3", ] [[package]] @@ -1624,15 +1624,6 @@ dependencies = [ "ahash 0.7.6", ] -[[package]] -name = "hashbrown" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash 0.8.7", -] - [[package]] name = "hashbrown" version = "0.14.3" @@ -2266,9 +2257,9 @@ checksum = "7e6bcd6433cff03a4bfc3d9834d504467db1f1cf6d0ea765d37d330249ed629d" [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -2554,7 +2545,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "mysql_async" 
version = "0.31.3" -source = "git+https://github.com/prisma/mysql_async?branch=vendored-openssl#dad187b50dc7e8ce2b61fec126822e8e172a9c8a" +source = "git+https://github.com/prisma/mysql_async?branch=vendored-openssl#0d40d0d2c332fc97512bff81e82e62002f03c224" dependencies = [ "bytes", "crossbeam", @@ -2563,6 +2554,7 @@ dependencies = [ "futures-sink", "futures-util", "lazy_static", + "lexical", "lru 0.8.1", "mio", "mysql_common", @@ -2572,6 +2564,7 @@ dependencies = [ "percent-encoding", "pin-project", "priority-queue", + "regex", "serde", "serde_json", "socket2 0.4.9", @@ -4131,14 +4124,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick 1.0.3", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.5", + "regex-syntax 0.8.2", ] [[package]] @@ -4152,13 +4145,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick 1.0.3", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -4169,9 +4162,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rend" @@ -5847,7 +5840,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] diff --git a/Makefile b/Makefile index b6921cc24f04..e4764c48b9a5 100644 --- a/Makefile +++ b/Makefile @@ -287,9 +287,15 @@ dev-mysql8: start-mysql_8 start-mysql_mariadb: docker compose -f docker-compose.yml up --wait -d --remove-orphans mariadb-10-0 +start-mysql_mariadb_11: + docker compose -f docker-compose.yml up --wait -d --remove-orphans mariadb-11-0 + dev-mariadb: start-mysql_mariadb cp $(CONFIG_PATH)/mariadb $(CONFIG_FILE) +dev-mariadb11: start-mysql_mariadb_11 + cp $(CONFIG_PATH)/mariadb $(CONFIG_FILE) + start-mssql_2019: docker compose -f docker-compose.yml up --wait -d --remove-orphans mssql-2019 diff --git a/docker-compose.yml b/docker-compose.yml index 46873b432357..f71231ad89de 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -294,6 +294,19 @@ services: - databases tmpfs: /var/lib/mariadb + mariadb-11-0: + image: mariadb:11 + restart: unless-stopped + environment: + MYSQL_USER: root + MYSQL_ROOT_PASSWORD: prisma + MYSQL_DATABASE: prisma + ports: + - '3308:3306' + networks: + - databases + tmpfs: /var/lib/mariadb + vitess-test-8_0: image: vitess/vttestserver:mysql80@sha256:53a2d2f58ecf8e6cf984c725612f7651c4fc7ac9bc7d198dbd9964d50e28b9a2 restart: unless-stopped diff --git a/psl/psl-core/src/builtin_connectors/mysql_datamodel_connector.rs b/psl/psl-core/src/builtin_connectors/mysql_datamodel_connector.rs index a44a2639e430..75db8d2c870b 100644 --- a/psl/psl-core/src/builtin_connectors/mysql_datamodel_connector.rs +++ b/psl/psl-core/src/builtin_connectors/mysql_datamodel_connector.rs @@ -8,8 +8,8 @@ use prisma_value::{decode_bytes, PrismaValueResult}; use super::completions; use crate::{ datamodel_connector::{ - Connector, ConnectorCapabilities, ConnectorCapability, ConstraintScope, Flavour, NativeTypeConstructor, - 
NativeTypeInstance, RelationMode, + Connector, ConnectorCapabilities, ConnectorCapability, ConstraintScope, Flavour, JoinStrategySupport, + NativeTypeConstructor, NativeTypeInstance, RelationMode, }, diagnostics::{DatamodelError, Diagnostics, Span}, parser_database::{walkers, ReferentialAction, ScalarType}, @@ -316,4 +316,13 @@ impl Connector for MySqlDatamodelConnector { fn parse_json_bytes(&self, str: &str, _nt: Option) -> PrismaValueResult> { decode_bytes(str) } + + fn runtime_join_strategy_support(&self) -> JoinStrategySupport { + match self.static_join_strategy_support() { + // Prior to MySQL 8.0.14 and for MariaDB, a derived table cannot contain outer references. + // Source: https://dev.mysql.com/doc/refman/8.0/en/derived-tables.html. + true => JoinStrategySupport::UnknownYet, + false => JoinStrategySupport::No, + } + } } diff --git a/psl/psl-core/src/datamodel_connector.rs b/psl/psl-core/src/datamodel_connector.rs index 107abd24710f..b2c539036a9f 100644 --- a/psl/psl-core/src/datamodel_connector.rs +++ b/psl/psl-core/src/datamodel_connector.rs @@ -329,6 +329,20 @@ pub trait Connector: Send + Sync { ) -> prisma_value::PrismaValueResult> { unreachable!("This method is only implemented on connectors with lateral join support.") } + + fn static_join_strategy_support(&self) -> bool { + self.has_capability(ConnectorCapability::LateralJoin) + || self.has_capability(ConnectorCapability::CorrelatedSubqueries) + } + + /// Returns whether the connector supports the `RelationLoadStrategy::Join`. + /// On some connectors, this might return `UnknownYet`. + fn runtime_join_strategy_support(&self) -> JoinStrategySupport { + match self.static_join_strategy_support() { + true => JoinStrategySupport::Yes, + false => JoinStrategySupport::No, + } + } } #[derive(Copy, Clone, Debug, PartialEq)] @@ -406,3 +420,19 @@ impl ConstraintScope { } } } + +/// Describes whether a connector supports relation join strategy. 
+#[derive(Debug, Copy, Clone)] +pub enum JoinStrategySupport { + /// The connector supports it. + Yes, + /// The connector supports it but the specific database version does not. + /// This state can only be known at runtime by checking the actual database version. + UnsupportedDbVersion, + /// The connector does not support it. + No, + /// The connector may or may not support it. Additional runtime information is required to determine the support. + /// This state is used when the connector does not have a static capability to determine the support. + /// For example, the MySQL connector supports relation join strategy, but only for versions >= 8.0.14. + UnknownYet, +} diff --git a/quaint/src/connector/connection_info.rs b/quaint/src/connector/connection_info.rs index ec41e07a0b24..90b123106d89 100644 --- a/quaint/src/connector/connection_info.rs +++ b/quaint/src/connector/connection_info.rs @@ -255,6 +255,26 @@ impl ConnectionInfo { ConnectionInfo::External(_) => "external".into(), } } + + #[allow(unused_variables)] + pub fn set_version(&mut self, version: Option) { + match self { + #[cfg(not(target_arch = "wasm32"))] + ConnectionInfo::Native(native) => native.set_version(version), + ConnectionInfo::External(_) => (), + } + } + + pub fn version(&self) -> Option<&str> { + match self { + #[cfg(not(target_arch = "wasm32"))] + ConnectionInfo::Native(nt) => match nt { + NativeConnectionInfo::Mysql(m) => m.version(), + _ => None, + }, + ConnectionInfo::External(_) => None, + } + } } /// One of the supported SQL variants. 
diff --git a/quaint/src/connector/mysql/native/mod.rs b/quaint/src/connector/mysql/native/mod.rs index 98feb2649763..4ffdfc88b4cf 100644 --- a/quaint/src/connector/mysql/native/mod.rs +++ b/quaint/src/connector/mysql/native/mod.rs @@ -266,14 +266,12 @@ impl Queryable for Mysql { } async fn version(&self) -> crate::Result> { - let query = r#"SELECT @@GLOBAL.version version"#; - let rows = timeout::socket(self.socket_timeout, self.query_raw(query, &[])).await?; + let guard = self.conn.lock().await; + let (major, minor, patch) = guard.server_version(); + let flavour = if guard.is_mariadb() { "MariaDB" } else { "MySQL" }; + drop(guard); - let version_string = rows - .first() - .and_then(|row| row.get("version").and_then(|version| version.typed.to_string())); - - Ok(version_string) + Ok(Some(format!("{major}.{minor}.{patch}-{flavour}"))) } fn is_healthy(&self) -> bool { diff --git a/quaint/src/connector/mysql/url.rs b/quaint/src/connector/mysql/url.rs index 512f2ba50662..9bbb11c0cb6a 100644 --- a/quaint/src/connector/mysql/url.rs +++ b/quaint/src/connector/mysql/url.rs @@ -13,6 +13,7 @@ use url::{Host, Url}; #[derive(Debug, Clone)] pub struct MysqlUrl { url: Url, + version: Option, pub(crate) query_params: MysqlUrlQueryParams, } @@ -22,7 +23,15 @@ impl MysqlUrl { pub fn new(url: Url) -> Result { let query_params = Self::parse_query_params(&url)?; - Ok(Self { url, query_params }) + Ok(Self { + url, + query_params, + version: None, + }) + } + + pub fn set_version(&mut self, version: Option) { + self.version = version; } /// The bare `Url` to the database. 
@@ -298,6 +307,10 @@ impl MysqlUrl { pub(crate) fn connection_limit(&self) -> Option { self.query_params.connection_limit } + + pub fn version(&self) -> Option<&str> { + self.version.as_deref() + } } #[derive(Debug, Clone)] diff --git a/quaint/src/connector/native.rs b/quaint/src/connector/native.rs index b9cf4b9858e6..4a1a12a2733b 100644 --- a/quaint/src/connector/native.rs +++ b/quaint/src/connector/native.rs @@ -29,3 +29,12 @@ pub enum NativeConnectionInfo { #[cfg(feature = "sqlite")] InMemorySqlite { db_name: String }, } + +impl NativeConnectionInfo { + pub fn set_version(&mut self, version: Option) { + #[cfg(feature = "mysql")] + if let NativeConnectionInfo::Mysql(c) = self { + c.set_version(version); + } + } +} diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/relation_load_strategy.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/relation_load_strategy.rs index a44a2c2c9308..55acc7b30521 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/relation_load_strategy.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/relation_load_strategy.rs @@ -32,7 +32,7 @@ mod relation_load_strategy { .to_owned() } - async fn seed(runner: &mut Runner) -> TestResult<()> { + async fn seed(runner: &Runner) -> TestResult<()> { run_query!( runner, r#" @@ -123,27 +123,43 @@ mod relation_load_strategy { }; } - macro_rules! relation_load_strategy_tests_pair { + macro_rules! relation_load_strategy_tests { ($name:ident, $query:expr, $result:literal) => { - relation_load_strategy_test!( - $name, - join, - $query, - $result, - only(Postgres, CockroachDb, Mysql(8)) - ); - // TODO: Remove Mysql & Vitess exclusions once we are able to have version speficic preview features. - relation_load_strategy_test!( - $name, - query, - $query, - $result, - exclude(Mysql("5.6", "5.7", "mariadb")) - ); + paste::paste! 
{ + relation_load_strategy_test!( + [<$name _lateral>], + join, + $query, + $result, + capabilities(LateralJoin) + ); + relation_load_strategy_test!( + [<$name _subquery>], + join, + $query, + $result, + capabilities(CorrelatedSubqueries), + exclude(Mysql("5.6", "5.7", "mariadb")) + ); + relation_load_strategy_test!( + [<$name _lateral>], + query, + $query, + $result, + capabilities(LateralJoin) + ); + relation_load_strategy_test!( + [<$name _subquery>], + query, + $query, + $result, + capabilities(CorrelatedSubqueries) + ); + } }; } - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( find_many, r#" query { @@ -162,7 +178,7 @@ mod relation_load_strategy { r#"{"data":{"findManyUser":[{"login":"author","posts":[{"title":"first post","comments":[{"author":{"login":"commenter"},"body":"a comment"}]}]},{"login":"commenter","posts":[]}]}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( find_first, r#" query { @@ -186,7 +202,7 @@ mod relation_load_strategy { r#"{"data":{"findFirstUser":{"login":"author","posts":[{"title":"first post","comments":[{"author":{"login":"commenter"},"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( find_first_or_throw, r#" query { @@ -210,7 +226,7 @@ mod relation_load_strategy { r#"{"data":{"findFirstUserOrThrow":{"login":"author","posts":[{"title":"first post","comments":[{"author":{"login":"commenter"},"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( find_unique, r#" query { @@ -234,7 +250,7 @@ mod relation_load_strategy { r#"{"data":{"findUniqueUser":{"login":"author","posts":[{"title":"first post","comments":[{"author":{"login":"commenter"},"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( find_unique_or_throw, r#" query { @@ -258,7 +274,7 @@ mod relation_load_strategy { 
r#"{"data":{"findUniqueUserOrThrow":{"login":"author","posts":[{"title":"first post","comments":[{"author":{"login":"commenter"},"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( create, r#" mutation { @@ -289,7 +305,7 @@ mod relation_load_strategy { r#"{"data":{"createOneUser":{"login":"reader","comments":[{"post":{"title":"first post"},"body":"most insightful indeed!"}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( update, r#" mutation { @@ -313,7 +329,7 @@ mod relation_load_strategy { r#"{"data":{"updateOneUser":{"login":"distinguished author","posts":[{"title":"first post","comments":[{"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( delete, r#" mutation { @@ -334,7 +350,7 @@ mod relation_load_strategy { r#"{"data":{"deleteOneUser":{"login":"author","posts":[{"title":"first post","comments":[{"body":"a comment"}]}]}}}"# ); - relation_load_strategy_tests_pair!( + relation_load_strategy_tests!( upsert, r#" mutation { @@ -450,4 +466,25 @@ mod relation_load_strategy { } "# ); + + #[connector_test(schema(schema), only(Mysql(5.6, 5.7, "mariadb")))] + async fn unsupported_join_strategy(runner: Runner) -> TestResult<()> { + seed(&runner).await?; + + assert_error!( + &runner, + r#"{ findManyUser(relationLoadStrategy: join) { id } }"#, + 2019, + "`relationLoadStrategy: join` is not available for MySQL < 8.0.14 and MariaDB." + ); + + assert_error!( + &runner, + r#"{ findFirstUser(relationLoadStrategy: join) { id } }"#, + 2019, + "`relationLoadStrategy: join` is not available for MySQL < 8.0.14 and MariaDB." 
+ ); + + Ok(()) + } } diff --git a/query-engine/connector-test-kit-rs/query-tests-setup/src/connector_tag/mod.rs b/query-engine/connector-test-kit-rs/query-tests-setup/src/connector_tag/mod.rs index ac07d9b71546..5a2c49eb21c4 100644 --- a/query-engine/connector-test-kit-rs/query-tests-setup/src/connector_tag/mod.rs +++ b/query-engine/connector-test-kit-rs/query-tests-setup/src/connector_tag/mod.rs @@ -326,11 +326,6 @@ impl ConnectorVersion { | Self::Sqlite(Some(SqliteVersion::LibsqlJsWasm)) ) } - - /// Returns `true` if the connector version is [`MySql`]. - pub(crate) fn is_mysql(&self) -> bool { - matches!(self, Self::MySql(..)) - } } impl fmt::Display for ConnectorVersion { diff --git a/query-engine/connector-test-kit-rs/query-tests-setup/src/datamodel_rendering/mod.rs b/query-engine/connector-test-kit-rs/query-tests-setup/src/datamodel_rendering/mod.rs index 5390ee975d89..7295972f9812 100644 --- a/query-engine/connector-test-kit-rs/query-tests-setup/src/datamodel_rendering/mod.rs +++ b/query-engine/connector-test-kit-rs/query-tests-setup/src/datamodel_rendering/mod.rs @@ -4,13 +4,11 @@ mod sql_renderer; pub use mongodb_renderer::*; pub use sql_renderer::*; -use crate::{ - connection_string, templating, ConnectorVersion, DatamodelFragment, IdFragment, M2mFragment, MySqlVersion, CONFIG, -}; +use crate::{connection_string, templating, DatamodelFragment, IdFragment, M2mFragment, CONFIG}; use indoc::indoc; use itertools::Itertools; use once_cell::sync::Lazy; -use psl::{PreviewFeature, ALL_PREVIEW_FEATURES}; +use psl::ALL_PREVIEW_FEATURES; use regex::Regex; /// Test configuration, loaded once at runtime. 
@@ -39,7 +37,7 @@ pub fn render_test_datamodel( isolation_level: Option<&'static str>, ) -> String { let (tag, version) = CONFIG.test_connector().unwrap(); - let preview_features = render_preview_features(excluded_features, &version); + let preview_features = render_preview_features(excluded_features); let is_multi_schema = !db_schemas.is_empty(); @@ -91,13 +89,8 @@ fn process_template(template: String, renderer: Box) -> S }) } -fn render_preview_features(excluded_features: &[&str], version: &ConnectorVersion) -> String { - let mut excluded_features: Vec<_> = excluded_features.iter().map(|f| format!(r#""{f}""#)).collect(); - - // TODO: Remove this once we are able to have version speficic preview features. - if version.is_mysql() && !matches!(version, ConnectorVersion::MySql(Some(MySqlVersion::V8))) { - excluded_features.push(format!(r#""{}""#, PreviewFeature::RelationJoins)); - } +fn render_preview_features(excluded_features: &[&str]) -> String { + let excluded_features: Vec<_> = excluded_features.iter().map(|f| format!(r#""{f}""#)).collect(); ALL_PREVIEW_FEATURES .active_features() diff --git a/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs b/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs index 1dc4eb1a0c44..6fa095a1a15b 100644 --- a/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs +++ b/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs @@ -10,6 +10,7 @@ use crate::{ use colored::Colorize; use query_core::{ protocol::EngineProtocol, + relation_load_strategy, schema::{self, QuerySchemaRef}, QueryExecutor, TransactionOptions, TxId, }; @@ -126,25 +127,34 @@ impl Runner { let datasource = schema.configuration.datasources.first().unwrap(); let url = datasource.load_url(|key| env::var(key).ok()).unwrap(); - let executor = match crate::CONFIG.external_test_executor() { - Some(_) => RunnerExecutor::new_external(&url, &datamodel).await?, - None => RunnerExecutor::Builtin( - 
request_handlers::load_executor( + let (executor, db_version) = match crate::CONFIG.external_test_executor() { + Some(_) => (RunnerExecutor::new_external(&url, &datamodel).await?, None), + None => { + let executor = request_handlers::load_executor( ConnectorKind::Rust { url: url.to_owned(), datasource, }, schema.configuration.preview_features(), ) - .await?, - ), + .await?; + + let connector = executor.primary_connector(); + let conn = connector.get_connection().await.unwrap(); + let database_version = conn.version().await; + + (RunnerExecutor::Builtin(executor), database_version) + } }; - let query_schema: QuerySchemaRef = Arc::new(schema::build(Arc::new(schema), true)); + + let query_schema = schema::build(Arc::new(schema), true).with_db_version_supports_join_strategy( + relation_load_strategy::db_version_supports_joins_strategy(db_version)?, + ); Ok(Self { version: connector_version, executor, - query_schema, + query_schema: Arc::new(query_schema), connector_tag, connection_url: url, current_tx_id: None, diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs index 09cb46eae6fa..ef3d580b9ac8 100644 --- a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs +++ b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs @@ -40,6 +40,10 @@ impl Connection for MongoDbConnection { Ok(tx as Box) } + async fn version(&self) -> Option { + None + } + fn as_connection_like(&mut self) -> &mut dyn ConnectionLike { self } diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs index 4c3b1dfec68f..0f882ee3d6be 100644 --- a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs +++ b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs @@ -60,6 +60,10 @@ impl<'conn> 
Transaction for MongoDbTransaction<'conn> { Ok(()) } + async fn version(&self) -> Option { + None + } + fn as_connection_like(&mut self) -> &mut dyn ConnectionLike { self } diff --git a/query-engine/connectors/query-connector/src/error.rs b/query-engine/connectors/query-connector/src/error.rs index 1d9937ee55aa..d9f7e99688ac 100644 --- a/query-engine/connectors/query-connector/src/error.rs +++ b/query-engine/connectors/query-connector/src/error.rs @@ -287,6 +287,9 @@ pub enum ErrorKind { #[error("Too many DB connections opened: {}", _0)] TooManyConnections(Box), + + #[error("Failed to parse database version: {}. Reason: {}", version, reason)] + UnexpectedDatabaseVersion { version: String, reason: String }, } impl From for ConnectorError { diff --git a/query-engine/connectors/query-connector/src/interface.rs b/query-engine/connectors/query-connector/src/interface.rs index d42d6f0524b7..1368b8a7c247 100644 --- a/query-engine/connectors/query-connector/src/interface.rs +++ b/query-engine/connectors/query-connector/src/interface.rs @@ -24,6 +24,8 @@ pub trait Connection: ConnectionLike { isolation_level: Option, ) -> crate::Result>; + async fn version(&self) -> Option; + /// Explicit upcast. fn as_connection_like(&mut self) -> &mut dyn ConnectionLike; } @@ -33,6 +35,8 @@ pub trait Transaction: ConnectionLike { async fn commit(&mut self) -> crate::Result<()>; async fn rollback(&mut self) -> crate::Result<()>; + async fn version(&self) -> Option; + /// Explicit upcast of self reference. Rusts current vtable layout doesn't allow for an upcast if /// `trait A`, `trait B: A`, so that `Box as Box` works. This is a simple, explicit workaround. 
fn as_connection_like(&mut self) -> &mut dyn ConnectionLike; diff --git a/query-engine/connectors/sql-query-connector/src/database/connection.rs b/query-engine/connectors/sql-query-connector/src/database/connection.rs index 457fb6136b52..ae4ceb5933de 100644 --- a/query-engine/connectors/sql-query-connector/src/database/connection.rs +++ b/query-engine/connectors/sql-query-connector/src/database/connection.rs @@ -26,9 +26,7 @@ impl SqlConnection where C: TransactionCapable + Send + Sync + 'static, { - pub fn new(inner: C, connection_info: &ConnectionInfo, features: psl::PreviewFeatures) -> Self { - let connection_info = connection_info.clone(); - + pub fn new(inner: C, connection_info: ConnectionInfo, features: psl::PreviewFeatures) -> Self { Self { inner, connection_info, @@ -71,6 +69,10 @@ where .await } + async fn version(&self) -> Option { + self.connection_info.version().map(|v| v.to_string()) + } + fn as_connection_like(&mut self) -> &mut dyn ConnectionLike { self } diff --git a/query-engine/connectors/sql-query-connector/src/database/js.rs b/query-engine/connectors/sql-query-connector/src/database/js.rs index a40af53613b1..9badc8659738 100644 --- a/query-engine/connectors/sql-query-connector/src/database/js.rs +++ b/query-engine/connectors/sql-query-connector/src/database/js.rs @@ -41,7 +41,7 @@ impl Js { impl Connector for Js { async fn get_connection<'a>(&'a self) -> connector::Result> { super::catch(&self.connection_info, async move { - let sql_conn = SqlConnection::new(self.connector.clone(), &self.connection_info, self.features); + let sql_conn = SqlConnection::new(self.connector.clone(), self.connection_info.clone(), self.features); Ok(Box::new(sql_conn) as Box) }) .await diff --git a/query-engine/connectors/sql-query-connector/src/database/native/mssql.rs b/query-engine/connectors/sql-query-connector/src/database/native/mssql.rs index daf7cac4baed..9cceeaf32942 100644 --- a/query-engine/connectors/sql-query-connector/src/database/native/mssql.rs +++ 
b/query-engine/connectors/sql-query-connector/src/database/native/mssql.rs @@ -62,7 +62,7 @@ impl Connector for Mssql { async fn get_connection<'a>(&'a self) -> connector::Result> { catch(&self.connection_info, async move { let conn = self.pool.check_out().await.map_err(SqlError::from)?; - let conn = SqlConnection::new(conn, &self.connection_info, self.features); + let conn = SqlConnection::new(conn, self.connection_info.clone(), self.features); Ok(Box::new(conn) as Box) }) diff --git a/query-engine/connectors/sql-query-connector/src/database/native/mysql.rs b/query-engine/connectors/sql-query-connector/src/database/native/mysql.rs index 023c68dc5943..cb290ee0a261 100644 --- a/query-engine/connectors/sql-query-connector/src/database/native/mysql.rs +++ b/query-engine/connectors/sql-query-connector/src/database/native/mysql.rs @@ -6,6 +6,7 @@ use connector_interface::{ error::{ConnectorError, ErrorKind}, Connection, Connector, }; +use quaint::connector::Queryable; use quaint::{pooled::Quaint, prelude::ConnectionInfo}; use std::time::Duration; @@ -69,7 +70,12 @@ impl Connector for Mysql { let runtime_conn = self.pool.check_out().await?; // Note: `runtime_conn` must be `Sized`, as that's required by `TransactionCapable` - let sql_conn = SqlConnection::new(runtime_conn, &self.connection_info, self.features); + let mut conn_info = self.connection_info.clone(); + let db_version = runtime_conn.version().await.unwrap(); + // MySQL has its version grabbed at connection time. We know it's infallible. 
+ conn_info.set_version(db_version); + + let sql_conn = SqlConnection::new(runtime_conn, conn_info, self.features); Ok(Box::new(sql_conn) as Box) }) diff --git a/query-engine/connectors/sql-query-connector/src/database/native/postgresql.rs b/query-engine/connectors/sql-query-connector/src/database/native/postgresql.rs index 7323f4027759..701fa9a1a0c5 100644 --- a/query-engine/connectors/sql-query-connector/src/database/native/postgresql.rs +++ b/query-engine/connectors/sql-query-connector/src/database/native/postgresql.rs @@ -69,7 +69,7 @@ impl Connector for PostgreSql { async fn get_connection<'a>(&'a self) -> connector_interface::Result> { catch(&self.connection_info, async move { let conn = self.pool.check_out().await.map_err(SqlError::from)?; - let conn = SqlConnection::new(conn, &self.connection_info, self.features); + let conn = SqlConnection::new(conn, self.connection_info.clone(), self.features); Ok(Box::new(conn) as Box) }) .await diff --git a/query-engine/connectors/sql-query-connector/src/database/native/sqlite.rs b/query-engine/connectors/sql-query-connector/src/database/native/sqlite.rs index c711ce891736..79b547a82ac8 100644 --- a/query-engine/connectors/sql-query-connector/src/database/native/sqlite.rs +++ b/query-engine/connectors/sql-query-connector/src/database/native/sqlite.rs @@ -82,7 +82,7 @@ impl Connector for Sqlite { async fn get_connection<'a>(&'a self) -> connector::Result> { catch(self.connection_info(), async move { let conn = self.pool.check_out().await.map_err(SqlError::from)?; - let conn = SqlConnection::new(conn, self.connection_info(), self.features); + let conn = SqlConnection::new(conn, self.connection_info().clone(), self.features); Ok(Box::new(conn) as Box) }) diff --git a/query-engine/connectors/sql-query-connector/src/database/transaction.rs b/query-engine/connectors/sql-query-connector/src/database/transaction.rs index c85185c16466..4273e48e745b 100644 --- 
a/query-engine/connectors/sql-query-connector/src/database/transaction.rs +++ b/query-engine/connectors/sql-query-connector/src/database/transaction.rs @@ -56,6 +56,10 @@ impl<'tx> Transaction for SqlConnectorTransaction<'tx> { .await } + async fn version(&self) -> Option { + self.connection_info.version().map(|v| v.to_string()) + } + fn as_connection_like(&mut self) -> &mut dyn ConnectionLike { self } diff --git a/query-engine/core/src/lib.rs b/query-engine/core/src/lib.rs index 219b78753277..bf993d6bce18 100644 --- a/query-engine/core/src/lib.rs +++ b/query-engine/core/src/lib.rs @@ -8,6 +8,7 @@ pub mod executor; pub mod protocol; pub mod query_document; pub mod query_graph_builder; +pub mod relation_load_strategy; pub mod response_ir; pub mod telemetry; diff --git a/query-engine/core/src/query_graph_builder/read/many.rs b/query-engine/core/src/query_graph_builder/read/many.rs index edadeb8814df..75ff04276a79 100644 --- a/query-engine/core/src/query_graph_builder/read/many.rs +++ b/query-engine/core/src/query_graph_builder/read/many.rs @@ -43,7 +43,7 @@ fn find_many_with_options( args.distinct.as_ref(), &nested, query_schema, - ); + )?; Ok(ReadQuery::ManyRecordsQuery(ManyRecordsQuery { name, diff --git a/query-engine/core/src/query_graph_builder/read/one.rs b/query-engine/core/src/query_graph_builder/read/one.rs index a2dd291f6760..a091b6154ec1 100644 --- a/query-engine/core/src/query_graph_builder/read/one.rs +++ b/query-engine/core/src/query_graph_builder/read/one.rs @@ -51,7 +51,7 @@ fn find_unique_with_options( let selected_fields = utils::merge_relation_selections(selected_fields, None, &nested); let relation_load_strategy = - get_relation_load_strategy(requested_rel_load_strategy, None, None, &nested, query_schema); + get_relation_load_strategy(requested_rel_load_strategy, None, None, &nested, query_schema)?; Ok(ReadQuery::RecordQuery(RecordQuery { name, diff --git a/query-engine/core/src/query_graph_builder/read/utils.rs 
b/query-engine/core/src/query_graph_builder/read/utils.rs index 19222baebf7d..42d06b38b0c4 100644 --- a/query-engine/core/src/query_graph_builder/read/utils.rs +++ b/query-engine/core/src/query_graph_builder/read/utils.rs @@ -1,5 +1,6 @@ use super::*; use crate::{ArgumentListLookup, FieldPair, ParsedField, ReadQuery}; +use psl::datamodel_connector::JoinStrategySupport; use query_structure::{prelude::*, RelationLoadStrategy}; use schema::{ constants::{aggregations::*, args}, @@ -256,18 +257,46 @@ pub(crate) fn get_relation_load_strategy( distinct: Option<&FieldSelection>, nested_queries: &[ReadQuery], query_schema: &QuerySchema, -) -> RelationLoadStrategy { - if query_schema.can_resolve_relation_with_joins() - && cursor.is_none() +) -> QueryGraphBuilderResult { + match query_schema.join_strategy_support() { + // Connector and database version supports the `Join` strategy... + JoinStrategySupport::Yes => match requested_strategy { + // But incoming query cannot be resolved with joins. + _ if !query_can_be_resolved_with_joins(cursor, distinct, nested_queries) => { + // So we fallback to the `Query` one. + Ok(RelationLoadStrategy::Query) + } + // But requested strategy is `Query`. + Some(RelationLoadStrategy::Query) => Ok(RelationLoadStrategy::Query), + // And requested strategy is `Join` or there's none selected, in which case the default is still `Join`. + Some(RelationLoadStrategy::Join) | None => Ok(RelationLoadStrategy::Join), + }, + // Connector supports `Join` strategy but database version does not... + JoinStrategySupport::UnsupportedDbVersion => match requested_strategy { + // So we error out if the requested strategy is `Join`. + Some(RelationLoadStrategy::Join) => Err(QueryGraphBuilderError::InputError( + "`relationLoadStrategy: join` is not available for MySQL < 8.0.14 and MariaDB.".into(), + )), + // Otherwise we fallback to the `Query` one. (This makes the default relation load strategy `Query` for database versions that do not support joins.) 
+ Some(RelationLoadStrategy::Query) | None => Ok(RelationLoadStrategy::Query), + }, + // The connector does not support the join strategy so we always fallback to the `Query` one. + JoinStrategySupport::No => Ok(RelationLoadStrategy::Query), + JoinStrategySupport::UnknownYet => { + unreachable!("Connector should have resolved the join strategy support by now.") + } + } +} + +fn query_can_be_resolved_with_joins( + cursor: Option<&SelectionResult>, + distinct: Option<&FieldSelection>, + nested_queries: &[ReadQuery], +) -> bool { + cursor.is_none() && distinct.is_none() && !nested_queries.iter().any(|q| match q { ReadQuery::RelatedRecordsQuery(q) => q.has_cursor() || q.has_distinct(), _ => false, }) - && requested_strategy != Some(RelationLoadStrategy::Query) - { - RelationLoadStrategy::Join - } else { - RelationLoadStrategy::Query - } } diff --git a/query-engine/core/src/relation_load_strategy.rs b/query-engine/core/src/relation_load_strategy.rs new file mode 100644 index 000000000000..b515126f504f --- /dev/null +++ b/query-engine/core/src/relation_load_strategy.rs @@ -0,0 +1,70 @@ +use connector::error::{ConnectorError, ErrorKind}; + +use crate::CoreError; + +/// Returns whether the database supports joins given its version. +/// Only versions of the MySQL connector are currently parsed at runtime. +pub fn db_version_supports_joins_strategy(db_version: Option) -> crate::Result { + DatabaseVersion::try_from(db_version.as_deref()).map(|version| version.supports_join_relation_load_strategy()) +} + +/// Parsed database version. +#[derive(Debug)] +enum DatabaseVersion { + Mysql(u16, u16, u16), + Mariadb, + Unknown, +} + +impl DatabaseVersion { + /// Returns whether the database supports joins given its version. + /// Only versions of the MySQL connector are currently parsed at runtime. + pub(crate) fn supports_join_relation_load_strategy(&self) -> bool { + match self { + // Prior to MySQL 8.0.14, a derived table cannot contain outer references. 
+ // Source: https://dev.mysql.com/doc/refman/8.0/en/derived-tables.html + DatabaseVersion::Mysql(major, minor, patch) => (*major, *minor, *patch) >= (8, 0, 14), + DatabaseVersion::Mariadb => false, + DatabaseVersion::Unknown => true, + } + } +} + +impl TryFrom> for DatabaseVersion { + type Error = crate::CoreError; + + fn try_from(version: Option<&str>) -> crate::Result { + match version { + Some(version) => { + let build_err = |reason: &str| { + CoreError::ConnectorError(ConnectorError::from_kind(ErrorKind::UnexpectedDatabaseVersion { + version: version.into(), + reason: reason.into(), + })) + }; + + let mut iter = version.split('-'); + + let version = iter.next().ok_or_else(|| build_err("Missing version"))?; + let is_mariadb = iter.next().map(|s| s.contains("MariaDB")).unwrap_or(false); + + if is_mariadb { + return Ok(DatabaseVersion::Mariadb); + } + + let mut version_iter = version.split('.'); + + let major = version_iter.next().ok_or_else(|| build_err("Missing major version"))?; + let minor = version_iter.next().ok_or_else(|| build_err("Missing minor version"))?; + let patch = version_iter.next().ok_or_else(|| build_err("Missing patch version"))?; + + let parsed_major = major.parse().map_err(|_| build_err("Major version is not a number"))?; + let parsed_minor = minor.parse().map_err(|_| build_err("Minor version is not a number"))?; + let parsed_patch = patch.parse().map_err(|_| build_err("Patch version is not a number"))?; + + Ok(DatabaseVersion::Mysql(parsed_major, parsed_minor, parsed_patch)) + } + None => Ok(DatabaseVersion::Unknown), + } + } +} diff --git a/query-engine/query-engine-node-api/src/engine.rs b/query-engine/query-engine-node-api/src/engine.rs index 31df595785a3..4ca524af699c 100644 --- a/query-engine/query-engine-node-api/src/engine.rs +++ b/query-engine/query-engine-node-api/src/engine.rs @@ -4,11 +4,7 @@ use napi::{threadsafe_function::ThreadSafeCallContext, Env, JsFunction, JsObject use napi_derive::napi; use psl::PreviewFeature; use 
quaint::connector::ExternalConnector; -use query_core::{ - protocol::EngineProtocol, - schema::{self}, - telemetry, TransactionOptions, TxId, -}; +use query_core::{protocol::EngineProtocol, relation_load_strategy, schema, telemetry, TransactionOptions, TxId}; use query_engine_common::engine::{ map_known_error, stringify_env_values, ConnectedEngine, ConnectedEngineNative, ConstructorOptions, ConstructorOptionsNative, EngineBuilder, EngineBuilderNative, Inner, @@ -228,9 +224,10 @@ impl QueryEngine { "db.type" = connector.name(), ); - connector.get_connection().instrument(conn_span).await?; + let conn = connector.get_connection().instrument(conn_span).await?; + let database_version = conn.version().await; - crate::Result::<_>::Ok(executor) + crate::Result::<_>::Ok((executor, database_version)) }; let query_schema_span = tracing::info_span!("prisma:engine:schema"); @@ -241,12 +238,17 @@ impl QueryEngine { }) .instrument(query_schema_span); - let (query_schema, executor) = tokio::join!(query_schema_fut, executor_fut); + let (query_schema, executor_with_db_version) = tokio::join!(query_schema_fut, executor_fut); + let (executor, db_version) = executor_with_db_version?; + + let query_schema = query_schema.unwrap().with_db_version_supports_join_strategy( + relation_load_strategy::db_version_supports_joins_strategy(db_version)?, + ); Ok(ConnectedEngine { schema: builder.schema.clone(), - query_schema: Arc::new(query_schema.unwrap()), - executor: executor?, + query_schema: Arc::new(query_schema), + executor, engine_protocol: builder.engine_protocol, native: ConnectedEngineNative { config_dir: builder.native.config_dir.clone(), diff --git a/query-engine/query-engine-wasm/src/wasm/engine.rs b/query-engine/query-engine-wasm/src/wasm/engine.rs index 57a1d469d5a6..ae6fe40f8728 100644 --- a/query-engine/query-engine-wasm/src/wasm/engine.rs +++ b/query-engine/query-engine-wasm/src/wasm/engine.rs @@ -11,6 +11,7 @@ use psl::ConnectorRegistry; use quaint::connector::ExternalConnector; 
use query_core::{ protocol::EngineProtocol, + relation_load_strategy, schema::{self}, telemetry, TransactionOptions, TxId, }; @@ -113,10 +114,16 @@ impl QueryEngine { "db.type" = connector.name(), ); - connector.get_connection().instrument(conn_span).await?; + let conn = connector.get_connection().instrument(conn_span).await?; + let db_version = conn.version().await; let query_schema_span = tracing::info_span!("prisma:engine:schema"); - let query_schema = query_schema_span.in_scope(|| schema::build(arced_schema, true)); + + let query_schema = query_schema_span + .in_scope(|| schema::build(arced_schema, true)) + .with_db_version_supports_join_strategy( + relation_load_strategy::db_version_supports_joins_strategy(db_version)?, + ); Ok(ConnectedEngine { schema: builder.schema.clone(), diff --git a/query-engine/query-engine/src/context.rs b/query-engine/query-engine/src/context.rs index 4fd20bc61f99..7a1138c411e5 100644 --- a/query-engine/query-engine/src/context.rs +++ b/query-engine/query-engine/src/context.rs @@ -4,6 +4,7 @@ use crate::{PrismaError, PrismaResult}; use psl::PreviewFeature; use query_core::{ protocol::EngineProtocol, + relation_load_strategy, schema::{self, QuerySchemaRef}, QueryExecutor, }; @@ -46,10 +47,7 @@ impl PrismaContext { let query_schema_fut = tokio::runtime::Handle::current().spawn_blocking(move || { // Construct query schema - Arc::new(schema::build( - arced_schema, - enabled_features.contains(Feature::RawQueries), - )) + schema::build(arced_schema, enabled_features.contains(Feature::RawQueries)) }); let executor_fut = tokio::spawn(async move { let config = &arced_schema_2.configuration; @@ -64,15 +62,22 @@ impl PrismaContext { let url = datasource.load_url(|key| env::var(key).ok())?; // Load executor let executor = load_executor(ConnectorKind::Rust { url, datasource }, preview_features).await?; - executor.primary_connector().get_connection().await?; - PrismaResult::<_>::Ok(executor) + let conn = 
executor.primary_connector().get_connection().await?; + let db_version = conn.version().await; + + PrismaResult::<_>::Ok((executor, db_version)) }); - let (query_schema, executor) = tokio::join!(query_schema_fut, executor_fut); + let (query_schema, executor_with_db_version) = tokio::join!(query_schema_fut, executor_fut); + let (executor, db_version) = executor_with_db_version.unwrap()?; + + let query_schema = query_schema.unwrap().with_db_version_supports_join_strategy( + relation_load_strategy::db_version_supports_joins_strategy(db_version)?, + ); let context = Self { - query_schema: query_schema.unwrap(), - executor: executor.unwrap()?, + query_schema: Arc::new(query_schema), + executor, metrics: metrics.unwrap_or_default(), engine_protocol: protocol, enabled_features, diff --git a/query-engine/query-structure/src/query_arguments.rs b/query-engine/query-structure/src/query_arguments.rs index 3cbd3c0164e5..abe23ab4de4e 100644 --- a/query-engine/query-structure/src/query_arguments.rs +++ b/query-engine/query-structure/src/query_arguments.rs @@ -33,6 +33,7 @@ pub enum RelationLoadStrategy { Join, Query, } + impl RelationLoadStrategy { pub fn is_query(&self) -> bool { matches!(self, RelationLoadStrategy::Query) diff --git a/query-engine/schema/src/build.rs b/query-engine/schema/src/build.rs index 3c589989f21e..2970be408b59 100644 --- a/query-engine/schema/src/build.rs +++ b/query-engine/schema/src/build.rs @@ -20,6 +20,7 @@ use query_structure::{ast, Field as ModelField, Model, RelationFieldRef, TypeIde pub fn build(schema: Arc, enable_raw_queries: bool) -> QuerySchema { let preview_features = schema.configuration.preview_features(); + build_with_features(schema, preview_features, enable_raw_queries) } @@ -30,5 +31,6 @@ pub fn build_with_features( ) -> QuerySchema { let connector = schema.connector; let internal_data_model = query_structure::convert(schema); + QuerySchema::new(enable_raw_queries, connector, preview_features, internal_data_model) } diff --git 
a/query-engine/schema/src/build/enum_types.rs b/query-engine/schema/src/build/enum_types.rs index c878226e76bf..b0ddc66a638d 100644 --- a/query-engine/schema/src/build/enum_types.rs +++ b/query-engine/schema/src/build/enum_types.rs @@ -111,7 +111,7 @@ pub fn itx_isolation_levels(ctx: &'_ QuerySchema) -> Option { } pub(crate) fn relation_load_strategy(ctx: &QuerySchema) -> Option { - if !ctx.has_feature(psl::PreviewFeature::RelationJoins) { + if !ctx.can_resolve_relation_with_joins() { return None; } diff --git a/query-engine/schema/src/query_schema.rs b/query-engine/schema/src/query_schema.rs index 4859984d11a6..dbd96dd4ab77 100644 --- a/query-engine/schema/src/query_schema.rs +++ b/query-engine/schema/src/query_schema.rs @@ -1,6 +1,6 @@ use crate::{IdentifierType, ObjectType, OutputField}; use psl::{ - datamodel_connector::{Connector, ConnectorCapabilities, ConnectorCapability, RelationMode}, + datamodel_connector::{Connector, ConnectorCapabilities, ConnectorCapability, JoinStrategySupport, RelationMode}, PreviewFeature, PreviewFeatures, }; use query_structure::{ast, InternalDataModel}; @@ -36,6 +36,12 @@ pub struct QuerySchema { /// Relation mode in the datasource. relation_mode: RelationMode, + + /// Whether the database supports `RelationLoadStrategy::Join`. + /// By the time the `QuerySchema`` is created, we don't have all the evidence yet to determine + /// whether the database supports the join strategy (eg: database version). + // Hack: Ideally, this shoud be known statically and live in the PSL connector entirely. 
+ join_strategy_support: JoinStrategySupport, } impl QuerySchema { @@ -57,6 +63,11 @@ impl QuerySchema { relation_mode, mutation_fields: Default::default(), query_fields: Default::default(), + join_strategy_support: if preview_features.contains(PreviewFeature::RelationJoins) { + connector.runtime_join_strategy_support() + } else { + JoinStrategySupport::No + }, }; query_schema.query_fields = crate::build::query_type::query_fields(&query_schema); @@ -96,10 +107,31 @@ impl QuerySchema { || self.has_capability(ConnectorCapability::FullTextSearchWithIndex)) } + /// Returns whether the loaded connector supports the join strategy. pub fn can_resolve_relation_with_joins(&self) -> bool { - self.has_feature(PreviewFeature::RelationJoins) - && (self.has_capability(ConnectorCapability::LateralJoin) - || self.has_capability(ConnectorCapability::CorrelatedSubqueries)) + !matches!(self.join_strategy_support, JoinStrategySupport::No) + } + + /// Returns whether the database version of the loaded connector supports the join strategy. + pub fn join_strategy_support(&self) -> JoinStrategySupport { + self.join_strategy_support + } + + /// Augments the join strategy support with the runtime database version knowledge. + /// This is specifically designed for the MySQL connector, which does not support the join strategy for versions < 8.0.14 and MariaDB. 
+ pub fn with_db_version_supports_join_strategy(self, db_version_supports_joins_strategy: bool) -> Self { + let augmented_support = match self.join_strategy_support { + JoinStrategySupport::UnknownYet => match db_version_supports_joins_strategy { + true => JoinStrategySupport::Yes, + false => JoinStrategySupport::UnsupportedDbVersion, + }, + x => x, + }; + + Self { + join_strategy_support: augmented_support, + ..self + } } pub fn has_feature(&self, feature: PreviewFeature) -> bool { From 73fdee21b8a8e43a794af2800c2439fcb08611b9 Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Fri, 16 Feb 2024 14:15:03 +0100 Subject: [PATCH 22/25] fix(joins): relations should not collide with mapped scalar fields (#4732) --- .../tests/new/regressions/mod.rs | 1 + .../tests/new/regressions/prisma_22971.rs | 52 +++++++++++++++++++ .../src/database/operations/coerce.rs | 2 +- .../src/database/operations/read.rs | 4 +- .../src/query_builder/select/lateral.rs | 8 +-- .../src/query_builder/select/mod.rs | 15 +++++- .../src/query_builder/select/subquery.rs | 8 +-- query-engine/core/src/response_ir/internal.rs | 11 +--- .../query-structure/src/field_selection.rs | 10 ++-- 9 files changed, 81 insertions(+), 30 deletions(-) create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_22971.rs diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs index 4b4aa97479d6..dc7509e3980a 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs @@ -23,6 +23,7 @@ mod prisma_21182; mod prisma_21369; mod prisma_21901; mod prisma_22298; +mod prisma_22971; mod prisma_5952; mod prisma_6173; mod prisma_7010; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_22971.rs 
b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_22971.rs new file mode 100644 index 000000000000..e1913b2d2b5c --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_22971.rs @@ -0,0 +1,52 @@ +use indoc::indoc; +use query_engine_tests::*; + +#[test_suite(schema(schema))] +mod prisma_22971 { + fn schema() -> String { + let schema = indoc! { + r#"model User { + #id(id, Int, @id, @map("hello")) + updatedAt String @default("now") @map("updated_at") + + postId Int? @map("post") + post Post? @relation("User_post", fields: [postId], references: [id]) + } + + model Post { + #id(id, Int, @id, @map("world")) + updatedAt String @default("now") @map("up_at") + + from_User_post User[] @relation("User_post") + }"# + }; + + schema.to_owned() + } + + // Ensures that mapped fields are correctly resolved, even when there's a conflict between a scalar field name and a relation field name. + #[connector_test] + async fn test_22971(runner: Runner) -> TestResult<()> { + run_query!(&runner, r#"mutation { createOnePost(data: { id: 1 }) { id } }"#); + run_query!( + &runner, + r#"mutation { createOneUser(data: { id: 1, postId: 1 }) { id } }"# + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyUser { + id + updatedAt + post { + id + updatedAt + } + } + }"#), + @r###"{"data":{"findManyUser":[{"id":1,"updatedAt":"now","post":{"id":1,"updatedAt":"now"}}]}}"### + ); + + Ok(()) + } +} diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs b/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs index 0d697b97dc65..b25c1fee4e16 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs @@ -82,7 +82,7 @@ fn coerce_json_relation_to_pv(value: serde_json::Value, rs: &RelationSelection) let related_model = 
rs.field.related_model(); for (key, value) in obj { - match related_model.fields().all().find(|f| f.db_name() == key) { + match related_model.fields().all().find(|f| f.name() == key) { Some(Field::Scalar(sf)) => { map.push((key, coerce_json_scalar_to_pv(value, &sf)?)); } diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs index 13206f560776..24b576c936c2 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs @@ -34,7 +34,7 @@ pub(crate) async fn get_single_record_joins( ctx: &Context<'_>, ) -> crate::Result> { let selected_fields = selected_fields.to_virtuals_last(); - let field_names: Vec<_> = selected_fields.db_names_grouping_virtuals().collect(); + let field_names: Vec<_> = selected_fields.prisma_names_grouping_virtuals().collect(); let idents = selected_fields.type_identifiers_with_arities_grouping_virtuals(); let indexes = get_selection_indexes( @@ -132,7 +132,7 @@ pub(crate) async fn get_many_records_joins( ctx: &Context<'_>, ) -> crate::Result { let selected_fields = selected_fields.to_virtuals_last(); - let field_names: Vec<_> = selected_fields.db_names_grouping_virtuals().collect(); + let field_names: Vec<_> = selected_fields.prisma_names_grouping_virtuals().collect(); let idents = selected_fields.type_identifiers_with_arities_grouping_virtuals(); let meta = column_metadata::create(field_names.as_slice(), idents.as_slice()); diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs index af3b73271aa9..0e7dd7203ed2 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs @@ -69,11 +69,7 @@ impl 
JoinSelectBuilder for LateralJoinSelectBuilder { ctx: &Context<'_>, ) -> Select<'a> { match field { - SelectedField::Scalar(sf) => select.column( - sf.as_column(ctx) - .table(parent_alias.to_table_string()) - .set_is_selected(true), - ), + SelectedField::Scalar(sf) => select.column(aliased_scalar_column(sf, parent_alias, ctx)), SelectedField::Relation(rs) => { let table_name = match rs.field.relation().is_many_to_many() { true => m2m_join_alias_name(&rs.field), @@ -170,7 +166,7 @@ impl JoinSelectBuilder for LateralJoinSelectBuilder { .iter() .filter_map(|field| match field { SelectedField::Scalar(sf) => Some(( - Cow::from(sf.db_name().to_owned()), + Cow::from(sf.name().to_owned()), Expression::from(sf.as_column(ctx).table(parent_alias.to_table_string())), )), SelectedField::Relation(rs) => { diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs index 766c102b5b69..423df1d969e5 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs @@ -11,7 +11,7 @@ use query_structure::*; use crate::{ context::Context, filter::alias::Alias, - model_extensions::{AsColumns, AsTable, ColumnIterator, RelationFieldExt}, + model_extensions::{AsColumn, AsColumns, AsTable, ColumnIterator, RelationFieldExt}, ordering::OrderByBuilder, sql_trace::SqlTraceComment, }; @@ -628,6 +628,19 @@ fn json_agg() -> Function<'static> { .alias(JSON_AGG_IDENT) } +pub(crate) fn aliased_scalar_column(sf: &ScalarField, parent_alias: Alias, ctx: &Context<'_>) -> Column<'static> { + let col = sf + .as_column(ctx) + .table(parent_alias.to_table_string()) + .set_is_selected(true); + + if sf.name() != sf.db_name() { + col.alias(sf.name().to_owned()) + } else { + col + } +} + #[inline] fn empty_json_array() -> serde_json::Value { serde_json::Value::Array(Vec::new()) diff --git 
a/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs index 437ee9f075a8..93a91bcb21d8 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/subquery.rs @@ -41,11 +41,7 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { ctx: &Context<'_>, ) -> Select<'a> { match field { - SelectedField::Scalar(sf) => select.column( - sf.as_column(ctx) - .table(parent_alias.to_table_string()) - .set_is_selected(true), - ), + SelectedField::Scalar(sf) => select.column(aliased_scalar_column(sf, parent_alias, ctx)), SelectedField::Relation(rs) => self.with_relation(select, rs, Vec::new().iter(), parent_alias, ctx), _ => select, } @@ -115,7 +111,7 @@ impl JoinSelectBuilder for SubqueriesSelectBuilder { .iter() .filter_map(|field| match field { SelectedField::Scalar(sf) => Some(( - Cow::from(sf.db_name().to_owned()), + Cow::from(sf.name().to_owned()), Expression::from(sf.as_column(ctx).table(parent_alias.to_table_string())), )), SelectedField::Relation(rs) => Some(( diff --git a/query-engine/core/src/response_ir/internal.rs b/query-engine/core/src/response_ir/internal.rs index c6cf4fdd74b6..46dbb66e2366 100644 --- a/query-engine/core/src/response_ir/internal.rs +++ b/query-engine/core/src/response_ir/internal.rs @@ -319,13 +319,6 @@ impl<'a, 'b> SerializedFieldWithRelations<'a, 'b> { Self::VirtualsGroup(name, _) => name, } } - - fn db_name(&self) -> &str { - match self { - Self::Model(f, _) => f.db_name(), - Self::VirtualsGroup(name, _) => name, - } - } } // TODO: Handle errors properly @@ -427,7 +420,7 @@ fn serialize_relation_selection( let fields = collect_serialized_fields_with_relations(typ, &rrs.model, &rrs.virtuals, &rrs.fields); for field in fields { - let value = value_obj.remove(field.db_name()).unwrap(); + let value = 
value_obj.remove(field.name()).unwrap(); match field { SerializedFieldWithRelations::Model(Field::Scalar(_), out_field) if !out_field.field_type().is_object() => { @@ -481,7 +474,7 @@ fn collect_serialized_fields_with_relations<'a, 'b>( model .fields() .all() - .find(|field| field.db_name() == name) + .find(|field| field.name() == name) .and_then(|field| { object_type .find_field(field.name()) diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index b6d9bcb883e9..2166dffd913e 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -80,7 +80,7 @@ impl FieldSelection { /// [`FieldSelection::db_names_grouping_virtuals`] and /// [`FieldSelection::type_identifiers_with_arities_grouping_virtuals`]. fn selections_with_virtual_group_heads(&self) -> impl Iterator { - self.selections().unique_by(|f| f.db_name_grouping_virtuals()) + self.selections().unique_by(|f| f.prisma_name_grouping_virtuals()) } /// Returns all Prisma (e.g. schema model field) names of contained fields. @@ -102,9 +102,9 @@ impl FieldSelection { /// into the grouped containers for virtual fields, like `_count`. The names returned by this /// method correspond to the results of queries that use JSON objects to represent joined /// relations and relation aggregations. - pub fn db_names_grouping_virtuals(&self) -> impl Iterator + '_ { + pub fn prisma_names_grouping_virtuals(&self) -> impl Iterator + '_ { self.selections_with_virtual_group_heads() - .map(|f| f.db_name_grouping_virtuals()) + .map(|f| f.prisma_name_grouping_virtuals()) .map(Cow::into_owned) } @@ -384,10 +384,10 @@ impl SelectedField { /// relations and relation aggregations. For those queries, the result of this method /// corresponds to the top-level name of the value which is a JSON object that contains this /// field inside. 
- pub fn db_name_grouping_virtuals(&self) -> Cow<'_, str> { + pub fn prisma_name_grouping_virtuals(&self) -> Cow<'_, str> { match self { SelectedField::Virtual(vs) => vs.serialized_name().0.into(), - _ => self.db_name(), + _ => self.prisma_name(), } } From c9d2b8141fe4fe85183c7f1998cf6d47c8eea794 Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Fri, 16 Feb 2024 14:15:18 +0100 Subject: [PATCH 23/25] fix(joins): forward linking fields to outer select to enable ordering (#4734) --- .../order_by_aggregation.rs | 383 ++++++++++++++++++ .../sql-query-connector/src/ordering.rs | 10 +- .../src/query_builder/select/mod.rs | 24 +- .../query-structure/src/field_selection.rs | 4 +- query-engine/query-structure/src/order_by.rs | 17 + 5 files changed, 427 insertions(+), 11 deletions(-) diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs index 52e9fcaf8cc4..dac031f788f8 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs @@ -836,6 +836,389 @@ mod order_by_aggr { Ok(()) } + fn nested_one2m_schema() -> String { + let schema = indoc! { + r#"model A { + #id(id, Int, @id) + + bs B[] + } + + model B { + #id(id, Int, @id) + + A A? @relation(fields: [aId], references: [id]) + aId Int? + + cId Int? + c C? @relation(fields: [cId], references: [id]) + } + + model C { + #id(id, Int, @id) + B B[] + + dId Int? + d D? @relation(fields: [dId], references: [id]) + } + + model D { + #id(id, Int, @id) + C C[] + + es E[] + } + + model E { + #id(id, Int, @id) + + dId Int? + D D? 
@relation(fields: [dId], references: [id]) + } + + "# + }; + + schema.to_owned() + } + + // [Nested 2+ Hops] Ordering by one2one2m count should "work + #[connector_test(schema(nested_one2m_schema))] + async fn nested_one2m_count(runner: Runner) -> TestResult<()> { + // test data + run_query!( + &runner, + r#"mutation { + createOneA( + data: { + id: 1, + bs: { + create: [ + { + id: 1, + c: { + create: { + id: 1, + d: { create: { id: 1, es: { create: [{ id: 1 }] } } } + } + } + } + { + id: 2, + c: { + create: { + id: 2, + d: { create: { id: 2, es: { create: [{ id: 2 }, { id: 3 }] } } } + } + } + } + ] + } + } + ) { + id + } + } + "# + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { es: { _count: asc } } } }) { + id + c { + d { + es { + id + } + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":1,"c":{"d":{"es":[{"id":1}]}}},{"id":2,"c":{"d":{"es":[{"id":2},{"id":3}]}}}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { es: { _count: desc } } } }) { + id + c { + d { + es { + id + } + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":2,"c":{"d":{"es":[{"id":2},{"id":3}]}}},{"id":1,"c":{"d":{"es":[{"id":1}]}}}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { id: asc } } }) { + id + c { + d { + id + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":1,"c":{"d":{"id":1}}},{"id":2,"c":{"d":{"id":2}}}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { id: desc } } }) { + id + c { + d { + id + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":2,"c":{"d":{"id":2}}},{"id":1,"c":{"d":{"id":1}}}]}]}}"### + ); + + Ok(()) + } + + fn nested_m2m_schema() -> String { + let schema = indoc! 
{ + r#"model A { + #id(id, Int, @id) + + #m2m(bs, B[], id, Int) + } + + model B { + #id(id, Int, @id) + + #m2m(as, A[], id, Int) + + cId Int? + c C? @relation(fields: [cId], references: [id]) + } + + model C { + #id(id, Int, @id) + B B[] + + dId Int? + d D? @relation(fields: [dId], references: [id]) + } + + model D { + #id(id, Int, @id) + C C[] + + es E[] + #m2m(fs, F[], id, Int) + } + + model E { + #id(id, Int, @id) + + dId Int? + D D? @relation(fields: [dId], references: [id]) + } + + model F { + #id(id, Int, @id) + + #m2m(ds, D[], id, Int) + } + + "# + }; + + schema.to_owned() + } + + // [Nested 2+ Hops] Ordering by m2one2one2m count should "work + // Regression test for https://github.com/prisma/prisma/issues/22926 + #[connector_test(schema(nested_m2m_schema))] + async fn nested_m2m_count(runner: Runner) -> TestResult<()> { + // test data + run_query!( + &runner, + r#"mutation { + createOneA( + data: { + id: 1, + bs: { + create: [ + { + id: 1, + c: { + create: { + id: 1, + d: { + create: { + id: 1, + es: { create: [{ id: 1 }] }, + fs: { create: [{ id: 1 }] } + } + } + } + } + } + { + id: 2, + c: { + create: { + id: 2, + d: { + create: { + id: 2, + es: { create: [{ id: 2 }, { id: 3 }] } + fs: { create: [{ id: 2 }, { id: 3 }] } + } + } + } + } + } + ] + } + } + ) { + id + } + } + "# + ); + + // count asc on 1-m + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { es: { _count: asc } } } }) { + id + c { + d { + es { + id + } + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":1,"c":{"d":{"es":[{"id":1}]}}},{"id":2,"c":{"d":{"es":[{"id":2},{"id":3}]}}}]}]}}"### + ); + + // count desc on 1-m + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { es: { _count: desc } } } }) { + id + c { + d { + es { + id + } + } + } + } + } + }"#), + 
@r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":2,"c":{"d":{"es":[{"id":2},{"id":3}]}}},{"id":1,"c":{"d":{"es":[{"id":1}]}}}]}]}}"### + ); + + // count asc on m-n + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { fs: { _count: asc } } } }) { + id + c { + d { + fs { + id + } + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":1,"c":{"d":{"fs":[{"id":1}]}}},{"id":2,"c":{"d":{"fs":[{"id":2},{"id":3}]}}}]}]}}"### + ); + + // count desc on m-n + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { fs: { _count: desc } } } }) { + id + c { + d { + fs { + id + } + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":2,"c":{"d":{"fs":[{"id":2},{"id":3}]}}},{"id":1,"c":{"d":{"fs":[{"id":1}]}}}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { id: asc } } }) { + id + c { + d { + id + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":1,"c":{"d":{"id":1}}},{"id":2,"c":{"d":{"id":2}}}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ + findManyA { + id + bs(orderBy: { c: { d: { id: desc } } }) { + id + c { + d { + id + } + } + } + } + }"#), + @r###"{"data":{"findManyA":[{"id":1,"bs":[{"id":2,"c":{"d":{"id":2}}},{"id":1,"c":{"d":{"id":1}}}]}]}}"### + ); + + Ok(()) + } + async fn create_test_data(runner: &Runner) -> TestResult<()> { create_row(runner, r#"{ id: 1, name: "Alice", categories: { create: [{ id: 1, name: "Startup" }] }, posts: { create: { id: 1, title: "alice_post_1", categories: { create: [{ id: 2, name: "News" }, { id: 3, name: "Society" }] }} } }"#).await?; create_row(runner, r#"{ id: 2, name: "Bob", categories: { create: [{ id: 4, name: "Computer Science" }, { id: 5, name: "Music" }] }, posts: { create: [{ id: 2, title: "bob_post_1", categories: { create: [{ id: 6, name: "Finance" }] } }, { id: 3, title: "bob_post_2", categories: 
{ create: [{ id: 7, name: "History" }, { id: 8, name: "Gaming" }, { id: 9, name: "Hacking" }] } }] } }"#).await?; diff --git a/query-engine/connectors/sql-query-connector/src/ordering.rs b/query-engine/connectors/sql-query-connector/src/ordering.rs index ade10aaa7164..aedb7a75fd99 100644 --- a/query-engine/connectors/sql-query-connector/src/ordering.rs +++ b/query-engine/connectors/sql-query-connector/src/ordering.rs @@ -146,17 +146,15 @@ impl OrderByBuilder { order_by: &OrderByToManyAggregation, ctx: &Context<'_>, ) -> (Vec, Column<'static>) { - let (last_hop, rest_hops) = order_by - .path - .split_last() - .expect("An order by relation aggregation has to have at least one hop"); + let intermediary_hops = order_by.intermediary_hops(); + let aggregation_hop = order_by.aggregation_hop(); // Unwraps are safe because the SQL connector doesn't yet support any other type of orderBy hop but the relation hop. let mut joins: Vec = vec![]; let parent_alias = self.parent_alias.clone(); - for (i, hop) in rest_hops.iter().enumerate() { + for (i, hop) in intermediary_hops.iter().enumerate() { let previous_join = if i > 0 { joins.get(i - 1) } else { None }; let previous_alias = previous_join.map(|j| j.alias.as_str()).or(parent_alias.as_deref()); @@ -174,7 +172,7 @@ impl OrderByBuilder { // We perform the aggregation on the last join let last_aggr_join = compute_aggr_join( - last_hop.as_relation_hop().unwrap(), + aggregation_hop.as_relation_hop().unwrap(), aggregation_type, None, ORDER_AGGREGATOR_ALIAS, diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs index 423df1d969e5..c28f33c2b924 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs @@ -545,15 +545,35 @@ fn order_by_selection(rs: &RelationSelection) -> FieldSelection { .order_by .iter() 
.flat_map(|order_by| match order_by { - OrderBy::Scalar(x) if x.path.is_empty() => vec![x.field.clone()], + OrderBy::Scalar(x) => { + // If the path is empty, the order by is done on the field itself in the outer select. + if x.path.is_empty() { + vec![x.field.clone()] + // If there are relations to traverse, select the linking fields of the first hop so that the outer select can perform a join to traverse the first relation. + // This is necessary because the order by is done on a different join. The following hops are handled by the order by builder. + } else { + first_hop_linking_fields(&x.path) + } + } OrderBy::Relevance(x) => x.fields.clone(), - _ => Vec::new(), + // Select the linking fields of the first hop so that the outer select can perform a join to traverse the relation. + // This is necessary because the order by is done on a different join. The following hops are handled by the order by builder. + OrderBy::ToManyAggregation(x) => first_hop_linking_fields(x.intermediary_hops()), + OrderBy::ScalarAggregation(x) => vec![x.field.clone()], }) .collect(); FieldSelection::from(selection) } +/// Returns the linking fields of the first hop in an order by path. 
+fn first_hop_linking_fields(hops: &[OrderByHop]) -> Vec { + hops.first() + .and_then(|hop| hop.as_relation_hop()) + .map(|rf| rf.linking_fields().as_scalar_fields().unwrap()) + .unwrap_or_default() +} + fn relation_selection(rs: &RelationSelection) -> FieldSelection { let relation_fields = rs.relations().flat_map(|rs| join_fields(&rs.field)).collect::>(); diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index 2166dffd913e..c74f12247223 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -135,9 +135,7 @@ impl FieldSelection { .selections() .filter_map(|selection| match selection { SelectedField::Scalar(sf) => Some(sf.clone()), - SelectedField::Composite(_) => None, - SelectedField::Relation(_) => None, - SelectedField::Virtual(_) => None, + _ => None, }) .collect::>(); diff --git a/query-engine/query-structure/src/order_by.rs b/query-engine/query-structure/src/order_by.rs index eb28afa8ea51..6bcd1cfa79ca 100644 --- a/query-engine/query-structure/src/order_by.rs +++ b/query-engine/query-structure/src/order_by.rs @@ -179,6 +179,23 @@ pub struct OrderByToManyAggregation { pub sort_aggregation: SortAggregation, } +impl OrderByToManyAggregation { + pub fn intermediary_hops(&self) -> &[OrderByHop] { + let (_, rest) = self + .path + .split_last() + .expect("An order by relation aggregation has to have at least one hop"); + + rest + } + + pub fn aggregation_hop(&self) -> &OrderByHop { + self.path + .last() + .expect("An order by relation aggregation has to have at least one hop") + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct OrderByRelevance { pub fields: Vec, From 811068771a7c08ca615bdbdfea96f56a3d10a25b Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Fri, 16 Feb 2024 17:54:20 +0100 Subject: [PATCH 24/25] fix: query graph should extract join results using prisma field name (#4743) --- 
.../tests/new/regressions/mod.rs | 1 + .../tests/new/regressions/prisma_15177.rs | 41 +++++++++++++++++++ .../core/src/interpreter/interpreter_impl.rs | 2 +- query-engine/query-structure/src/record.rs | 27 ++++++++++++ 4 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15177.rs diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs index dc7509e3980a..be0b5441c217 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/mod.rs @@ -10,6 +10,7 @@ mod prisma_13405; mod prisma_14001; mod prisma_14696; mod prisma_14703; +mod prisma_15177; mod prisma_15204; mod prisma_15264; mod prisma_15467; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15177.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15177.rs new file mode 100644 index 000000000000..a5ce0b6faa6f --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15177.rs @@ -0,0 +1,41 @@ +use indoc::indoc; +use query_engine_tests::*; + +#[test_suite(schema(schema), exclude(MongoDb))] +mod prisma_15177 { + fn schema() -> String { + let schema = indoc! 
{ + r#"model Customer { + #id(userId, Int, @id @map("user id")) + }"# + }; + + schema.to_owned() + } + + // Should allow CRUD methods on a table column that has a space + #[connector_test] + async fn repro(runner: Runner) -> TestResult<()> { + insta::assert_snapshot!( + run_query!(&runner, r#"mutation { createOneCustomer(data: { userId: 1 }) { userId } }"#), + @r###"{"data":{"createOneCustomer":{"userId":1}}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyCustomer { userId } }"#), + @r###"{"data":{"findManyCustomer":[{"userId":1}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"mutation { updateOneCustomer(where: { userId: 1 }, data: { userId: 2 }) { userId } }"#), + @r###"{"data":{"updateOneCustomer":{"userId":2}}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"mutation { deleteOneCustomer(where: { userId: 2 }) { userId } }"#), + @r###"{"data":{"deleteOneCustomer":{"userId":2}}}"### + ); + + Ok(()) + } +} diff --git a/query-engine/core/src/interpreter/interpreter_impl.rs b/query-engine/core/src/interpreter/interpreter_impl.rs index cc10f0749063..9840fe56333c 100644 --- a/query-engine/core/src/interpreter/interpreter_impl.rs +++ b/query-engine/core/src/interpreter/interpreter_impl.rs @@ -74,7 +74,7 @@ impl ExpressionResult { ), QueryResult::RecordSelectionWithRelations(rsr) => Some( rsr.records - .extract_selection_results(field_selection) + .extract_selection_results_from_prisma_name(field_selection) .expect("Expected record selection to contain required model ID fields.") .into_iter() .collect(), diff --git a/query-engine/query-structure/src/record.rs b/query-engine/query-structure/src/record.rs index 15841d856ba7..880db9fe3dfa 100644 --- a/query-engine/query-structure/src/record.rs +++ b/query-engine/query-structure/src/record.rs @@ -99,6 +99,17 @@ impl ManyRecords { .collect() } + /// Builds `SelectionResults` from this `ManyRecords` based on the given FieldSelection. 
+ pub fn extract_selection_results_from_prisma_name( + &self, + selections: &FieldSelection, + ) -> crate::Result> { + self.records + .iter() + .map(|record| record.extract_selection_result_from_prisma_name(&self.field_names, selections)) + .collect() + } + /// Maps into a Vector of (field_name, value) tuples pub fn as_pairs(&self) -> Vec> { self.records @@ -180,6 +191,22 @@ impl Record { Ok(SelectionResult::new(pairs)) } + pub fn extract_selection_result_from_prisma_name( + &self, + field_names: &[String], + extraction_selection: &FieldSelection, + ) -> crate::Result { + let pairs: Vec<_> = extraction_selection + .selections() + .map(|selection| { + self.get_field_value(field_names, &selection.prisma_name()) + .and_then(|val| Ok((selection.clone(), selection.coerce_value(val.clone())?))) + }) + .collect::>>()?; + + Ok(SelectionResult::new(pairs)) + } + pub fn identifying_values( &self, field_names: &[String], From 5a9203d0590c951969e85a7d07215503f4672eb9 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Sat, 17 Feb 2024 01:41:16 +0100 Subject: [PATCH 25/25] qe: implement DISTINCT ON for joins (#4737) ## Description Implement native `DISTINCT ON` support for queries with JOINs when it can be used (i.e. the provider supports `DISTINCT ON` and distinct is compatible with order by). 
Closes: https://github.com/prisma/team-orm/issues/701 Next step: - Implement in-memory processing instead of falling back to the query strategy when native `DISTINCT ON` can't be used: https://github.com/prisma/team-orm/issues/954 ## Examples ### top-level ```graphql { findManyPost(distinct: title) { id title } } ``` ```sql SELECT DISTINCT ON ("t1"."title") "t1"."id", "t1"."title" FROM "public"."Post" AS "t1" ``` ### 1:m ```graphql { findManyUser { id posts(distinct: [title], orderBy: [{title: asc}, {id: desc}]) { id title } } } ``` ```sql SELECT "t1"."id", "User_posts"."__prisma_data__" AS "posts" FROM "public"."User" AS "t1" LEFT JOIN LATERAL ( SELECT COALESCE(JSONB_AGG("__prisma_data__"), '[]') AS "__prisma_data__" FROM ( SELECT DISTINCT ON ("t4"."title") "t4"."__prisma_data__" FROM ( SELECT JSONB_BUILD_OBJECT('id', "t3"."id", 'title', "t3"."title") AS "__prisma_data__", "t3"."title", "t3"."id" FROM ( SELECT "t2".* FROM "public"."Post" AS "t2" WHERE "t1"."id" = "t2"."userId" /* root select */ ) AS "t3" /* inner select */ ) AS "t4" ORDER BY "t4"."title" ASC, "t4"."id" DESC /* middle select */ ) AS "t5" /* outer select */ ) AS "User_posts" ON true ``` ### m:n ```graphql { findManyUser { follows(distinct: login, orderBy: [{ login: desc }]) { id login } } } ``` ```sql SELECT "t1"."id", "User_follows_m2m"."__prisma_data__" AS "follows" FROM "public"."User" AS "t1" LEFT JOIN LATERAL ( SELECT COALESCE(JSONB_AGG("__prisma_data__"), '[]') AS "__prisma_data__" FROM ( SELECT DISTINCT ON ("t3"."login") "t3"."__prisma_data__" FROM "public"."_UserFollows" AS "t2" LEFT JOIN LATERAL ( SELECT JSONB_BUILD_OBJECT('id', "t6"."id", 'login', "t6"."login") AS "__prisma_data__", "t6"."login", "t6"."id" FROM ( SELECT "t5".* FROM "public"."User" AS "t5" WHERE "t2"."A" = "t5"."id" /* root select */ ) AS "t6" ) AS "t3" ON true WHERE "t2"."B" = "t1"."id" ORDER BY "t3"."login" DESC /* inner */ ) AS "t4" /* outer */ ) AS "User_follows_m2m" ON true ``` --- 
.../src/query_builder/select/lateral.rs | 1 + .../src/query_builder/select/mod.rs | 46 +++- query-engine/core/src/query_ast/read.rs | 18 +- .../core/src/query_graph_builder/read/many.rs | 1 + .../core/src/query_graph_builder/read/one.rs | 2 +- .../src/query_graph_builder/read/utils.rs | 16 +- query-engine/query-structure/src/distinct.rs | 209 ++++++++++++++++++ .../query-structure/src/field_selection.rs | 21 +- query-engine/query-structure/src/lib.rs | 2 + .../query-structure/src/query_arguments.rs | 22 +- 10 files changed, 311 insertions(+), 27 deletions(-) create mode 100644 query-engine/query-structure/src/distinct.rs diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs index 0e7dd7203ed2..2098cd016691 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/lateral.rs @@ -243,6 +243,7 @@ impl LateralJoinSelectBuilder { .with_m2m_join_conditions(&rf.related_field(), m2m_table_alias, parent_alias, ctx) // adds join condition to the child table // TODO: avoid clone filter .with_filters(rs.args.filter.clone(), Some(m2m_join_alias), ctx) // adds query filters + .with_distinct(&rs.args, m2m_join_alias) .with_ordering(&rs.args, Some(m2m_join_alias.to_table_string()), ctx) // adds ordering stmts .with_pagination(rs.args.take_abs(), rs.args.skip) .comment("inner"); // adds pagination diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs index c28f33c2b924..5aec46423b3b 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select/mod.rs @@ -170,19 +170,25 @@ pub(crate) trait JoinSelectBuilder { let linking_fields = 
rs.field.related_field().linking_fields(); if rs.field.relation().is_many_to_many() { - let selection: Vec> = - FieldSelection::union(vec![order_by_selection(rs), linking_fields, filtering_selection(rs)]) - .into_projection() - .as_columns(ctx) - .map(|c| c.table(root_alias.to_table_string())) - .collect(); + let selection: Vec> = FieldSelection::union(vec![ + order_by_selection(rs), + distinct_selection(rs), + linking_fields, + filtering_selection(rs), + ]) + .into_projection() + .as_columns(ctx) + .map(|c| c.table(root_alias.to_table_string())) + .collect(); // SELECT , inner.with_columns(selection.into()) } else { - // select ordering, filtering & join fields from child selections to order, filter & join them on the outer query + // select ordering, distinct, filtering & join fields from child selections to order, + // filter & join them on the outer query let inner_selection: Vec> = FieldSelection::union(vec![ order_by_selection(rs), + distinct_selection(rs), filtering_selection(rs), relation_selection(rs), ]) @@ -204,6 +210,8 @@ pub(crate) trait JoinSelectBuilder { let middle = Select::from_table(Table::from(inner).alias(inner_alias.to_table_string())) // SELECT . .column(Column::from((inner_alias.to_table_string(), JSON_AGG_IDENT))) + // DISTINCT ON + .with_distinct(&rs.args, inner_alias) // ORDER BY ... .with_ordering(&rs.args, Some(inner_alias.to_table_string()), ctx) // WHERE ... @@ -255,11 +263,11 @@ pub(crate) trait JoinSelectBuilder { // SELECT ... 
FROM Table "t1" let select = Select::from_table(table) + .with_distinct(args, table_alias) .with_ordering(args, Some(table_alias.to_table_string()), ctx) .with_filters(args.filter.clone(), Some(table_alias), ctx) .with_pagination(args.take_abs(), args.skip) - .append_trace(&Span::current()) - .add_trace_id(ctx.trace_id); + .append_trace(&Span::current()); (select, table_alias) } @@ -409,6 +417,7 @@ pub(crate) trait SelectBuilderExt<'a> { fn with_filters(self, filter: Option, parent_alias: Option, ctx: &Context<'_>) -> Select<'a>; fn with_pagination(self, take: Option, skip: Option) -> Select<'a>; fn with_ordering(self, args: &QueryArguments, parent_alias: Option, ctx: &Context<'_>) -> Select<'a>; + fn with_distinct(self, args: &QueryArguments, table_alias: Alias) -> Select<'a>; fn with_join_conditions( self, rf: &RelationField, @@ -473,6 +482,21 @@ impl<'a> SelectBuilderExt<'a> for Select<'a> { .fold(select, |acc, o| acc.order_by(o.order_definition.clone())) } + fn with_distinct(self, args: &QueryArguments, table_alias: Alias) -> Select<'a> { + if !args.can_distinct_in_db_with_joins() { + return self; + } + + let Some(ref distinct) = args.distinct else { return self }; + + let distinct_fields = distinct + .scalars() + .map(|sf| Expression::from(Column::from((table_alias.to_table_string(), sf.db_name().to_owned())))) + .collect(); + + self.distinct_on(distinct_fields) + } + fn with_join_conditions( self, rf: &RelationField, @@ -588,6 +612,10 @@ fn filtering_selection(rs: &RelationSelection) -> FieldSelection { } } +fn distinct_selection(rs: &RelationSelection) -> FieldSelection { + rs.args.distinct.as_ref().cloned().unwrap_or_default() +} + fn extract_filter_scalars(f: &Filter) -> Vec { match f { Filter::And(x) => x.iter().flat_map(extract_filter_scalars).collect(), diff --git a/query-engine/core/src/query_ast/read.rs b/query-engine/core/src/query_ast/read.rs index 28f2f8383649..95215ed2e694 100644 --- a/query-engine/core/src/query_ast/read.rs +++ 
b/query-engine/core/src/query_ast/read.rs @@ -65,11 +65,11 @@ impl ReadQuery { } } - pub(crate) fn has_distinct(&self) -> bool { + pub(crate) fn requires_inmemory_distinct_with_joins(&self) -> bool { match self { ReadQuery::RecordQuery(_) => false, - ReadQuery::ManyRecordsQuery(q) => q.args.distinct.is_some() || q.nested.iter().any(|q| q.has_cursor()), - ReadQuery::RelatedRecordsQuery(q) => q.args.distinct.is_some() || q.nested.iter().any(|q| q.has_cursor()), + ReadQuery::ManyRecordsQuery(q) => q.requires_inmemory_distinct_with_joins(), + ReadQuery::RelatedRecordsQuery(q) => q.requires_inmemory_distinct_with_joins(), ReadQuery::AggregateRecordsQuery(_) => false, } } @@ -207,6 +207,13 @@ pub struct ManyRecordsQuery { pub relation_load_strategy: RelationLoadStrategy, } +impl ManyRecordsQuery { + pub fn requires_inmemory_distinct_with_joins(&self) -> bool { + self.args.requires_inmemory_distinct_with_joins() + || self.nested.iter().any(|q| q.requires_inmemory_distinct_with_joins()) + } +} + #[derive(Debug, Clone)] pub struct RelatedRecordsQuery { pub name: String, @@ -227,8 +234,9 @@ impl RelatedRecordsQuery { self.args.cursor.is_some() || self.nested.iter().any(|q| q.has_cursor()) } - pub fn has_distinct(&self) -> bool { - self.args.distinct.is_some() || self.nested.iter().any(|q| q.has_distinct()) + pub fn requires_inmemory_distinct_with_joins(&self) -> bool { + self.args.requires_inmemory_distinct_with_joins() + || self.nested.iter().any(|q| q.requires_inmemory_distinct_with_joins()) } } diff --git a/query-engine/core/src/query_graph_builder/read/many.rs b/query-engine/core/src/query_graph_builder/read/many.rs index 75ff04276a79..7afd10717c2b 100644 --- a/query-engine/core/src/query_graph_builder/read/many.rs +++ b/query-engine/core/src/query_graph_builder/read/many.rs @@ -41,6 +41,7 @@ fn find_many_with_options( args.relation_load_strategy, args.cursor.as_ref(), args.distinct.as_ref(), + &args.order_by, &nested, query_schema, )?; diff --git 
a/query-engine/core/src/query_graph_builder/read/one.rs b/query-engine/core/src/query_graph_builder/read/one.rs index a091b6154ec1..3097fa2aeb37 100644 --- a/query-engine/core/src/query_graph_builder/read/one.rs +++ b/query-engine/core/src/query_graph_builder/read/one.rs @@ -51,7 +51,7 @@ fn find_unique_with_options( let selected_fields = utils::merge_relation_selections(selected_fields, None, &nested); let relation_load_strategy = - get_relation_load_strategy(requested_rel_load_strategy, None, None, &nested, query_schema)?; + get_relation_load_strategy(requested_rel_load_strategy, None, None, &[], &nested, query_schema)?; Ok(ReadQuery::RecordQuery(RecordQuery { name, diff --git a/query-engine/core/src/query_graph_builder/read/utils.rs b/query-engine/core/src/query_graph_builder/read/utils.rs index 42d06b38b0c4..745fdba608e3 100644 --- a/query-engine/core/src/query_graph_builder/read/utils.rs +++ b/query-engine/core/src/query_graph_builder/read/utils.rs @@ -1,7 +1,7 @@ use super::*; use crate::{ArgumentListLookup, FieldPair, ParsedField, ReadQuery}; -use psl::datamodel_connector::JoinStrategySupport; -use query_structure::{prelude::*, RelationLoadStrategy}; +use psl::datamodel_connector::{ConnectorCapability, JoinStrategySupport}; +use query_structure::{native_distinct_compatible_with_order_by, prelude::*, RelationLoadStrategy}; use schema::{ constants::{aggregations::*, args}, QuerySchema, @@ -255,6 +255,7 @@ pub(crate) fn get_relation_load_strategy( requested_strategy: Option, cursor: Option<&SelectionResult>, distinct: Option<&FieldSelection>, + order_by: &[OrderBy], nested_queries: &[ReadQuery], query_schema: &QuerySchema, ) -> QueryGraphBuilderResult { @@ -262,7 +263,7 @@ pub(crate) fn get_relation_load_strategy( // Connector and database version supports the `Join` strategy... JoinStrategySupport::Yes => match requested_strategy { // But incoming query cannot be resolved with joins. 
- _ if !query_can_be_resolved_with_joins(cursor, distinct, nested_queries) => { + _ if !query_can_be_resolved_with_joins(query_schema, cursor, distinct, order_by, nested_queries) => { // So we fallback to the `Query` one. Ok(RelationLoadStrategy::Query) } @@ -289,14 +290,19 @@ pub(crate) fn get_relation_load_strategy( } fn query_can_be_resolved_with_joins( + query_schema: &QuerySchema, cursor: Option<&SelectionResult>, distinct: Option<&FieldSelection>, + order_by: &[OrderBy], nested_queries: &[ReadQuery], ) -> bool { + let can_distinct_in_db_with_joins = query_schema.has_capability(ConnectorCapability::DistinctOn) + && native_distinct_compatible_with_order_by(distinct, order_by); + cursor.is_none() - && distinct.is_none() + && (distinct.is_none() || can_distinct_in_db_with_joins) && !nested_queries.iter().any(|q| match q { - ReadQuery::RelatedRecordsQuery(q) => q.has_cursor() || q.has_distinct(), + ReadQuery::RelatedRecordsQuery(q) => q.has_cursor() || q.requires_inmemory_distinct_with_joins(), _ => false, }) } diff --git a/query-engine/query-structure/src/distinct.rs b/query-engine/query-structure/src/distinct.rs new file mode 100644 index 000000000000..4024501501f2 --- /dev/null +++ b/query-engine/query-structure/src/distinct.rs @@ -0,0 +1,209 @@ +use crate::{FieldSelection, OrderBy}; + +/// Checks that the ordering is compatible with native DISTINCT ON in connectors that support it. +/// +/// If order by is present, distinct fields must match leftmost order by fields in the query. The +/// order of the distinct fields does not necessarily have to be the same as the order of the +/// corresponding fields in the leftmost subset of `order_by` but the distinct fields must come +/// before non-distinct fields in the order by clause. Order by clause may contain only a subset of +/// the distinct fields if no other fields are being used for ordering. +/// +/// If there's no order by, then DISTINCT ON is allowed for any fields. 
+pub fn native_distinct_compatible_with_order_by( + distinct_fields: Option<&FieldSelection>, + order_by_fields: &[OrderBy], +) -> bool { + if order_by_fields.is_empty() { + return true; + } + + let Some(distinct_fields) = distinct_fields else { + return true; + }; + + let count_leftmost_matching = order_by_fields + .iter() + .take_while(|order_by| match order_by { + OrderBy::Scalar(scalar) if scalar.path.is_empty() => { + distinct_fields.scalars().any(|sf| *sf == scalar.field) + } + _ => false, + }) + .count(); + + count_leftmost_matching == usize::min(distinct_fields.as_ref().len(), order_by_fields.len()) +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::sync::Arc; + + use crate::{native_distinct_compatible_with_order_by, ScalarFieldRef}; + + struct TestFields { + a: ScalarFieldRef, + b: ScalarFieldRef, + c: ScalarFieldRef, + } + + impl TestFields { + fn new() -> Self { + let schema_str = r#" + datasource db { + provider = "postgresql" + url = "postgres://stub" + } + + model Test { + id Int @id + a Int + b Int + c Int + } + "#; + + let psl_schema = psl::validate(schema_str.into()); + let internal_datamodel = crate::InternalDataModel { + schema: Arc::new(psl_schema), + }; + + let model = internal_datamodel.find_model("Test").unwrap(); + let fields = model.fields(); + + TestFields { + a: fields.find_from_scalar("a").unwrap(), + b: fields.find_from_scalar("b").unwrap(), + c: fields.find_from_scalar("c").unwrap(), + } + } + } + + mod native_distinct_compatible_with_order_by { + use super::*; + + #[test] + fn empty_order_by() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a]); + let order_by = []; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn empty_distinct() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([]); + let order_by = [OrderBy::from(fields.a)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); 
+ assert!(native_distinct_compatible_with_order_by(None, &order_by)); + } + + #[test] + fn exact_match() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone()]); + let order_by = [OrderBy::from(fields.a)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn exact_match_mixed_order() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [OrderBy::from(fields.b), OrderBy::from(fields.a)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn left_subset() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone()]); + let order_by = [OrderBy::from(fields.a), OrderBy::from(fields.b)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn left_subset_mixed_order() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [ + OrderBy::from(fields.b), + OrderBy::from(fields.a), + OrderBy::from(fields.c), + ]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn incompatible_left_field() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [ + OrderBy::from(fields.c), + OrderBy::from(fields.a), + OrderBy::from(fields.b), + ]; + + assert!(!native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn incompatible_field_in_between() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [ + OrderBy::from(fields.a), + OrderBy::from(fields.c), + OrderBy::from(fields.b), + ]; + + assert!(!native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + 
#[test] + fn partial_order_first() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [OrderBy::from(fields.a)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn partial_order_second() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [OrderBy::from(fields.b)]; + + assert!(native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + + #[test] + fn incompatible_partial_order() { + let fields = TestFields::new(); + + let distinct = FieldSelection::from([fields.a.clone(), fields.b.clone()]); + let order_by = [OrderBy::from(fields.c)]; + + assert!(!native_distinct_compatible_with_order_by(Some(&distinct), &order_by)); + } + } +} diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index c74f12247223..1edc73accc3e 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -43,6 +43,10 @@ impl FieldSelection { self.selections.iter() } + pub fn scalars(&self) -> impl Iterator + '_ { + self.selections().filter_map(SelectedField::as_scalar) + } + pub fn virtuals(&self) -> impl Iterator { self.selections().filter_map(SelectedField::as_virtual) } @@ -239,6 +243,12 @@ impl FieldSelection { } } +impl AsRef<[SelectedField]> for FieldSelection { + fn as_ref(&self) -> &[SelectedField] { + &self.selections + } +} + /// A selected field. Can be contained on a model or composite type. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum SelectedField { @@ -417,6 +427,13 @@ impl SelectedField { } } + pub fn as_scalar(&self) -> Option<&ScalarFieldRef> { + match self { + SelectedField::Scalar(sf) => Some(sf), + _ => None, + } + } + pub fn as_composite(&self) -> Option<&CompositeSelection> { match self { SelectedField::Composite(ref cs) => Some(cs), @@ -512,8 +529,8 @@ impl CompositeSelection { } } -impl From> for FieldSelection { - fn from(fields: Vec) -> Self { +impl> From for FieldSelection { + fn from(fields: T) -> Self { Self { selections: fields.into_iter().map(Into::into).collect(), } diff --git a/query-engine/query-structure/src/lib.rs b/query-engine/query-structure/src/lib.rs index 25519a6d856c..abf47fe8447c 100644 --- a/query-engine/query-structure/src/lib.rs +++ b/query-engine/query-structure/src/lib.rs @@ -1,6 +1,7 @@ mod composite_type; mod convert; mod default_value; +mod distinct; mod error; mod field; mod field_selection; @@ -25,6 +26,7 @@ pub mod prelude; pub use self::{default_value::*, native_type_instance::*, zipper::*}; pub use composite_type::*; pub use convert::convert; +pub use distinct::*; pub use error::*; pub use field::*; pub use field_selection::*; diff --git a/query-engine/query-structure/src/query_arguments.rs b/query-engine/query-structure/src/query_arguments.rs index abe23ab4de4e..1e0d91145af6 100644 --- a/query-engine/query-structure/src/query_arguments.rs +++ b/query-engine/query-structure/src/query_arguments.rs @@ -94,6 +94,10 @@ impl QueryArguments { self.distinct.is_some() && !self.can_distinct_in_db() } + pub fn requires_inmemory_distinct_with_joins(&self) -> bool { + self.distinct.is_some() && !self.can_distinct_in_db_with_joins() + } + fn can_distinct_in_db(&self) -> bool { let has_distinct_feature = self .model() @@ -103,14 +107,22 @@ impl QueryArguments { .preview_features() .contains(PreviewFeature::NativeDistinct); - let connector_can_distinct_in_db = self - .model() + has_distinct_feature && 
self.connector_supports_distinct_on() && self.order_by.is_empty() + } + + // TODO: separation between `can_distinct_in_db` and `can_distinct_in_db_with_joins` shouldn't + // be necessary once nativeDistinct is GA. + pub fn can_distinct_in_db_with_joins(&self) -> bool { + self.connector_supports_distinct_on() + && native_distinct_compatible_with_order_by(self.distinct.as_ref(), &self.order_by) + } + + fn connector_supports_distinct_on(&self) -> bool { + self.model() .dm .schema .connector - .has_capability(ConnectorCapability::DistinctOn); - - has_distinct_feature && connector_can_distinct_in_db && self.order_by.is_empty() + .has_capability(ConnectorCapability::DistinctOn) } /// An unstable cursor is a cursor that is used in conjunction with an unstable (non-unique) combination of orderBys.