From 80db2de0d1e4d7478e0a7bb950c9b63fefd2840c Mon Sep 17 00:00:00 2001 From: Yi-Lin Juang Date: Fri, 2 May 2025 21:52:59 +0800 Subject: [PATCH 1/4] Prioritize current schema for pg type generation --- src/server/templates/typescript.ts | 116 ++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 26 deletions(-) diff --git a/src/server/templates/typescript.ts b/src/server/templates/typescript.ts index 460887b5..6e3fc750 100644 --- a/src/server/templates/typescript.ts +++ b/src/server/templates/typescript.ts @@ -84,7 +84,7 @@ export type Database = { ${[ ...columnsByTableId[table.id].map( (column) => - `${JSON.stringify(column.name)}: ${pgTypeToTsType(column.format, { + `${JSON.stringify(column.name)}: ${pgTypeToTsType(schema, column.format, { types, schemas, tables, @@ -97,7 +97,12 @@ export type Database = { const type = types.find(({ id }) => id === fn.return_type_id) let tsType = 'unknown' if (type) { - tsType = pgTypeToTsType(type.name, { types, schemas, tables, views }) + tsType = pgTypeToTsType(schema, type.name, { + types, + schemas, + tables, + views, + }) } return `${JSON.stringify(fn.name)}: ${tsType} | null` }), @@ -121,7 +126,12 @@ export type Database = { output += ':' } - output += pgTypeToTsType(column.format, { types, schemas, tables, views }) + output += pgTypeToTsType(schema, column.format, { + types, + schemas, + tables, + views, + }) if (column.is_nullable) { output += '| null' @@ -138,7 +148,12 @@ export type Database = { return `${output}?: never` } - output += `?: ${pgTypeToTsType(column.format, { types, schemas, tables, views })}` + output += `?: ${pgTypeToTsType(schema, column.format, { + types, + schemas, + tables, + views, + })}` if (column.is_nullable) { output += '| null' @@ -189,7 +204,7 @@ export type Database = { Row: { ${columnsByTableId[view.id].map( (column) => - `${JSON.stringify(column.name)}: ${pgTypeToTsType(column.format, { + `${JSON.stringify(column.name)}: ${pgTypeToTsType(schema, column.format, { types, schemas, 
tables, @@ -207,7 +222,12 @@ export type Database = { return `${output}?: never` } - output += `?: ${pgTypeToTsType(column.format, { types, schemas, tables, views })} | null` + output += `?: ${pgTypeToTsType(schema, column.format, { + types, + schemas, + tables, + views, + })} | null` return output })} @@ -220,7 +240,12 @@ export type Database = { return `${output}?: never` } - output += `?: ${pgTypeToTsType(column.format, { types, schemas, tables, views })} | null` + output += `?: ${pgTypeToTsType(schema, column.format, { + types, + schemas, + tables, + views, + })} | null` return output })} @@ -290,7 +315,12 @@ export type Database = { const type = types.find(({ id }) => id === type_id) let tsType = 'unknown' if (type) { - tsType = pgTypeToTsType(type.name, { types, schemas, tables, views }) + tsType = pgTypeToTsType(schema, type.name, { + types, + schemas, + tables, + views, + }) } return { name, type: tsType, has_default } }) @@ -307,7 +337,12 @@ export type Database = { const type = types.find(({ id }) => id === type_id) let tsType = 'unknown' if (type) { - tsType = pgTypeToTsType(type.name, { types, schemas, tables, views }) + tsType = pgTypeToTsType(schema, type.name, { + types, + schemas, + tables, + views, + }) } return { name, type: tsType } }) @@ -327,12 +362,16 @@ export type Database = { return `{ ${columnsByTableId[relation.id].map( (column) => - `${JSON.stringify(column.name)}: ${pgTypeToTsType(column.format, { - types, - schemas, - tables, - views, - })} ${column.is_nullable ? '| null' : ''}` + `${JSON.stringify(column.name)}: ${pgTypeToTsType( + schema, + column.format, + { + types, + schemas, + tables, + views, + } + )} ${column.is_nullable ? '| null' : ''}` )} }` } @@ -340,7 +379,12 @@ export type Database = { // Case 3: returns base/array/composite/enum type. 
const type = types.find(({ id }) => id === fns[0].return_type_id) if (type) { - return pgTypeToTsType(type.name, { types, schemas, tables, views }) + return pgTypeToTsType(schema, type.name, { + types, + schemas, + tables, + views, + }) } return 'unknown' @@ -372,7 +416,12 @@ export type Database = { const type = types.find(({ id }) => id === type_id) let tsType = 'unknown' if (type) { - tsType = `${pgTypeToTsType(type.name, { types, schemas, tables, views })} | null` + tsType = `${pgTypeToTsType(schema, type.name, { + types, + schemas, + tables, + views, + })} | null` } return `${JSON.stringify(name)}: ${tsType}` })} @@ -519,6 +568,7 @@ export const Constants = { // TODO: Make this more robust. Currently doesn't handle range types - returns them as unknown. const pgTypeToTsType = ( + schema: PostgresSchema, pgType: string, { types, @@ -560,10 +610,16 @@ const pgTypeToTsType = ( } else if (pgType === 'record') { return 'Record' } else if (pgType.startsWith('_')) { - return `(${pgTypeToTsType(pgType.substring(1), { types, schemas, tables, views })})[]` + return `(${pgTypeToTsType(schema, pgType.substring(1), { + types, + schemas, + tables, + views, + })})[]` } else { - const enumType = types.find((type) => type.name === pgType && type.enums.length > 0) - if (enumType) { + const enumTypes = types.filter((type) => type.name === pgType && type.enums.length > 0) + if (enumTypes.length > 0) { + const enumType = enumTypes.find((type) => type.schema === schema.name) || enumTypes[0] if (schemas.some(({ name }) => name === enumType.schema)) { return `Database[${JSON.stringify(enumType.schema)}]['Enums'][${JSON.stringify( enumType.name @@ -572,8 +628,12 @@ const pgTypeToTsType = ( return enumType.enums.map((variant) => JSON.stringify(variant)).join('|') } - const compositeType = types.find((type) => type.name === pgType && type.attributes.length > 0) - if (compositeType) { + const compositeTypes = types.filter( + (type) => type.name === pgType && type.attributes.length > 0 + 
) + if (compositeTypes.length > 0) { + const compositeType = + compositeTypes.find((type) => type.schema === schema.name) || compositeTypes[0] if (schemas.some(({ name }) => name === compositeType.schema)) { return `Database[${JSON.stringify( compositeType.schema @@ -582,8 +642,10 @@ const pgTypeToTsType = ( return 'unknown' } - const tableRowType = tables.find((table) => table.name === pgType) - if (tableRowType) { + const tableRowTypes = tables.filter((table) => table.name === pgType) + if (tableRowTypes.length > 0) { + const tableRowType = + tableRowTypes.find((type) => type.schema === schema.name) || tableRowTypes[0] if (schemas.some(({ name }) => name === tableRowType.schema)) { return `Database[${JSON.stringify(tableRowType.schema)}]['Tables'][${JSON.stringify( tableRowType.name @@ -592,8 +654,10 @@ const pgTypeToTsType = ( return 'unknown' } - const viewRowType = views.find((view) => view.name === pgType) - if (viewRowType) { + const viewRowTypes = views.filter((view) => view.name === pgType) + if (viewRowTypes.length > 0) { + const viewRowType = + viewRowTypes.find((type) => type.schema === schema.name) || viewRowTypes[0] if (schemas.some(({ name }) => name === viewRowType.schema)) { return `Database[${JSON.stringify(viewRowType.schema)}]['Views'][${JSON.stringify( viewRowType.name From fbdc28c9e8c1f71599931335107d8b61a2733856 Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 13 May 2025 15:13:43 +0200 Subject: [PATCH 2/4] chore: ignore sentryclirc --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 8d050113..7a26dfc4 100644 --- a/.gitignore +++ b/.gitignore @@ -73,6 +73,9 @@ typings/ .env .env.test +# sentry cli config +.sentryclirc + # parcel-bundler cache (https://parceljs.org/) .cache From f58f5071e8ba2f47e72a23e15434ff490bad7374 Mon Sep 17 00:00:00 2001 From: avallete Date: Mon, 19 May 2025 15:50:21 +0200 Subject: [PATCH 3/4] fix(typescript): prefer current schema typescript typegen See: 
https://github.com/supabase/postgres-meta/commit/80db2de0d1e4d7478e0a7bb950c9b63fefd2840c Trigger new release for this fix From 54347546ac3502989193cef5cb84668ad78b37b6 Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 20 May 2025 13:11:29 +0200 Subject: [PATCH 4/4] fix(query): ensure that open connections are killed after timeout Without statement_timeout set, the query_timeout won't always kill the underlying database query connection, leading to possible connection exhaustion --- package.json | 4 ++-- src/server/constants.ts | 3 +++ test/index.test.ts | 1 + test/server/query-timeout.ts | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 test/server/query-timeout.ts diff --git a/package.json b/package.json index e521801c..570ada54 100644 --- a/package.json +++ b/package.json @@ -30,8 +30,8 @@ "test": "run-s db:clean db:run test:run db:clean", "db:clean": "cd test/db && docker compose down", "db:run": "cd test/db && docker compose up --detach --wait", - "test:run": "PG_META_MAX_RESULT_SIZE_MB=20 vitest run --coverage", - "test:update": "run-s db:clean db:run && PG_META_MAX_RESULT_SIZE_MB=20 vitest run --update && run-s db:clean" + "test:run": "PG_META_MAX_RESULT_SIZE_MB=20 PG_QUERY_TIMEOUT_SECS=3 PG_CONN_TIMEOUT_SECS=30 vitest run --coverage", + "test:update": "run-s db:clean db:run && PG_META_MAX_RESULT_SIZE_MB=20 PG_QUERY_TIMEOUT_SECS=3 PG_CONN_TIMEOUT_SECS=30 vitest run --update && run-s db:clean" }, "engines": { "node": ">=20", diff --git a/src/server/constants.ts b/src/server/constants.ts index 4d1965f9..731ca117 100644 --- a/src/server/constants.ts +++ b/src/server/constants.ts @@ -59,6 +59,9 @@ export const PG_META_MAX_RESULT_SIZE = process.env.PG_META_MAX_RESULT_SIZE_MB export const DEFAULT_POOL_CONFIG: PoolConfig = { max: 1, connectionTimeoutMillis: PG_CONN_TIMEOUT_SECS * 1000, + // node-postgres needs a statement_timeout to kill the connection when timeout is reached + // otherwise the query will
keep running on the database even if query timeout was reached + statement_timeout: (PG_QUERY_TIMEOUT_SECS + 1) * 1000, query_timeout: PG_QUERY_TIMEOUT_SECS * 1000, ssl: PG_META_DB_SSL_ROOT_CERT ? { ca: PG_META_DB_SSL_ROOT_CERT } : undefined, application_name: `postgres-meta ${pkg.version}`, diff --git a/test/index.test.ts b/test/index.test.ts index 9a315921..6ca2b87e 100644 --- a/test/index.test.ts +++ b/test/index.test.ts @@ -23,3 +23,4 @@ import './server/ssl' import './server/table-privileges' import './server/typegen' import './server/result-size-limit' +import './server/query-timeout' diff --git a/test/server/query-timeout.ts b/test/server/query-timeout.ts new file mode 100644 index 00000000..c9064d00 --- /dev/null +++ b/test/server/query-timeout.ts @@ -0,0 +1,33 @@ +import { expect, test, describe } from 'vitest' +import { app } from './utils' +import { pgMeta } from '../lib/utils' + +describe('test query timeout', () => { + test('query timeout after 3s and connection cleanup', async () => { + const query = `SELECT pg_sleep(10);` + // Execute a query that will sleep for 10 seconds + const res = await app.inject({ + method: 'POST', + path: '/query', + payload: { + query, + }, + }) + + // Check that we get the proper timeout error response + expect(res.statusCode).toBe(408) // Request Timeout + expect(res.json()).toMatchObject({ + error: expect.stringContaining('Query read timeout'), + }) + // wait one second for the statement timeout to take effect + await new Promise((resolve) => setTimeout(resolve, 1000)) + + // Verify that the connection has been cleaned up by checking active connections + const connectionsRes = await pgMeta.query(` + SELECT * FROM pg_stat_activity where application_name = 'postgres-meta 0.0.0-automated' and query ILIKE '%${query}%'; + `) + + // Should have no active connections except for our current query + expect(connectionsRes.data).toHaveLength(0) + }, 5000) +})